# Publications from 2001

## Back

2019
Yu, H., Bemana, M., Wernikowski, M., et al. A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2019).
(Accepted/in press)
Export
BibTeX
@article{Yu_VR2019,
  title        = {A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays},
  author       = {Yu, Hyeonseung and Bemana, Mojtaba and Wernikowski, Marek and Chwesiuk, Micha{\l} and Tursun, Okan Tarhan and Singh, Gurprit and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Didyk, Piotr},
  language     = {eng},
  issn         = {1077-2626},
  publisher    = {IEEE Computer Society},
  address      = {New York, NY},
  year         = {2019},
  publremark   = {Accepted},
  marginalmark = {$\bullet$},
  journal      = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)},
  booktitle    = {Selected Proceedings IEEE Virtual Reality 2019 (IEEE VR 2019)},
}
Endnote
%0 Journal Article %A Yu, Hyeonseung %A Bemana, Mojtaba %A Wernikowski, Marek %A Chwesiuk, Micha&#322; %A Tursun, Okan Tarhan %A Singh, Gurprit %A Myszkowski, Karol %A Mantiuk, Rados&#322;aw %A Seidel, Hans-Peter %A Didyk, Piotr %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays : %G eng %U http://hdl.handle.net/21.11116/0000-0002-DCB5-A %D 2019 %J IEEE Transactions on Visualization and Computer Graphics %I IEEE Computer Society %C New York, NY %@ false %B Selected Proceedings IEEE Virtual Reality 2019 %O IEEE VR 2019 Osaka, Japan, 23rd - 27th March
Xu, W., Chatterjee, A., Zollhöfer, M., et al. Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2019).
(Accepted/in press)
Export
BibTeX
@article{Xu2019Mo2Cap2,
  title        = {{Mo2Cap2}: Real-time Mobile {3D} Motion Capture with a Cap-mounted Fisheye Camera},
  author       = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Fua, Pascal and Seidel, Hans-Peter and Theobalt, Christian},
  language     = {eng},
  issn         = {1077-2626},
  publisher    = {IEEE},
  address      = {Piscataway, NJ},
  year         = {2019},
  publremark   = {Accepted},
  marginalmark = {$\bullet$},
  journal      = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)},
  booktitle    = {Selected Proceedings IEEE Virtual Reality 2019 (IEEE VR 2019)},
}
Endnote
%0 Journal Article %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Fua, Pascal %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F1DB-7 %D 2019 %J IEEE Transactions on Visualization and Computer Graphics %I IEEE %C Piscataway, NJ %@ false %B Selected Proceedings IEEE Virtual Reality 2019 %O IEEE VR 2019 Osaka, Japan, March 23rd - 27th
Winter, M., Mlakar, D., Zayer, R., Seidel, H.-P., and Steinberger, M. 2019. Adaptive Sparse Matrix-Matrix Multiplication on the GPU. PPoPP’19, 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, ACM.
Export
BibTeX
@inproceedings{PPOPP:2019:ASPMM,
  title        = {Adaptive Sparse Matrix-Matrix Multiplication on the {GPU}},
  author       = {Winter, Martin and Mlakar, Daniel and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus},
  language     = {eng},
  isbn         = {978-1-4503-6225-2},
  doi          = {10.1145/3293883.3295701},
  publisher    = {ACM},
  year         = {2019},
  marginalmark = {$\bullet$},
  date         = {2019},
  booktitle    = {PPoPP'19, 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming},
  pages        = {68--81},
  address      = {Washington, DC, USA},
}
Endnote
%0 Conference Proceedings %A Winter, Martin %A Mlakar, Daniel %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Adaptive Sparse Matrix-Matrix Multiplication on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-EFE9-B %R 10.1145/3293883.3295701 %D 2019 %B 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming %Z date of event: 2019-02-16 - 2019-02-20 %C Washington, DC, USA %B PPoPP'19 %P 68 - 81 %I ACM %@ 978-1-4503-6225-2
Dokter, M., Hladký, J., Parger, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. CPatch: Hierarchical Rasterization of Curved Primitives for GPU Vector Graphics Rendering. Computer Graphics Forum (Proc. EUROGRAPHICS 2019).
(Accepted/in press)
Export
BibTeX
@article{Dokter_EG2019,
  title        = {{CPatch}: {H}ierarchical Rasterization of Curved Primitives for {GPU} Vector Graphics Rendering},
  author       = {Dokter, Mark and Hladk{\'y}, Jozef and Parger, Mathias and Schmalstieg, Dieter and Seidel, Hans-Peter and Steinberger, Markus},
  language     = {eng},
  issn         = {0167-7055},
  publisher    = {Wiley-Blackwell},
  address      = {Oxford},
  year         = {2019},
  publremark   = {Accepted},
  marginalmark = {$\bullet$},
  journal      = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  booktitle    = {The 40th Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2019)},
}
Endnote
%0 Journal Article %A Dokter, Mark %A Hladk&#253;, Jozef %A Parger, Mathias %A Schmalstieg, Dieter %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T CPatch: Hierarchical Rasterization of Curved Primitives for GPU Vector Graphics Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0002-FC80-1 %D 2019 %J Computer Graphics Forum %I Wiley-Blackwell %C Oxford %@ false %B The 40th Annual Conference of the European Association for Computer Graphics %O EUROGRAPHICS 2019 Genova, Italy, May 6-10, 2019 EG 2019
2018
Zayer, R., Mlakar, D., Steinberger, M., and Seidel, H.-P. 2018a. Layered Fields for Natural Tessellations on Surfaces. http://arxiv.org/abs/1804.09152.
(arXiv: 1804.09152)
Abstract
Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently performed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware.
Export
BibTeX
@online{Zayer_arXiv1804.09152,
  title        = {Layered Fields for Natural Tessellations on Surfaces},
  author       = {Zayer, Rhaleb and Mlakar, Daniel and Steinberger, Markus and Seidel, Hans-Peter},
  language     = {eng},
  url          = {http://arxiv.org/abs/1804.09152},
  eprint       = {1804.09152},
  eprinttype   = {arXiv},
  year         = {2018},
  marginalmark = {$\bullet$},
  abstract     = {Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently preformed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware.},
}
Endnote
%0 Report %A Zayer, Rhaleb %A Mlakar, Daniel %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Layered Fields for Natural Tessellations on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0002-152D-5 %U http://arxiv.org/abs/1804.09152 %D 2018 %X Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently preformed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware. %K Computer Science, Graphics, cs.GR,Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
Zayer, R., Mlakar, D., Steinberger, M., and Seidel, H.-P. 2018b. Layered Fields for Natural Tessellations on Surfaces. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2018)37, 6.
Export
BibTeX
@article{Zayer:2018:LFN,
  title        = {Layered Fields for Natural Tessellations on Surfaces},
  author       = {Zayer, Rhaleb and Mlakar, Daniel and Steinberger, Markus and Seidel, Hans-Peter},
  language     = {eng},
  issn         = {0730-0301},
  isbn         = {978-1-4503-6008-1},
  doi          = {10.1145/3272127.3275072},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2018},
  marginalmark = {$\bullet$},
  date         = {2018},
  journal      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume       = {37},
  number       = {6},
  eid          = {264},
  booktitle    = {Proceedings of ACM SIGGRAPH Asia 2018},
}
Endnote
%0 Journal Article %A Zayer, Rhaleb %A Mlakar, Daniel %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Layered Fields for Natural Tessellations on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E0-E %R 10.1145/3272127.3275072 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 6 %Z sequence number: 264 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2018 %O ACM SIGGRAPH Asia 2018 Tokyo, Japan, December 04 - 07, 2018 SA'18 SA 2018 %@ 978-1-4503-6008-1
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2018a. MonoPerfCap: Human Performance Capture from Monocular Video. ACM Transactions on Graphics37, 2.
Abstract
We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.
Export
BibTeX
@article{Xu_ToG2018,
  title        = {{MonoPerfCap}: Human Performance Capture from Monocular Video},
  author       = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Mehta, Dushyant and Seidel, Hans-Peter and Theobalt, Christian},
  language     = {eng},
  issn         = {0730-0301},
  doi          = {10.1145/3181973},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2018},
  marginalmark = {$\bullet$},
  date         = {2018},
  abstract     = {We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video.
Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.},
  journal      = {ACM Transactions on Graphics},
  volume       = {37},
  number       = {2},
  eid          = {27},
}
Endnote
%0 Journal Article %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Mehta, Dushyant %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MonoPerfCap: Human Performance Capture from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0001-E20E-1 %R 10.1145/3181973 %7 2017 %D 2018 %X We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. 
We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR %J ACM Transactions on Graphics %V 37 %N 2 %Z sequence number: 27 %I ACM %C New York, NY %@ false
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2018b. Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera. http://arxiv.org/abs/1803.05959.
(arXiv: 1803.05959)
Abstract
We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines.
Export
BibTeX
@online{Xu_arXiv1803.05959,
  title        = {{Mo2Cap2}: Real-time Mobile {3D} Motion Capture with a Cap-mounted Fisheye Camera},
  author       = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Fua, Pascal and Seidel, Hans-Peter and Theobalt, Christian},
  language     = {eng},
  url          = {http://arxiv.org/abs/1803.05959},
  eprint       = {1803.05959},
  eprinttype   = {arXiv},
  year         = {2018},
  marginalmark = {$\bullet$},
  abstract     = {We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines.},
}
Endnote
%0 Report %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Fua, Pascal %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3C65-B %U http://arxiv.org/abs/1803.05959 %D 2018 %X We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Wolski, K., Giunchi, D., Ye, N., et al. 2018. Dataset and Metrics for Predicting Local Visible Differences. ACM Transactions on Graphics37, 5.
Export
BibTeX
@article{wolski2018dataset,
  title        = {Dataset and Metrics for Predicting Local Visible Differences},
  author       = {Wolski, Krzysztof and Giunchi, Daniele and Ye, Nanyang and Didyk, Piotr and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Steed, Anthony and Mantiuk, Rafa{\l} K.},
  language     = {eng},
  issn         = {0730-0301},
  doi          = {10.1145/3196493},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2018},
  marginalmark = {$\bullet$},
  date         = {2018},
  journal      = {ACM Transactions on Graphics},
  volume       = {37},
  number       = {5},
  eid          = {172},
}
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Giunchi, Daniele %A Ye, Nanyang %A Didyk, Piotr %A Myszkowski, Karol %A Mantiuk, Rados\l{}aw %A Seidel, Hans-Peter %A Steed, Anthony %A Mantiuk, Rafa&#322; K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Dataset and Metrics for Predicting Local Visible Differences : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F75-2 %R 10.1145/3196493 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 5 %Z sequence number: 172 %I ACM %C New York, NY %@ false
Winter, M., Mlakar, D., Zayer, R., Seidel, H.-P., and Steinberger, M. 2018. faimGraph: High Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the GPU. The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC 2018), IEEE.
Export
BibTeX
@inproceedings{Winter:2018:FHP,
  title        = {{faimGraph}: {H}igh Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the {GPU}},
  author       = {Winter, Martin and Mlakar, Daniel and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus},
  language     = {eng},
  isbn         = {978-1-5386-8384-2},
  url          = {http://conferences.computer.org/sc/2018/#!/home},
  publisher    = {IEEE},
  year         = {2018},
  marginalmark = {$\bullet$},
  booktitle    = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC 2018)},
  pages        = {754--766},
  address      = {Dallas, TX, USA},
}
Endnote
%0 Conference Proceedings %A Winter, Martin %A Mlakar, Daniel %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T faimGraph: High Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E6-8 %D 2018 %B The International Conference for High Performance Computing, Networking, Storage, and Analysis %Z date of event: 2018-11-11 - 2018-11-16 %C Dallas, TX, USA %B The International Conference for High Performance Computing, Networking, Storage, and Analysis %P 754 - 766 %I IEEE %@ 978-1-5386-8384-2
Tewari, A., Bernard, F., Garrido, P., et al. 2018. FML: Face Model Learning from Videos. http://arxiv.org/abs/1812.07603.
(arXiv: 1812.07603)
Abstract
Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction.
Export
BibTeX
@online{tewari2018fml,
  TITLE        = {{FML}: {Face} Model Learning from Videos},
  AUTHOR       = {Tewari, Ayush and Bernard, Florian and Garrido, Pablo and Bharaj, Gaurav and Elgharib, Mohamed and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian},
  LANGUAGE     = {eng},
  URL          = {http://arxiv.org/abs/1812.07603},
  EPRINT       = {1812.07603},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction.},
}
Endnote
%0 Report %A Tewari, Ayush %A Bernard, Florian %A Garrido, Pablo %A Bharaj, Gaurav %A Elgharib, Mohamed %A Seidel, Hans-Peter %A P&#233;rez, Patrick %A Zollh&#246;fer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T FML: Face Model Learning from Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0002-EF79-A %U http://arxiv.org/abs/1812.07603 %D 2018 %X Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV %U https://www.youtube.com/watch?v=SG2BwxCw0lQ
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2018. An Intuitive Control Space for Material Appearance. http://arxiv.org/abs/1806.04950.
(arXiv: 1806.04950)
Abstract
Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction.
Export
BibTeX
@online{Serrano_arXiv1806.04950,
  TITLE        = {An Intuitive Control Space for Material Appearance},
  AUTHOR       = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen},
  LANGUAGE     = {eng},
  URL          = {http://arxiv.org/abs/1806.04950},
  EPRINT       = {1806.04950},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. 
We release our code and dataset publicly, in order to support and encourage further research in this direction.},
}
Endnote
%0 Report %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/21.11116/0000-0002-151E-6 %U http://arxiv.org/abs/1806.04950 %D 2018 %X Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. 
We release our code and dataset publicly, in order to support and encourage further research in this direction. %K Computer Science, Graphics, cs.GR
Myszkowski, K., Tursun, O.T., Kellnhofer, P., et al. 2018. Perceptual Display: Apparent Enhancement of Scene Detail and Depth. Electronic Imaging (Proc. HVEI 2018), SPIE/IS&T.
(Keynote Talk)
Abstract
Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.
Export
BibTeX
@inproceedings{Myszkowski2018Perceptual,
  TITLE        = {Perceptual Display: Apparent Enhancement of Scene Detail and Depth},
  AUTHOR       = {Myszkowski, Karol and Tursun, Okan Tarhan and Kellnhofer, Petr and Templin, Krzysztof and Arabadzhiyska, Elena and Didyk, Piotr and Seidel, Hans-Peter},
  LANGUAGE     = {eng},
  ISSN         = {2470-1173},
  DOI          = {10.2352/ISSN.2470-1173.2018.14.HVEI-501},
  PUBLISHER    = {SPIE/IS\&T},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.},
  BOOKTITLE    = {Human Vision and Electronic Imaging (HVEI 2018)},
  PAGES        = {1--10},
  EID          = {501},
  JOURNAL      = {Electronic Imaging (Proc. HVEI)},
  VOLUME       = {2018},
  ADDRESS      = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tursun, Okan Tarhan %A Kellnhofer, Petr %A Templin, Krzysztof %A Arabadzhiyska, Elena %A Didyk, Piotr %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Display: Apparent Enhancement of Scene Detail and Depth : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F64-5 %R 10.2352/ISSN.2470-1173.2018.14.HVEI-501 %D 2018 %B Human Vision and Electronic Imaging %Z date of event: 2018-01-28 - 2018-02-02 %C San Francisco, CA, USA %X Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. 
This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations. %B Human Vision and Electronic Imaging %P 1 - 10 %Z sequence number: 501 %I SPIE/IS&T %J Electronic Imaging %V 2018 %@ false
Mlakar, D., Winter, M., Seidel, H.-P., Steinberger, M., and Zayer, R. 2018. AlSub: Fully Parallel and Modular Subdivision. http://arxiv.org/abs/1809.06047.
(arXiv: 1809.06047)
Abstract
In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing.
Export
BibTeX
@online{Mlakar_arXiv1809.06047,
  TITLE        = {{AlSub}: {Fully} Parallel and Modular Subdivision},
  AUTHOR       = {Mlakar, Daniel and Winter, Martin and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb},
  LANGUAGE     = {eng},
  URL          = {http://arxiv.org/abs/1809.06047},
  EPRINT       = {1809.06047},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing.},
}
Endnote
%0 Report %A Mlakar, Daniel %A Winter, Martin %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T AlSub: Fully Parallel and Modular Subdivision : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E2-C %U http://arxiv.org/abs/1809.06047 %D 2018 %X In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. 
Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing. %K Computer Science, Graphics, cs.GR
Meka, A., Maximov, M., Zollhöfer, M., et al. 2018a. LIME: Live Intrinsic Material Estimation. http://arxiv.org/abs/1801.01075.
(arXiv: 1801.01075)
Abstract
We present the first end to end approach for real time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill posed inverse rendering problem using recent advances in image to image translation techniques based on deep convolutional encoder decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real world image formation and obtains intermediate results even during run time. The estimation of material parameters at real time frame rates enables exciting mixed reality applications, such as seamless illumination consistent integration of virtual objects into real world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation.
Export
BibTeX
@online{Meka_arXiv1801.01075,
  TITLE        = {{LIME}: {Live} Intrinsic Material Estimation},
  AUTHOR       = {Meka, Abhimitra and Maximov, Maxim and Zollh{\"o}fer, Michael and Chatterjee, Avishek and Seidel, Hans-Peter and Richardt, Christian and Theobalt, Christian},
  LANGUAGE     = {eng},
  URL          = {http://arxiv.org/abs/1801.01075},
  EPRINT       = {1801.01075},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {We present the first end to end approach for real time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill posed inverse rendering problem using recent advances in image to image translation techniques based on deep convolutional encoder decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real world image formation and obtains intermediate results even during run time. The estimation of material parameters at real time frame rates enables exciting mixed reality applications, such as seamless illumination consistent integration of virtual objects into real world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation.},
}
Endnote
%0 Report %A Meka, Abhimitra %A Maximov, Maxim %A Zollh&#246;fer, Michael %A Chatterjee, Avishek %A Seidel, Hans-Peter %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society D2 External Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T LIME: Live Intrinsic Material Estimation : %U http://hdl.handle.net/21.11116/0000-0001-40D9-2 %U http://arxiv.org/abs/1801.01075 %D 2018 %X We present the first end to end approach for real time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill posed inverse rendering problem using recent advances in image to image translation techniques based on deep convolutional encoder decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real world image formation and obtains intermediate results even during run time. The estimation of material parameters at real time frame rates enables exciting mixed reality applications, such as seamless illumination consistent integration of virtual objects into real world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Meka, A., Maximov, M., Zollhöfer, M., et al. 2018b. LIME: Live Intrinsic Material Estimation. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018), IEEE.
Export
BibTeX
@inproceedings{Meka:2018,
  TITLE        = {{LIME}: {Live} Intrinsic Material Estimation},
  AUTHOR       = {Meka, Abhimitra and Maximov, Maxim and Zollh{\"o}fer, Michael and Chatterjee, Avishek and Seidel, Hans-Peter and Richardt, Christian and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {978-1-5386-6420-9},
  DOI          = {10.1109/CVPR.2018.00661},
  PUBLISHER    = {IEEE},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018)},
  PAGES        = {6315--6324},
  ADDRESS      = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings %A Meka, Abhimitra %A Maximov, Maxim %A Zollh&#246;fer, Michael %A Chatterjee, Avishek %A Seidel, Hans-Peter %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T LIME: Live Intrinsic Material Estimation : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F391-7 %R 10.1109/CVPR.2018.00661 %D 2018 %B 31st IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2018-06-18 - 2018-06-22 %C Salt Lake City, UT, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 6315 - 6324 %I IEEE %@ 978-1-5386-6420-9 %U http://gvv.mpi-inf.mpg.de/projects/LIME/
Leimkühler, T., Singh, G., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2018a. End-to-end Sampling Patterns. http://arxiv.org/abs/1806.06710.
(arXiv: 1806.06710)
Abstract
Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.
Export
BibTeX
@online{Leimkuehler_arXiv1806.06710, TITLE = {End-to-end Sampling Patterns}, AUTHOR = {Leimk{\"u}hler, Thomas and Singh, Gurprit and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1806.06710}, EPRINT = {1806.06710}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.}, }
Endnote
%0 Report %A Leimk&#252;hler, Thomas %A Singh, Gurprit %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T End-to-end Sampling Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0002-1376-4 %U http://arxiv.org/abs/1806.06710 %D 2018 %X Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. 
Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties. %K Computer Science, Graphics, cs.GR
Leimkühler, T., Seidel, H.-P., and Ritschel, T. 2018b. Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2018)37, 4.
Export
BibTeX
@article{LeimkuehlerSIGGRAPH2018, TITLE = {Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction}, AUTHOR = {Leimk{\"u}hler, Thomas and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3197517.3201379}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {37}, NUMBER = {4}, PAGES = {1--11}, EID = {55}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2018}, }
Endnote
%0 Journal Article %A Leimk&#252;hler, Thomas %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-0002-0630-1 %R 10.1145/3197517.3201379 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 4 %& 1 %P 1 - 11 %Z sequence number: 55 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2018 %O ACM SIGGRAPH 2018 Vancouver, Canada, 12 - 16 August
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2018c. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. IEEE Transactions on Visualization and Computer Graphics24, 6.
Export
BibTeX
@article{Leimkuehler2018, TITLE = {Perceptual real-time {2D}-to-{3D} conversion using cue fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2017.2703612}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {24}, NUMBER = {6}, PAGES = {2037--2050}, }
Endnote
%0 Journal Article %A Leimk&#252;hler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/21.11116/0000-0001-409A-9 %R 10.1109/TVCG.2017.2703612 %7 2018 %D 2018 %J IEEE Transactions on Visualization and Computer Graphics %V 24 %N 6 %& 2037 %P 2037 - 2050 %I IEEE Computer Society %C New York, NY %@ false
Golla, B., Seidel, H.-P., and Chen, R. 2018. Piecewise Linear Mapping Optimization Based on the Complex View. Computer Graphics Forum (Proc. Pacific Graphics 2018)37, 7.
Export
BibTeX
@article{Golla_PG2018, TITLE = {Piecewise Linear Mapping Optimization Based on the Complex View}, AUTHOR = {Golla, Bj{\"o}rn and Seidel, Hans-Peter and Chen, Renjie}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13563}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {37}, NUMBER = {7}, PAGES = {233--243}, BOOKTITLE = {The 26th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2018)}, }
Endnote
%0 Journal Article %A Golla, Bj&#246;rn %A Seidel, Hans-Peter %A Chen, Renjie %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Piecewise Linear Mapping Optimization Based on the Complex View : %G eng %U http://hdl.handle.net/21.11116/0000-0002-72CD-7 %R 10.1111/cgf.13563 %7 2018 %D 2018 %J Computer Graphics Forum %V 37 %N 7 %& 233 %P 233 - 243 %I Wiley-Blackwell %C Oxford, UK %@ false %B The 26th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2018 PG 2018 Hong Kong, 8-11 October 2018
Beigpour, S., Shekhar, S., Mansouryar, M., Myszkowski, K., and Seidel, H.-P. 2018. Light-Field Appearance Editing Based on Intrinsic Decomposition. Journal of Perceptual Imaging1, 1.
Export
BibTeX
@article{Beigpour2018, TITLE = {Light-Field Appearance Editing Based on Intrinsic Decomposition}, AUTHOR = {Beigpour, Shida and Shekhar, Sumit and Mansouryar, Mohsen and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.2352/J.Percept.Imaging.2018.1.1.010502}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, JOURNAL = {Journal of Perceptual Imaging}, VOLUME = {1}, NUMBER = {1}, PAGES = {1--15}, EID = {10502}, }
Endnote
%0 Journal Article %A Beigpour, Shida %A Shekhar, Sumit %A Mansouryar, Mohsen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Light-Field Appearance Editing Based on Intrinsic Decomposition : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F88-C %R 10.2352/J.Percept.Imaging.2018.1.1.010502 %7 2018 %D 2018 %J Journal of Perceptual Imaging %O JPI %V 1 %N 1 %& 1 %P 1 - 15 %Z sequence number: 10502
2017
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017a. A GPU-adapted Structure for Unstructured Grids. Computer Graphics Forum (Proc. EUROGRAPHICS 2017)36, 2.
Export
BibTeX
@article{Zayer2017, TITLE = {A {GPU}-adapted Structure for Unstructured Grids}, AUTHOR = {Zayer, Rhaleb and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13144}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {36}, NUMBER = {2}, PAGES = {495--507}, BOOKTITLE = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)}, }
Endnote
%0 Journal Article %A Zayer, Rhaleb %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A GPU-adapted Structure for Unstructured Grids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5A05-7 %R 10.1111/cgf.13144 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 495 %P 495 - 507 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017b. Sparse Matrix Assembly on the GPU Through Multiplication Patterns. IEEE High Performance Extreme Computing Conference (HPEC 2017), IEEE.
Export
BibTeX
@inproceedings{Zayer_HPEC2017, TITLE = {Sparse Matrix Assembly on the {GPU} Through Multiplication Patterns}, AUTHOR = {Zayer, Rhaleb and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-5386-3472-1}, DOI = {10.1109/HPEC.2017.8091057}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2017)}, PAGES = {1--8}, ADDRESS = {Waltham, MA, USA}, }
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Sparse Matrix Assembly on the GPU Through Multiplication Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3B33-5 %R 10.1109/HPEC.2017.8091057 %D 2017 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2017-09-12 - 2017-09-14 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5386-3472-1
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2017. MonoPerfCap: Human Performance Capture from Monocular Video. http://arxiv.org/abs/1708.02136.
(arXiv: 1708.02136)
Abstract
We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.
Export
BibTeX
@online{Xu2017, TITLE = {{MonoPerfCap}: Human Performance Capture from Monocular Video}, AUTHOR = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Mehta, Dushyant and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1708.02136}, EPRINT = {1708.02136}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.}, }
Endnote
%0 Report %A Xu, Weipeng %A Chatterjee, Avishek %A Zollh&#246;fer, Michael %A Rhodin, Helge %A Mehta, Dushyant %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MonoPerfCap: Human Performance Capture from Monocular Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-05C2-9 %U http://arxiv.org/abs/1708.02136 %D 2017 %X We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. 
We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2017. Stream Line–Based Pattern Search in Flows. Computer Graphics Forum36, 8.
Export
BibTeX
@article{Wang:Esturo:Seidel:Weinkauf2016, TITLE = {Stream Line--Based Pattern Search in Flows}, AUTHOR = {Wang, Zhongjie and Martinez Esturo, Janick and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12990}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, JOURNAL = {Computer Graphics Forum}, VOLUME = {36}, NUMBER = {8}, PAGES = {7--18}, }
Endnote
%0 Journal Article %A Wang, Zhongjie %A Martinez Esturo, Janick %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stream Line&#8211;Based Pattern Search in Flows : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-4301-A %R 10.1111/cgf.12990 %7 2016 %D 2017 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 36 %N 8 %& 7 %P 7 - 18 %I Blackwell-Wiley %C Oxford %@ false
Steinberger, M., Zayer, R., and Seidel, H.-P. 2017. Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Export
BibTeX
@inproceedings{SteinbergerICS2017, TITLE = {Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the {GPU}}, AUTHOR = {Steinberger, Markus and Zayer, Rhaleb and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-5020-4}, DOI = {10.1145/3079079.3079086}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {ICS 2017, International Conference on Supercomputing}, EID = {13}, ADDRESS = {Chicago, IL, USA}, }
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D71-2 %R 10.1145/3079079.3079086 %D 2017 %B International Conference on Supercomputing %Z date of event: 2017-06-13 - 2017-06-16 %C Chicago, IL, USA %B ICS 2017 %Z sequence number: 13 %I ACM %@ 978-1-4503-5020-4
Saikia, H., Seidel, H.-P., and Weinkauf, T. 2017. Fast Similarity Search in Scalar Fields using Merging Histograms. In: Topological Methods in Data Analysis and Visualization IV. Springer, Cham.
Export
BibTeX
@incollection{Saikia_Seidel_Weinkauf2017, TITLE = {Fast Similarity Search in Scalar Fields using Merging Histograms}, AUTHOR = {Saikia, Himangshu and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISBN = {978-3-319-44682-0}, DOI = {10.1007/978-3-319-44684-4_7}, PUBLISHER = {Springer}, ADDRESS = {Cham}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {Topological Methods in Data Analysis and Visualization IV}, EDITOR = {Carr, Hamish and Garth, Christoph and Weinkauf, Tino}, PAGES = {121--134}, SERIES = {Mathematics and Visualization}, }
Endnote
%0 Book Section %A Saikia, Himangshu %A Seidel, Hans-Peter %A Weinkauf, Tino %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Fast Similarity Search in Scalar Fields using Merging Histograms : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-772A-0 %R 10.1007/978-3-319-44684-4_7 %D 2017 %B Topological Methods in Data Analysis and Visualization IV %E Carr, Hamish; Garth, Christoph; Weinkauf, Tino %P 121 - 134 %I Springer %C Cham %@ 978-3-319-44682-0 %S Mathematics and Visualization
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2017a. Deep Shading: Convolutional Neural Networks for Screen Space Shading. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017)36, 4.
Export
BibTeX
@article{NalbachEGSR2017, TITLE = {Deep Shading: {C}onvolutional Neural Networks for Screen Space Shading}, AUTHOR = {Nalbach, Oliver and Arabadzhiyska, Elena and Mehta, Dushyant and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13225}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {36}, NUMBER = {4}, PAGES = {65--78}, BOOKTITLE = {Eurographics Symposium on Rendering 2017}, EDITOR = {Zwicker, Matthias and Sander, Pedro}, }
Endnote
%0 Journal Article %A Nalbach, Oliver %A Arabadzhiyska, Elena %A Mehta, Dushyant %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Shading: Convolutional Neural Networks for Screen Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-CD86-6 %R 10.1111/cgf.13225 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 4 %& 65 %P 65 - 78 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2017 %O Eurographics Symposium on Rendering 2017 EGSR 2017 Helsinki, Finland, 19-21 June 2017
Nalbach, O., Seidel, H.-P., and Ritschel, T. 2017b. Practical Capture and Reproduction of Phosphorescent Appearance. Computer Graphics Forum (Proc. EUROGRAPHICS 2017)36, 2.
Export
BibTeX
@article{Nalbach2017, TITLE = {Practical Capture and Reproduction of Phosphorescent Appearance}, AUTHOR = {Nalbach, Oliver and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13136}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {36}, NUMBER = {2}, PAGES = {409--420}, BOOKTITLE = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)}, }
Endnote
%0 Journal Article %A Nalbach, Oliver %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Practical Capture and Reproduction of Phosphorescent Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A53-9 %R 10.1111/cgf.13136 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 409 %P 409 - 420 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017a. VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera. http://arxiv.org/abs/1705.01583.
(arXiv: 1705.01583)
Abstract
We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras.
Export
BibTeX
@online{MehtaArXiv2017, TITLE = {{VNect}: Real-time {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sridhar, Srinath and Sotnychenko, Oleksandr and Rhodin, Helge and Shafiei, Mohammad and Seidel, Hans-Peter and Xu, Weipeng and Casas, Dan and Theobalt, Christian}, URL = {http://arxiv.org/abs/1705.01583}, DOI = {10.1145/3072959.3073596}, EPRINT = {1705.01583}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras.}, }
Endnote
%0 Report %A Mehta, Dushyant %A Sridhar, Srinath %A Sotnychenko, Oleksandr %A Rhodin, Helge %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Xu, Weipeng %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera : %U http://hdl.handle.net/11858/00-001M-0000-002D-7D78-3 %R 10.1145/3072959.3073596 %U http://arxiv.org/abs/1705.01583 %D 2017 %X We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. 
However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017b. VNect: Real-Time 3D Human Pose Estimation With a Single RGB Camera. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017)36, 4.
Export
BibTeX
@article{MehtaSIGGRAPH2017,
  TITLE        = {{VNect}: {Real-Time} {3D} Human Pose Estimation With a Single {RGB} Camera},
  AUTHOR       = {Mehta, Dushyant and Sridhar, Srinath and Sotnychenko, Oleksandr and Rhodin, Helge and Shafiei, Mohammad and Seidel, Hans-Peter and Xu, Weipeng and Casas, Dan and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3072959.3073596},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {36},
  NUMBER       = {4},
  PAGES        = {1--14},
  EID          = {44},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2017},
}
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sridhar, Srinath %A Sotnychenko, Oleksandr %A Rhodin, Helge %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Xu, Weipeng %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T VNect: Real-Time 3D Human Pose Estimation With a Single RGB Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D95-0 %R 10.1145/3072959.3073596 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 14 %Z sequence number: 44 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Leimkühler, T., Seidel, H.-P., and Ritschel, T. 2017. Minimal Warping: Planning Incremental Novel-view Synthesis. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017) 36, 4.
Export
BibTeX
@article{LeimkuehlerEGSR2017,
  TITLE        = {Minimal Warping: {Planning} Incremental Novel-view Synthesis},
  AUTHOR       = {Leimk{\"u}hler, Thomas and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE     = {eng},
  ISSN         = {0167-7055},
  DOI          = {10.1111/cgf.13219},
  PUBLISHER    = {Wiley-Blackwell},
  ADDRESS      = {Oxford},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  JOURNAL      = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME       = {36},
  NUMBER       = {4},
  PAGES        = {1--14},
  BOOKTITLE    = {Eurographics Symposium on Rendering 2017},
  EDITOR       = {Zwicker, Matthias and Sander, Pedro},
}
Endnote
%0 Journal Article %A Leimk&#252;hler, Thomas %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Minimal Warping: Planning Incremental Novel-view Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-CD7C-D %R 10.1111/cgf.13219 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 4 %& 1 %P 1 - 14 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2017 %O Eurographics Symposium on Rendering 2017 EGSR 2017 Helsinki, Finland, 19-21 June 2017
Kol, T.R., Klehm, O., Seidel, H.-P., and Eisemann, E. 2017. Expressive Single Scattering for Light Shaft Stylization. IEEE Transactions on Visualization and Computer Graphics23, 7.
Export
BibTeX
@article{kol2016expressive,
  TITLE        = {Expressive Single Scattering for Light Shaft Stylization},
  AUTHOR       = {Kol, Timothy R. and Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar},
  LANGUAGE     = {eng},
  ISSN         = {1077-2626},
  DOI          = {10.1109/TVCG.2016.2554114},
  PUBLISHER    = {IEEE Computer Society},
  ADDRESS      = {New York, NY},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  JOURNAL      = {IEEE Transactions on Visualization and Computer Graphics},
  VOLUME       = {23},
  NUMBER       = {7},
  PAGES        = {1753--1766},
}
Endnote
%0 Journal Article %A Kol, Timothy R. %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Expressive Single Scattering for Light Shaft Stylization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64E7-2 %R 10.1109/TVCG.2016.2554114 %7 2016-04-14 %D 2017 %J IEEE Transactions on Visualization and Computer Graphics %V 23 %N 7 %& 1753 %P 1753 - 1766 %I IEEE Computer Society %C New York, NY %@ false
Kerbl, B., Kenzel, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. 2017. Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU. Computer Graphics Forum36, 8.
Export
BibTeX
@article{Seidel_Steinberger2016,
  TITLE        = {Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the {GPU}},
  AUTHOR       = {Kerbl, Bernhard and Kenzel, Michael and Schmalstieg, Dieter and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE     = {eng},
  ISSN         = {0167-7055},
  DOI          = {10.1111/cgf.13075},
  PUBLISHER    = {Wiley-Blackwell},
  ADDRESS      = {Oxford},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  JOURNAL      = {Computer Graphics Forum},
  VOLUME       = {36},
  NUMBER       = {8},
  PAGES        = {232--246},
}
Endnote
%0 Journal Article %A Kerbl, Bernhard %A Kenzel, Michael %A Schmalstieg, Dieter %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-1823-8 %R 10.1111/cgf.13075 %7 2016-12-05 %D 2017 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 36 %N 8 %& 232 %P 232 - 246 %I Blackwell-Wiley %C Oxford %@ false
Jiang, C., Tang, C., Seidel, H.-P., and Wonka, P. 2017. Design and Volume Optimization of Space Structures. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017)36, 4.
Export
BibTeX
@article{JiangSIGGRAPH2017,
  TITLE        = {Design and Volume Optimization of Space Structures},
  AUTHOR       = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Wonka, Peter},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3072959.3073619},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {36},
  NUMBER       = {4},
  PAGES        = {1--14},
  EID          = {159},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2017},
}
Endnote
%0 Journal Article %A Jiang, Caigui %A Tang, Chengcheng %A Seidel, Hans-Peter %A Wonka, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Design and Volume Optimization of Space Structures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D8E-2 %R 10.1145/3072959.3073619 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 14 %Z sequence number: 159 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Haubenwallner, K., Seidel, H.-P., and Steinberger, M. 2017. ShapeGenetics: Using Genetic Algorithms for Procedural Modeling. Computer Graphics Forum (Proc. EUROGRAPHICS 2017)36, 2.
Export
BibTeX
@article{haubenwallner2017shapegenetics,
  TITLE        = {{ShapeGenetics}: {Using} Genetic Algorithms for Procedural Modeling},
  AUTHOR       = {Haubenwallner, Karl and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE     = {eng},
  ISSN         = {0167-7055},
  DOI          = {10.1111/cgf.13120},
  PUBLISHER    = {Wiley-Blackwell},
  ADDRESS      = {Oxford},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  JOURNAL      = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME       = {36},
  NUMBER       = {2},
  PAGES        = {213--223},
  BOOKTITLE    = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)},
}
Endnote
%0 Journal Article %A Haubenwallner, Karl %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T ShapeGenetics: Using Genetic Algorithms for Procedural Modeling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5C69-8 %R 10.1111/cgf.13120 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 213 %P 213 - 223 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Derler, A., Zayer, R., Seidel, H.-P., and Steinberger, M. 2017. Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Export
BibTeX
@inproceedings{DerlerICS2017,
  TITLE        = {Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the {GPU}},
  AUTHOR       = {Derler, Andreas and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE     = {eng},
  ISBN         = {978-1-4503-5020-4},
  DOI          = {10.1145/3079079.3079085},
  PUBLISHER    = {ACM},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  BOOKTITLE    = {ICS 2017, International Conference on Supercomputing},
  EID          = {7},
  ADDRESS      = {Chicago, IL, USA},
}
Endnote
%0 Conference Proceedings %A Derler, Andreas %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D73-D %R 10.1145/3079079.3079085 %D 2017 %B International Conference on Supercomputing %Z date of event: 2017-06-13 - 2017-06-16 %C Chicago, IL, USA %B ICS 2017 %Z sequence number: 7 %I ACM %@ 978-1-4503-5020-4
Arabadzhiyska, E., Tursun, O.T., Myszkowski, K., Seidel, H.-P., and Didyk, P. 2017. Saccade Landing Position Prediction for Gaze-Contingent Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017)36, 4.
Export
BibTeX
@article{ArabadzhiyskaSIGGRAPH2017,
  TITLE        = {Saccade Landing Position Prediction for Gaze-Contingent Rendering},
  AUTHOR       = {Arabadzhiyska, Elena and Tursun, Okan Tarhan and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3072959.3073642},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {36},
  NUMBER       = {4},
  PAGES        = {1--12},
  EID          = {50},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2017},
}
Endnote
%0 Journal Article %A Arabadzhiyska, Elena %A Tursun, Okan Tarhan %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Saccade Landing Position Prediction for Gaze-Contingent Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D82-9 %R 10.1145/3072959.3073642 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 12 %Z sequence number: 50 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017a. Towards a Quality Metric for Dense Light Fields. 30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017), IEEE Computer Society.
Export
BibTeX
@inproceedings{Vamsi2017,
  TITLE        = {Towards a Quality Metric for Dense Light Fields},
  AUTHOR       = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr},
  LANGUAGE     = {eng},
  ISBN         = {978-1-5386-0458-8},
  DOI          = {10.1109/CVPR.2017.396},
  PUBLISHER    = {IEEE Computer Society},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  DATE         = {2017},
  BOOKTITLE    = {30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017)},
  PAGES        = {3720--3729},
  ADDRESS      = {Honolulu, HI, USA},
}
Endnote
%0 Conference Proceedings %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-E476-3 %R 10.1109/CVPR.2017.396 %D 2017 %B 30th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2017-07-21 - 2017-07-26 %C Honolulu, HI, USA %B 30th IEEE Conference on Computer Vision and Pattern Recognition %P 3720 - 3729 %I IEEE Computer Society %@ 978-1-5386-0458-8
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017b. Towards a Quality Metric for Dense Light Fields. http://arxiv.org/abs/1704.07576.
(arXiv: 1704.07576)
Abstract
Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.
Export
BibTeX
@online{AdhikarlaArXiv17,
  TITLE        = {Towards a Quality Metric for Dense Light Fields},
  AUTHOR       = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} K. and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr},
  LANGUAGE     = {eng},
  URL          = {http://arxiv.org/abs/1704.07576},
  EPRINT       = {1704.07576},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.},
}
Endnote
%0 Report %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafa&#322; K. %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %U http://hdl.handle.net/11858/00-001M-0000-002D-2C2C-1 %U http://arxiv.org/abs/1704.07576 %D 2017 %X Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
2016
Wang, Z., Seidel, H.-P., and Weinkauf, T. 2016. Multi-field Pattern Matching Based on Sparse Feature Sampling. IEEE Transactions on Visualization and Computer Graphics22, 1.
Export
BibTeX
@article{Wang2015,
  TITLE     = {Multi-field Pattern Matching Based on Sparse Feature Sampling},
  AUTHOR    = {Wang, Zhongjie and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2015.2467292},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics},
  VOLUME    = {22},
  NUMBER    = {1},
  PAGES     = {807--816},
}
Endnote
%0 Journal Article %A Wang, Zhongjie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Multi-field Pattern Matching Based on Sparse Feature Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-1A76-6 %R 10.1109/TVCG.2015.2467292 %7 2015 %D 2016 %J IEEE Transactions on Visualization and Computer Graphics %V 22 %N 1 %& 807 %P 807 - 816 %I IEEE Computer Society %C New York, NY %@ false
Von Radziewsky, P., Eisemann, E., Seidel, H.-P., and Hildebrandt, K. 2016. Optimized Subspaces for Deformation-based Modeling and Shape Interpolation. Computers and Graphics (Proc. SMI 2016)58.
Export
BibTeX
@article{Radziewsky2016,
  TITLE     = {Optimized Subspaces for Deformation-based Modeling and Shape Interpolation},
  AUTHOR    = {von Radziewsky, Philipp and Eisemann, Elmar and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2016.05.016},
  PUBLISHER = {Elsevier},
  ADDRESS   = {Amsterdam},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {Computers and Graphics (Proc. SMI)},
  VOLUME    = {58},
  PAGES     = {128--138},
  BOOKTITLE = {Shape Modeling International 2016 (SMI 2016)},
}
Endnote
%0 Journal Article %A von Radziewsky, Philipp %A Eisemann, Elmar %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Optimized Subspaces for Deformation-based Modeling and Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0144-0 %R 10.1016/j.cag.2016.05.016 %7 2016 %D 2016 %J Computers and Graphics %V 58 %& 128 %P 128 - 138 %I Elsevier %C Amsterdam %@ false %B Shape Modeling International 2016 %O SMI 2016
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Emulating Displays with Continuously Varying Frame Rates. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016)35, 4.
Export
BibTeX
@article{TemplinSIGGRAPH2016,
  TITLE     = {Emulating Displays with Continuously Varying Frame Rates},
  AUTHOR    = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2897824.2925879},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {35},
  NUMBER    = {4},
  EID       = {67},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016},
}
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Emulating Displays with Continuously Varying Frame Rates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-018D-E %R 10.1145/2897824.2925879 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 67 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Steinberger, M., Derler, A., Zayer, R., and Seidel, H.-P. 2016. How Naive is Naive SpMV on the GPU? IEEE High Performance Extreme Computing Conference (HPEC 2016), IEEE.
Export
BibTeX
@inproceedings{SteinbergerHPEC2016,
  TITLE     = {How Naive is Naive {SpMV} on the {GPU}?},
  AUTHOR    = {Steinberger, Markus and Derler, Andreas and Zayer, Rhaleb and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-5090-3525-0},
  DOI       = {10.1109/HPEC.2016.7761634},
  PUBLISHER = {IEEE},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2016)},
  PAGES     = {1--8},
  ADDRESS   = {Waltham, MA, USA},
}
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Derler, Andreas %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T How Naive is Naive SpMV on the GPU? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98A5-F %R 10.1109/HPEC.2016.7761634 %D 2016 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2016-09-13 - 2016-09-15 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5090-3525-0
Sridhar, S., Rhodin, H., Seidel, H.-P., Oulasvirta, A., and Theobalt, C. 2016. Real-Time Hand Tracking Using a Sum of Anisotropic Gaussians Model. http://arxiv.org/abs/1602.03860.
(arXiv: 1602.03860)
Abstract
Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real-time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable making fast gradient based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets.
Export
BibTeX
@online{Sridhar2016arXiv1602.03860,
  TITLE      = {Real-Time Hand Tracking Using a Sum of Anisotropic {Gaussians} Model},
  AUTHOR     = {Sridhar, Srinath and Rhodin, Helge and Seidel, Hans-Peter and Oulasvirta, Antti and Theobalt, Christian},
  LANGUAGE   = {eng},
  URL        = {http://arxiv.org/abs/1602.03860},
  EPRINT     = {1602.03860},
  EPRINTTYPE = {arXiv},
  YEAR       = {2016},
  ABSTRACT   = {Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real-time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable making fast gradient based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets.},
}
Endnote
%0 Report %A Sridhar, Srinath %A Rhodin, Helge %A Seidel, Hans-Peter %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Hand Tracking Using a Sum of Anisotropic Gaussians Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9878-6 %U http://arxiv.org/abs/1602.03860 %D 2016 %X Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real-time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable making fast gradient based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016a. An Intuitive Control Space for Material Appearance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016)35, 6.
Export
BibTeX
@article{Serrano_MaterialAppearance_2016,
  TITLE     = {An Intuitive Control Space for Material Appearance},
  AUTHOR    = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2980179.2980242},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {35},
  NUMBER    = {6},
  EID       = {186},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016},
}
Endnote
%0 Journal Article %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B8-9 %R 10.1145/2980179.2980242 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 186 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016b. Intuitive Editing of Material Appearance. ACM SIGGRAPH 2016 Posters.
Export
BibTeX
@inproceedings{SerranoSIGGRAPH2016,
  TITLE     = {Intuitive Editing of Material Appearance},
  AUTHOR    = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-4371-8},
  DOI       = {10.1145/2945078.2945141},
  PUBLISHER = {ACM},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {ACM SIGGRAPH 2016 Posters},
  PAGES     = {1--2},
  EID       = {63},
  ADDRESS   = {Anaheim, CA, USA},
}
Endnote
%0 Generic %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Intuitive Editing of Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0170-C %R 10.1145/2945078.2945141 %D 2016 %Z name of event: the 43rd International Conference and Exhibition on Computer Graphics & Interactive Techniques %Z date of event: 2016-07-24 - 2016-07-28 %Z place of event: Anaheim, CA, USA %B ACM SIGGRAPH 2016 Posters %P 1 - 2 %Z sequence number: 63 %@ 978-1-4503-4371-8
Robertini, N., Casas, D., Rhodin, H., Seidel, H.-P., and Theobalt, C. 2016. Model-Based Outdoor Performance Capture. Fourth International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Robertini:2016, TITLE = {Model-Based Outdoor Performance Capture}, AUTHOR = {Robertini, Nadia and Casas, Dan and Rhodin, Helge and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5090-5407-7}, URL = {http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/}, DOI = {10.1109/3DV.2016.25}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Fourth International Conference on 3D Vision}, PAGES = {166--175}, ADDRESS = {Stanford, CA, USA}, }
Endnote
%0 Conference Proceedings %A Robertini, Nadia %A Casas, Dan %A Rhodin, Helge %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Model-Based Outdoor Performance Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A6D-2 %R 10.1109/3DV.2016.25 %U http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/ %D 2016 %B Fourth International Conference on 3D Vision %Z date of event: 2016-10-25 - 2016-10-28 %C Stanford, CA, USA %B Fourth International Conference on 3D Vision %P 166 - 175 %I IEEE Computer Society %@ 978-1-5090-5407-7
Rhodin, H., Robertini, N., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016a. A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation. http://arxiv.org/abs/1602.03725.
(arXiv: 1602.03725)
Abstract
Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.
Export
BibTeX
@online{Rhodin2016arXiv1602.03725, TITLE = {A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.03725}, EPRINT = {1602.03725}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.}, }
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9875-C %U http://arxiv.org/abs/1602.03725 %D 2016 %X Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Rhodin, H., Richardt, C., Casas, D., et al. 2016b. EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{Rhodin2016SGA, TITLE = {{EgoCap}: {E}gocentric Marker-less Motion Capture with Two Fisheye Cameras}, AUTHOR = {Rhodin, Helge and Richardt, Christian and Casas, Dan and Insafutdinov, Eldar and Shafiei, Mohammad and Seidel, Hans-Peter and Schiele, Bernt and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {162}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Rhodin, Helge %A Richardt, Christian %A Casas, Dan %A Insafutdinov, Eldar %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Schiele, Bernt %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-8321-6 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 162 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Rhodin, H., Richardt, C., Casas, D., et al. 2016c. EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract). http://arxiv.org/abs/1701.00142.
(arXiv: 1701.00142)
Abstract
Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes.
Export
BibTeX
@online{DBLP:journals/corr/RhodinRCISSST17, TITLE = {{EgoCap}: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract)}, AUTHOR = {Rhodin, Helge and Richardt, Christian and Casas, Dan and Insafutdinov, Eldar and Shafiei, Mohammad and Seidel, Hans-Peter and Schiele, Bernt and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1701.00142}, EPRINT = {1701.00142}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes.}, }
Endnote
%0 Report %A Rhodin, Helge %A Richardt, Christian %A Casas, Dan %A Insafutdinov, Eldar %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Schiele, Bernt %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract) : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3B3D-B %U http://arxiv.org/abs/1701.00142 %D 2016 %X Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016d. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. http://arxiv.org/abs/1607.08659.
(arXiv: 1607.08659)
Abstract
Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.
Export
BibTeX
@online{Rhodin2016arXiv1607.08659, TITLE = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1607.08659}, EPRINT = {1607.08659}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation -- skeleton, volumetric shape, appearance, and optionally a body surface -- and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.}, }
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9883-C %U http://arxiv.org/abs/1607.08659 %D 2016 %X Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016e. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. Computer Vision -- ECCV 2016, Springer.
Export
BibTeX
@inproceedings{RhodinECCV2016, TITLE = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-3-319-46453-4}, DOI = {10.1007/978-3-319-46454-1_31}, PUBLISHER = {Springer}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Computer Vision -- ECCV 2016}, DEBUG = {author: Leibe, Bastian; author: Matas, Jiri; author: Sebe, Nicu; author: Welling, Max}, PAGES = {509--526}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {9909}, ADDRESS = {Amsterdam, The Netherlands}, }
Endnote
%0 Conference Proceedings %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-986D-F %R 10.1007/978-3-319-46454-1_31 %D 2016 %B 14th European Conference on Computer Vision %Z date of event: 2016-10-11 - 2016-10-14 %C Amsterdam, The Netherlands %B Computer Vision -- ECCV 2016 %E Leibe, Bastian; Matas, Jiri; Sebe, Nicu; Welling, Max %P 509 - 526 %I Springer %@ 978-3-319-46453-4 %B Lecture Notes in Computer Science %N 9909
Reinert, B., Ritschel, T., and Seidel, H.-P. 2016a. Animated 3D Creatures from Single-view Video by Skeletal Sketching. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{Reinert:2016:AnimatedCreatures, TITLE = {Animated {3D} Creatures from Single-view Video by Skeletal Sketching}, AUTHOR = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-9947868-1-4}, DOI = {10.20380/GI2016.17}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference}, EDITOR = {Popa, Tiberiu and Moffatt, Karyn}, PAGES = {133--143}, ADDRESS = {Victoria, BC, Canada}, }
Endnote
%0 Conference Proceedings %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Animated 3D Creatures from Single-view Video by Skeletal Sketching : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64EC-7 %R 10.20380/GI2016.17 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, BC, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 133 - 143 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Reinert, B., Ritschel, T., Seidel, H.-P., and Georgiev, I. 2016b. Projective Blue-Noise Sampling. Computer Graphics Forum 35, 1.
Export
BibTeX
@article{ReinertCGF2016, TITLE = {Projective Blue-Noise Sampling}, AUTHOR = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter and Georgiev, Iliyan}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12725}, PUBLISHER = {Wiley}, ADDRESS = {Chichester}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computer Graphics Forum}, VOLUME = {35}, NUMBER = {1}, PAGES = {285--295}, }
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %A Georgiev, Iliyan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Projective Blue-Noise Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-1A31-D %R 10.1111/cgf.12725 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 1 %& 285 %P 285 - 295 %I Wiley %C Chichester %@ false
Reinert, B., Kopf, J., Ritschel, T., Cuervo, E., Chu, D., and Seidel, H.-P. 2016c. Proxy-guided Image-based Rendering for Mobile Devices. Computer Graphics Forum (Proc. Pacific Graphics 2016) 35, 7.
Export
BibTeX
@article{ReinertPG2016, TITLE = {Proxy-guided Image-based Rendering for Mobile Devices}, AUTHOR = {Reinert, Bernhard and Kopf, Johannes and Ritschel, Tobias and Cuervo, Eduardo and Chu, David and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13032}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {35}, NUMBER = {7}, PAGES = {353--362}, BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications Short Papers Proceedings (Pacific Graphics 2016)}, }
Endnote
%0 Journal Article %A Reinert, Bernhard %A Kopf, Johannes %A Ritschel, Tobias %A Cuervo, Eduardo %A Chu, David %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Proxy-guided Image-based Rendering for Mobile Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2DD8-7 %R 10.1111/cgf.13032 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 7 %& 353 %P 353 - 362 %I Blackwell-Wiley %C Oxford %@ false %B The 24th Pacific Conference on Computer Graphics and Applications Short Papers Proceedings %O Pacific Graphics 2016 PG 2016
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2016. Deep Shading: Convolutional Neural Networks for Screen-Space Shading. http://arxiv.org/abs/1603.06078.
(arXiv: 1603.06078)
Abstract
In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images.
Export
BibTeX
@online{NalbacharXiv2016, TITLE = {Deep Shading: Convolutional Neural Networks for Screen-Space Shading}, AUTHOR = {Nalbach, Oliver and Arabadzhiyska, Elena and Mehta, Dushyant and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1603.06078}, EPRINT = {1603.06078}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images.}, }
Endnote
%0 Report %A Nalbach, Oliver %A Arabadzhiyska, Elena %A Mehta, Dushyant %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Shading: Convolutional Neural Networks for Screen-Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0174-4 %U http://arxiv.org/abs/1603.06078 %D 2016 %X In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images. %K Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
(Best Student Paper Award)
Export
BibTeX
@inproceedings{LeimkuehlerGI2016, TITLE = {Perceptual Real-Time {2D}-to-{3D} Conversion Using Cue Fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-9947868-1-4}, DOI = {10.20380/GI2016.02}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference}, EDITOR = {Popa, Tiberiu and Moffatt, Karyn}, PAGES = {5--12}, ADDRESS = {Victoria, BC, Canada}, }
Endnote
%0 Conference Proceedings %A Leimk&#252;hler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-823D-1 %R 10.20380/GI2016.02 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 5 - 12 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Kellnhofer, P., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2016a. GazeStereo3D: Seamless Disparity Manipulations. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
Export
BibTeX
@article{KellnhoferSIGGRAPH2016, TITLE = {{GazeStereo3D}: {S}eamless Disparity Manipulations}, AUTHOR = {Kellnhofer, Petr and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925866}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {68}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T GazeStereo3D: Seamless Disparity Manipulations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0190-4 %R 10.1145/2897824.2925866 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 68 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016b. Transformation-aware Perceptual Image Metric. Journal of Electronic Imaging 25, 5.
Export
BibTeX
@article{Kellnhofer2016jei,
  TITLE     = {Transformation-aware Perceptual Image Metric},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1017-9909},
  DOI       = {10.1117/1.JEI.25.5.053014},
  PUBLISHER = {SPIE},
  ADDRESS   = {Bellingham, WA},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {Journal of Electronic Imaging},
  VOLUME    = {25},
  NUMBER    = {5},
  EID       = {053014},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B3-4 %R 10.1117/1.JEI.25.5.053014 %7 2016 %D 2016 %J Journal of Electronic Imaging %V 25 %N 5 %Z sequence number: 053014 %I SPIE %C Bellingham, WA %@ false
Kellnhofer, P., Didyk, P., Ritschel, T., Masia, B., Myszkowski, K., and Seidel, H.-P. 2016c. Motion Parallax in Stereo 3D: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{Kellnhofer2016SGA,
  TITLE     = {Motion Parallax in Stereo {3D}: {M}odel and Applications},
  AUTHOR    = {Kellnhofer, Petr and Didyk, Piotr and Ritschel, Tobias and Masia, Belen and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2980179.2980230},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {35},
  NUMBER    = {6},
  EID       = {176},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Ritschel, Tobias %A Masia, Belen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Parallax in Stereo 3D: Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B6-D %R 10.1145/2980179.2980230 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 176 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Gryaditskaya, Y., Masia, B., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Gloss Editing in Light Fields. VMV 2016 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{jgryadit2016,
  TITLE     = {Gloss Editing in Light Fields},
  AUTHOR    = {Gryaditskaya, Yulia and Masia, Belen and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-03868-025-3},
  DOI       = {10.2312/vmv.20161351},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2016},
  DATE      = {2016},
  BOOKTITLE = {VMV 2016 Vision, Modeling and Visualization},
  EDITOR    = {Hullin, Matthias and Stamminger, Marc and Weinkauf, Tino},
  PAGES     = {127--135},
  ADDRESS   = {Bayreuth, Germany},
}
Endnote
%0 Conference Proceedings %A Gryaditskaya, Yulia %A Masia, Belen %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Gloss Editing in Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82C5-B %R 10.2312/vmv.20161351 %D 2016 %B 21st International Symposium on Vision, Modeling and Visualization %Z date of event: 2016-10-10 - 2016-10-12 %C Bayreuth, Germany %B VMV 2016 Vision, Modeling and Visualization %E Hullin, Matthias; Stamminger, Marc; Weinkauf, Tino %P 127 - 135 %I Eurographics Association %@ 978-3-03868-025-3
Boechat, P., Dokter, M., Kenzel, M., Seidel, H.-P., Schmalstieg, D., and Steinberger, M. 2016. Representing and Scheduling Procedural Generation using Operator Graphs. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{BoaechatSIGGRAPHAsia2016,
  TITLE     = {Representing and Scheduling Procedural Generation using Operator Graphs},
  AUTHOR    = {Boechat, Pedro and Dokter, Mark and Kenzel, Michael and Seidel, Hans-Peter and Schmalstieg, Dieter and Steinberger, Markus},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2980179.2980227},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2016},
  DATE      = {2016},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {35},
  NUMBER    = {6},
  EID       = {183},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016},
}
Endnote
%0 Journal Article %A Boechat, Pedro %A Dokter, Mark %A Kenzel, Michael %A Seidel, Hans-Peter %A Schmalstieg, Dieter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Representing and Scheduling Procedural Generation using Operator Graphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98BB-0 %R 10.1145/2980179.2980227 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 183 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
2015
Wang, Z., Seidel, H.-P., and Weinkauf, T. 2015. Hierarchical Hashing for Pattern Search in 3D Vector Fields. VMV 2015 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{WangVMV2015,
  TITLE     = {Hierarchical Hashing for Pattern Search in {3D} Vector Fields},
  AUTHOR    = {Wang, Zhongjie and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905674-95-8},
  DOI       = {10.2312/vmv.20151256},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {VMV 2015 Vision, Modeling and Visualization},
  EDITOR    = {Bommes, David and Ritschel, Tobias and Schultz, Thomas},
  PAGES     = {41--48},
  ADDRESS   = {Aachen, Germany},
}
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Hierarchical Hashing for Pattern Search in 3D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-F760-4 %R 10.2312/vmv.20151256 %D 2015 %B 20th International Symposium on Vision, Modeling and Visualization %Z date of event: 2015-10-07 - 2015-10-09 %C Aachen, Germany %B VMV 2015 Vision, Modeling and Visualization %E Bommes, David; Ritschel, Tobias; Schultz, Thomas %P 41 - 48 %I Eurographics Association %@ 978-3-905674-95-8
Von Tycowicz, C., Schulz, C., Seidel, H.-P., and Hildebrandt, K. 2015. Real-time Nonlinear Shape Interpolation. ACM Transactions on Graphics 34, 3.
Export
BibTeX
@article{Tycowicz2015,
  TITLE     = {Real-time Nonlinear Shape Interpolation},
  AUTHOR    = {von Tycowicz, Christoph and Schulz, Christian and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  DOI       = {10.1145/2729972},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {34},
  NUMBER    = {3},
  EID       = {34},
}
Endnote
%0 Journal Article %A von Tycowicz, Christoph %A Schulz, Christian %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Nonlinear Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D65-9 %R 10.1145/2729972 %7 2015 %D 2015 %J ACM Transactions on Graphics %V 34 %N 3 %Z sequence number: 34 %I ACM %C New York, NY
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2015. Animating Articulated Characters Using Wiggly Splines. Proceedings SCA 2015, ACM.
Export
BibTeX
@inproceedings{SchulzSCA2015,
  TITLE     = {Animating Articulated Characters Using Wiggly Splines},
  AUTHOR    = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-3496-9},
  DOI       = {10.1145/2786784.2786799},
  PUBLISHER = {ACM},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {Proceedings SCA 2015},
  PAGES     = {101--109},
  ADDRESS   = {Los Angeles, CA, USA},
}
Endnote
%0 Conference Proceedings %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Animating Articulated Characters Using Wiggly Splines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8EA3-0 %R 10.1145/2786784.2786799 %D 2015 %B 14th ACM SIGGRAPH / Eurographics Symposium on Computer Animation %Z date of event: 2015-08-07 - 2015-08-09 %C Los Angeles, CA, USA %B Proceedings SCA 2015 %P 101 - 109 %I ACM %@ 978-1-4503-3496-9
Rhodin, H., Robertini, N., Richardt, C., Seidel, H.-P., and Theobalt, C. 2015a. A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation. ICCV 2015, IEEE International Conference on Computer Vision, IEEE.
Export
BibTeX
@inproceedings{RhodinICCV2015,
  TITLE     = {A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation},
  AUTHOR    = {Rhodin, Helge and Robertini, Nadia and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4673-8390-5},
  DOI       = {10.1109/ICCV.2015.94},
  PUBLISHER = {IEEE},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {ICCV 2015, IEEE International Conference on Computer Vision},
  PAGES     = {765--773},
  ADDRESS   = {Santiago, Chile},
}
Endnote
%0 Conference Proceedings %A Rhodin, Helge %A Robertini, Nadia %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-52DC-4 %R 10.1109/ICCV.2015.94 %D 2015 %B IEEE International Conference on Computer Vision %Z date of event: 2015-12-13 - 2015-12-16 %C Santiago, Chile %B ICCV 2015 %P 765 - 773 %I IEEE %@ 978-1-4673-8390-5 %U http://www.cv-foundation.org/openaccess/content_iccv_2015/html/Rhodin_A_Versatile_Scene_ICCV_2015_paper.html
Rhodin, H., Tompkin, J., Kim, K.I., et al. 2015b. Generalizing Wave Gestures from Sparse Examples for Real-time Character Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2015) 34, 6.
Export
BibTeX
@article{RhodinSAP2015,
  TITLE     = {Generalizing Wave Gestures from Sparse Examples for Real-time Character Control},
  AUTHOR    = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and de Aguiar, Edilson and Pfister, Hanspeter and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2816795.2818082},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {34},
  NUMBER    = {6},
  PAGES     = {1--12},
  EID       = {181},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2015},
}
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A de Aguiar, Edilson %A Pfister, Hanspeter %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generalizing Wave Gestures from Sparse Examples for Real-time Character Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-2476-8 %R 10.1145/2816795.2818082 %7 2015 %D 2015 %J ACM Transactions on Graphics %O TOG %V 34 %N 6 %& 1 %P 1 - 12 %Z sequence number: 181 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2015 %O ACM SIGGRAPH Asia 2015 Kobe, Japan
Nguyen, C., Ritschel, T., and Seidel, H.-P. 2015a. Data-driven Color Manifolds. ACM Transactions on Graphics 34, 2.
Export
BibTeX
@article{NguyenTOG2015,
  TITLE     = {Data-driven Color Manifolds},
  AUTHOR    = {Nguyen, Chuong and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1145/2699645},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {ACM Transactions on Graphics},
  VOLUME    = {34},
  NUMBER    = {2},
  EID       = {20},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Data-driven Color Manifolds : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-680A-D %R 10.1145/2699645 %7 2015 %D 2015 %J ACM Transactions on Graphics %V 34 %N 2 %Z sequence number: 20 %I ACM %C New York, NY
Nguyen, C., Nalbach, O., Ritschel, T., and Seidel, H.-P. 2015b. Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections. Computer Graphics Forum (Proc. EUROGRAPHICS 2015) 34, 2.
Export
BibTeX
@article{NguyenEG2015,
  TITLE     = {Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections},
  AUTHOR    = {Nguyen, Chuong and Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1111/cgf.12548},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {34},
  NUMBER    = {2},
  PAGES     = {143--154},
  BOOKTITLE = {The 36th Annual Conference of the European Association of Computer Graphics (EUROGRAPHICS 2015)},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D6A-0 %R 10.1111/cgf.12548 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 2 %& 143 %P 143 - 154 %I Wiley-Blackwell %C Oxford %B The 36th Annual Conference of the European Association of Computer Graphics %O EUROGRAPHICS 2015 4th &#8211; 8th May 2015, Kongresshaus in Z&#252;rich, Switzerland EG 2015
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2015. The Bounced Z-buffer for Indirect Visibility. VMV 2015 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{NalbachVMV2015,
  TITLE     = {The Bounced {Z}-buffer for Indirect Visibility},
  AUTHOR    = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-3-905674-95-8},
  DOI       = {10.2312/vmv.20151261},
  PUBLISHER = {Eurographics Association},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {VMV 2015 Vision, Modeling and Visualization},
  EDITOR    = {Bommes, David and Ritschel, Tobias and Schultz, Thomas},
  PAGES     = {79--86},
  ADDRESS   = {Aachen, Germany},
}
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T The Bounced Z-buffer for Indirect Visibility : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-F762-F %R 10.2312/vmv.20151261 %D 2015 %B 20th International Symposium on Vision, Modeling and Visualization %Z date of event: 2015-10-07 - 2015-10-09 %C Aachen, Germany %B VMV 2015 Vision, Modeling and Visualization %E Bommes, David; Ritschel, Tobias; Schultz, Thomas %P 79 - 86 %I Eurographics Association %@ 978-3-905674-95-8
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2015. High Dynamic Range Imaging. In: Wiley Encyclopedia of Electrical and Electronics Engineering. Wiley, New York, NY.
Export
BibTeX
@incollection{MantiukEncyclopedia2015,
  TITLE     = {High Dynamic Range Imaging},
  AUTHOR    = {Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  DOI       = {10.1002/047134608X.W8265},
  PUBLISHER = {Wiley},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  BOOKTITLE = {Wiley Encyclopedia of Electrical and Electronics Engineering},
  EDITOR    = {Webster, John G.},
  PAGES     = {1--42},
}
Endnote
%0 Book Section %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-A376-B %R 10.1002/047134608X.W8265 %D 2015 %8 15.06.2015 %B Wiley Encyclopedia of Electrical and Electronics Engineering %E Webster, John G. %P 1 - 42 %I Wiley %C New York, NY
Li, C., Wand, M., Wu, X., and Seidel, H.-P. 2015. Approximate 3D Partial Symmetry Detection Using Co-occurrence Analysis. International Conference on 3D Vision, IEEE.
Export
BibTeX
@inproceedings{Li3DV2015,
  TITLE     = {Approximate {3D} Partial Symmetry Detection Using Co-occurrence Analysis},
  AUTHOR    = {Li, Chuan and Wand, Michael and Wu, Xiaokun and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4673-8333-2},
  DOI       = {10.1109/3DV.2015.55},
  PUBLISHER = {IEEE},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {International Conference on 3D Vision},
  EDITOR    = {Brown, Michael and Kosecka, Jana and Theobalt, Christian},
  PAGES     = {425--433},
  ADDRESS   = {Lyon, France},
}
Endnote
%0 Conference Proceedings %A Li, Chuan %A Wand, Michael %A Wu, Xiaokun %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Approximate 3D Partial Symmetry Detection Using Co-occurrence Analysis : %U http://hdl.handle.net/11858/00-001M-0000-002B-34D8-0 %R 10.1109/3DV.2015.55 %D 2015 %B International Conference on 3D Vision %Z date of event: 2015-10-19 - 2015-10-22 %C Lyon, France %B International Conference on 3D Vision %E Brown, Michael; Kosecka, Jana; Theobalt, Christian %P 425 - 433 %I IEEE %@ 978-1-4673-8333-2
Klehm, O., Kol, T.R., Seidel, H.-P., and Eisemann, E. 2015. Stylized Scattering via Transfer Functions and Occluder Manipulation. Graphics Interface 2015, Graphics Interface Conference, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{KlehmGI2015,
  TITLE     = {Stylized Scattering via Transfer Functions and Occluder Manipulation},
  AUTHOR    = {Klehm, Oliver and Kol, Timothy R. and Seidel, Hans-Peter and Eisemann, Elmar},
  LANGUAGE  = {eng},
  ISBN      = {978-0-9947868-0-7},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {Graphics Interface 2015, Graphics Interface Conference},
  EDITOR    = {Zhang, Hao Richard and Tang, Tony},
  PAGES     = {115--121},
  ADDRESS   = {Halifax, Canada},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Kol, Timothy R. %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Stylized Scattering via Transfer Functions and Occluder Manipulation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-D415-8 %D 2015 %B Graphics Interface Conference %Z date of event: 2015-06-03 - 2015-06-05 %C Halifax, Canada %B Graphics Interface 2015 %E Zhang, Hao Richard; Tang, Tony %P 115 - 121 %I Canadian Information Processing Society %@ 978-0-9947868-0-7
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015a. A Transformation-aware Perceptual Image Metric. Human Vision and Electronic Imaging XX (HVEI 2015), SPIE/IS&T.
(Best Student Paper Award)
Abstract
Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.
Export
BibTeX
@inproceedings{Kellnhofer2015,
  TITLE     = {A Transformation-aware Perceptual Image Metric},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {9781628414844},
  DOI       = {10.1117/12.2076754},
  PUBLISHER = {SPIE/IS\&T},
  YEAR      = {2015},
  DATE      = {2015},
  ABSTRACT  = {Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.},
  BOOKTITLE = {Human Vision and Electronic Imaging XX (HVEI 2015)},
  EDITOR    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and de Ridder, Huib},
  EID       = {939408},
  SERIES    = {Proceedings of SPIE},
  VOLUME    = {9394},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-544A-4 %R 10.1117/12.2076754 %D 2015 %B Human Vision and Electronic Imaging XX %Z date of event: 2015-02-08 - 2015-02-12 %C San Francisco, CA, USA %X Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations. 
%B Human Vision and Electronic Imaging XX %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; de Ridder, Huib %Z sequence number: 939408 %I SPIE/IS&T %@ 9781628414844 %B Proceedings of SPIE %N 9394
Kellnhofer, P., Ritschel, T., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2015b. Modeling Luminance Perception at Absolute Threshold. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015) 34, 4.
Export
BibTeX
@article{Kellnhofer2015a,
  TITLE     = {Modeling Luminance Perception at Absolute Threshold},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12687},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME    = {34},
  NUMBER    = {4},
  PAGES     = {155--164},
  BOOKTITLE = {Eurographics Symposium on Rendering 2015},
  EDITOR    = {Lehtinen, Jaakko and Nowrouzezahrai, Derek},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling Luminance Perception at Absolute Threshold : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8E8D-4 %R 10.1111/cgf.12687 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 155 %P 155 - 164 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2015 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Kellnhofer, P., Leimkühler, T., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015c. What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? Proceedings SAP 2015, ACM.
(Best Presentation Award)
Export
BibTeX
@inproceedings{Kellnhofer2015SAP,
  TITLE     = {What Makes {2D}-to-{3D} Stereo Conversion Perceptually Plausible?},
  AUTHOR    = {Kellnhofer, Petr and Leimk{\"u}hler, Thomas and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-3812-7},
  DOI       = {10.1145/2804408.2804409},
  PUBLISHER = {ACM},
  YEAR      = {2015},
  DATE      = {2015},
  BOOKTITLE = {Proceedings SAP 2015},
  PAGES     = {59--66},
  ADDRESS   = {T{\"u}bingen, Germany},
}
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Leimk&#252;hler, Thomas %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? : %U http://hdl.handle.net/11858/00-001M-0000-0029-2460-7 %R 10.1145/2804408.2804409 %D 2015 %B ACM SIGGRAPH Symposium on Applied Perception %Z date of event: 2015-09-13 - 2015-09-14 %C T&#252;bingen, Germany %B Proceedings SAP 2015 %P 59 - 66 %I ACM %@ 978-1-4503-3812-7 %U http://resources.mpi-inf.mpg.de/StereoCueFusion/WhatMakes3D/
Jain, A., Chen, C., Thormählen, T., Metaxas, D., and Seidel, H.-P. 2015. Multi-layer Stencil Creation from Images. Computers and Graphics 48.
Export
BibTeX
@article{JainMulti-layer2015,
  TITLE     = {Multi-layer Stencil Creation from Images},
  AUTHOR    = {Jain, Arjun and Chen, Chao and Thorm{\"a}hlen, Thorsten and Metaxas, Dimitris and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2015.02.003},
  PUBLISHER = {Pergamon},
  ADDRESS   = {New York, NY},
  YEAR      = {2015},
  DATE      = {2015},
  JOURNAL   = {Computers and Graphics},
  VOLUME    = {48},
  PAGES     = {11--22},
}
Endnote
%0 Journal Article %A Jain, Arjun %A Chen, Chao %A Thorm&#228;hlen, Thorsten %A Metaxas, Dimitris %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-layer Stencil Creation from Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-9C34-A %R 10.1016/j.cag.2015.02.003 %7 2015-02-26 %D 2015 %J Computers and Graphics %V 48 %& 11 %P 11 - 22 %I Pergamon %C New York, NY %@ false
Herzog, R., Mewes, D., Wand, M., Guibas, L., and Seidel, H.-P. 2015. LeSSS: Learned Shared Semantic Spaces for Relating Multi-modal Representations of 3D Shapes. Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing 2015) 34, 5.
Export
BibTeX
@article{HerzogSGP2015, TITLE = {{LeSSS}: {L}earned {S}hared {S}emantic {S}paces for Relating Multi-Modal Representations of {3D} Shapes}, AUTHOR = {Herzog, Robert and Mewes, Daniel and Wand, Michael and Guibas, Leonidas and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12703}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Chichester}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing)}, VOLUME = {34}, NUMBER = {5}, PAGES = {141--151}, BOOKTITLE = {Symposium on Geometry Processing 2015 (Eurographics Symposium on Geometric Processing 2015)}, EDITOR = {Ben-Chen, Mirela and Liu, Ligang}, }
Endnote
%0 Journal Article %A Herzog, Robert %A Mewes, Daniel %A Wand, Michael %A Guibas, Leonidas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T LeSSS: Learned Shared Semantic Spaces for Relating Multi-modal Representations of 3D Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8E9A-6 %R 10.1111/cgf.12703 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 5 %& 141 %P 141 - 151 %I Wiley-Blackwell %C Chichester %@ false %B Symposium on Geometry Processing 2015 %O Graz, Austria, July 6 - 8, 2015 SGP 2015 Eurographics Symposium on Geometric Processing 2015
Gryaditskaya, Y., Pouli, T., Reinhard, E., Myszkowski, K., and Seidel, H.-P. 2015. Motion Aware Exposure Bracketing for HDR Video. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015) 34, 4.
Export
BibTeX
@article{Gryaditskaya2015, TITLE = {Motion Aware Exposure Bracketing for {HDR} Video}, AUTHOR = {Gryaditskaya, Yulia and Pouli, Tania and Reinhard, Erik and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12684}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {34}, NUMBER = {4}, PAGES = {119--130}, BOOKTITLE = {Eurographics Symposium on Rendering 2015}, EDITOR = {Lehtinen, Jaakko and Nowrouzezahrai, Derek}, }
Endnote
%0 Journal Article %A Gryaditskaya, Yulia %A Pouli, Tania %A Reinhard, Erik %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Aware Exposure Bracketing for HDR Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-15D2-B %R 10.1111/cgf.12684 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 119 %P 119 - 130 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2015 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Brandt, C., Seidel, H.-P., and Hildebrandt, K. 2015. Optimal Spline Approximation via ℓ₀-Minimization. Computer Graphics Forum (Proc. EUROGRAPHICS 2015) 34, 2.
Export
BibTeX
@article{Brandt2015, TITLE = {Optimal Spline Approximation via $\ell_0$-Minimization}, AUTHOR = {Brandt, Christopher and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12589}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {34}, NUMBER = {2}, PAGES = {617--626}, BOOKTITLE = {The 36th Annual Conference of the European Association of Computer Graphics (EUROGRAPHICS 2015)}, }
Endnote
%0 Journal Article %A Brandt, Christopher %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimal Spline Approximation via &#8467;&#8320;-Minimization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D67-5 %R 10.1111/cgf.12589 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 2 %& 617 %P 617 - 626 %I Wiley-Blackwell %C Oxford %B The 36th Annual Conference of the European Association of Computer Graphics %O EUROGRAPHICS 2015 4th - 8th May 2015, Kongresshaus in Z&#252;rich, Switzerland
Arpa, S., Ritschel, T., Myszkowski, K., Çapin, T., and Seidel, H.-P. 2015. Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image. Computer Graphics Forum 34, 1.
Export
BibTeX
@article{arpa2014purkinje, TITLE = {Purkinje Images: {Conveying} Different Content for Different Luminance Adaptations in a Single Image}, AUTHOR = {Arpa, Sami and Ritschel, Tobias and Myszkowski, Karol and {\c C}apin, Tolga and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12463}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum}, VOLUME = {34}, NUMBER = {1}, PAGES = {116--126}, }
Endnote
%0 Journal Article %A Arpa, Sami %A Ritschel, Tobias %A Myszkowski, Karol %A &#199;apin, Tolga %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D0B-6 %R 10.1111/cgf.12463 %7 2014-10-18 %D 2015 %J Computer Graphics Forum %V 34 %N 1 %& 116 %P 116 - 126 %I Wiley-Blackwell %C Oxford
2014
Wu, X., Li, C., Wand, M., Hildebrandt, K., Jansen, S., and Seidel, H.-P. 2014a. 3D Model Retargeting Using Offset Statistics. 2nd International Conference on 3D Vision, IEEE.
Export
BibTeX
@inproceedings{Wu2014a, TITLE = {{3D} Model Retargeting Using Offset Statistics}, AUTHOR = {Wu, Xiaokun and Li, Chuan and Wand, Michael and Hildebrandt, Klaus and Jansen, Silke and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4799-7000-1}, PUBLISHER = {IEEE}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {2nd International Conference on 3D Vision}, PAGES = {353--360}, ADDRESS = {Tokyo, Japan}, }
Endnote
%0 Conference Proceedings %A Wu, Xiaokun %A Li, Chuan %A Wand, Michael %A Hildebrandt, Klaus %A Jansen, Silke %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Model Retargeting Using Offset Statistics : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D63-D %D 2014 %B 2nd International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %B 2nd International Conference on 3D Vision %P 353 - 360 %I IEEE %@ 978-1-4799-7000-1
Wu, X., Wand, M., Hildebrandt, K., Kohli, P., and Seidel, H.-P. 2014b. Real-time Symmetry-preserving Deformation. Computer Graphics Forum (Proc. Pacific Graphics 2014) 33, 7.
Export
BibTeX
@article{Wu2014, TITLE = {Real-time Symmetry-preserving Deformation}, AUTHOR = {Wu, Xiaokun and Wand, Michael and Hildebrandt, Klaus and Kohli, Pushmeet and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12491}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {33}, NUMBER = {7}, PAGES = {229--238}, BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)}, }
Endnote
%0 Journal Article %A Wu, Xiaokun %A Wand, Michael %A Hildebrandt, Klaus %A Kohli, Pushmeet %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Symmetry-preserving Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3D08-5 %R 10.1111/cgf.12491 %7 2014-10-28 %D 2014 %J Computer Graphics Forum %V 33 %N 7 %& 229 %P 229 - 238 %I Wiley-Blackwell %C Oxford %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2014. Pattern Search in Flows based on Similarity of Stream Line Segments. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Abstract
We propose a method that allows users to define flow features in form of patterns represented as sparse sets of stream line segments. Our approach finds similar occurrences in the same or other time steps. Related approaches define patterns using dense, local stencils or support only single segments. Our patterns are defined sparsely and can have a significant extent, i.e., they are integration-based and not local. This allows for a greater flexibility in defining features of interest. Similarity is measured using intrinsic curve properties only, which enables invariance to location, orientation, and scale. Our method starts with splitting stream lines using globally-consistent segmentation criteria. It strives to maintain the visually apparent features of the flow as a collection of stream line segments. Most importantly, it provides similar segmentations for similar flow structures. For user-defined patterns of curve segments, our algorithm finds similar ones that are invariant to similarity transformations. We showcase the utility of our method using different 2D and 3D flow fields.
Export
BibTeX
@inproceedings{wang14, TITLE = {Pattern Search in Flows based on Similarity of Stream Line Segments}, AUTHOR = {Wang, Zhongjie and Martinez Esturo, Janick and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, PUBLISHER = {Eurographics Association}, YEAR = {2014}, DATE = {2014-10}, ABSTRACT = {We propose a method that allows users to define flow features in form of patterns represented as sparse sets of stream line segments. Our approach finds similar occurrences in the same or other time steps. Related approaches define patterns using dense, local stencils or support only single segments. Our patterns are defined sparsely and can have a significant extent, i.e., they are integration-based and not local. This allows for a greater flexibility in defining features of interest. Similarity is measured using intrinsic curve properties only, which enables invariance to location, orientation, and scale. Our method starts with splitting stream lines using globally-consistent segmentation criteria. It strives to maintain the visually apparent features of the flow as a collection of stream line segments. Most importantly, it provides similar segmentations for similar flow structures. For user-defined patterns of curve segments, our algorithm finds similar ones that are invariant to similarity transformations. We showcase the utility of our method using different 2D and 3D flow fields.}, BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization}, DEBUG = {author: von Landesberger, Tatiana; author: Theisel, Holger; author: Urban, Philipp}, EDITOR = {Bender, Jan and Kuijper, Arjan}, PAGES = {23--30}, ADDRESS = {Darmstadt, Germany}, }
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Martinez Esturo, Janick %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Pattern Search in Flows based on Similarity of Stream Line Segments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5337-3 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %X We propose a method that allows users to define flow features in form of patterns represented as sparse sets of stream line segments. Our approach finds similar occurrences in the same or other time steps. Related approaches define patterns using dense, local stencils or support only single segments. Our patterns are defined sparsely and can have a significant extent, i.e., they are integration-based and not local. This allows for a greater flexibility in defining features of interest. Similarity is measured using intrinsic curve properties only, which enables invariance to location, orientation, and scale. Our method starts with splitting stream lines using globally-consistent segmentation criteria. It strives to maintain the visually apparent features of the flow as a collection of stream line segments. Most importantly, it provides similar segmentations for similar flow structures. For user-defined patterns of curve segments, our algorithm finds similar ones that are invariant to similarity transformations. We showcase the utility of our method using different 2D and 3D flow fields. %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; von Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 23 - 30 %I Eurographics Association %U http://tinoweinkauf.net/
Vangorp, P., Mantiuk, R., Bazyluk, B., et al. 2014. Depth from HDR: Depth Induction or Increased Realism? SAP 2014, ACM Symposium on Applied Perception, ACM.
Export
BibTeX
@inproceedings{Vangorp2014, TITLE = {Depth from {HDR}: {Depth} Induction or Increased Realism?}, AUTHOR = {Vangorp, Peter and Mantiuk, Rafal and Bazyluk, Bartosz and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Watt, Simon J. and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-3009-1}, DOI = {10.1145/2628257.2628258}, PUBLISHER = {ACM}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {SAP 2014, ACM Symposium on Applied Perception}, EDITOR = {Bailey, Reynold and Kuhl, Scott}, PAGES = {71--78}, ADDRESS = {Vancouver, Canada}, }
Endnote
%0 Conference Proceedings %A Vangorp, Peter %A Mantiuk, Rafal %A Bazyluk, Bartosz %A Myszkowski, Karol %A Mantiuk, Rados&#322;aw %A Watt, Simon J. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Depth from HDR: Depth Induction or Increased Realism? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-34DB-5 %R 10.1145/2628257.2628258 %D 2014 %B ACM Symposium on Applied Perception %Z date of event: 2014-08-08 - 2014-08-09 %C Vancouver, Canada %K binocular disparity, contrast, luminance, stereo 3D %B SAP 2014 %E Bailey, Reynold; Kuhl, Scott %P 71 - 78 %I ACM %@ 978-1-4503-3009-1
Tevs, A., Huang, Q., Wand, M., Seidel, H.-P., and Guibas, L. 2014. Relating Shapes via Geometric Symmetries and Regularities. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014) 33, 4.
Export
BibTeX
@article{TevsSIGGRAPH2014, TITLE = {Relating Shapes via Geometric Symmetries and Regularities}, AUTHOR = {Tevs, Art and Huang, Qixing and Wand, Michael and Seidel, Hans-Peter and Guibas, Leonidas}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2601097.2601220}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {33}, NUMBER = {4}, PAGES = {1--12}, EID = {119}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014}, }
Endnote
%0 Journal Article %A Tevs, Art %A Huang, Qixing %A Wand, Michael %A Seidel, Hans-Peter %A Guibas, Leonidas %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Relating Shapes via Geometric Symmetries and Regularities : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-8052-F %F ISI: 000340000100086 %R 10.1145/2601097.2601220 %7 2014-07 %D 2014 %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 12 %Z sequence number: 119 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O Vancouver, BC, Canada ACM SIGGRAPH 2014
Templin, K., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2014a. Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014) 33, 4.
Export
BibTeX
@article{Templin:2014:MOE:2601097.2601148, TITLE = {Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2601097.2601148}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {33}, NUMBER = {4}, PAGES = {1--8}, EID = {145}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE16-9 %R 10.1145/2601097.2601148 %7 2014 %D 2014 %K S3D, binocular, eye&#8208;tracking %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 8 %Z sequence number: 145 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O ACM SIGGRAPH 2014 Vancouver, BC, Canada
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2014b. Perceptually-motivated Stereoscopic Film Grain. Computer Graphics Forum (Proc. Pacific Graphics 2014) 33, 7.
Export
BibTeX
@article{Templin2014b, TITLE = {Perceptually-motivated Stereoscopic Film Grain}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12503}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {33}, NUMBER = {7}, PAGES = {349--358}, BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually-motivated Stereoscopic Film Grain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5DF2-B %R 10.1111/cgf.12503 %7 2014-10-28 %D 2014 %J Computer Graphics Forum %V 33 %N 7 %& 349 %P 349 - 358 %I Wiley-Blackwell %C Oxford %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Sridhar, S., Rhodin, H., Seidel, H.-P., Oulasvirta, A., and Theobalt, C. 2014. Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model. 3DV 2014, International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{sridhar2014real, TITLE = {Real-time Hand Tracking Using a Sum of Anisotropic {Gaussians} Model}, AUTHOR = {Sridhar, Srinath and Rhodin, Helge and Seidel, Hans-Peter and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-4799-7001-8}, DOI = {10.1109/3DV.2014.37}, PUBLISHER = {IEEE Computer Society}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {3DV 2014, International Conference on 3D Vision}, PAGES = {319--326}, ADDRESS = {Tokyo, Japan}, }
Endnote
%0 Conference Proceedings %A Sridhar, Srinath %A Rhodin, Helge %A Seidel, Hans-Peter %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-69E9-F %R 10.1109/3DV.2014.37 %D 2014 %B International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %B 3DV 2014 %P 319 - 326 %I IEEE Computer Society %@ 978-1-4799-7001-8
Schulze, M., Martinez Esturo, J., Günther, T., et al. 2014. Sets of Globally Optimal Stream Surfaces for Flow Visualization. Computer Graphics Forum (Proc. EuroVis 2014) 33, 3.
Export
BibTeX
@article{Schulze2014, TITLE = {Sets of Globally Optimal Stream Surfaces for Flow Visualization}, AUTHOR = {Schulze, Maik and Martinez Esturo, Janick and G{\"u}nther, Tobias and R{\"o}ssl, Christian and Seidel, Hans-Peter and Weinkauf, Tino and Theisel, Holger}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12356}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. EuroVis)}, VOLUME = {33}, NUMBER = {3}, PAGES = {1--10}, BOOKTITLE = {Eurographics Conference on Visualization (EuroVis 2014)}, EDITOR = {Carr, Hamish and Rheingans, Penny and Schumann, Heidrun}, }
Endnote
%0 Journal Article %A Schulze, Maik %A Martinez Esturo, Janick %A G&#252;nther, T. %A R&#246;ssl, Christian %A Seidel, Hans-Peter %A Weinkauf, Tino %A Theisel, Holger %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Sets of Globally Optimal Stream Surfaces for Flow Visualization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-F518-1 %R 10.1111/cgf.12356 %7 2014-07-12 %D 2014 %K Categories and Subject Descriptors (according to ACM CCS), I.3.5 [Computer Graphics]: Computational Geometry and Object Modeling&#8212;Geometric algorithms, languages, and systems %J Computer Graphics Forum %V 33 %N 3 %& 1 %P 1 - 10 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Conference on Visualization %O EuroVis 2014 Swansea, Wales, UK, June 9 &#8211; 13, 2014
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2014. Animating Deformable Objects Using Sparse Spacetime Constraints. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014) 33, 4.
Export
BibTeX
@article{Schulz2014, TITLE = {Animating Deformable Objects Using Sparse Spacetime Constraints}, AUTHOR = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2601097.2601156}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {33}, NUMBER = {4}, PAGES = {1--10}, EID = {109}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014}, }
Endnote
%0 Journal Article %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Animating Deformable Objects Using Sparse Spacetime Constraints : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE18-5 %R 10.1145/2601097.2601156 %7 2014 %D 2014 %K model reduction, optimal control, physically&#8208;based animation, spacetime constraints, wiggly splines %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 10 %Z sequence number: 109 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O ACM SIGGRAPH 2014 Vancouver, BC, Canada
Saikia, H., Seidel, H.-P., and Weinkauf, T. 2014. Extended Branch Decomposition Graphs: Structural Comparison of Scalar Data. Computer Graphics Forum (Proc. EuroVis 2014) 33, 3.
Abstract
We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other - in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise.
Export
BibTeX
@article{saikia14a, TITLE = {Extended Branch Decomposition Graphs: {Structural} Comparison of Scalar Data}, AUTHOR = {Saikia, Himangshu and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12360}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other -- in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise.}, JOURNAL = {Computer Graphics Forum (Proc. EuroVis)}, VOLUME = {33}, NUMBER = {3}, PAGES = {41--50}, BOOKTITLE = {Eurographics Conference on Visualization 2014 (EuroVis 2014)}, EDITOR = {Carr, Hamish and Rheingans, Penny and Schumann, Heidrun}, }
Endnote
%0 Journal Article %A Saikia, Himangshu %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Extended Branch Decomposition Graphs: Structural Comparison of Scalar Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4FFB-A %R 10.1111/cgf.12360 %7 2014 %D 2014 %X We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other - in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise. %J Computer Graphics Forum %V 33 %N 3 %& 41 %P 41 - 50 %I Wiley-Blackwell %C Oxford %B Eurographics Conference on Visualization 2014 %O EuroVis 2014 Swansea, Wales, UK, June 9 &#8211; 13, 2014
Rhodin, H., Tompkin, J., Kim, K.I., Varanasi, K., Seidel, H.-P., and Theobalt, C. 2014. Interactive Motion Mapping for Real-time Character Control. Computer Graphics Forum (Proc. EUROGRAPHICS 2014) 33, 2.
Export
BibTeX
@article{RhodinCGF2014, TITLE = {Interactive Motion Mapping for Real-time Character Control}, AUTHOR = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and Varanasi, Kiran and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12325}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014-05}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {33}, NUMBER = {2}, PAGES = {273--282}, BOOKTITLE = {EUROGRAPHICS 2014}, EDITOR = {L{\'e}vy, Bruno and Kautz, Jan}, }
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A Varanasi, Kiran %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Motion Mapping for Real-time Character Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-8096-6 %F ISI: 000337543000028 %R 10.1111/cgf.12325 %7 2014 %D 2014 %J Computer Graphics Forum %V 33 %N 2 %& 273 %P 273 - 282 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th &#8211; 11th, 2014 EUROGRAPHICS 2014 EG 2014
Palmas, G., Bachynskyi, M., Oulasvirta, A., Seidel, H.-P., and Weinkauf, T. 2014a. An Edge-bundling Layout for Interactive Parallel Coordinates. PacificVis 2014, IEEE Pacific Visualization Symposium, IEEE Computer Society.
Abstract
Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing.
Export
BibTeX
% Palmas et al., IEEE PacificVis 2014 conference paper.
@inproceedings{palmas14a,
  author    = {Palmas, Gregorio and Bachynskyi, Myroslav and Oulasvirta, Antti and Seidel, Hans-Peter and Weinkauf, Tino},
  title     = {An Edge-bundling Layout for Interactive Parallel Coordinates},
  booktitle = {PacificVis 2014, IEEE Pacific Visualization Symposium},
  pages     = {57--64},
  publisher = {IEEE Computer Society},
  address   = {Yokohama, Japan},
  year      = {2014},
  date      = {2014-03},
  language  = {eng},
  doi       = {10.1109/PacificVis.2014.40},
  abstract  = {Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing.},
}
Endnote
%0 Conference Proceedings %A Palmas, Gregorio %A Bachynskyi, Myroslav %A Oulasvirta, Antti %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Edge-bundling Layout for Interactive Parallel Coordinates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D29-0 %R 10.1109/PacificVis.2014.40 %D 2014 %B IEEE Pacific Visualization Symposium %Z date of event: 2014-03-04 - 2014-03-07 %C Yokohama, Japan %X Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing. %B PacificVis 2014 %P 57 - 64 %I IEEE Computer Society
Palmas, G., Bachynskyi, M., Oulasvirta, A., Seidel, H.-P., and Weinkauf, T. 2014b. MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS 2014) 20, 12.
Abstract
In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts.
Export
BibTeX
% Palmas et al., TVCG 20(12), IEEE VIS 2014 special issue.
% Fix: the ABSTRACT field was split by a raw mid-sentence line break
% ("...HCI experts. / We show..."), an extraction artifact; joined with a space.
@article{palmas14b,
  author    = {Palmas, Gregorio and Bachynskyi, Myroslav and Oulasvirta, Antti and Seidel, Hans-Peter and Weinkauf, Tino},
  title     = {{MovExp}: A Versatile Visualization Tool for Human-Computer Interaction Studies with {3D} Performance and Biomechanical Data},
  journal   = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS)},
  volume    = {20},
  number    = {12},
  pages     = {2359--2368},
  booktitle = {IEEE Visual Analytics Science \& Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014},
  DEBUG     = {author: Ebert, David; author: Hauser, Helwig; author: Heer, Jeffrey; author: North, Chris; author: Tory, Melanie; author: Qu, Huamin; author: Shen, Han-Wei; author: Ynnerman, Anders},
  editor    = {Chen, Min},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2014},
  date      = {2014-12},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2014.2346311},
  abstract  = {In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts.},
}
Endnote
%0 Journal Article %A Palmas, Gregorio %A Bachynskyi, Myroslav %A Oulasvirta, Antti %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D4C-4 %R 10.1109/TVCG.2014.2346311 %7 2014 %D 2014 %X In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. 
Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts. %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 12 %& 2359 %P 2359 - 2368 %I IEEE Computer Society %C Los Alamitos, CA %@ false %B IEEE Visual Analytics Science & Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014 %O Proceedings 2014 ; Paris, France, 9&#8211;14 November 2014 IEEE VIS 2014
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2014a. Deep Screen Space. Proceedings I3D 2014, ACM.
Export
BibTeX
% Nalbach et al., I3D 2014 conference paper.
% Fix: dropped the URL field -- it was only the DOI resolver
% (http://doi.acm.org/10.1145/2556700.2556708) duplicating the DOI field;
% the bare DOI is the canonical identifier.
@inproceedings{Nalbach:2014:DSS:2556700.2556708,
  author    = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  title     = {Deep Screen Space},
  booktitle = {Proceedings I3D 2014},
  editor    = {Keyser, John and Sander, Pedro},
  pages     = {79--86},
  publisher = {ACM},
  address   = {San Francisco, CA, USA},
  year      = {2014},
  date      = {2014},
  language  = {eng},
  isbn      = {978-1-4503-2717-6},
  doi       = {10.1145/2556700.2556708},
}
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Deep Screen Space : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D74-8 %R 10.1145/2556700.2556708 %U http://doi.acm.org/10.1145/2556700.2556708 %D 2014 %B 18th Meeting of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2014-03-14 - 2014-03-16 %C San Francisco, CA, USA %B Proceedings I3D 2014 %E Keyser, John; Sander, Pedro %P 79 - 86 %I ACM %@ 978-1-4503-2717-6
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2014b. Deep Screen Space for Indirect Lighting of Volumes. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
% Nalbach et al., VMV 2014 workshop paper.
@inproceedings{DBLP:conf/vmv/NalbachRS14,
  author    = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter},
  title     = {Deep Screen Space for Indirect Lighting of Volumes},
  booktitle = {VMV 2014 Vision, Modeling and Visualization},
  editor    = {Bender, Jan and Kuijper, Arjan and von Landesberger, Tatiana and Theisel, Holger and Urban, Philipp},
  pages     = {143--150},
  publisher = {Eurographics Association},
  address   = {Darmstadt, Germany},
  year      = {2014},
  date      = {2014},
  language  = {eng},
  isbn      = {978-3-905674-74-3},
  doi       = {10.2312/vmv.20141287},
}
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Deep Screen Space for Indirect Lighting of Volumes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D6C-B %R 10.2312/vmv.20141287 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; von Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 143 - 150 %I Eurographics Association %@ 978-3-905674-74-3 %U http://dx.doi.org/10.2312/vmv.20141287
Lochmann, G., Reinert, B., Ritschel, T., Müller, S., and Seidel, H.-P. 2014. Real-time Reflective and Refractive Novel-view Synthesis. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
% Lochmann et al., VMV 2014 workshop paper.
% Fix: editor "Landesberger, Tatiana" was missing the von-particle; the sibling
% entry for the same VMV 2014 proceedings lists "von Landesberger, Tatiana"
% (comma form keeps the particle attached to the surname for sorting).
@inproceedings{LochmannVMV2014,
  author    = {Lochmann, Gerrit and Reinert, Bernhard and Ritschel, Tobias and M{\"u}ller, Stefan and Seidel, Hans-Peter},
  title     = {Real-time Reflective and Refractive Novel-view Synthesis},
  booktitle = {VMV 2014 Vision, Modeling and Visualization},
  editor    = {Bender, Jan and Kuijper, Arjan and von Landesberger, Tatiana and Theisel, Holger and Urban, Philipp},
  pages     = {9--16},
  publisher = {Eurographics Association},
  address   = {Darmstadt, Germany},
  year      = {2014},
  date      = {2014},
  language  = {eng},
  doi       = {10.2312/vmv.20141270},
}
Endnote
%0 Conference Proceedings %A Lochmann, Gerrit %A Reinert, Bernhard %A Ritschel, Tobias %A M&#252;ller, Stefan %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real&#8208;time Reflective and Refractive Novel&#8208;view Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-533E-6 %R 10.2312/vmv.20141270 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 9 - 16 %I Eurographics Association %U http://dx.doi.org/10.2312/vmv.20141270
Kurz, C., Wu, X., Wand, M., Thormählen, T., Kohli, P., and Seidel, H.-P. 2014. Symmetry-aware Template Deformation and Fitting. Computer Graphics Forum 33, 6.
Export
BibTeX
% Kurz et al., Computer Graphics Forum 33(6).
% Fixes: expanded initials-only "Kohli, P." to the full name (Pushmeet Kohli --
% TODO confirm against the published paper); added the journal ISSN and the
% "Oxford, UK" address to match the other Computer Graphics Forum entry in
% this file (RhodinCGF2014).
@article{Kurz2014,
  author    = {Kurz, Christian and Wu, Xiaokun and Wand, Michael and Thorm{\"a}hlen, Thorsten and Kohli, Pushmeet and Seidel, Hans-Peter},
  title     = {Symmetry-aware Template Deformation and Fitting},
  journal   = {Computer Graphics Forum},
  volume    = {33},
  number    = {6},
  pages     = {205--219},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2014},
  date      = {2014},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.12344},
}
Endnote
%0 Journal Article %A Kurz, Christian %A Wu, Xiaokun %A Wand, Michael %A Thorm&#228;hlen, Thorsten %A Kohli, P. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Symmetry-aware Template Deformation and Fitting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D2B-D %R 10.1111/cgf.12344 %7 2014-03-20 %D 2014 %J Computer Graphics Forum %V 33 %N 6 %& 205 %P 205 - 219 %I Wiley-Blackwell %C Oxford
Kozlov, Y., Esturo, J.M., Seidel, H.-P., and Weinkauf, T. 2014. Regularized Harmonic Surface Deformation. http://arxiv.org/abs/1408.3326.
(arXiv: 1408.3326)
Abstract
Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy. In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts.
Export
BibTeX
% Kozlov et al., arXiv preprint 1408.3326.
% Fixes: inserted the missing space in the abstract ("energy.In" -> "energy. In",
% an extraction artifact) and added the arXiv primary class cs.GR recorded in
% the accompanying Endnote export (%K Computer Science, Graphics, cs.GR).
@online{kozlov14,
  author      = {Kozlov, Yeara and Esturo, Janick Martinez and Seidel, Hans-Peter and Weinkauf, Tino},
  title       = {Regularized Harmonic Surface Deformation},
  year        = {2014},
  url         = {http://arxiv.org/abs/1408.3326},
  eprint      = {1408.3326},
  eprinttype  = {arXiv},
  eprintclass = {cs.GR},
  language    = {eng},
  abstract    = {Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy. In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts.},
}
Endnote
%0 Report %A Kozlov, Yeara %A Esturo, Janick Martinez %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Regularized Harmonic Surface Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-49F5-A %U http://arxiv.org/abs/1408.3326 %D 2014 %X Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy.In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts. %K Computer Science, Graphics, cs.GR
Klehm, O., Ihrke, I., Seidel, H.-P., and Eisemann, E. 2014a. Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor. IEEE Transactions on Visualization and Computer Graphics 20, 7.
Export
BibTeX
% Klehm et al., TVCG 20(7).
@article{PLM-tvcg_Klehm2014,
  author    = {Klehm, Oliver and Ihrke, Ivo and Seidel, Hans-Peter and Eisemann, Elmar},
  title     = {Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {20},
  number    = {7},
  pages     = {983--995},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2014},
  date      = {2014-07},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2014.13},
}
Endnote
%0 Journal Article %A Klehm, Oliver %A Ihrke, Ivo %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51CA-B %R 10.1109/TVCG.2014.13 %7 2014 %D 2014 %K rendering (computer graphics);artistic control;environmental lighting;image component;lighting manipulations;noise function parameters;painting metaphor;property manipulations;realistic rendering;static volume stylization;static volumes;tomographic reconstruction;volume appearance;volume properties;volumetric rendering equation;Equations;Image reconstruction;Lighting;Mathematical model;Optimization;Rendering (computer graphics);Scattering;Artist control;optimization;participating media %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 7 %& 983 %P 983 - 995 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Klehm, O., Seidel, H.-P., and Eisemann, E. 2014b. Filter-based Real-time Single Scattering using Rectified Shadow Maps. Journal of Computer Graphics Techniques 3, 3.
Export
BibTeX
% Klehm et al., JCGT 3(3). Open-access journal; no DOI on record, URL is canonical.
@article{fbss_jcgtKlehm2014,
  author    = {Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar},
  title     = {Filter-based Real-time Single Scattering using Rectified Shadow Maps},
  journal   = {Journal of Computer Graphics Techniques},
  volume    = {3},
  number    = {3},
  pages     = {7--34},
  publisher = {Williams College},
  address   = {Williamstown, MA},
  year      = {2014},
  date      = {2014-08},
  language  = {eng},
  issn      = {2331-7418},
  url       = {http://jcgt.org/published/0003/03/02/},
}
Endnote
%0 Journal Article %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Filter-based Real-time Single Scattering using Rectified Shadow Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51B3-E %U http://jcgt.org/published/0003/03/02/ %7 2014 %D 2014 %J Journal of Computer Graphics Techniques %O JCGT %V 3 %N 3 %& 7 %P 7 - 34 %I Williams College %C Williamstown, MA %@ false %U http://jcgt.org/published/0003/03/02/
Klehm, O., Seidel, H.-P., and Eisemann, E. 2014c. Prefiltered Single Scattering. Proceedings I3D 2014, ACM.
Export
BibTeX
% Klehm et al., I3D 2014 conference paper.
@inproceedings{Klehm:2014:PSS:2556700.2556704,
  author    = {Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar},
  title     = {Prefiltered Single Scattering},
  booktitle = {Proceedings I3D 2014},
  editor    = {Keyser, John and Sander, Pedro},
  pages     = {71--78},
  publisher = {ACM},
  address   = {San Francisco, CA, USA},
  year      = {2014},
  date      = {2014},
  language  = {eng},
  isbn      = {978-1-4503-2717-6},
  doi       = {10.1145/2556700.2556704},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Prefiltered Single Scattering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51C5-6 %R 10.1145/2556700.2556704 %D 2014 %B 18th Meeting of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2014-03-14 - 2014-03-16 %C San Francisco, CA, USA %K participating media, scattering, shadow test %B Proceedings I3D 2014 %E Keyser, John; Sander, Pedro %P 71 - 78 %I ACM %@ 978-1-4503-2717-6
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2014a. Improving Perception of Binocular Stereo Motion on 3D Display Devices. Stereoscopic Displays and Applications XXV, SPIE.
Abstract
This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe, how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation how to improve rendering of synthetic stereo animations.
Export
BibTeX
% Kellnhofer et al., SPIE Stereoscopic Displays and Applications XXV.
@inproceedings{Kellnhofer2014a,
  author    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  title     = {Improving Perception of Binocular Stereo Motion on {3D} Display Devices},
  booktitle = {Stereoscopic Displays and Applications XXV},
  editor    = {Woods, Andrew J. and Holliman, Nicolas S. and Favalora, Gregg E.},
  pages     = {1--11},
  eid       = {901116},
  series    = {Proceedings of SPIE-IS\&T Electronic Imaging},
  volume    = {9011},
  publisher = {SPIE},
  address   = {San Francisco, CA, USA},
  year      = {2014},
  date      = {2014},
  language  = {eng},
  issn      = {0277-786X},
  isbn      = {9780819499288},
  doi       = {10.1117/12.2032389},
  abstract  = {This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe, how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation how to improve rendering of synthetic stereo animations.},
}
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Improving Perception of Binocular Stereo Motion on 3D Display Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-318D-7 %R 10.1117/12.2032389 %D 2014 %B Stereoscopic Displays and Applications XXV %Z date of event: 2014-02-03 - 2014-02-05 %C San Francisco, CA, USA %X This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe, how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation how to improve rendering of synthetic stereo animations. %B Stereoscopic Displays and Applications XXV %E Woods, Andrew J.; Holliman, Nicolas S.; Favalora, Gregg E. %P 1 - 11 %Z sequence number: 901116 %I SPIE %@ 9780819499288 %B Proceedings of SPIE-IS&T Electronic Imaging %N 9011 %@ false
Kellnhofer, P., Ritschel, T., Vangorp, P., Myszkowski, K., and Seidel, H.-P. 2014b. Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision. ACM Transactions on Applied Perception 11, 3.
Export
BibTeX
% Kellnhofer et al., ACM TAP 11(3); article number 15 (no page range), hence eid.
@article{kellnhofer:2014c:DarkStereo,
  author    = {Kellnhofer, Petr and Ritschel, Tobias and Vangorp, Peter and Myszkowski, Karol and Seidel, Hans-Peter},
  title     = {Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision},
  journal   = {ACM Transactions on Applied Perception},
  volume    = {11},
  number    = {3},
  eid       = {15},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2014},
  date      = {2014},
  language  = {eng},
  issn      = {1544-3558},
  doi       = {10.1145/2644813},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Vangorp, Peter %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE0E-E %R 10.1145/2644813 %7 2014 %D 2014 %J ACM Transactions on Applied Perception %V 11 %N 3 %Z sequence number: 15 %I ACM %C New York, NY %@ false
Günther, D., Jacobson, A., Reininghaus, J., Seidel, H.-P., Sorkine-Hornung, O., and Weinkauf, T. 2014. Fast and Memory-efficient Topological Denoising of 2D and 3D Scalar Fields. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS 2014) 20, 12.
Export
BibTeX
% Guenther et al., TVCG 20(12), IEEE VIS 2014 special issue.
@article{guenther14c,
  author    = {G{\"u}nther, David and Jacobson, Alec and Reininghaus, Jan and Seidel, Hans-Peter and Sorkine-Hornung, Olga and Weinkauf, Tino},
  title     = {Fast and Memory-efficient Topological Denoising of {2D} and {3D} Scalar Fields},
  journal   = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS)},
  volume    = {20},
  number    = {12},
  pages     = {2585--2594},
  booktitle = {IEEE Visual Analytics Science \& Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014},
  DEBUG     = {author: Ebert, David; author: Hauser, Helwig; author: Heer, Jeffrey; author: North, Chris; author: Tory, Melanie; author: Qu, Huamin; author: Shen, Han-Wei; author: Ynnerman, Anders},
  editor    = {Chen, Min},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2014},
  date      = {2014-12},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2014.2346432},
}
Endnote
%0 Journal Article %A G&#252;nther, David %A Jacobson, Alec %A Reininghaus, Jan %A Seidel, Hans-Peter %A Sorkine-Hornung, Olga %A Weinkauf, Tino %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast and Memory-efficient Topological Denoising of 2D and 3D Scalar Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5349-E %R 10.1109/TVCG.2014.2346432 %7 2014 %D 2014 %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 12 %& 2585 %P 2585 - 2594 %I IEEE Computer Society %C Los Alamitos, CA %@ false %B IEEE Visual Analytics Science & Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014 %O Proceedings 2014 ; Paris, France, 9&#8211;14 November 2014 IEEE VIS 2014
Guenther, D., Reininghaus, J., Seidel, H.-P., and Weinkauf, T. 2014. Notes on the Simplification of the Morse-Smale Complex. Topological Methods in Data Analysis and Visualization III (TopoInVis 2013), Springer.
Abstract
The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations.
Export
BibTeX
@inproceedings{guenther13a, TITLE = {Notes on the Simplification of the {Morse}-{Smale} Complex}, AUTHOR = {Guenther, David and Reininghaus, Jan and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISBN = {978-3-319-04098-1}, DOI = {10.1007/978-3-319-04099-8_9}, PUBLISHER = {Springer}, YEAR = {2013}, DATE = {2014}, ABSTRACT = {The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations.}, BOOKTITLE = {Topological Methods in Data Analysis and Visualization III (TopoInVis 2013)}, EDITOR = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald}, PAGES = {135--150}, SERIES = {Mathematics and Visualization}, ADDRESS = {Davis, CA, USA}, }
Endnote
%0 Conference Proceedings %A Guenther, David %A Reininghaus, Jan %A Seidel, Hans-Peter %A Weinkauf, Tino %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Notes on the Simplification of the Morse-Smale Complex : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-52F3-3 %R 10.1007/978-3-319-04099-8_9 %D 2014 %B TopoInVis %Z date of event: 2013-03-04 - 2013-03-06 %C Davis, CA, USA %X The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations. %B Topological Methods in Data Analysis and Visualization III %E Bremer, Peer-Timo; Hotz, Ingrid; Pascucci, Valerio; Peikert, Ronald %P 135 - 150 %I Springer %@ 978-3-319-04098-1 %B Mathematics and Visualization
Gryaditskaya, Y., Pouli, T., Reinhard, E., and Seidel, H.-P. 2014. Sky Based Light Metering for High Dynamic Range Images. Computer Graphics Forum (Proc. Pacific Graphics 2014) 33, 7.
Abstract
Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel—effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design.
Export
BibTeX
@article{CGF:Gryad:14, TITLE = {Sky Based Light Metering for High Dynamic Range Images}, AUTHOR = {Gryaditskaya, Yulia and Pouli, Tania and Reinhard, Erik and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.12474}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel---effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design.}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {33}, NUMBER = {7}, PAGES = {61--69}, BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)}, }
Endnote
%0 Journal Article %A Gryaditskaya, Yulia %A Pouli, Tania %A Reinhard, Erik %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Sky Based Light Metering for High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-6C64-1 %R 10.1111/cgf.12474 %7 2014 %D 2014 %X Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel&#8212;effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design. %J Computer Graphics Forum %V 33 %N 7 %& 61 %P 61 - 69 %I Wiley-Blackwell %C Oxford, UK %@ false %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Elek, O., Ritschel, T., Dachsbacher, C., and Seidel, H.-P. 2014a. Interactive Light Scattering with Principal-ordinate Propagation. Graphics Interface 2014, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{ElekGI2014, TITLE = {Interactive Light Scattering with Principal-ordinate Propagation}, AUTHOR = {Elek, Oskar and Ritschel, Tobias and Dachsbacher, Carsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4822-6003-8}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {Graphics Interface 2014}, EDITOR = {Kry, Paul G. and Bunt, Andrea}, PAGES = {87--94}, ADDRESS = {Montreal, Canada}, }
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Ritschel, Tobias %A Dachsbacher, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Light Scattering with Principal-ordinate Propagation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5181-D %D 2014 %B Graphics Interface %Z date of event: 2014-05-07 - 2014-05-09 %C Montreal, Canada %B Graphics Interface 2014 %E Kry, Paul G.; Bunt, Andrea %P 87 - 94 %I Canadian Information Processing Society %@ 978-1-4822-6003-8 %U http://people.mpi-inf.mpg.de/~oelek/Papers/PrincipalOrdinatePropagation/
Elek, O., Ritschel, T., Dachsbacher, C., and Seidel, H.-P. 2014b. Principal-ordinates Propagation for Real-time Rendering of Participating Media. Computers & Graphics 45.
Export
BibTeX
@article{ElekCAG2014, TITLE = {Principal-ordinates Propagation for Real-time Rendering of Participating Media}, AUTHOR = {Elek, Oskar and Ritschel, Tobias and Dachsbacher, Carsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2014.08.003}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computers \& Graphics}, VOLUME = {45}, PAGES = {28--39}, }
Endnote
%0 Journal Article %A Elek, Oskar %A Ritschel, Tobias %A Dachsbacher, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Principal-ordinates Propagation for Real-time Rendering of Participating Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-516D-C %R 10.1016/j.cag.2014.08.003 %7 2014-09-06 %D 2014 %J Computers & Graphics %V 45 %& 28 %P 28 - 39 %I Elsevier %C Amsterdam %@ false
Elek, O., Bauszat, P., Ritschel, T., Magnor, M., and Seidel, H.-P. 2014c. Progressive Spectral Ray Differentials. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{ElekVMV2014, TITLE = {Progressive Spectral Ray Differentials}, AUTHOR = {Elek, Oskar and Bauszat, Pablo and Ritschel, Tobias and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905674-74-3}, PUBLISHER = {Eurographics Association}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization}, PAGES = {151--158}, ADDRESS = {Darmstadt, Germany}, }
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Bauszat, Pablo %A Ritschel, Tobias %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Progressive Spectral Ray Differentials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5176-5 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %P 151 - 158 %I Eurographics Association %@ 978-3-905674-74-3
Elek, O., Bauszat, P., Ritschel, T., Magnor, M., and Seidel, H.-P. 2014d. Spectral Ray Differentials. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2014) 33, 4.
Export
BibTeX
@article{Elek2014EGSR, TITLE = {Spectral Ray Differentials}, AUTHOR = {Elek, Oskar and Bauszat, Pablo and Ritschel, Tobias and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12418}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {33}, NUMBER = {4}, PAGES = {113--122}, BOOKTITLE = {Eurographics Symposium on Rendering 2014}, EDITOR = {Jarosz, Wojciech and Peers, Pieter}, }
Endnote
%0 Journal Article %A Elek, Oskar %A Bauszat, Pablo %A Ritschel, Tobias %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Spectral Ray Differentials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4A77-B %R 10.1111/cgf.12418 %7 2014 %D 2014 %J Computer Graphics Forum %V 33 %N 4 %& 113 %P 113 - 122 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2014 %O Eurographics Symposium on Rendering 2014 EGSR 2014 Lyon, France, June 25th - 27th, 2014
Dabala, L., Kellnhofer, P., Ritschel, T., et al. 2014. Manipulating Refractive and Reflective Binocular Disparity. Computer Graphics Forum (Proc. EUROGRAPHICS 2014) 33, 2.
Abstract
Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass, that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates, that our approach combines comfortable viewing with realistic depiction of typical specular scenes.
Export
BibTeX
@article{Kellnhofer2014b, TITLE = {Manipulating Refractive and Reflective Binocular Disparity}, AUTHOR = {Dabala, Lukasz and Kellnhofer, Petr and Ritschel, Tobias and Didyk, Piotr and Templin, Krzysztof and Rokita, Przemyslaw and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12290}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass, that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates, that our approach combines comfortable viewing with realistic depiction of typical specular scenes.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {33}, NUMBER = {2}, PAGES = {53--62}, BOOKTITLE = {EUROGRAPHICS 2014}, EDITOR = {L{\'e}vy, Bruno and Kautz, Jan}, }
Endnote
%0 Journal Article %A Dabala, Lukasz %A Kellnhofer, Petr %A Ritschel, Tobias %A Didyk, Piotr %A Templin, Krzysztof %A Rokita, Przemyslaw %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Manipulating Refractive and Reflective Binocular Disparity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0019-EEF9-6 %R 10.1111/cgf.12290 %7 2014-06-01 %D 2014 %X Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass, that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. 
A preliminary perceptual study indicates, that our approach combines comfortable viewing with realistic depiction of typical specular scenes. %J Computer Graphics Forum %V 33 %N 2 %& 53 %P 53 - 62 %I Wiley-Blackwell %C Oxford, UK %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th &#8211; 11th, 2014 EUROGRAPHICS 2014 EG 2014
Brunton, A., Wand, M., Wuhrer, S., Seidel, H.-P., and Weinkauf, T. 2014. A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation. Graphical Models 76, 2.
Abstract
Intrinsic shape matching has become the standard approach for pose invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to the state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms.
Export
BibTeX
@article{brunton13, TITLE = {A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation}, AUTHOR = {Brunton, Alan and Wand, Michael and Wuhrer, Stefanie and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {1524-0703}, DOI = {10.1016/j.gmod.2013.11.003}, PUBLISHER = {Academic Press}, ADDRESS = {San Diego, CA}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Intrinsic shape matching has become the standard approach for pose invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to the state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms.}, JOURNAL = {Graphical Models}, VOLUME = {76}, NUMBER = {2}, PAGES = {70--85}, }
Endnote
%0 Journal Article %A Brunton, Alan %A Wand, Michael %A Wuhrer, Stefanie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F6E9-5 %R 10.1016/j.gmod.2013.11.003 %7 2013-12-15 %D 2014 %X Intrinsic shape matching has become the standard approach for pose invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to the state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms. %J Graphical Models %V 76 %N 2 %& 70 %P 70 - 85 %I Academic Press %C San Diego, CA %@ false
2013
Wang, Z., Grochulla, M.P., Thormählen, T., and Seidel, H.-P. 2013. 3D Face Template Registration Using Normal Maps. 3DV 2013, International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Wang2013, TITLE = {{3D} Face Template Registration Using Normal Maps}, AUTHOR = {Wang, Zhongjie and Grochulla, Martin Peter and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-7695-5067-1}, DOI = {10.1109/3DV.2013.46}, LOCALID = {Local-ID: 220FFD3372EB9C04C1257C6000528BF3-Wang2013}, PUBLISHER = {IEEE Computer Society}, YEAR = {2013}, DATE = {2013}, BOOKTITLE = {3DV 2013, International Conference on 3D Vision}, EDITOR = {Guerrero, Juan E.}, PAGES = {295--302}, ADDRESS = {Seattle, WA, USA}, }
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Grochulla, Martin Peter %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Face Template Registration Using Normal Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1CEC-B %R 10.1109/3DV.2013.46 %F OTHER: Local-ID: 220FFD3372EB9C04C1257C6000528BF3-Wang2013 %D 2013 %B International Conference on 3D Vision %Z date of event: 2013-06-29 - 2013-07-01 %C Seattle, WA, USA %B 3DV 2013 %E Guerrero, Juan E. %P 295 - 302 %I IEEE Computer Society %@ 978-0-7695-5067-1
Von Tycowicz, C., Schulz, C., Seidel, H.-P., and Hildebrandt, K. 2013. An Efficient Construction of Reduced Deformable Objects. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2013) 32, 6.
Abstract
Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the approximation of reduced forces and for the construction of reduced shape spaces of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches.
Export
BibTeX
@article{Hildebrandt2013, TITLE = {An Efficient Construction of Reduced Deformable Objects}, AUTHOR = {von Tycowicz, Christoph and Schulz, Christian and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2508363.2508392}, LOCALID = {Local-ID: CBFBAC90E4E008EDC1257C240031E997-Hildebrandt2013}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the \emph{approximation of reduced forces} and for the \emph{construction of reduced shape spaces} of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {32}, NUMBER = {6}, PAGES = {1--10}, EID = {213}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2013}, }
Endnote
%0 Journal Article %A von Tycowicz, Christoph %A Schulz, Christian %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Construction of Reduced Deformable Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3A34-A %R 10.1145/2508363.2508392 %F OTHER: Local-ID: CBFBAC90E4E008EDC1257C240031E997-Hildebrandt2013 %7 2013 %D 2013 %X Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the approximation of reduced forces and for the construction of reduced shape spaces of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches. %J ACM Transactions on Graphics %V 32 %N 6 %& 1 %P 1 - 10 %Z sequence number: 213 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2013 %O ACM SIGGRAPH Asia 2013 Hong Kong, 19 - 22 November 2013
Sunkel, M., Jansen, S., Wand, M., and Seidel, H.-P. 2013. A Correlated Parts Model for Object Detection in Large 3D Scans. Computer Graphics Forum (Proc. EUROGRAPHICS 2013) 32, 2.
Abstract
This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm.
Export
BibTeX
@article{Sunkel2013,
  title     = {A Correlated Parts Model for Object Detection in Large {3D} Scans},
  author    = {Sunkel, Martin and Jansen, Silke and Wand, Michael and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.12040},
  localid   = {Local-ID: 71E3D133D260E612C1257B0400475765-Sunkel2013},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2013},
  date      = {2013},
  abstract  = {This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {32},
  number    = {2},
  pages     = {205--214},
  booktitle = {EUROGRAPHICS 2013},
  editor    = {Poulin, P. and Navazo, I.},
}
Endnote
%0 Journal Article %A Sunkel, Martin %A Jansen, Silke %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Correlated Parts Model for Object Detection in Large 3D Scans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1CE6-8 %R 10.1111/cgf.12040 %F OTHER: Local-ID: 71E3D133D260E612C1257B0400475765-Sunkel2013 %7 2013-05-06 %D 2013 %X This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm. %J Computer Graphics Forum %V 32 %N 2 %& 205 %P 205 - 214 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EG 2013 EUROGRAPHICS 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th &#8211; 10th, 2013
Scherbaum, K., Petterson, J., Feris, R.S., Blanz, V., and Seidel, H.-P. 2013. Fast Face Detector Training Using Tailored Views. ICCV 2013, IEEE International Conference on Computer Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Scherbaum2013,
  title     = {Fast Face Detector Training Using Tailored Views},
  author    = {Scherbaum, Kristina and Petterson, James and Feris, Rogerio S. and Blanz, Volker and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1550-5499},
  isbn      = {978-1-4799-2839-2},
  doi       = {10.1109/ICCV.2013.354},
  localid   = {Local-ID: BBE1AD1B44792B41C1257C600050C266-Scherbaum2013},
  publisher = {IEEE Computer Society},
  year      = {2013},
  date      = {2013},
  booktitle = {ICCV 2013, IEEE International Conference on Computer Vision},
  pages     = {2848--2855},
  address   = {Sydney, Australia},
}
Endnote
%0 Conference Proceedings %A Scherbaum, Kristina %A Petterson, James %A Feris, Rogerio S. %A Blanz, Volker %A Seidel, Hans-Peter %+ Cluster of Excellence Multimodal Computing and Interaction External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Face Detector Training Using Tailored Views : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0019-7AC0-9 %R 10.1109/ICCV.2013.354 %F OTHER: Local-ID: BBE1AD1B44792B41C1257C600050C266-Scherbaum2013 %D 2013 %B IEEE International Conference on Computer Vision %Z date of event: 2013-12-03 - 2013-12-06 %C Sydney, Australia %B ICCV 2013 %P 2848 - 2855 %I IEEE Computer Society %@ false
Reinhard, E., Efros, A., Kautz, J., and Seidel, H.-P. 2013. On Visual Realism of Synthesized Imagery. Proceedings of the IEEE101, 9.
Export
BibTeX
@article{Reinhard2013a,
  title     = {On Visual Realism of Synthesized Imagery},
  author    = {Reinhard, Erik and Efros, Alexei and Kautz, Jan and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0018-9219},
  doi       = {10.1109/JPROC.2013.2260711},
  localid   = {Local-ID: 87D8785C8741C366C1257B820045FF96-Reinhard2013a},
  publisher = {IEEE},
  address   = {Piscataway, NJ},
  year      = {2013},
  date      = {2013},
  journal   = {Proceedings of the IEEE},
  volume    = {101},
  number    = {9},
  pages     = {1998--2007},
}
Endnote
%0 Journal Article %A Reinhard, Erik %A Efros, Alexei %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T On Visual Realism of Synthesized Imagery : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3E31-1 %R 10.1109/JPROC.2013.2260711 %F OTHER: Local-ID: 87D8785C8741C366C1257B820045FF96-Reinhard2013a %7 2013-07-25 %D 2013 %J Proceedings of the IEEE %O Proc. IEEE %V 101 %N 9 %& 1998 %P 1998 - 2007 %I IEEE %C Piscataway, NJ %@ false
Reinert, B., Ritschel, T., and Seidel, H.-P. 2013. Interactive By-example Design of Artistic Packing Layouts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2013)32, 6.
Abstract
We propose an approach to “pack” a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extend, we propose a novel generalization of Centroidal Voronoi Tesselation which equalizes the distances between boundaries of nearby primitives. Compared to previous primitive distribution approaches our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system’s usability.
Export
BibTeX
@article{Reinert2013,
  title     = {Interactive By-example Design of Artistic Packing Layouts},
  author    = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2508363.2508409},
  localid   = {Local-ID: 7A381077C9181F50C1257C6F004CC475-Reinert2013},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2013},
  date      = {2013},
  abstract  = {We propose an approach to ``pack'' a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extend, we propose a novel generalization of Centroidal Voronoi Tesselation which equalizes the distances between boundaries of nearby primitives. Compared to previous primitive distribution approaches our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system's usability.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {32},
  number    = {6},
  pages     = {1--7},
  eid       = {218},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2013},
}
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive By-example Design of Artistic Packing Layouts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-18D8-6 %R 10.1145/2508363.2508409 %F OTHER: Local-ID: 7A381077C9181F50C1257C6F004CC475-Reinert2013 %D 2013 %X We propose an approach to &#8220;pack&#8221; a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extend, we propose a novel generalization of Centroidal Voronoi Tesselation which equalizes the distances between boundaries of nearby primitives. Compared to previous primitive distribution approaches our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system&#8217;s usability. %J ACM Transactions on Graphics %V 32 %N 6 %& 1 %P 1 - 7 %Z sequence number: 218 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2013 %O ACM SIGGRAPH Asia 2013 Hong Kong, 19 - 22 November 2013
Pouli, T., Artusi, A., Banterle, F., Akyüz, A.O., Seidel, H.-P., and Reinhard, E. 2013. Color Correction for Tone Reproduction. 21st Color and Imaging Conference Final Program and Proceedings (CIC 2013), IS&T.
Export
BibTeX
@inproceedings{PouliCIC21,
  title     = {Color Correction for Tone Reproduction},
  author    = {Pouli, Tania and Artusi, Alessandro and Banterle, Francesco and Aky{\"u}z, Ahmet O. and Seidel, Hans-Peter and Reinhard, Erik},
  language  = {eng},
  publisher = {IS\&T},
  year      = {2013},
  date      = {2013},
  booktitle = {21st Color and Imaging Conference Final Program and Proceedings (CIC 2013)},
  pages     = {215--220},
  address   = {Albuquerque, NM, USA},
}
Endnote
%0 Conference Proceedings %A Pouli, Tania %A Artusi, Alessandro %A Banterle, Francesco %A Aky&#252;z, Ahmet O. %A Seidel, Hans-Peter %A Reinhard, Erik %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Color Correction for Tone Reproduction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-A2C6-E %D 2013 %B 21st Color and Imaging Conference %Z date of event: 2013-11-04 - 2013-11-08 %C Albuquerque, NM, USA %B 21st Color and Imaging Conference Final Program and Proceedings %P 215 - 220 %I IS&T
Nguyen, C., Scherzer, D., Ritschel, T., and Seidel, H.-P. 2013. Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization. Computer Graphics Forum (Proc. EUROGRAPHICS 2013)32, 2.
Abstract
This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that “explains” the surface light field manipulation. Instead of classic $\mathcal{L}_2$ fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse $\mathcal{L}_0$ change of shading parameters instead. Consequently, our system does not require “diffuse” or “glossiness” brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU.
Export
BibTeX
@article{Nguyen2013,
  title     = {Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization},
  author    = {Nguyen, Chuong and Scherzer, Daniel and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.12038},
  localid   = {Local-ID: 4CD3871C310E2855C1257B010065285A-Nguyen2013},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2013},
  date      = {2013},
  abstract  = {This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that ``explains'' the surface light field manipulation. Instead of classic $\mathcal{L}_2$ fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse $\mathcal{L}_0$ change of shading parameters instead. Consequently, our system does not require ``diffuse'' or ``glossiness'' brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {32},
  number    = {2},
  pages     = {185--194},
  booktitle = {EUROGRAPHICS 2013},
  editor    = {Poulin, P. and Navazo, I.},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Scherzer, Daniel %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3810-8 %R 10.1111/cgf.12038 %F OTHER: Local-ID: 4CD3871C310E2855C1257B010065285A-Nguyen2013 %7 2013-05-06 %D 2013 %X This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that &#8220;explains&#8221; the surface light field manipulation. Instead of classic $\mathcal{L}_2$ fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse $\mathcal{L}_0$ change of shading parameters instead. Consequently, our system does not require &#8220;diffuse&#8221; or &#8220;glossiness&#8221; brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU.
%J Computer Graphics Forum %V 32 %N 2 %& 185 %P 185 - 194 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EUROGRAPHICS 2013 EG 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th - 10th, 2013
Milliez, A., Wand, M., Cani, M.-P., and Seidel, H.-P. 2013. Mutable Elastic Models for Sculpting Structured Shapes. Computer Graphics Forum (Proc. EUROGRAPHICS 2013)32, 2.
Export
BibTeX
@article{Milliez2013,
  title     = {Mutable Elastic Models for Sculpting Structured Shapes},
  author    = {Milliez, Antoine and Wand, Michael and Cani, Marie-Paule and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/cgf.12022},
  localid   = {Local-ID: 54D78E6C8E10AB4CC1257C130048CEEA-Milliez2013},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2013},
  date      = {2013},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {32},
  number    = {2},
  pages     = {21--30},
  booktitle = {EUROGRAPHICS 2013},
  editor    = {Poulin, Pierre and Navazo, Isabel},
}
Endnote
%0 Journal Article %A Milliez, Antoine %A Wand, Michael %A Cani, Marie-Paule %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Mutable Elastic Models for Sculpting Structured Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3CCE-2 %R 10.1111/cgf.12022 %F OTHER: Local-ID: 54D78E6C8E10AB4CC1257C130048CEEA-Milliez2013 %7 2013-05-06 %D 2013 %J Computer Graphics Forum %V 32 %N 2 %& 21 %P 21 - 30 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EUROGRAPHICS 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th - 10th, 2013 EG 2013
Manakov, A., Restrepo, J.F., Klehm, O., et al. 2013. A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2013)32, 4.
Abstract
We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design.
Export
BibTeX
@article{Manakov2013,
  title     = {A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging},
  author    = {Manakov, Alkhazur and Restrepo, John F. and Klehm, Oliver and Heged{\"u}s, Ramon and Eisemann, Elmar and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2461912.2461937},
  localid   = {Local-ID: 2AF094BD6240B2D2C1257C13003B6CBD-Manakov2013},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2013},
  date      = {2013},
  abstract  = {We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {32},
  number    = {4},
  pages     = {1--14},
  eid       = {47},
  booktitle = {Proceedings of ACM SIGGRAPH 2013},
}
Endnote
%0 Journal Article %A Manakov, Alkhazur %A Restrepo, John F. %A Klehm, Oliver %A Heged&#252;s, Ramon %A Eisemann, Elmar %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3CDD-F %R 10.1145/2461912.2461937 %F OTHER: Local-ID: 2AF094BD6240B2D2C1257C13003B6CBD-Manakov2013 %7 2013 %D 2013 %X We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design. %J ACM Transactions on Graphics %V 32 %N 4 %& 1 %P 1 - 14 %Z sequence number: 47 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2013 %O ACM SIGGRAPH 2013 Anaheim, California, 21 - 25 July 2013
Liu, Y., Gall, J., Stoll, C., Dai, Q., Seidel, H.-P., and Theobalt, C. 2013. Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence35, 11.
Export
BibTeX
@article{LiuPami2013,
  title     = {Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation},
  author    = {Liu, Yebin and Gall, J{\"u}rgen and Stoll, Carsten and Dai, Qionghai and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {0162-8828},
  doi       = {10.1109/TPAMI.2013.47},
  localid   = {Local-ID: 3A056CE707FBCCD9C1257C6000533A6F-LiuPami2013},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2013},
  date      = {2013},
  journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume    = {35},
  number    = {11},
  pages     = {2720--2735},
}
Endnote
%0 Journal Article %A Liu, Yebin %A Gall, J&#252;rgen %A Stoll, Carsten %A Dai, Qionghai %A Seidel, Hans-Peter %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3937-8 %R 10.1109/TPAMI.2013.47 %F OTHER: Local-ID: 3A056CE707FBCCD9C1257C6000533A6F-LiuPami2013 %7 2013-02-21 %D 2013 %J IEEE Transactions on Pattern Analysis and Machine Intelligence %O IEEE Trans. Pattern Anal. Mach. Intell. %V 35 %N 11 %& 2720 %P 2720 - 2735 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Lee, S., Sips, M., and Seidel, H.-P. 2013. Perceptually Driven Visibility Optimization for Categorical Data Visualization. IEEE Transactions on Visualization and Computer Graphics19, 10.
Export
BibTeX
@article{Seidel2013,
  title     = {Perceptually Driven Visibility Optimization for Categorical Data Visualization},
  author    = {Lee, Sungkil and Sips, Mike and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1077-2626},
  doi       = {10.1109/TVCG.2012.315},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA},
  year      = {2013},
  date      = {2013},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {19},
  number    = {10},
  pages     = {1746--1757},
}
Endnote
%0 Journal Article %A Lee, Sungkil %A Sips, Mike %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually Driven Visibility Optimization for Categorical Data Visualization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0018-A9FB-0 %R 10.1109/TVCG.2012.315 %7 2012-11-30 %D 2013 %J IEEE Transactions on Visualization and Computer Graphics %V 19 %N 10 %& 1746 %P 1746 - 1757 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Kurz, C., Ritschel, T., Eisemann, E., Thormählen, T., and Seidel, H.-P. 2013. Generating Realistic Camera Shake for Virtual Scenes. Journal of Virtual Reality and Broadcasting10, 7.
Abstract
When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible.
Export
BibTeX
@article{Kurz2013,
  title     = {Generating Realistic Camera Shake for Virtual Scenes},
  author    = {Kurz, Christian and Ritschel, Tobias and Eisemann, Elmar and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1860-2037},
  url       = {urn:nbn:de:0009-6-38335},
  localid   = {Local-ID: 43DB142DAE2CF97AC1257C69005B2D67-Kurz2013},
  publisher = {Hochschulbibliothekszentrum des Landes Nordrhein-Westfalen, K{\"o}ln (HBZ)},
  address   = {K{\"o}ln},
  year      = {2013},
  abstract  = {When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible.},
  journal   = {Journal of Virtual Reality and Broadcasting},
  volume    = {10},
  number    = {7},
  pages     = {1--13},
}
Endnote
%0 Journal Article %A Kurz, Christian %A Ritschel, Tobias %A Eisemann, Elmar %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generating Realistic Camera Shake for Virtual Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-196D-1 %F OTHER: Local-ID: 43DB142DAE2CF97AC1257C69005B2D67-Kurz2013 %U urn:nbn:de:0009-6-38335 %7 2013 %D 2013 %X When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible. 
%J Journal of Virtual Reality and Broadcasting %V 10 %N 7 %& 1 %P 1 - 13 %I Hochschulbibliothekszentrum des Landes Nordrhein-Westfalen, K&#246;ln (HBZ) %C K&#246;ln %@ false %U http://www.jvrb.org/past-issues/10.2013/3833/1020137.pdf
Klehm, O., Ihrke, I., Seidel, H.-P., and Eisemann, E. 2013. Volume Stylizer: Tomography-based Volume Painting. Proceedings I3D 2013, ACM.
Abstract
Volumetric phenomena are an integral part of standard rendering, yet, no suitable tools to edit characteristic properties are available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions to influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from this perspective. Via an analysis of the volumetric rendering equation, we can show how to link this problem to tomographic reconstruction.
Export
BibTeX
@inproceedings{i3dKlehm2013,
  TITLE     = {Volume Stylizer: {Tomography-based} Volume Painting},
  AUTHOR    = {Klehm, Oliver and Ihrke, Ivo and Seidel, Hans-Peter and Eisemann, Elmar},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4503-1956-0},
  DOI       = {10.1145/2448196.2448222},
  LOCALID   = {Local-ID: A0B42A95204F2B1EC1257B03005B313A-i3dKlehm2013},
  PUBLISHER = {ACM},
  YEAR      = {2013},
  DATE      = {2013},
  ABSTRACT  = {Volumetric phenomena are an integral part of standard rendering, yet, no suitable tools to edit characteristic properties are available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions to influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from this perspective. Via an analysis of the volumetric rendering equation, we can show how to link this problem to tomographic reconstruction.},
  BOOKTITLE = {Proceedings I3D 2013},
  EDITOR    = {Olano, Marc and Otaduy, Miguel A. and Meenakshisundaram, Gopi and Yoon, Sung-Eui and Spencer, Stephen N.},
  PAGES     = {161--168},
  ADDRESS   = {Orlando, FL, USA},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Ihrke, Ivo %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Volume Stylizer: Tomography-based Volume Painting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3817-9 %R 10.1145/2448196.2448222 %F OTHER: Local-ID: A0B42A95204F2B1EC1257B03005B313A-i3dKlehm2013 %D 2013 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2013-03-21 - 2013-03-23 %C Orlando, FL, USA %X Volumetric phenomena are an integral part of standard rendering, yet, no suitable tools to edit characteristic properties are available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions to influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from this perspective. Via an analysis of the volumetric rendering equation, we can show how to link this problem to tomographic reconstruction. %B Proceedings I3D 2013 %E Olano, Marc; Otaduy, Miguel A.; Meenakshisundaram, Gopi; Yoon, Sung-Eui; Spencer, Stephen N. %P 161 - 168 %I ACM %@ 978-1-4503-1956-0
Kerber, J., Bokeloh, M., Wand, M., and Seidel, H.-P. 2013. Scalable Symmetry Detection for Urban Scenes. Computer Graphics Forum 32, 1.
Export
BibTeX
@article{Kerber2013_1,
  TITLE     = {Scalable Symmetry Detection for Urban Scenes},
  AUTHOR    = {Kerber, Jens and Bokeloh, Martin and Wand, Michael and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2012.03226.x},
  LOCALID   = {Local-ID: FC00BBDD131C5BC2C1257AED003BCDC9-Kerber2013_1},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2013},
  DATE      = {2013},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {32},
  NUMBER    = {1},
  PAGES     = {3--15},
}
Endnote
%0 Journal Article %A Kerber, Jens %A Bokeloh, Martin %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Scalable Symmetry Detection for Urban Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-17F3-0 %R 10.1111/j.1467-8659.2012.03226.x %F OTHER: Local-ID: FC00BBDD131C5BC2C1257AED003BCDC9-Kerber2013_1 %7 2012-10-09 %D 2013 %J Computer Graphics Forum %V 32 %N 1 %& 3 %P 3 - 15 %I Wiley-Blackwell %C Oxford, UK
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2013. Optimizing Disparity for Motion in Depth. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2013) 32, 4.
Abstract
Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.
Export
BibTeX
@article{Kellnhofer2013,
  TITLE     = {Optimizing Disparity for Motion in Depth},
  AUTHOR    = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12160},
  LOCALID   = {Local-ID: AAA9E8B7CDD4AD1FC1257BFD004E5D30-Kellnhofer2013},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2013},
  DATE      = {2013},
  ABSTRACT  = {Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.},
  JOURNAL   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME    = {32},
  NUMBER    = {4},
  PAGES     = {143--152},
  BOOKTITLE = {Eurographics Symposium on Rendering 2013},
  EDITOR    = {Holzschuch, N. and Rusinkiewicz, S.},
}
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimizing Disparity for Motion in Depth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3D13-B %R 10.1111/cgf.12160 %F OTHER: Local-ID: AAA9E8B7CDD4AD1FC1257BFD004E5D30-Kellnhofer2013 %7 2013 %D 2013 %X Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion. %J Computer Graphics Forum %V 32 %N 4 %& 143 %P 143 - 152 %I Wiley-Blackwell %C Oxford, UK %@ false %B Eurographics Symposium on Rendering 2013 %O EGSR 2013 Eurographics Symposium on Rendering 2013 Zaragoza, 19 - 21 June, 2013
Helten, T., Müller, M., Seidel, H.-P., and Theobalt, C. 2013a. Real-time Body Tracking with One Depth Camera and Inertial Sensors. ICCV 2013, IEEE International Conference on Computer Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{HeltenICCV13,
  TITLE     = {Real-time Body Tracking with One Depth Camera and Inertial Sensors},
  AUTHOR    = {Helten, Thomas and M{\"u}ller, Meinard and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {1550-5499},
  ISBN      = {978-1-4799-2839-2},
  DOI       = {10.1109/ICCV.2013.141},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2013},
  DATE      = {2013},
  BOOKTITLE = {ICCV 2013, IEEE International Conference on Computer Vision},
  PAGES     = {1105--1112},
  ADDRESS   = {Sydney, Australia},
}
Endnote
%0 Conference Proceedings %A Helten, Thomas %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Body Tracking with One Depth Camera and Inertial Sensors : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-3416-E %R 10.1109/ICCV.2013.141 %D 2013 %B IEEE International Conference on Computer Vision %Z date of event: 2013-12-01 - 2013-12-08 %C Sydney, Australia %B ICCV 2013 %P 1105 - 1112 %I IEEE Computer Society %@ false
Helten, T., Baak, A., Bharaj, G., Müller, M., Seidel, H.-P., and Theobalt, C. 2013b. Personalization and Evaluation of a Real-time Depth-based Full Body Tracker. 3DV 2013, International Conference on 3D Vision, IEEE Computer Society.
Abstract
Reconstructing a three-dimensional representation of human motion in real-time constitutes an important research topic with applications in sports sciences, human-computer-interaction, and the movie industry. In this paper, we contribute with a robust algorithm for estimating a personalized human body model from just two sequentially captured depth images that is more accurate and runs an order of magnitude faster than the current state-of-the-art procedure. Then, we employ the estimated body model to track the pose in real-time from a stream of depth images using a tracking algorithm that combines local pose optimization and a stabilizing database look-up. Together, this enables accurate pose tracking that is more accurate than previous approaches. As a further contribution, we evaluate and compare our algorithm to previous work on a comprehensive benchmark dataset containing more than 15 minutes of challenging motions. This dataset comprises calibrated marker-based motion capture data, depth data, as well as ground truth tracking results and is publicly available for research purposes.
Export
BibTeX
@inproceedings{HeltenBBMST13_PersonalizedDepthTracker_3DV,
  TITLE     = {Personalization and Evaluation of a Real-time Depth-based Full Body Tracker},
  AUTHOR    = {Helten, Thomas and Baak, Andreas and Bharaj, Gaurav and M{\"u}ller, Meinard and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-0-7695-5067-1},
  DOI       = {10.1109/3DV.2013.44},
  LOCALID   = {Local-ID: E6F32B3A0B6E280EC1257C580033927E-HeltenBBMST13_PersonalizedDepthTracker_3DV},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2013},
  DATE      = {2013},
  ABSTRACT  = {Reconstructing a three-dimensional representation of human motion in real-time constitutes an important research topic with applications in sports sciences, human-computer-interaction, and the movie industry. In this paper, we contribute with a robust algorithm for estimating a personalized human body model from just two sequentially captured depth images that is more accurate and runs an order of magnitude faster than the current state-of-the-art procedure. Then, we employ the estimated body model to track the pose in real-time from a stream of depth images using a tracking algorithm that combines local pose optimization and a stabilizing database look-up. Together, this enables accurate pose tracking that is more accurate than previous approaches. As a further contribution, we evaluate and compare our algorithm to previous work on a comprehensive benchmark dataset containing more than 15 minutes of challenging motions. This dataset comprises calibrated marker-based motion capture data, depth data, as well as ground truth tracking results and is publicly available for research purposes.},
  BOOKTITLE = {3DV 2013, International Conference on 3D Vision},
  EDITOR    = {Guerrero, Juan E.},
  PAGES     = {279--286},
  ADDRESS   = {Seattle, WA, USA},
}
Endnote
%0 Conference Proceedings %A Helten, Thomas %A Baak, Andreas %A Bharaj, Gaurav %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Personalization and Evaluation of a Real-time Depth-based Full Body Tracker : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3998-0 %R 10.1109/3DV.2013.44 %F OTHER: Local-ID: E6F32B3A0B6E280EC1257C580033927E-HeltenBBMST13_PersonalizedDepthTracker_3DV %D 2013 %B International Conference on 3D Vision %Z date of event: 2013-06-29 - 2013-07-01 %C Seattle, WA, USA %X Reconstructing a three-dimensional representation of human motion in real-time constitutes an important research topic with applications in sports sciences, human-computer-interaction, and the movie industry. In this paper, we contribute with a robust algorithm for estimating a personalized human body model from just two sequentially captured depth images that is more accurate and runs an order of magnitude faster than the current state-of-the-art procedure. Then, we employ the estimated body model to track the pose in real-time from a stream of depth images using a tracking algorithm that combines local pose optimization and a stabilizing database look-up. Together, this enables accurate pose tracking that is more accurate than previous approaches. As a further contribution, we evaluate and compare our algorithm to previous work on a comprehensive benchmark dataset containing more than 15 minutes of challenging motions. This dataset comprises calibrated marker-based motion capture data, depth data, as well as ground truth tracking results and is publicly available for research purposes. %B 3DV 2013 %E Guerrero, Juan E. 
%P 279 - 286 %I IEEE Computer Society %@ 978-0-7695-5067-1
Elek, O., Ritschel, T., and Seidel, H.-P. 2013. Real-time Screen-space Scattering in Homogeneous Environments. IEEE Computer Graphics and Applications 33, 3.
Abstract
This work presents an approximate algorithm for computing light scattering within homogeneous participating environments in screen space. Instead of simulating the full global illumination in participating media we model the scattering process by a physically-based point spread function. To do this efficiently we apply the point spread function by performing a discrete hierarchical convolution in a texture MIP map. We solve the main problem of this approach, illumination leaking, by designing a custom anisotropic incremental filter. Our solution is fully parallel, runs in hundreds of frames-per-second for usual screen resolutions and is directly applicable in most existing 2D or 3D rendering architectures.
Export
BibTeX
@article{Elek2013a,
  TITLE     = {Real-time Screen-space Scattering in Homogeneous Environments},
  AUTHOR    = {Elek, Oskar and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0272-1716},
  DOI       = {10.1109/MCG.2013.17},
  LOCALID   = {Local-ID: 2CEB4CE37F3F3733C1257B030043502E-Elek2013a},
  PUBLISHER = {IEEE Computer Society},
  ADDRESS   = {Los Alamitos, CA},
  YEAR      = {2013},
  DATE      = {2013},
  ABSTRACT  = {This work presents an approximate algorithm for computing light scattering within homogeneous participating environments in screen space. Instead of simulating the full global illumination in participating media we model the scattering process by a physically-based point spread function. To do this efficiently we apply the point spread function by performing a discrete hierarchical convolution in a texture MIP map. We solve the main problem of this approach, illumination leaking, by designing a custom anisotropic incremental filter. Our solution is fully parallel, runs in hundreds of frames-per-second for usual screen resolutions and is directly applicable in most existing 2D or 3D rendering architectures.},
  JOURNAL   = {IEEE Computer Graphics and Applications},
  VOLUME    = {33},
  NUMBER    = {3},
  PAGES     = {53--65},
}
Endnote
%0 Journal Article %A Elek, Oskar %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Screen-space Scattering in Homogeneous Environments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3812-4 %R 10.1109/MCG.2013.17 %F OTHER: Local-ID: 2CEB4CE37F3F3733C1257B030043502E-Elek2013a %7 2013 %D 2013 %X This work presents an approximate algorithm for computing light scattering within homogeneous participating environments in screen space. Instead of simulating the full global illumination in participating media we model the scattering process by a physically-based point spread function. To do this efficiently we apply the point spread function by performing a discrete hierarchical convolution in a texture MIP map. We solve the main problem of this approach, illumination leaking, by designing a custom anisotropic incremental filter. Our solution is fully parallel, runs in hundreds of frames-per-second for usual screen resolutions and is directly applicable in most existing 2D or 3D rendering architectures. %J IEEE Computer Graphics and Applications %V 33 %N 3 %& 53 %P 53 - 65 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Čadík, M., Herzog, R., Mantiuk, R., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2013. Learning to Predict Localized Distortions in Rendered Images. Computer Graphics Forum (Proc. Pacific Graphics 2013) 32, 7.
Export
BibTeX
@article{CadikPG2013,
  TITLE     = {Learning to Predict Localized Distortions in Rendered Images},
  AUTHOR    = {{\v C}ad{\'i}k, Martin and Herzog, Robert and Mantiuk, Rafa{\l} and Mantiuk, Rados{\l}aw and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/cgf.12248},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford},
  YEAR      = {2013},
  DATE      = {2013},
  JOURNAL   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME    = {32},
  NUMBER    = {7},
  PAGES     = {401--410},
  BOOKTITLE = {21st Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2013)},
}
Endnote
%0 Journal Article %A &#268;ad&#237;k, Martin %A Herzog, Robert %A Mantiuk, Rafa&#322; %A Mantiuk, Rados&#322;aw %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning to Predict Localized Distortions in Rendered Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5DF9-E %R 10.1111/cgf.12248 %7 2014-11-25 %D 2013 %J Computer Graphics Forum %V 32 %N 7 %& 401 %P 401 - 410 %I Wiley-Blackwell %C Oxford %B 21st Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2013 PG 2013 October 7-9, 2013, Singapore
2012
Yang, Y., Günther, D., Wuhrer, S., et al. 2012. Correspondences of Persistent Feature Points on Near-isometric Surfaces. Computer Vision - ECCV 2012, Springer.
Export
BibTeX
@inproceedings{yang12,
  TITLE     = {Correspondences of Persistent Feature Points on Near-isometric Surfaces},
  AUTHOR    = {Yang, Ying and G{\"u}nther, David and Wuhrer, Stefanie and Brunton, Alan and Ivrissimtzis, Ioannis and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISSN      = {0302-9743; 1611-3349},
  ISBN      = {978-3-642-33862-5; 978-3-642-33863-2},
  DOI       = {10.1007/978-3-642-33863-2_11},
  LOCALID   = {Local-ID: 2862002BA9203D8AC1257AD80048C3AC-yang12},
  PUBLISHER = {Springer},
  YEAR      = {2012},
  DATE      = {2012},
  BOOKTITLE = {Computer Vision -- ECCV 2012},
  EDITOR    = {Fusiello, Andrea and Murino, Vittorio and Cucchiara, Rita},
  PAGES     = {102--112},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {7583},
  ADDRESS   = {Florence, Italy},
}
Endnote
%0 Conference Proceedings %A Yang, Ying %A G&#252;nther, David %A Wuhrer, Stefanie %A Brunton, Alan %A Ivrissimtzis, Ioannis %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Correspondences of Persistent Feature Points on Near-isometric Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0DFB-2 %F OTHER: Local-ID: 2862002BA9203D8AC1257AD80048C3AC-yang12 %R 10.1007/978-3-642-33863-2_11 %D 2012 %B 12th European Conference on Computer Vision %Z date of event: 2012-10-07 - 2012-10-13 %C Florence, Italy %B Computer Vision - ECCV 2012 %E Fusiello, Andrea; Murino, Vittorio; Cucchiara, Rita %P 102 - 112 %I Springer %@ 978-3-642-33862-5 978-3-642-33863-2 %B Lecture Notes in Computer Science %N 7583 %@ false
Valgaerts, L., Wu, C., Bruhn, A., Seidel, H.-P., and Theobalt, C. 2012. Lightweight Binocular Facial Performance Capture under Uncontrolled Lighting. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012) 31, 6.
Abstract
Recent progress in passive facial performance capture has shown impressively detailed results on highly articulated motion. However, most methods rely on complex multi-camera set-ups, controlled lighting or fiducial markers. This prevents them from being used in general environments, outdoor scenes, during live action on a film set, or by freelance animators and everyday users who want to capture their digital selves. In this paper, we therefore propose a lightweight passive facial performance capture approach that is able to reconstruct high-quality dynamic facial geometry from only a single pair of stereo cameras. Our method succeeds under uncontrolled and time-varying lighting, and also in outdoor scenes. Our approach builds upon and extends recent image-based scene flow computation, lighting estimation and shading-based refinement algorithms. It integrates them into a pipeline that is specifically tailored towards facial performance reconstruction from challenging binocular footage under uncontrolled lighting. In an experimental evaluation, the strong capabilities of our method become explicit: We achieve detailed and spatio-temporally coherent results for expressive facial motion in both indoor and outdoor scenes -- even from low quality input images recorded with a hand-held consumer stereo camera. We believe that our approach is the first to capture facial performances of such high quality from a single stereo rig and we demonstrate that it brings facial performance capture out of the studio, into the wild, and within the reach of everybody.
Export
BibTeX
@article{Valgaerts2012,
  TITLE     = {Lightweight Binocular Facial Performance Capture under Uncontrolled Lighting},
  AUTHOR    = {Valgaerts, Levi and Wu, Chenglei and Bruhn, Andr{\'e}s and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2366145.2366206},
  LOCALID   = {Local-ID: C52293511BC90BA6C1257AD60059643C-Valgaerts2012},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2012},
  DATE      = {2012},
  ABSTRACT  = {Recent progress in passive facial performance capture has shown impressively detailed results on highly articulated motion. However, most methods rely on complex multi-camera set-ups, controlled lighting or fiducial markers. This prevents them from being used in general environments, outdoor scenes, during live action on a film set, or by freelance animators and everyday users who want to capture their digital selves. In this paper, we therefore propose a lightweight passive facial performance capture approach that is able to reconstruct high-quality dynamic facial geometry from only a single pair of stereo cameras. Our method succeeds under uncontrolled and time-varying lighting, and also in outdoor scenes. Our approach builds upon and extends recent image-based scene flow computation, lighting estimation and shading-based refinement algorithms. It integrates them into a pipeline that is specifically tailored towards facial performance reconstruction from challenging binocular footage under uncontrolled lighting. In an experimental evaluation, the strong capabilities of our method become explicit: We achieve detailed and spatio-temporally coherent results for expressive facial motion in both indoor and outdoor scenes -- even from low quality input images recorded with a hand-held consumer stereo camera. We believe that our approach is the first to capture facial performances of such high quality from a single stereo rig and we demonstrate that it brings facial performance capture out of the studio, into the wild, and within the reach of everybody.},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {31},
  NUMBER    = {6},
  PAGES     = {1--11},
  EID       = {187},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A Valgaerts, Levi %A Wu, Chenglei %A Bruhn, Andr&#233;s %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lightweight Binocular Facial Performance Capture under Uncontrolled Lighting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1626-6 %F OTHER: Local-ID: C52293511BC90BA6C1257AD60059643C-Valgaerts2012 %R 10.1145/2366145.2366206 %7 2012-11-01 %D 2012 %X Recent progress in passive facial performance capture has shown impressively detailed results on highly articulated motion. However, most methods rely on complex multi-camera set-ups, controlled lighting or fiducial markers. This prevents them from being used in general environments, outdoor scenes, during live action on a film set, or by freelance animators and everyday users who want to capture their digital selves. In this paper, we therefore propose a lightweight passive facial performance capture approach that is able to reconstruct high-quality dynamic facial geometry from only a single pair of stereo cameras. Our method succeeds under uncontrolled and time-varying lighting, and also in outdoor scenes. Our approach builds upon and extends recent image-based scene flow computation, lighting estimation and shading-based refinement algorithms. It integrates them into a pipeline that is specifically tailored towards facial performance reconstruction from challenging binocular footage under uncontrolled lighting. In an experimental evaluation, the strong capabilities of our method become explicit: We achieve detailed and spatio-temporally coherent results for expressive facial motion in both indoor and outdoor scenes -- even from low quality input images recorded with a hand-held consumer stereo camera. 
We believe that our approach is the first to capture facial performances of such high quality from a single stereo rig and we demonstrate that it brings facial performance capture out of the studio, into the wild, and within the reach of everybody. %J ACM Transactions on Graphics %V 31 %N 6 %& 1 %P 1 - 11 %Z sequence number: 187 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O ACM SIGGRAPH Asia 2012 Singapore, 28 November - 1 December
Tevs, A., Berner, A., Wand, M., et al. 2012. Animation Cartography - Intrinsic Reconstruction of Shape and Motion. ACM Transactions on Graphics 31, 2.
Abstract
In this paper, we consider the problem of animation reconstruction, i.e., the reconstruction of shape and motion of a deformable object from dynamic 3D scanner data, without using user provided template models. Unlike previous work that addressed this problem, we do not rely on locally convergent optimization but present a system that can handle fast motion, temporally disrupted input, and can correctly match objects that disappear for extended time periods in acquisition holes due to occlusion. Our approach is motivated by cartography: We first estimate a few landmark correspondences, which are extended to a dense matching and then used to reconstruct geometry and motion. We propose a number of algorithmic building blocks: a scheme for tracking landmarks in temporally coherent and incoherent data, an algorithm for robust estimation of dense correspondences under topological noise, and the integration of local matching techniques to refine the result. We describe and evaluate the individual components and propose a complete animation reconstruction pipeline based on these ideas. We evaluate our method on a number of standard benchmark data sets and show that we can obtain correct reconstructions in situations where other techniques fail completely or require additional user guidance such as a template model.
Export
BibTeX
@article{TevsTog2012,
  title     = {Animation Cartography -- Intrinsic Reconstruction of Shape and Motion},
  author    = {Tevs, Art and Berner, Alexander and Wand, Michael and Ihrke, Ivo and Bokeloh, Martin and Kerber, Jens and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2159516.2159517},
  localid   = {Local-ID: F830F7C449A5797BC12579CD0040CC2F-TevsTog2012},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  abstract  = {In this paper, we consider the problem of animation reconstruction, i.e., the reconstruction of shape and motion of a deformable object from dynamic 3D scanner data, without using user provided template models. Unlike previous work that addressed this problem, we do not rely on locally convergent optimization but present a system that can handle fast motion, temporally disrupted input, and can correctly match objects that disappear for extended time periods in acquisition holes due to occlusion. Our approach is motivated by cartography: We first estimate a few landmark correspondences, which are extended to a dense matching and then used to reconstruct geometry and motion. We propose a number of algorithmic building blocks: a scheme for tracking landmarks in temporally coherent and incoherent data, an algorithm for robust estimation of dense correspondences under topological noise, and the integration of local matching techniques to refine the result. We describe and evaluate the individual components and propose a complete animation reconstruction pipeline based on these ideas. We evaluate our method on a number of standard benchmark data sets and show that we can obtain correct reconstructions in situations where other techniques fail completely or require additional user guidance such as a template model.},
  journal   = {ACM Transactions on Graphics},
  volume    = {31},
  number    = {2},
  pages     = {1--15},
}
Endnote
%0 Journal Article %A Tevs, Art %A Berner, Alexander %A Wand, Michael %A Ihrke, Ivo %A Bokeloh, Martin %A Kerber, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Animation Cartography - Intrinsic Reconstruction of Shape and Motion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-161E-9 %F OTHER: Local-ID: F830F7C449A5797BC12579CD0040CC2F-TevsTog2012 %R 10.1145/2159516.2159517 %7 2012-04-01 %D 2012 %X In this paper, we consider the problem of animation reconstruction, i.e., the reconstruction of shape and motion of a deformable object from dynamic 3D scanner data, without using user provided template models. Unlike previous work that addressed this problem, we do not rely on locally convergent optimization but present a system that can handle fast motion, temporally disrupted input, and can correctly match objects that disappear for extended time periods in acquisition holes due to occlusion. Our approach is motivated by cartography: We first estimate a few landmark correspondences, which are extended to a dense matching and then used to reconstruct geometry and motion. We propose a number of algorithmic building blocks: a scheme for tracking landmarks in temporally coherent and incoherent data, an algorithm for robust estimation of dense correspondences under topological noise, and the integration of local matching techniques to refine the result. We describe and evaluate the individual components and propose a complete animation reconstruction pipeline based on these ideas. 
We evaluate our method on a number of standard benchmark data sets and show that we can obtain correct reconstructions in situations where other techniques fail completely or require additional user guidance such as a template model. %J ACM Transactions on Graphics %V 31 %N 2 %& 1 %P 1 - 15 %I ACM %C New York, NY %@ false
Templin, K., Didyk, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2012. Highlight Microdisparity for Improved Gloss Depiction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2012) 31, 4.
Abstract
Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the "highlight disparity". Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control.
Export
BibTeX
@article{Templin2012,
  title     = {Highlight Microdisparity for Improved Gloss Depiction},
  author    = {Templin, Krzysztof and Didyk, Piotr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2185520.2185588},
  localid   = {Local-ID: BDB99D9DBF6B290EC1257A4500551595-Templin2012},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  abstract  = {Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the ``highlight disparity''. Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {31},
  number    = {4},
  pages     = {1--5},
  eid       = {92},
  booktitle = {Proceedings of ACM SIGGRAPH 2012},
}
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Highlight Microdisparity for Improved Gloss Depiction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1617-8 %F OTHER: Local-ID: BDB99D9DBF6B290EC1257A4500551595-Templin2012 %R 10.1145/2185520.2185588 %7 2012-07-01 %D 2012 %X Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the highlight disparity''. Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control. %J ACM Transactions on Graphics %V 31 %N 4 %& 1 %P 1 - 5 %Z sequence number: 92 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2012 %O ACM SIGGRAPH 2012 Los Angeles, California, 5 - 9 August 2012
Stöter, T., Weinkauf, T., Seidel, H.-P., and Theisel, H. 2012. Implicit Integral Surfaces. VMV 2012 Vision, Modeling & Visualization, Eurographics Association.
Abstract
We present an implicit method for globally computing all four classic types of integral surfaces -- stream, path, streak, and time surfaces in 3D time-dependent vector fields. Our novel formulation is based on the representation of a time surface as implicit isosurface of a 3D scalar function advected by the flow field. The evolution of a time surface is then given as an isovolume in 4D space-time spanned by a series of advected scalar functions. Based on this, the other three integral surfaces are described as the intersection of two isovolumes derived from different scalar functions. Our method uses a dense flow integration to compute integral surfaces globally in the entire domain. This allows to change the seeding structure efficiently by simply defining new isovalues. We propose two rendering methods that exploit the implicit nature of our integral surfaces: 4D raycasting, and projection into a 3D volume. Furthermore, we present a marching cubes inspired surface extraction method to convert the implicit surface representation to an explicit triangle mesh. In contrast to previous approaches for implicit stream surfaces, our method allows for multiple voxel intersections, covers all regions of the flow field, and provides full control over the seeding line within the entire domain.
Export
BibTeX
@inproceedings{stoeter12,
  title     = {Implicit Integral Surfaces},
  author    = {St{\"o}ter, Torsten and Weinkauf, Tino and Seidel, Hans-Peter and Theisel, Holger},
  language  = {eng},
  isbn      = {978-3-905673-95-1},
  doi       = {10.2312/PE/VMV/VMV12/127-134},
  localid   = {Local-ID: CE2200B8F8C0B666C1257AD8003ECF58-stoeter12},
  publisher = {Eurographics Association},
  year      = {2012},
  date      = {2012},
  abstract  = {We present an implicit method for globally computing all four classic types of integral surfaces -- stream, path, streak, and time surfaces in 3D time-dependent vector fields. Our novel formulation is based on the representation of a time surface as implicit isosurface of a 3D scalar function advected by the flow field. The evolution of a time surface is then given as an isovolume in 4D space-time spanned by a series of advected scalar functions. Based on this, the other three integral surfaces are described as the intersection of two isovolumes derived from different scalar functions. Our method uses a dense flow integration to compute integral surfaces globally in the entire domain. This allows to change the seeding structure efficiently by simply defining new isovalues. We propose two rendering methods that exploit the implicit nature of our integral surfaces: 4D raycasting, and projection into a 3D volume. Furthermore, we present a marching cubes inspired surface extraction method to convert the implicit surface representation to an explicit triangle mesh. In contrast to previous approaches for implicit stream surfaces, our method allows for multiple voxel intersections, covers all regions of the flow field, and provides full control over the seeding line within the entire domain.},
  booktitle = {VMV 2012 Vision, Modeling \& Visualization},
  editor    = {G{\"o}sele, Michael and Grosch, Thorsten and Theisel, Holger and Toennies, Klaus and Preim, Bernhard},
  pages     = {127--134},
  address   = {Magdeburg, Germany},
}
Endnote
%0 Conference Proceedings %A St&#246;ter, Torsten %A Weinkauf, Tino %A Seidel, Hans-Peter %A Theisel, Holger %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Implicit Integral Surfaces : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0E9C-E %F OTHER: Local-ID: CE2200B8F8C0B666C1257AD8003ECF58-stoeter12 %R 10.2312/PE/VMV/VMV12/127-134 %D 2012 %B 17th Annual Workshop on Vision, Modeling and Visualization %Z date of event: 2012-11-12 - 2012-11-14 %C Magdeburg, Germany %X We present an implicit method for globally computing all four classic types of integral surfaces -- stream, path, streak, and time surfaces in 3D time-dependent vector fields. Our novel formulation is based on the representation of a time surface as implicit isosurface of a 3D scalar function advected by the flow field. The evolution of a time surface is then given as an isovolume in 4D space-time spanned by a series of advected scalar functions. Based on this, the other three integral surfaces are described as the intersection of two isovolumes derived from different scalar functions. Our method uses a dense flow integration to compute integral surfaces globally in the entire domain. This allows to change the seeding structure efficiently by simply defining new isovalues. We propose two rendering methods that exploit the implicit nature of our integral surfaces: 4D raycasting, and projection into a 3D volume. Furthermore, we present a marching cubes inspired surface extraction method to convert the implicit surface representation to an explicit triangle mesh. In contrast to previous approaches for implicit stream surfaces, our method allows for multiple voxel intersections, covers all regions of the flow field, and provides full control over the seeding line within the entire domain. 
%B VMV 2012 Vision, Modeling & Visualization %E G&#246;sele, Michael; Grosch, Thorsten; Theisel, Holger; Toennies, Klaus; Preim, Bernhard %P 127 - 134 %I Eurographics Association %@ 978-3-905673-95-1
Scherzer, D., Nguyen, C., Ritschel, T., and Seidel, H.-P. 2012. Pre-convolved Radiance Caching. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2012) 31, 4.
Export
BibTeX
@article{Scherzer2012PcRC,
  title     = {Pre-convolved Radiance Caching},
  author    = {Scherzer, Daniel and Nguyen, Chuong and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03134.x},
  localid   = {Local-ID: 983F2B7BB8314818C1257AD800380D3C-Scherzer2012PcRC},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  journal   = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  volume    = {31},
  number    = {4},
  pages     = {1391--1397},
  booktitle = {Eurographics Symposium on Rendering 2012},
  editor    = {Durand, Fredo and Gutierrez, Diego},
}
Endnote
%0 Journal Article %A Scherzer, Daniel %A Nguyen, Chuong %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Pre-convolved Radiance Caching : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1210-B %F OTHER: Local-ID: 983F2B7BB8314818C1257AD800380D3C-Scherzer2012PcRC %R 10.1111/j.1467-8659.2012.03134.x %7 2012-07-04 %D 2012 %J Computer Graphics Forum %V 31 %N 4 %& 1391 %P 1391 - 1397 %I Wiley-Blackwell %C Oxford, UK %@ false %B Eurographics Symposium on Rendering 2012 %O Paris, France, June 27th - 29th, 2012 SR 2012 EGSR 2012 Eurographics Symposium on Rendering 2012
Ritschel, T., Templin, K., Myszkowski, K., and Seidel, H.-P. 2012. Virtual Passepartouts. Non-Photorealistic Animation and Rendering (NPAR 2012), Eurographics Association.
Abstract
In traditional media, such as photography and painting, a cardboard sheet with a cutout (called "passepartout") is frequently placed on top of an image. One of its functions is to increase the depth impression via the "looking-through-a-window" metaphor. This paper shows how an improved 3D effect can be achieved by using a virtual passepartout: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts.
Export
BibTeX
@inproceedings{RitschelTMS2012,
  title     = {Virtual Passepartouts},
  author    = {Ritschel, Tobias and Templin, Krzysztof and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-3-905673-90-6},
  doi       = {10.2312/PE/NPAR/NPAR12/057-063},
  localid   = {Local-ID: AF8C88CA4485E3B1C1257A4500606C5D-RitschelTMS2012},
  publisher = {Eurographics Association},
  year      = {2012},
  date      = {2012},
  abstract  = {In traditional media, such as photography and painting, a cardboard sheet with a cutout (called \emph{passepartout}) is frequently placed on top of an image. One of its functions is to increase the depth impression via the ``looking-through-a-window'' metaphor. This paper shows how an improved 3D~effect can be achieved by using a \emph{virtual passepartout}: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts.},
  booktitle = {Non-Photorealistic Animation and Rendering (NPAR 2012)},
  editor    = {Asente, Paul and Grimm, Cindy},
  pages     = {57--63},
  address   = {Annecy, France},
}
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Templin, Krzysztof %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Virtual Passepartouts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13D3-B %R 10.2312/PE/NPAR/NPAR12/057-063 %F OTHER: Local-ID: AF8C88CA4485E3B1C1257A4500606C5D-RitschelTMS2012 %D 2012 %B Non-Photorealistic Animation and Rendering 2012 %Z date of event: 2012-06-04 - 2012-06-06 %C Annecy, France %X In traditional media, such as photography and painting, a cardboard sheet with a cutout (called \emphpassepartout}) is frequently placed on top of an image. One of its functions is to increase the depth impression via the looking-through-a-window'' metaphor. This paper shows how an improved 3D~effect can be achieved by using a \emph{virtual passepartout: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts. %B Non-Photorealistic Animation and Rendering %E Asente, Paul; Grimm, Cindy %P 57 - 63 %I Eurographics Association %@ 978-3-905673-90-6
Richardt, C., Stoll, C., Dodgson, N.A., Seidel, H.-P., and Theobalt, C. 2012. Coherent Spatiotemporal Filtering, Upsampling and Rendering of RGBZ Videos. Computer Graphics Forum (Proc. EUROGRAPHICS 2012) 31, 2.
Abstract
Sophisticated video processing effects require both image and geometry information. We explore the possibility to augment a video camera with a recent infrared time-of-flight depth camera, to capture high-resolution RGB and low-resolution, noisy depth at video frame rates. To turn such a setup into a practical RGBZ video camera, we develop efficient data filtering techniques that are tailored to the noise characteristics of IR depth cameras. We first remove typical artefacts in the RGBZ data and then apply an efficient spatiotemporal denoising and upsampling scheme. This allows us to record temporally coherent RGBZ videos at interactive frame rates and to use them to render a variety of effects in unprecedented quality. We show effects such as video relighting, geometry-based abstraction and stylisation, background segmentation and rendering in stereoscopic 3D.
Export
BibTeX
@article{Richardt2012,
  title     = {Coherent Spatiotemporal Filtering, Upsampling and Rendering of {RGBZ} Videos},
  author    = {Richardt, Christian and Stoll, Carsten and Dodgson, Neil A. and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03003.x},
  localid   = {Local-ID: 53A56E45860AC25EC1257AD70038C3F8-Richardt2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  abstract  = {Sophisticated video processing effects require both image and geometry information. We explore the possibility to augment a video camera with a recent infrared time-of-flight depth camera, to capture high-resolution RGB and low-resolution, noisy depth at video frame rates. To turn such a setup into a practical RGBZ video camera, we develop efficient data filtering techniques that are tailored to the noise characteristics of IR depth cameras. We first remove typical artefacts in the RGBZ data and then apply an efficient spatiotemporal denoising and upsampling scheme. This allows us to record temporally coherent RGBZ videos at interactive frame rates and to use them to render a variety of effects in unprecedented quality. We show effects such as video relighting, geometry-based abstraction and stylisation, background segmentation and rendering in stereoscopic 3D.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {31},
  number    = {2},
  pages     = {247--256},
  booktitle = {EUROGRAPHICS 2012},
}
Endnote
%0 Journal Article %A Richardt, Christian %A Stoll, Carsten %A Dodgson, Neil A. %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Coherent Spatiotemporal Filtering, Upsampling and Rendering of RGBZ Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-15DF-5 %F OTHER: Local-ID: 53A56E45860AC25EC1257AD70038C3F8-Richardt2012 %R 10.1111/j.1467-8659.2012.03003.x %7 2012-06-07 %D 2012 %X Sophisticated video processing effects require both image and geometry information. We explore the possibility to augment a video camera with a recent infrared time-of-flight depth camera, to capture high-resolution RGB and low-resolution, noisy depth at video frame rates. To turn such a setup into a practical RGBZ video camera, we develop efficient data filtering techniques that are tailored to the noise characteristics of IR depth cameras. We first remove typical artefacts in the RGBZ data and then apply an efficient spatiotemporal denoising and upsampling scheme. This allows us to record temporally coherent RGBZ videos at interactive frame rates and to use them to render a variety of effects in unprecedented quality. We show effects such as video relighting, geometry-based abstraction and stylisation, background segmentation and rendering in stereoscopic 3D. %J Computer Graphics Forum %V 31 %N 2 %& 247 %P 247 - 256 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EUROGRAPHICS 2012 EG 2012
Reuter, A., Seidel, H.-P., and Ihrke, I. 2012. BlurTags: Spatially Varying PSF Estimation with Out-of-Focus Patterns. 20th International Conference on Computer Graphics, Visualization and Computer Vision 2012 (WSCG 2012).
Export
BibTeX
@inproceedings{Reuter2012,
  title     = {{BlurTags}: {Spatially} Varying {PSF} Estimation with Out-of-Focus Patterns},
  author    = {Reuter, Alexander and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  isbn      = {978-80-86943-79-4},
  localid   = {Local-ID: 8C2D1525002E674EC1257AD7004B4A17-Reuter2012},
  year      = {2012},
  booktitle = {20th International Conference on Computer Graphics, Visualization and Computer Vision 2012 (WSCG 2012)},
  editor    = {Skala, Vaclav},
  pages     = {239--248},
  address   = {Plzen, Czech Republic},
}
Endnote
%0 Conference Proceedings %A Reuter, Alexander %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T BlurTags: Spatially Varying PSF Estimation with Out-of-Focus Patterns : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-12F2-D %F OTHER: Local-ID: 8C2D1525002E674EC1257AD7004B4A17-Reuter2012 %D 2012 %B 20th International Conference on Computer Graphics, Visualization and Computer Vision %Z date of event: 2012-06-26 - 2012-06-28 %C Plzen, Czech Republic %B 20th International Conference on Computer Graphics, Visualization and Computer Vision 2012 %E Skala, Vaclav %P 239 - 248 %@ 978-80-86943-79-4 %U http://wscg.zcu.cz/wscg2012/short/E47-full.pdf
Reininghaus, J., Günther, D., Hotz, I., Weinkauf, T., and Seidel, H.-P. 2012. Combinatorial Gradient Fields for 2D Images with Empirically Convergent Separatrices. http://arxiv.org/abs/1208.6523.
(arXiv: 1208.6523)
Abstract
This paper proposes an efficient probabilistic method that computes combinatorial gradient fields for two dimensional image data. In contrast to existing algorithms, this approach yields a geometric Morse-Smale complex that converges almost surely to its continuous counterpart when the image resolution is increased. This approach is motivated using basic ideas from probability theory and builds upon an algorithm from discrete Morse theory with a strong mathematical foundation. While a formal proof is only hinted at, we do provide a thorough numerical evaluation of our method and compare it to established algorithms.
Export
BibTeX
@online{reininghaus12a,
  title      = {Combinatorial Gradient Fields for {2D} Images with Empirically Convergent Separatrices},
  author     = {Reininghaus, Jan and G{\"u}nther, David and Hotz, Ingrid and Weinkauf, Tino and Seidel, Hans-Peter},
  language   = {eng},
  url        = {http://arxiv.org/abs/1208.6523},
  eprint     = {1208.6523},
  eprinttype = {arXiv},
  localid    = {Local-ID: 9717F6A3BD0231CAC1257AD800438371-reininghaus12a},
  publisher  = {Cornell University Library},
  address    = {Ithaca, NY},
  year       = {2012},
  abstract   = {This paper proposes an efficient probabilistic method that computes combinatorial gradient fields for two dimensional image data. In contrast to existing algorithms, this approach yields a geometric Morse-Smale complex that converges almost surely to its continuous counterpart when the image resolution is increased. This approach is motivated using basic ideas from probability theory and builds upon an algorithm from discrete Morse theory with a strong mathematical foundation. While a formal proof is only hinted at, we do provide a thorough numerical evaluation of our method and compare it to established algorithms.},
}
Endnote
%0 Report %A Reininghaus, Jan %A G&#252;nther, David %A Hotz, Ingrid %A Weinkauf, Tino %A Seidel, Hans-Peter %+ Institute for Science and Technology Austria Computer Graphics, MPI for Informatics, Max Planck Society Institute for Science and Technology Austria Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Combinatorial Gradient Fields for 2D Images with Empirically Convergent Separatrices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0E90-6 %U http://arxiv.org/abs/1208.6523 %F OTHER: Local-ID: 9717F6A3BD0231CAC1257AD800438371-reininghaus12a %I Cornell University Library %C Ithaca, NY %D 2012 %X This paper proposes an efficient probabilistic method that computes combinatorial gradient fields for two dimensional image data. In contrast to existing algorithms, this approach yields a geometric Morse-Smale complex that converges almost surely to its continuous counterpart when the image resolution is increased. This approach is motivated using basic ideas from probability theory and builds upon an algorithm from discrete Morse theory with a strong mathematical foundation. While a formal proof is only hinted at, we do provide a thorough numerical evaluation of our method and compare it to established algorithms. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Computational Geometry, cs.CG,Computer Science, Discrete Mathematics, cs.DM,
Reinert, B., Ritschel, T., and Seidel, H.-P. 2012. Homunculus Warping: Conveying Importance Using Self-intersection-free Non-homogeneous Mesh Deformation. Computer Graphics Forum (Proc. Pacific Graphics 2012) 31, 7.
Export
BibTeX
@article{Reinert2012,
  title     = {Homunculus Warping: Conveying Importance Using Self-intersection-free Non-homogeneous Mesh Deformation},
  author    = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03209.x},
  localid   = {Local-ID: 23F7E3C2BBBCDA78C1257B01005F5059-Reinert2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  journal   = {Computer Graphics Forum (Proc. Pacific Graphics)},
  volume    = {31},
  number    = {7},
  pages     = {2165--2171},
  booktitle = {Pacific Graphics 2012},
  editor    = {Bregler, Chris and Sander, Pedro and Wimmer, Michael},
}
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Homunculus Warping: Conveying Importance Using Self-intersection-free Non-homogeneous Mesh Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1542-1 %F OTHER: Local-ID: 23F7E3C2BBBCDA78C1257B01005F5059-Reinert2012 %R 10.1111/j.1467-8659.2012.03209.x %7 2012-10-02 %D 2012 %J Computer Graphics Forum %V 31 %N 7 %& 2165 %P 2165 - 2171 %I Wiley-Blackwell %C Oxford, UK %@ false %B Pacific Graphics 2012 %O The 20th Pacific Conference on Computer Graphics and Applications, September 12-14, 2012, Hong Kong Pacific Graphics 2012
Nguyen, C., Ritschel, T., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2012. 3D Material Style Transfer. Computer Graphics Forum (Proc. EUROGRAPHICS 2012) 31, 2.
Abstract
This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study.
Export
BibTeX
@article{Nguyen2012z,
  TITLE = {{3D} Material Style Transfer},
  AUTHOR = {Nguyen, Chuong and Ritschel, Tobias and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/j.1467-8659.2012.03022.x},
  LOCALID = {Local-ID: 3C190E59F48516AFC1257B0100644708-Nguyen2012},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS = {Oxford, UK},
  YEAR = {2012},
  DATE = {2012},
  ABSTRACT = {This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study.},
  JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  EDITOR = {Cignoni, Paolo and Ertl, Thomas},
  VOLUME = {31},
  NUMBER = {2},
  PAGES = {431--438},
  BOOKTITLE = {EUROGRAPHICS 2012},
}
Endnote
%0 Journal Article %A Nguyen, Chuong %A Ritschel, Tobias %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Material Style Transfer : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1537-C %F OTHER: Local-ID: 3C190E59F48516AFC1257B0100644708-Nguyen2012 %R 10.1111/j.1467-8659.2012.03022.x %7 2012-06-07 %D 2012 %X This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study. %J Computer Graphics Forum %V 31 %N 2 %& 431 %P 431 - 438 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 EG 2012 The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012
Klehm, O., Reshetouski, I., Eisemann, E., Seidel, H.-P., and Ihrke, I. 2012a. Interactive Geometry-aware Segmentation for the Decomposition of Kaleidoscopic Images. VMV 2012 Vision, Modeling and Visualization, Eurographics Association.
Abstract
Mirror systems have recently emerged as an alternative low-cost multi-view imaging solution. The use of these systems critically depends on the ability to compute the background of a multiply mirrored object. The images taken in such systems show a fractured, patterned view, making edge-guided segmentation difficult. Further, global illumination and light attenuation due to the mirrors make standard segmentation techniques fail. We therefore propose a system that allows a user to do the segmentation manually. We provide convenient tools that enable an interactive segmentation of kaleidoscopic images containing three-dimensional objects. Hereby, we explore suitable interaction and visualization schemes to guide the user. To achieve interactivity, we employ the GPU in all stages of the application, such as 2D/3D rendering as well as segmentation.
Export
BibTeX
@inproceedings{KST_VMV_Klehm2012,
  TITLE = {Interactive Geometry-aware Segmentation for the Decomposition of Kaleidoscopic Images},
  AUTHOR = {Klehm, Oliver and Reshetouski, Ilya and Eisemann, Elmar and Seidel, Hans-Peter and Ihrke, Ivo},
  LANGUAGE = {eng},
  ISBN = {978-3-905673-95-1},
  DOI = {10.2312/PE/VMV/VMV12/009-014},
  LOCALID = {Local-ID: 728254C7FB47D385C1257AAA003C9878-KST_VMV_Klehm2012},
  PUBLISHER = {Eurographics Association},
  YEAR = {2012},
  DATE = {2012},
  ABSTRACT = {Mirror systems have recently emerged as an alternative low-cost multi-view imaging solution. The use of these systems critically depends on the ability to compute the background of a multiply mirrored object. The images taken in such systems show a fractured, patterned view, making edge-guided segmentation difficult. Further, global illumination and light attenuation due to the mirrors make standard segmentation techniques fail. We therefore propose a system that allows a user to do the segmentation manually. We provide convenient tools that enable an interactive segmentation of kaleidoscopic images containing three-dimensional objects. Hereby, we explore suitable interaction and visualization schemes to guide the user. To achieve interactivity, we employ the GPU in all stages of the application, such as 2D/3D rendering as well as segmentation.},
  BOOKTITLE = {VMV 2012 Vision, Modeling and Visualization},
  EDITOR = {G{\"o}sele, Michael and Grosch, Thorsten and Preim, Bernhard and Theisel, Holger and T{\"o}nnies, Klaus-Dietz},
  PAGES = {9--14},
  ADDRESS = {Magdeburg, Germany},
}
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Reshetouski, Ilya %A Eisemanm, Elmar %A Seidel, Hans-Peter %A Ihrke, Ivo %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Geometry-aware Segmentation for the Decomposition of Kaleidoscopic Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13A6-2 %R 10.2312/PE/VMV/VMV12/009-014 %F OTHER: Local-ID: 728254C7FB47D385C1257AAA003C9878-KST_VMV_Klehm2012 %D 2012 %B 17th Annual Workshop on Vision, Modeling and Visualization %Z date of event: 2012-11-12 - 2012-11-14 %C Magdeburg, Germany %X Mirror systems have recently emerged as an alternative low-cost multi-view imaging solution. The use of these systems critically depends on the ability to compute the background of a multiply mirrored object. The images taken in such systems show a fractured, patterned view, making edge-guided segmentation difficult. Further, global illumination and light attenuation due to the mirrors make standard segmentation techniques fail. We therefore propose a system that allows a user to do the segmentation manually. We provide convenient tools that enable an interactive segmentation of kaleidoscopic images containing three-dimensional objects. Hereby, we explore suitable interaction and visualization schemes to guide the user. To achieve interactivity, we employ the GPU in all stages of the application, such as 2D/3D rendering as well as segmentation. %B VMV 2012 Vision, Modeling and Visualization %E G&#246;sele, Michael; Grosch, Thorsten; Preim, Bernhard; Theisel, Holger; T&#246;nnies, Klaus-Dietz %P 9 - 14 %I Eurographics Association %@ 978-3-905673-95-1
Klehm, O., Ritschel, T., Eisemann, E., and Seidel, H.-P. 2012b. Screen-space Bent Cones: A Practical Approach. In: GPU Pro 3. CRC Press, New York, NY.
Abstract
Ambient occlusion (AO) is a popular technique for visually improving both real-time as well as offline rendering. It decouples occlusion and shading providing a gain in efficiency. This results in an average occlusion that modulates the surface shading. However, this also reduces realism due to the lack of directional information. Bent normals were proposed as an amelioration that addresses this issue for offline rendering. Here, we describe how to compute bent normals as a cheap by-product of screen-space ambient occlusion (SSAO). Bent cones extend bent normals to further improve realism. These extensions combine the speed and simplicity of AO with physically more plausible lighting.
Export
BibTeX
@incollection{SSBC_GP3_Klehm2012,
  title     = {Screen-space Bent Cones: A Practical Approach},
  author    = {Klehm, Oliver and Ritschel, Tobias and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {9781439887820},
  localid   = {Local-ID: 6A3293309F071AB4C12579E900378A1E-SSBC_GP3_Klehm2012},
  publisher = {CRC Press},
  address   = {New York, NY},
  year      = {2012},
  date      = {2012},
  abstract  = {Ambient occlusion (AO) is a popular technique for visually improving both real-time as well as offline rendering. It decouples occlusion and shading providing a gain in efficiency. This results in an average occlusion that modulates the surface shading. However, this also reduces realism due to the lack of directional information. Bent normals were proposed as an amelioration that addresses this issue for offline rendering. Here, we describe how to compute bent normals as a cheap by-product of screen-space ambient occlusion (SSAO). Bent cones extend bent normals to further improve realism. These extensions combine the speed and simplicity of AO with physically more plausible lighting.},
  booktitle = {GPU Pro 3},
  editor    = {Engel, Wolfgang},
  pages     = {191--207},
}
Endnote
%0 Book Section %A Klehm, Oliver %A Ritschel, Tobias %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Screen-space Bent Cones: A Practical Approach : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1476-6 %F OTHER: Local-ID: 6A3293309F071AB4C12579E900378A1E-SSBC_GP3_Klehm2012 %D 2012 %X Ambient occlusion (AO) is a popular technique for visually improving both real-time as well as offline rendering. It decouples occlusion and shading providing a gain in efficiency. This results in an average occlusion that modulates the surface shading. However, this also reduces realism due to the lack of directional information. Bent normals were proposed as an amelioration that addresses this issue for offline rendering. Here, we describe how to compute bent normals as a cheap by-product of screen-space ambient occlusion (SSAO). Bent cones extend bent normals to further improve realism. These extensions combine the speed and simplicity of AO with physically more plausible lighting. %B GPU Pro 3 %E Engel, Wolfgang %P 191 - 207 %I CRC Press %C New York, NY %@ 9781439887820
Kerber, J., Wang, M., Chang, J., Zhang, J.J., Belyaev, A., and Seidel, H.-P. 2012a. Computer Assisted Relief Generation - A Survey. Computer Graphics Forum 31, 8.
Abstract
In this paper we present an overview of the achievements accomplished to date in the field of computer aided relief generation. We delineate the problem, classify different solutions, analyze similarities, investigate the development and review the approaches according to their particular relative strengths and weaknesses. Moreover, we describe remaining challenges and point out prospective extensions. In consequence this survey is likewise addressed to researchers and artists through providing valuable insights into the theory behind the different concepts in this field and augmenting the options available among the methods presented with regard to practical application.
Export
BibTeX
@article{Kerber2012_2,
  title     = {Computer Assisted Relief Generation -- A Survey},
  author    = {Kerber, Jens and Wang, Meili and Chang, Jian and Zhang, Jian J. and Belyaev, Alexander and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03185.x},
  localid   = {Local-ID: 3A79D0CC0263F875C1257ADA00444D8D-Kerber2012_2},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  abstract  = {In this paper we present an overview of the achievements accomplished to date in the field of computer aided relief generation. We delineate the problem, classify different solutions, analyze similarities, investigate the development and review the approaches according to their particular relative strengths and weaknesses. Moreover, we describe remaining challenges and point out prospective extensions. In consequence this survey is likewise addressed to researchers and artists through providing valuable insights into the theory behind the different concepts in this field and augmenting the options available among the methods presented with regard to practical application.},
  journal   = {Computer Graphics Forum},
  volume    = {31},
  number    = {8},
  pages     = {2363--2377},
}
Endnote
%0 Journal Article %A Kerber, Jens %A Wang, Meili %A Chang, Jian %A Zhang, Jian J. %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Computer Assisted Relief Generation - A Survey : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-15E7-2 %R 10.1111/j.1467-8659.2012.03185.x %F OTHER: Local-ID: 3A79D0CC0263F875C1257ADA00444D8D-Kerber2012_2 %7 2012-08-14 %D 2012 %X In this paper we present an overview of the achievements accomplished to date in the field of computer aided relief generation. We delineate the problem, classify different solutions, analyze similarities, investigate the development and review the approaches according to their particular relative strengths and weaknesses. Moreover, we describe remaining challenges and point out prospective extensions. In consequence this survey is likewise addressed to researchers and artists through providing valuable insights into the theory behind the different concepts in this field and augmenting the options available among the methods presented with regard to practical application. %J Computer Graphics Forum %V 31 %N 8 %& 2363 %P 2363 - 2377 %I Wiley-Blackwell %C Oxford, UK %@ false
Kerber, J., Wand, M., Bokeloh, M., and Seidel, H.-P. 2012b. Symmetry Detection in Large Scale City Scans. Research Report MPI-I-2012-4-001.
Abstract
In this report we present a novel method for detecting partial symmetries in very large point clouds of 3D city scans. Unlike previous work, which was limited to data sets of a few hundred megabytes maximum, our method scales to very large scenes. We map the detection problem to a nearestneighbor search in a low-dimensional feature space, followed by a cascade of tests for geometric clustering of potential matches. Our algorithm robustly handles noisy real-world scanner data, obtaining a recognition performance comparable to state-of-the-art methods. In practice, it scales linearly with the scene size and achieves a high absolute throughput, processing half a terabyte of raw scanner data over night on a dual socket commodity PC.
Export
BibTeX
@techreport{KerberBokelohWandSeidel2012,
  TITLE = {Symmetry Detection in Large Scale City Scans},
  AUTHOR = {Kerber, Jens and Wand, Michael and Bokeloh, Martin and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN = {0946-011X},
  INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik},
  ADDRESS = {Saarbr{\"u}cken},
  NUMBER = {MPI-I-2012-4-001},
  YEAR = {2012},
  ABSTRACT = {In this report we present a novel method for detecting partial symmetries in very large point clouds of 3D city scans. Unlike previous work, which was limited to data sets of a few hundred megabytes maximum, our method scales to very large scenes. We map the detection problem to a nearestneighbor search in a low-dimensional feature space, followed by a cascade of tests for geometric clustering of potential matches. Our algorithm robustly handles noisy real-world scanner data, obtaining a recognition performance comparable to state-of-the-art methods. In practice, it scales linearly with the scene size and achieves a high absolute throughput, processing half a terabyte of raw scanner data over night on a dual socket commodity PC.},
  TYPE = {Research Report},
}
Endnote
%0 Report %A Kerber, Jens %A Wand, Michael %A Bokeloh, Martin %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Symmetry Detection in Large Scale City Scans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-0427-4 %D 2012 %P 32 p. %X In this report we present a novel method for detecting partial symmetries in very large point clouds of 3D city scans. Unlike previous work, which was limited to data sets of a few hundred megabytes maximum, our method scales to very large scenes. We map the detection problem to a nearestneighbor search in a low-dimensional feature space, followed by a cascade of tests for geometric clustering of potential matches. Our algorithm robustly handles noisy real-world scanner data, obtaining a recognition performance comparable to state-of-the-art methods. In practice, it scales linearly with the scene size and achieves a high absolute throughput, processing half a terabyte of raw scanner data over night on a dual socket commodity PC. %B Research Report %@ false
Kalojanov, J., Bokeloh, M., Wand, M., Guibas, L., Seidel, H.-P., and Slusallek, P. 2012. Microtiles: Extracting Building Blocks from Correspondences. Computer Graphics Forum (Proc. SGP 2012) 31, 5.
Export
BibTeX
@article{Kalojanov2012,
  TITLE = {Microtiles: Extracting Building Blocks from Correspondences},
  AUTHOR = {Kalojanov, Javor and Bokeloh, Martin and Wand, Michael and Guibas, Leonidas and Seidel, Hans-Peter and Slusallek, Philipp},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/j.1467-8659.2012.03165.x},
  LOCALID = {Local-ID: 62EBB7ABBD784112C1257AED003C5EE4-Kalojanov2012},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS = {Oxford, UK},
  YEAR = {2012},
  DATE = {2012},
  JOURNAL = {Computer Graphics Forum (Proc. SGP)},
  VOLUME = {31},
  NUMBER = {5},
  PAGES = {1597--1606},
  BOOKTITLE = {Eurographics Symposium on Geometry Processing 2012 (SGP 2012)},
  EDITOR = {Quak, Ewald},
}
Endnote
%0 Journal Article %A Kalojanov, Javor %A Bokeloh, Martin %A Wand, Michael %A Guibas, Leonidas %A Seidel, Hans-Peter %A Slusallek, Philipp %+ International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Universit&#228;t des Saarlandes %T Microtiles: Extracting Building Blocks from Correspondences : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-0D54-5 %F OTHER: Local-ID: 62EBB7ABBD784112C1257AED003C5EE4-Kalojanov2012 %R 10.1111/j.1467-8659.2012.03165.x %D 2012 %J Computer Graphics Forum %V 31 %N 5 %& 1597 %P 1597 - 1606 %I Wiley-Blackwell %C Oxford, UK %B Eurographics Symposium on Geometry Processing 2012 %O SGP 2012 Tallinn, Estonia, July 16 &#8211; 18, 2012 Symposium on Geometry Processing 2012
Jain, A., Thormählen, T., Ritschel, T., and Seidel, H.-P. 2012a. Exploring Shape Variations by 3D-Model Decomposition and Part-based Recombination. Computer Graphics Forum (Proc. EUROGRAPHICS 2012) 31, 2.
Export
BibTeX
@article{JainEG2012,
  title     = {Exploring Shape Variations by {3D}-Model Decomposition and Part-based Recombination},
  author    = {Jain, Arjun and Thorm{\"a}hlen, Thorsten and Ritschel, Tobias and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03042.x},
  localid   = {Local-ID: 41BC7691719A8E13C1257B0300430734-JainEG2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {31},
  number    = {2},
  pages     = {631--640},
  booktitle = {EUROGRAPHICS 2012},
  editor    = {Cignoni, Paolo and Ertl, Thomas},
}
Endnote
%0 Journal Article %A Jain, Arjun %A Thorm&#228;hlen, Thorsten %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Exploring Shape Variations by 3D-Model Decomposition and Part-based Recombination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F349-1 %R 10.1111/j.1467-8659.2012.03042.x %F OTHER: Local-ID: 41BC7691719A8E13C1257B0300430734-JainEG2012 %7 2012 %D 2012 %J Computer Graphics Forum %V 31 %N 2 %& 631 %P 631 - 640 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 The European Association for Computer Graphics 33rd Annual Conference ; Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EG 2012
Jain, A., Thormählen, T., Ritschel, T., and Seidel, H.-P. 2012b. Material Memex: Automatic Material Suggestions for 3D Objects. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012) 31, 6.
Export
BibTeX
@article{JainSA2012,
  TITLE = {Material Memex: {Automatic} Material Suggestions for {3D} Objects},
  AUTHOR = {Jain, Arjun and Thorm{\"a}hlen, Thorsten and Ritschel, Tobias and Seidel, Hans-Peter},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/2366145.2366162},
  LOCALID = {Local-ID: AE59BF88F44A94C0C1257B030042253E-JainSA2012},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2012},
  DATE = {2012},
  JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME = {31},
  NUMBER = {6},
  PAGES = {1--8},
  EID = {143},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A Jain, Arjun %A Thorm&#228;hlen, Thorsten %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Material Memex: Automatic Material Suggestions for 3D Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F34D-A %R 10.1145/2366145.2366162 %F OTHER: Local-ID: AE59BF88F44A94C0C1257B030042253E-JainSA2012 %7 2012 %D 2012 %J ACM Transactions on Graphics %V 31 %N 5 %& 1 %P 1 - 8 %Z sequence number: 143 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O ACM SIGGRAPH Asia 2012 Singapore, 28 November - 1 December
Ihrke, I., Reshetouski, I., Manakov, A., Tevs, A., Wand, M., and Seidel, H.-P. 2012a. A Kaleidoscopic Approach to Surround Geometry and Reflectance Acquisition. 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2012), IEEE.
Export
BibTeX
@inproceedings{Ihrke2012a,
  title     = {A Kaleidoscopic Approach to Surround Geometry and Reflectance Acquisition},
  author    = {Ihrke, Ivo and Reshetouski, Ilya and Manakov, Alkhazur and Tevs, Art and Wand, Michael and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4673-1611-8},
  doi       = {10.1109/CVPRW.2012.6239347},
  localid   = {Local-ID: 77354E08AB6311D2C1257AD7004BEBEC-Ihrke2012a},
  publisher = {IEEE},
  year      = {2012},
  date      = {2012-06},
  booktitle = {2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2012)},
  pages     = {29--36},
  address   = {Providence, RI},
}
Endnote
%0 Conference Proceedings %A Ihrke, Ivo %A Reshetouski, Ilya %A Manakov, Alkhazur %A Tevs, Art %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Kaleidoscopic Approach to Surround Geometry and Reflectance Acquisition : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-124D-5 %R 10.1109/CVPRW.2012.6239347 %F OTHER: Local-ID: 77354E08AB6311D2C1257AD7004BEBEC-Ihrke2012a %D 2012 %B 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops %Z date of event: 2012-06-16 - 2012-06-21 %C Providence, RI %B 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops %P 29 - 36 %I IEEE %@ 978-1-4673-1611-8
Ihrke, I., Reshetouski, I., Manakov, A., and Seidel, H.-P. 2012b. Three-Dimensional Kaleidoscopic Imaging. Computational Optical Sensing and Imaging (COSI 2012), OSA.
Export
BibTeX
@inproceedings{Ihrke2012,
  title     = {Three-Dimensional Kaleidoscopic Imaging},
  author    = {Ihrke, Ivo and Reshetouski, Ilya and Manakov, Alkhazur and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {1-55752-947-7},
  doi       = {10.1364/COSI.2012.CTu4B.8},
  localid   = {Local-ID: D95527BADC2F41E4C1257AD7004B7E2A-Ihrke2012},
  publisher = {OSA},
  year      = {2012},
  date      = {2012},
  booktitle = {Computational Optical Sensing and Imaging (COSI 2012)},
  pages     = {1--3},
  address   = {Monterey, CA},
}
Endnote
%0 Conference Proceedings %A Ihrke, Ivo %A Reshetouski, Ilya %A Manakov, Alkhazur %A Seidel, Hans-Peter %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Three-Dimensional Kaleidoscopic Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-12EE-A %F OTHER: Local-ID: D95527BADC2F41E4C1257AD7004B7E2A-Ihrke2012 %R 10.1364/COSI.2012.CTu4B.8 %D 2012 %B Computational Optical Sensing and Imaging Meeting %Z date of event: 2012-06-24 - 2012-06-28 %C Monterey, CA %B Computational Optical Sensing and Imaging %P 1 - 3 %I OSA %@ 1-55752-947-7
Herzog, R., Cadík, M., Aydin, T.O., Kim, K.I., Myszkowski, K., and Seidel, H.-P. 2012. NoRM: No-reference Image Quality Metric for Realistic Image Synthesis. Computer Graphics Forum (Proc. EUROGRAPHICS 2012) 31, 2.
Abstract
Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts.
Export
BibTeX
@article{NoRM_EG2012,
  title     = {{NoRM}: {No-reference} Image Quality Metric for Realistic Image Synthesis},
  author    = {Herzog, Robert and Cad{\'i}k, Martin and Aydin, Tunc Ozan and Kim, Kwang In and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2012.03055.x},
  localid   = {Local-ID: 673028A8C798FD45C1257A47004B2978-NoRM_EG2012},
  publisher = {Wiley-Blackwell},
  address   = {Oxford, UK},
  year      = {2012},
  date      = {2012},
  abstract  = {Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {31},
  number    = {2},
  pages     = {545--554},
  booktitle = {EUROGRAPHICS 2012},
  editor    = {Cignoni, Paolo and Ertl, Thomas},
}
Endnote
%0 Journal Article %A Herzog, Robert %A Cad&#237;k, Martin %A Aydin, Tunc Ozan %A Kim, Kwang In %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T NoRM: No-reference Image Quality Metric for Realistic Image Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1586-9 %R 10.1111/j.1467-8659.2012.03055.x %F OTHER: Local-ID: 673028A8C798FD45C1257A47004B2978-NoRM_EG2012 %7 2012-06-14 %D 2012 %X Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. 
We also briefly discuss an inpainting method for automatic correction of detected artifacts. %J Computer Graphics Forum %V 31 %N 2 %& 545 %P 545 - 554 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EG 2012
Günther, D., Seidel, H.-P., and Weinkauf, T. 2012. Extraction of Dominant Extremal Structures in Volumetric Data Using Separatrix Persistence. Computer Graphics Forum 31, 8.
Abstract
Extremal lines and surfaces are features of a 3D scalar field where the scalar function becomes minimal or maximal with respect to a local neighborhood. These features are important in many applications, e.g., computer tomography, fluid dynamics, cell biology. We present a novel topological method to extract these features using discrete Morse theory. In particular, we extend the notion of Separatrix Persistence from 2D to 3D, which gives us a robust estimation of the feature strength for extremal lines and surfaces. Not only does it allow us to determine the most important (parts of) extremal lines and surfaces, it also serves as a robust filtering measure of noise-induced structures. Our purely combinatorial method does not require derivatives or any other numerical computations.
Export
BibTeX
@article{guenther12b,
  TITLE     = {Extraction of Dominant Extremal Structures in Volumetric Data Using Separatrix Persistence},
  AUTHOR    = {G{\"u}nther, David and Seidel, Hans-Peter and Weinkauf, Tino},
  LANGUAGE  = {eng},
  ISSN      = {1467-8659},
  DOI       = {10.1111/j.1467-8659.2012.03222.x},
  LOCALID   = {Local-ID: 8F6C93DF947E3889C1257AD800396653-guenther12b},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2012},
  DATE      = {2012},
  ABSTRACT  = {Extremal lines and surfaces are features of a 3D scalar field where the scalar function becomes minimal or maximal with respect to a local neighborhood. These features are important in many applications, e.g., computer tomography, fluid dynamics, cell biology. We present a novel topological method to extract these features using discrete Morse theory. In particular, we extend the notion of Separatrix Persistence from 2D to 3D, which gives us a robust estimation of the feature strength for extremal lines and surfaces. Not only does it allow us to determine the most important (parts of) extremal lines and surfaces, it also serves as a robust filtering measure of noise-induced structures. Our purely combinatorial method does not require derivatives or any other numerical computations.},
  JOURNAL   = {Computer Graphics Forum},
  VOLUME    = {31},
  NUMBER    = {8},
  PAGES     = {2554--2566},
}
Endnote
%0 Journal Article %A G&#252;nther, David %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Extraction of Dominant Extremal Structures in Volumetric Data Using Separatrix Persistence : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14F7-6 %F OTHER: Local-ID: 8F6C93DF947E3889C1257AD800396653-guenther12b %R 10.1111/j.1467-8659.2012.03222.x %7 2012-10-08 %D 2012 %X Extremal lines and surfaces are features of a 3D scalar field where the scalar function becomes minimal or maximal with respect to a local neighborhood. These features are important in many applications, e.g., computer tomography, fluid dynamics, cell biology. We present a novel topological method to extract these features using discrete Morse theory. In particular, we extend the notion of Separatrix Persistence from 2D to 3D, which gives us a robust estimation of the feature strength for extremal lines and surfaces. Not only does it allow us to determine the most important (parts of) extremal lines and surfaces, it also serves as a robust filtering measure of noise-induced structures. Our purely combinatorial method does not require derivatives or any other numerical computations. %J Computer Graphics Forum %V 31 %N 8 %& 2554 %P 2554 - 2566 %I Wiley-Blackwell %C Oxford, UK %@ false
Elhayek, A., Stoll, C., Kim, K.I., Seidel, H.-P., and Theobalt, C. 2012a. Feature-based Multi-video Synchronization with Subframe Accuracy. Pattern Recognition (DAGM 2012/OAGM 2012), Springer.
Export
BibTeX
@inproceedings{Elhayek2012,
  TITLE     = {Feature-based Multi-video Synchronization with Subframe Accuracy},
  AUTHOR    = {Elhayek, Ahmed and Stoll, Carsten and Kim, Kwang In and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-3-642-32716-2},
  DOI       = {10.1007/978-3-642-32717-9_27},
  LOCALID   = {Local-ID: CA9960222B05A1B7C1257AD70076C360-Elhayek2012},
  PUBLISHER = {Springer},
  YEAR      = {2012},
  DATE      = {2012},
  BOOKTITLE = {Pattern Recognition (DAGM 2012/OAGM 2012)},
  EDITOR    = {Pinz, Axel and Pock, Thomas and Bischof, Horst and Leberl, Franz},
  PAGES     = {266--275},
  SERIES    = {Lecture Notes in Computer Science},
  VOLUME    = {7476},
  ADDRESS   = {Graz, Austria},
}
Endnote
%0 Conference Proceedings %A Elhayek, Ahmed %A Stoll, Carsten %A Kim, Kwang In %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Feature-based Multi-video Synchronization with Subframe Accuracy : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1212-7 %R 10.1007/978-3-642-32717-9_27 %F OTHER: Local-ID: CA9960222B05A1B7C1257AD70076C360-Elhayek2012 %D 2012 %B 34th Symposium of the German Association for Pattern Recognition ; 36th Annual Austrian Association for Pattern Recognition Conference %Z date of event: 2012-08-28 - 2012-08-31 %C Graz, Austria %B Pattern Recognition %E Pinz, Axel; Pock, Thomas; Bischof, Horst; Leberl, Franz %P 266 - 275 %I Springer %@ 978-3-642-32716-2 %B Lecture Notes in Computer Science %N 7476
Elhayek, A., Stoll, C., Hasler, N., Kim, K.I., Seidel, H.-P., and Theobalt, C. 2012b. Spatio-temporal Motion Tracking with Unsynchronized Cameras. 2012 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2012), IEEE.
Abstract
We present a new spatio-temporal method for markerless motion capture. We reconstruct the pose and motion of a character from a multi-view video sequence without requiring the cameras to be synchronized and without aligning captured frames in time. By formulating the model-to-image similarity measure as a temporally continuous functional, we are also able to reconstruct motion in much higher temporal detail than was possible with previous synchronized approaches. By purposefully running cameras unsynchronized we can capture even very fast motion at speeds that off-the-shelf but high quality cameras provide.
Export
BibTeX
@inproceedings{ElHayek2012a,
  TITLE     = {Spatio-temporal Motion Tracking with Unsynchronized Cameras},
  AUTHOR    = {Elhayek, Ahmed and Stoll, Carsten and Hasler, Nils and Kim, Kwang In and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {1063-6919},
  ISBN      = {978-1-4673-1226-4},
  DOI       = {10.1109/CVPR.2012.6247886},
  LOCALID   = {Local-ID: BAE13C070CC977C1C1257AD7003977F5-ElHayek2012a},
  PUBLISHER = {IEEE},
  YEAR      = {2012},
  DATE      = {2012},
  ABSTRACT  = {We present a new spatio-temporal method for markerless motion capture. We reconstruct the pose and motion of a character from a multi-view video sequence without requiring the cameras to be synchronized and without aligning captured frames in time. By formulating the model-to-image similarity measure as a temporally continuous functional, we are also able to reconstruct motion in much higher temporal detail than was possible with previous synchronized approaches. By purposefully running cameras unsynchronized we can capture even very fast motion at speeds that off-the-shelf but high quality cameras provide.},
  BOOKTITLE = {2012 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2012)},
  PAGES     = {1870--1877},
  ADDRESS   = {Providence, RI},
}
Endnote
%0 Conference Proceedings %A Elhayek, Ahmed %A Stoll, Carsten %A Hasler, Nils %A Kim, Kwang In %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-temporal Motion Tracking with Unsynchronized Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-138D-B %F OTHER: Local-ID: BAE13C070CC977C1C1257AD7003977F5-ElHayek2012a %R 10.1109/CVPR.2012.6247886 %D 2012 %B 2012 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2012-06-16 - 2012-06-21 %C Providence, RI %X We present a new spatio-temporal method for markerless motion capture. We reconstruct the pose and motion of a character from a multi-view video sequence without requiring the cameras to be synchronized and without aligning captured frames in time. By formulating the model-to-image similarity measure as a temporally continuous functional, we are also able to reconstruct motion in much higher temporal detail than was possible with previous synchronized approaches. By purposefully running cameras unsynchronized we can capture even very fast motion at speeds that off-the-shelf but high quality cameras provide. %B 2012 IEEE Conference on Computer Vision and Pattern Recognition %P 1870 - 1877 %I IEEE %@ false
Elek, O., Ritschel, T., Wilkie, A., and Seidel, H.-P. 2012a. Interactive Cloud Rendering Using Temporally Coherent Photon Mapping. Computers & Graphics 36, 8.
Abstract
This work presents a novel interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds -- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is initially used, and is then upsampled to the density field resolution on a physical basis in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only above the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.
Export
BibTeX
@article{Elek2012b,
  TITLE     = {Interactive Cloud Rendering Using Temporally Coherent Photon Mapping},
  AUTHOR    = {Elek, Oskar and Ritschel, Tobias and Wilkie, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0097-8493},
  DOI       = {10.1016/j.cag.2012.10.002},
  PUBLISHER = {Elsevier},
  ADDRESS   = {Amsterdam},
  YEAR      = {2012},
  DATE      = {2012},
  ABSTRACT  = {This work presents a novel interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds -- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is initially used, and is then upsampled to the density field resolution on a physical basis in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only above the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.},
  JOURNAL   = {Computers \& Graphics},
  VOLUME    = {36},
  NUMBER    = {8},
  PAGES     = {1109--1118},
}
Endnote
%0 Journal Article %A Elek, Oskar %A Ritschel, Tobias %A Wilkie, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Cloud Rendering Using Temporally Coherent Photon Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F427-5 %R 10.1016/j.cag.2012.10.002 %F OTHER: 5A3491A9878A9FE9C1257AF0006BC7A4-Elek2012b %7 2012-01-17 %D 2012 %X This work presents a novel interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds -- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is initially used, and is then upsampled to the density field resolution on a physical basis in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only above the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates. %J Computers & Graphics %V 36 %N 8 %& 1109 %P 1109 - 1118 %I Elsevier %C Amsterdam %@ false
Elek, O., Ritschel, T., Wilkie, A., and Seidel, H.-P. 2012b. Interactive Cloud Rendering Using Temporally-coherent Photon Mapping. Graphics Interface 2012 (GI 2012), Canadian Information Processing Society.
Abstract
This paper presents an interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds --- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is used, and is then upsampled to the density field resolution in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein phase function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only on the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.
Export
BibTeX
@inproceedings{Elek2012a,
  TITLE     = {Interactive Cloud Rendering Using Temporally-coherent Photon Mapping},
  AUTHOR    = {Elek, Oskar and Ritschel, Tobias and Wilkie, Alexander and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0713-5424},
  ISBN      = {978-1-4503-1420-6},
  LOCALID   = {Local-ID: 3CB9A3047BF33EE2C1257AF000674820-Elek2012a},
  PUBLISHER = {Canadian Information Processing Society},
  YEAR      = {2012},
  DATE      = {2012-05},
  ABSTRACT  = {This paper presents an interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds --- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is used, and is then upsampled to the density field resolution in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein phase function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only on the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates.},
  BOOKTITLE = {Graphics Interface 2012 (GI 2012)},
  EDITOR    = {Brooks, Stephen and Hawkey, Kirstie},
  PAGES     = {141--148},
  ADDRESS   = {Toronto, ON, Canada},
}
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Ritschel, Tobias %A Wilkie, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Cloud Rendering Using Temporally-coherent Photon Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F42E-8 %F OTHER: Local-ID: 3CB9A3047BF33EE2C1257AF000674820-Elek2012a %D 2012 %B Graphic Interface 2012 %Z date of event: 2012-05-28 - 2012-05-30 %C Toronto, ON, Canada %X This paper presents an interactive algorithm for simulation of light transport in clouds. Exploiting the high temporal coherence of the typical illumination and morphology of clouds we build on volumetric photon mapping, which we modify to allow for interactive rendering speeds --- instead of building a fresh irregular photon map for every scene state change we accumulate photon contributions in a regular grid structure. This is then continuously being refreshed by re-shooting only a fraction of the total amount of photons in each frame. To maintain its temporal coherence and low variance, a low-resolution grid is used, and is then upsampled to the density field resolution in each frame. We also present a technique to store and reconstruct the angular illumination information by exploiting properties of the standard Henyey-Greenstein phase function, namely its ability to express anisotropic angular distributions with a single dominating direction. The presented method is physically-plausible, conceptually simple and comparatively easy to implement. Moreover, it operates only on the cloud density field, thus not requiring any precomputation, and handles all light sources typical for the given environment, i.e. where one of the light sources dominates. 
%B Graphics Interface 2012 %E Brooks, Stephen; Hawkey, Kirstie %P 141 - 148 %I Canadian Information Processing Society %@ false
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., Seidel, H.-P., and Matusik, W. 2012a. A Luminance-contrast-aware Disparity Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012) 31, 6.
Export
BibTeX
@article{Didyk2012SigAsia,
  TITLE     = {A Luminance-contrast-aware Disparity Model and Applications},
  AUTHOR    = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter and Matusik, Wojciech},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2366145.2366203},
  LOCALID   = {Local-ID: C754E5AADEF5EA2AC1257AFE0056029B-Didyk2012SigAsia},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2012},
  DATE      = {2012},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {31},
  NUMBER    = {6},
  PAGES     = {184:1--184:10},
  EID       = {184},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Luminance-contrast-aware Disparity Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F3C4-9 %R 10.1145/2366145.2366203 %F OTHER: Local-ID: C754E5AADEF5EA2AC1257AFE0056029B-Didyk2012SigAsia %D 2012 %J ACM Transactions on Graphics %V 31 %N 6 %& 184:1 %P 184:1 - 184:10 %Z sequence number: 184 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O Singapore, 28 November - 1 December ACM SIGGRAPH Asia 2012
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2012b. Apparent Stereo: The Cornsweet Illusion Can Enhance Perceived Depth. Human Vision and Electronic Imaging XVII (HVEI 2012), SPIE/IS&T.
Export
BibTeX
@inproceedings{Didyk2012Cornsweet,
  TITLE     = {Apparent Stereo: The {Cornsweet} Illusion Can Enhance Perceived Depth},
  AUTHOR    = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0277-786X},
  ISBN      = {978-0-8194-8938-8},
  DOI       = {10.1117/12.907612},
  LOCALID   = {Local-ID: B0D8F2F7DF789CF4C1257A710043B8CF-Didyk2012Cornsweet},
  PUBLISHER = {SPIE/IS\&T},
  YEAR      = {2012},
  DATE      = {2012},
  BOOKTITLE = {Human Vision and Electronic Imaging XVII (HVEI 2012)},
  EDITOR    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and de Ridder, Huib},
  PAGES     = {1--12},
  SERIES    = {Proceedings of SPIE},
  VOLUME    = {8291},
  ADDRESS   = {Burlingame, CA},
}
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Stereo: The Cornsweet Illusion Can Enhance Perceived Depth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13C8-5 %R 10.1117/12.907612 %F OTHER: Local-ID: B0D8F2F7DF789CF4C1257A710043B8CF-Didyk2012Cornsweet %D 2012 %B Human Vision and Electronic Imaging XVII %Z date of event: 2012-01-23 - 2012-01-26 %C Burlingame, CA %B Human Vision and Electronic Imaging XVII %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; de Ridder, Huib %P 1 - 12 %I SPIE/IS&T %@ 9780819489388 %B Proceedings of SPIE %N 8291 %@ false
Čadík, M., Herzog, R., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2012. New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012) 31, 6.
Export
BibTeX
@article{cadik12iqm_evaluation,
  TITLE     = {New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts},
  AUTHOR    = {{\v C}ad{\'i}k, Martin and Herzog, Robert and Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2366145.2366166},
  LOCALID   = {Local-ID: 1D6D7862B7800D8DC1257AD7003415AE-cadik12iqm_evaluation},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2012},
  DATE      = {2012},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME    = {31},
  NUMBER    = {6},
  PAGES     = {1--10},
  EID       = {147},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2012},
}
Endnote
%0 Journal Article %A &#268;ad&#237;k, Martin %A Herzog, Robert %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-166E-6 %R 10.1145/2366145.2366166 %F OTHER: Local-ID: 1D6D7862B7800D8DC1257AD7003415AE-cadik12iqm_evaluation %7 2012 %D 2012 %J ACM Transactions on Graphics %V 31 %N 6 %& 1 %P 1 - 10 %Z sequence number: 147 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O ACM SIGGRAPH Asia 2012 Singapore, 28 November - 1 December 2012
Bokeloh, M., Wand, M., Seidel, H.-P., and Koltun, V. 2012. An Algebraic Model for Parameterized Shape Editing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2012) 31, 4.
Export
BibTeX
@article{Bokeloh2012algMod,
  TITLE     = {An Algebraic Model for Parameterized Shape Editing},
  AUTHOR    = {Bokeloh, Martin and Wand, Michael and Seidel, Hans-Peter and Koltun, Vladlen},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/2185520.2185574},
  LOCALID   = {Local-ID: A1326DBCE39F6AA4C1257AED003C12F0-Bokeloh2012algMod},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2012},
  DATE      = {2012},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {31},
  NUMBER    = {4},
  PAGES     = {1--10},
  EID       = {78},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2012},
}
Endnote
%0 Journal Article %A Bokeloh, Martin %A Wand, Michael %A Seidel, Hans-Peter %A Koltun, Vladlen %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T An Algebraic Model for Parameterized Shape Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14E9-6 %F OTHER: Local-ID: A1326DBCE39F6AA4C1257AED003C12F0-Bokeloh2012algMod %R 10.1145/2185520.2185574 %7 2012 %D 2012 %J ACM Transactions on Graphics %V 31 %N 4 %& 1 %P 1 - 10 %Z sequence number: 78 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2012 %O Los Angeles, California ACM SIGGRAPH 2012
Bharaj, G., Thormählen, T., Seidel, H.-P., and Theobalt, C. 2012. Automatically Rigging Multi-component Characters. Computer Graphics Forum (Proc. EUROGRAPHICS 2012) 31, 2.
Export
BibTeX
@article{GAURAV2012,
  TITLE     = {Automatically Rigging Multi-component Characters},
  AUTHOR    = {Bharaj, Gaurav and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISSN      = {0167-7055},
  DOI       = {10.1111/j.1467-8659.2012.03034.x},
  LOCALID   = {Local-ID: 7FDE39168BE34083C1257AE4005C8329-GAURAV2012},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS   = {Oxford, UK},
  YEAR      = {2012},
  DATE      = {2012},
  JOURNAL   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME    = {31},
  NUMBER    = {2},
  PAGES     = {755--764},
  BOOKTITLE = {EUROGRAPHICS 2012},
  EDITOR    = {Cignoni, Paolo and Ertl, Thomas},
}
Endnote
%0 Journal Article %A Bharaj, Gaurav %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Automatically Rigging Multi-component Characters : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14E2-3 %F OTHER: Local-ID: 7FDE39168BE34083C1257AE4005C8329-GAURAV2012 %R 10.1111/j.1467-8659.2012.03034.x %7 2012-06-07 %D 2012 %J Computer Graphics Forum %V 31 %N 2 %& 755 %P 755 - 764 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O The European Association for Computer Graphics 33rd Annual Conference ; Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EUROGRAPHICS 2012 %U http://gvv.mpi-inf.mpg.de/files/old_site_files/armc_eg_2012.pdf
Baboud, L., Eisemann, E., and Seidel, H.-P. 2012. Precomputed Safety Shapes for Efficient and Accurate Height-field Rendering. IEEE Transactions on Visualization and Computer Graphics 18, 11.
Export
BibTeX
@article{Baboud2012,
  TITLE     = {Precomputed Safety Shapes for Efficient and Accurate Height-field Rendering},
  AUTHOR    = {Baboud, Lionel and Eisemann, Elmar and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {1077-2626},
  DOI       = {10.1109/TVCG.2011.281},
  LOCALID   = {Local-ID: 418C87AB7BA9A992C1257B2800347870-Baboud2012},
  PUBLISHER = {IEEE},
  ADDRESS   = {Piscataway, NJ},
  YEAR      = {2012},
  DATE      = {2012},
  JOURNAL   = {IEEE Transactions on Visualization and Computer Graphics},
  VOLUME    = {18},
  NUMBER    = {11},
  PAGES     = {1811--1823},
}
Endnote
%0 Journal Article %A Baboud, Lionel %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Cluster of Excellence Multimodal Computing Cluster of Excellence Multimodal Computing Computer Graphics, MPI for Informatics, Max Planck Society %T Precomputed Safety Shapes for Efficient and Accurate Height-field Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-14D0-B %F OTHER: Local-ID: 418C87AB7BA9A992C1257B2800347870-Baboud2012 %R 10.1109/TVCG.2011.281 %7 2011-12-08 %D 2012 %J IEEE transactions on visualization and computer graphics %V 18 %N 11 %& 1811 %P 1811 - 1823 %I IEEE %C Piscataway, NJ %@ false
Baak, A., Müller, M., Bharaj, G., Seidel, H.-P., and Theobalt, C. 2012. A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera. In: Consumer Depth Cameras for Computer Vision. Springer, London.
Export
BibTeX
@incollection{BaakMuBhSeTh12_DataDrivenDepthTracking_BookChapter,
  TITLE     = {A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera},
  AUTHOR    = {Baak, Andreas and M{\"u}ller, Meinard and Bharaj, Gaurav and Seidel, Hans-Peter and Theobalt, Christian},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4471-4639-1; 978-1-4471-4640-7},
  DOI       = {10.1007/978-1-4471-4640-7_5},
  PUBLISHER = {Springer},
  ADDRESS   = {London},
  YEAR      = {2012},
  DATE      = {2012},
  BOOKTITLE = {Consumer Depth Cameras for Computer Vision},
  EDITOR    = {Fossati, Andrea and Gall, Juergen and Grabner, Helmut and Ren, Xiaofeng and Konolige, Kurt},
  PAGES     = {71--98},
  SERIES    = {Advances in Computer Vision and Pattern Recognition},
}
Endnote
%0 Book Section %A Baak, Andreas %A M&#252;ller, Meinard %A Bharaj, Gaurav %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13D1-F %R 10.1007/978-1-4471-4640-7_5 %D 2012 %B Consumer Depth Cameras for Computer Vision %E Fossati, Andrea; Gall, Juergen; Grabner, Helmut; Ren, Xiaofeng; Konolige, Kurt %P 71 - 98 %I Springer %C London %@ 978-1-4471-4639-1 978-1-4471-4640-7 %S Advances in Computer Vision and Pattern Recognition
2011
Xu, F., Liu, Y., Stoll, C., et al. 2011. Video-based Characters -- Creating New Human Performances from a Multi-view Video Database. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2011) 30, 4.
Export
BibTeX
@article{Xu2011,
  title     = {Video-based Characters -- Creating New Human Performances from a Multi-view Video Database},
  author    = {Xu, Feng and Liu, Yebin and Stoll, Carsten and Tompkin, James and Bharaj, Gaurav and Dai, Qionghai and Seidel, Hans-Peter and Kautz, Jan and Theobalt, Christian},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2010324.1964927},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2011},
  date      = {2011},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {30},
  number    = {4},
  pages     = {1--20},
  eid       = {32},
  booktitle = {Proceedings of ACM SIGGRAPH 2011},
}
Endnote
%0 Journal Article %A Xu, Feng %A Liu, Yebin %A Stoll, Carsten %A Tompkin, James %A Bharaj, Gaurav %A Dai, Qionghai %A Seidel, Hans-Peter %A Kautz, Jan %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Video-based Characters -- Creating New Human Performances from a Multi-view Video Database : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1417-3 %F EDOC: 618893 %R 10.1145/2010324.1964927 %7 2011 %D 2011 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 30 %N 4 %& 1 %P 1 - 20 %Z sequence number: 32 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2011 %O ACM SIGGRAPH 2011 Vancouver, BC, Canada
Wu, C., Varanasi, K., Liu, Y., Seidel, H.-P., and Theobalt, C. 2011. Shading-based Dynamic Shape Refinement from Multi-view Video under General Illumination. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{Wu_iccv2011,
  title     = {Shading-based Dynamic Shape Refinement from Multi-view Video under General Illumination},
  author    = {Wu, Chenglei and Varanasi, Kiran and Liu, Yebin and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126358},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE International Conference on Computer Vision (ICCV 2011)},
  pages     = {1108--1115},
  address   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Wu, Chenglei %A Varanasi, Kiran %A Liu, Yebin %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shading-based Dynamic Shape Refinement from Multi-view Video under General Illumination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13F7-4 %F EDOC: 618885 %R 10.1109/ICCV.2011.6126358 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 1108 - 1115 %I IEEE %@ 978-1-4577-1101-5
Tevs, A., Berner, A., Wand, M., Ihrke, I., and Seidel, H.-P. 2011. Intrinsic Shape Matching by Planned Landmark Sampling. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Abstract
Recently, the problem of intrinsic shape matching has received a lot of attention. A number of algorithms have been proposed, among which random-sampling-based techniques have been particularly successful due to their generality and efficiency. We introduce a new sampling-based shape matching algorithm that uses a planning step to find optimized "landmark" points. These points are matched first in order to maximize the information gained and thus minimize the sampling costs. Our approach makes three main contributions: First, the new technique leads to a significant improvement in performance, which we demonstrate on a number of benchmark scenarios. Second, our technique does not require any keypoint detection. This is often a significant limitation for models that do not show sufficient surface features. Third, we examine the actual numerical degrees of freedom of the matching problem for a given piece of geometry. In contrast to previous results, our estimates take into account unprecise geodesics and potentially numerically unfavorable geometry of general topology, giving a more realistic complexity estimate.
Export
BibTeX
@article{TevsEG2011,
  title     = {Intrinsic Shape Matching by Planned Landmark Sampling},
  author    = {Tevs, Art and Berner, Alexander and Wand, Michael and Ihrke, Ivo and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1111/j.1467-8659.2011.01879.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  abstract  = {Recently, the problem of intrinsic shape matching has received a lot of attention. A number of algorithms have been proposed, among which random-sampling-based techniques have been particularly successful due to their generality and efficiency. We introduce a new sampling-based shape matching algorithm that uses a planning step to find optimized "landmark" points. These points are matched first in order to maximize the information gained and thus minimize the sampling costs. Our approach makes three main contributions: First, the new technique leads to a significant improvement in performance, which we demonstrate on a number of benchmark scenarios. Second, our technique does not require any keypoint detection. This is often a significant limitation for models that do not show sufficient surface features. Third, we examine the actual numerical degrees of freedom of the matching problem for a given piece of geometry. In contrast to previous results, our estimates take into account unprecise geodesics and potentially numerically unfavorable geometry of general topology, giving a more realistic complexity estimate.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {543--552},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Tevs, Art %A Berner, Alexander %A Wand, Michael %A Ihrke, Ivo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Intrinsic Shape Matching by Planned Landmark Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13C7-0 %F EDOC: 618855 %R 10.1111/j.1467-8659.2011.01879.x %D 2011 %* Review method: peer-reviewed %X Recently, the problem of intrinsic shape matching has received a lot of attention. A number of algorithms have been proposed, among which random-sampling-based techniques have been particularly successful due to their generality and efficiency. We introduce a new sampling-based shape matching algorithm that uses a planning step to find optimized "landmark" points. These points are matched first in order to maximize the information gained and thus minimize the sampling costs. Our approach makes three main contributions: First, the new technique leads to a significant improvement in performance, which we demonstrate on a number of benchmark scenarios. Second, our technique does not require any keypoint detection. This is often a significant limitation for models that do not show sufficient surface features. Third, we examine the actual numerical degrees of freedom of the matching problem for a given piece of geometry. In contrast to previous results, our estimates take into account unprecise geodesics and potentially numerically unfavorable geometry of general topology, giving a more realistic complexity estimate. 
%J Computer Graphics Forum %V 30 %N 2 %& 543 %P 543 - 552 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011 EG 2011
Templin, K., Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2011. Apparent Resolution Enhancement for Animations. Proceedings SCCG 2011 (SCCG 2011), ACM.
Abstract
Presenting the variety of high resolution images captured by high-quality devices, or generated on the computer, is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking into account human perception. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. "panning". In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods.
Export
BibTeX
@inproceedings{Templin2011,
  title     = {Apparent Resolution Enhancement for Animations},
  author    = {Templin, Krzysztof and Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4503-1978-2},
  doi       = {10.1145/2461217.2461230},
  publisher = {ACM},
  year      = {2011},
  date      = {2011},
  abstract  = {Presenting the variety of high resolution images captured by high-quality devices, or generated on the computer, is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking into account human perception. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. ``panning''. In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods.},
  booktitle = {Proceedings SCCG 2011 (SCCG 2011)},
  editor    = {Nishita, Tomoyuki and {\v D}urikovi{\v c}, Roman},
  pages     = {85--92},
  address   = {Vini{\v c}n{\'e}, Slovakia},
}
Endnote
%0 Conference Proceedings %A Templin, Krzysztof %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Resolution Enhancement for Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-138B-9 %F EDOC: 618886 %R 10.1145/2461217.2461230 %D 2011 %B 27th Spring Conference on Computer Graphics %Z date of event: 2011-04-28 - 2011-04-30 %C Vini&#269;n&#233;, Slovakia %X Presenting the variety of high resolution images captured by high-quality devices, or generated on the computer, is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking into account human perception. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. panning''. In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods. %B Proceedings SCCG 2011 %E Nishita, Tomoyuki; &#270;urikovi&#269;, Roman %P 85 - 92 %I ACM %@ 978-1-4503-1978-2
Tautges, J., Zinke, A., Krüger, B., et al. 2011. Motion Reconstruction Using Sparse Accelerometer Data. ACM Transactions on Graphics 30, 3.
Export
BibTeX
@article{MotionReconstruction_TOG,
  title     = {Motion Reconstruction Using Sparse Accelerometer Data},
  author    = {Tautges, Jochen and Zinke, Arno and Kr{\"u}ger, Bj{\"o}rn and Baumann, Jan and Weber, Andreas and Helten, Thomas and M{\"u}ller, Meinard and Seidel, Hans-Peter and Eberhardt, Bernd},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/1966394.1966397},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2011},
  date      = {2011},
  journal   = {ACM Transactions on Graphics},
  volume    = {30},
  number    = {3},
  pages     = {1--12},
  eid       = {18},
}
Endnote
%0 Journal Article %A Tautges, Jochen %A Zinke, Arno %A Kr&#252;ger, Bj&#246;rn %A Baumann, Jan %A Weber, Andreas %A Helten, Thomas %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Eberhardt, Bernd %+ External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Motion Reconstruction Using Sparse Accelerometer Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13D8-A %F EDOC: 618860 %R 10.1145/1966394.1966397 %7 2011 %D 2011 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 30 %N 3 %& 1 %P 1 - 12 %Z sequence number: 18 %I ACM %C New York, NY %@ false
Sunkel, M., Jansen, S., Wand, M., Eisemann, E., and Seidel, H.-P. 2011. Learning Line Features in 3D Geometry. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Export
BibTeX
@article{SunkelEG2011,
  title     = {Learning Line Features in {3D} Geometry},
  author    = {Sunkel, Martin and Jansen, Silke and Wand, Michael and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1111/j.1467-8659.2011.01858.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {267--276},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Sunkel, Martin %A Jansen, Silke %A Wand, Michael %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Line Features in 3D Geometry : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13CD-4 %F EDOC: 618856 %R 10.1111/j.1467-8659.2011.01858.x %7 2011 %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 2 %& 267 %P 267 - 276 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 EG 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011
Stoll, C., Hasler, N., Gall, J., Seidel, H.-P., and Theobalt, C. 2011. Fast Articulated Motion Tracking using a Sums of Gaussians Body Model. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{Stoll2011,
  title     = {Fast Articulated Motion Tracking using a Sums of {Gaussians} Body Model},
  author    = {Stoll, Carsten and Hasler, Nils and Gall, Juergen and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126338},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE International Conference on Computer Vision (ICCV 2011)},
  pages     = {951--958},
  address   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Stoll, Carsten %A Hasler, Nils %A Gall, Juergen %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Articulated Motion Tracking using a Sums of Gaussians Body Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13BD-A %F EDOC: 618892 %R 10.1109/ICCV.2011.6126338 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 951 - 958 %I IEEE %@ 978-1-4577-1101-5
Scherbaum, K., Ritschel, T., Hullin, M., Thormählen, T., Blanz, V., and Seidel, H.-P. 2011. Computer-suggested Facial Makeup. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Export
BibTeX
@article{Scherbaum2011makeup,
  title     = {Computer-suggested Facial Makeup},
  author    = {Scherbaum, Kristina and Ritschel, Tobias and Hullin, Matthias and Thorm{\"a}hlen, Thorsten and Blanz, Volker and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1111/j.1467-8659.2011.01874.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {485--492},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Scherbaum, Kristina %A Ritschel, Tobias %A Hullin, Matthias %A Thorm&#228;hlen, Thorsten %A Blanz, Volker %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Computer-suggested Facial Makeup : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13AA-3 %F EDOC: 618869 %R 10.1111/j.1467-8659.2011.01874.x %7 2011 %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 2 %& 485 %P 485 - 492 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011 EG 2011
Saleem, W., Belyaev, A., Wang, D., and Seidel, H.-P. 2011. On Visual Complexity of 3D Shapes. Computers & Graphics 35, 3.
Export
BibTeX
@article{Saleem2011,
  title     = {On Visual Complexity of {3D} Shapes},
  author    = {Saleem, Waqar and Belyaev, Alexander and Wang, Danyi and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0097-8493},
  doi       = {10.1016/j.cag.2011.03.006},
  publisher = {Elsevier},
  address   = {Amsterdam},
  year      = {2011},
  date      = {2011},
  journal   = {Computers \& Graphics},
  volume    = {35},
  number    = {3},
  pages     = {580--585},
}
Endnote
%0 Journal Article %A Saleem, Waqar %A Belyaev, Alexander %A Wang, Danyi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On Visual Complexity of 3D Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13E1-3 %F EDOC: 618927 %R 10.1016/j.cag.2011.03.006 %7 2011 %D 2011 %* Review method: peer-reviewed %J Computers & Graphics %V 35 %N 3 %& 580 %P 580 - 585 %I Elsevier %C Amsterdam %@ false
Ritschel, T., Eisemann, E., Ha, I., Kim, J.D.K., and Seidel, H.-P. 2011. Making Imperfect Shadow Maps View-adaptive: High-quality Global Illumination in Large Dynamic Scenes. Computer Graphics Forum 30, 8.
Export
BibTeX
@article{Ritschel2011,
  title     = {Making Imperfect Shadow Maps View-adaptive: High-quality Global Illumination in Large Dynamic Scenes},
  author    = {Ritschel, Tobias and Eisemann, Elmar and Ha, Inwoo and Kim, James D. K. and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2011.01998.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  journal   = {Computer Graphics Forum},
  volume    = {30},
  number    = {8},
  pages     = {2258--2269},
}
Endnote
%0 Journal Article %A Ritschel, Tobias %A Eisemann, Elmar %A Ha, Inwoo %A Kim, James D. K. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Making Imperfect Shadow Maps View-adaptive: High-quality Global Illumination in Large Dynamic Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13CF-F %F EDOC: 618926 %R 10.1111/j.1467-8659.2011.01998.x %7 2011 %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 8 %& 2258 %P 2258 - 2269 %I Blackwell %C Oxford, UK %@ false
Reshetouski, I., Manakov, A., Seidel, H.-P., and Ihrke, I. 2011. Kaleidoscopic Imaging of Three-dimensional Objects. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{Reshetouski2011,
  title     = {Kaleidoscopic Imaging of Three-dimensional Objects},
  author    = {Reshetouski, Ilya and Manakov, Alkhazur and Seidel, Hans-Peter and Ihrke, Ivo},
  language  = {eng},
  isbn      = {978-1-4577-0394-2},
  doi       = {10.1109/CVPR.2011.5995579},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)},
  editor    = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal},
  pages     = {353--360},
  address   = {Colorado Springs, CO, USA},
}
Endnote
%0 Conference Proceedings %A Reshetouski, Ilya %A Manakov, Alkhazur %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Kaleidoscopic Imaging of Three-dimensional Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13CB-8 %F EDOC: 618868 %R 10.1109/CVPR.2011.5995579 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 353 - 360 %I IEEE %@ 978-1-4577-0394-2
Pons-Moll, G., Baak, A., Gall, J., et al. 2011. Outdoor Human Motion Capture using Inverse Kinematics and von Mises-Fisher Sampling. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{PonsMollBaGaMuSeRo2011_OutdoorMocap_ICCV,
  title     = {Outdoor Human Motion Capture using Inverse Kinematics and von {Mises}-{Fisher} Sampling},
  author    = {Pons-Moll, Gerard and Baak, Andreas and Gall, J{\"u}rgen and Leal-Taix{\'e}, Laura and M{\"u}ller, Meinard and Seidel, Hans-Peter and Rosenhahn, Bodo},
  language  = {eng},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126375},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE International Conference on Computer Vision (ICCV 2011)},
  pages     = {1243--1250},
  address   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Pons-Moll, Gerard %A Baak, Andreas %A Gall, J&#252;rgen %A Leal-Taix&#233;, Laura %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Rosenhahn, Bodo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Outdoor Human Motion Capture using Inverse Kinematics and von Mises-Fisher Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13E4-E %F EDOC: 618876 %R 10.1109/ICCV.2011.6126375 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 1243 - 1250 %I IEEE %@ 978-1-4577-1101-5
Pajak, D., Herzog, R., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2011. Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Abstract
In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware.
Export
BibTeX
@article{HerzogEG2011,
  title     = {Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming},
  author    = {Pajak, Dawid and Herzog, Robert and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  doi       = {10.1111/j.1467-8659.2011.01871.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2011},
  date      = {2011},
  abstract  = {In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {415--424},
  booktitle = {EUROGRAPHICS 2011},
  editor    = {Chen, Min and Deussen, Oliver},
}
Endnote
%0 Journal Article %A Pajak, Dawid %A Herzog, Robert %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13F2-E %F EDOC: 618866 %R 10.1111/j.1467-8659.2011.01871.x %7 2011 %D 2011 %* Review method: peer-reviewed %X In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. 
Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware. %J Computer Graphics Forum %V 30 %N 2 %& 415 %P 415 - 424 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGPRAPHICS 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011 EG 2011
Manakov, A., Seidel, H.-P., and Ihrke, I. 2011. A Mathematical Model and Calibration Procedure for Galvanometric Laser Scanning Systems. Vision, Modeling, and Visualization (VMV 2011), Eurographics Association.
Export
BibTeX
@inproceedings{Manakov2011, TITLE = {A Mathematical Model and Calibration Procedure for Galvanometric Laser Scanning Systems}, AUTHOR = {Manakov, Alkhazur and Seidel, Hans-Peter and Ihrke, Ivo}, LANGUAGE = {eng}, ISBN = {978-3-905673-85-2}, DOI = {10.2312/PE/VMV/VMV11/207-214}, PUBLISHER = {Eurographics Association}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {Vision, Modeling, and Visualization (VMV 2011)}, EDITOR = {Eisert, Peter and Hornegger, Joachim and Polthier, Konrad}, PAGES = {207--214}, ADDRESS = {Berlin, Germany}, }
Endnote
%0 Conference Proceedings %A Manakov, Alkhazur %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Mathematical Model and Calibration Procedure for Galvanometric Laser Scanning Systems : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1381-E %F EDOC: 618883 %R 10.2312/PE/VMV/VMV11/207-214 %D 2011 %B 16th International Workshop on Vision, Modeling and Visualization %Z date of event: 2011-10-04 - 2011-10-06 %C Berlin, Germany %B Vision, Modeling, and Visualization %E Eisert, Peter; Hornegger, Joachim; Polthier, Konrad %P 207 - 214 %I Eurographics Association %@ 978-3-905673-85-2
Liu, Y., Stoll, C., Gall, J., Seidel, H.-P., and Theobalt, C. 2011. Markerless Motion Capture of Interacting Characters Using Multi-view Image Segmentation. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{LiuCVPR2011, TITLE = {Markerless Motion Capture of Interacting Characters Using Multi-view Image Segmentation}, AUTHOR = {Liu, Yebin and Stoll, Carsten and Gall, J{\"u}rgen and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-4577-0394-2}, DOI = {10.1109/CVPR.2011.5995424}, PUBLISHER = {IEEE}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)}, EDITOR = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal}, PAGES = {1249--1256}, ADDRESS = {Colorado Springs, CO, USA}, }
Endnote
%0 Conference Proceedings %A Liu, Yebin %A Stoll, Carsten %A Gall, J&#252;rgen %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Markerless Motion Capture of Interacting Characters Using Multi-view Image Segmentation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13D1-7 %F EDOC: 618867 %R 10.1109/CVPR.2011.5995424 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 1249 - 1256 %I IEEE %@ 978-1-4577-0394-2
Lasowski, R., Tevs, A., Wand, M., and Seidel, H.-P. 2011. Wavelet Belief Propagation for Large Scale Inference Problems. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{LaTeWaSeCVPR11, TITLE = {Wavelet Belief Propagation for Large Scale Inference Problems}, AUTHOR = {Lasowski, Ruxandra and Tevs, Art and Wand, Michael and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4577-0394-2}, DOI = {10.1109/CVPR.2011.5995489}, PUBLISHER = {IEEE}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)}, EDITOR = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal}, PAGES = {1921--1928}, ADDRESS = {Colorado Springs, CO, USA}, }
Endnote
%0 Conference Proceedings %A Lasowski, Ruxandra %A Tevs, Art %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Wavelet Belief Propagation for Large Scale Inference Problems : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-141B-C %F EDOC: 618865 %R 10.1109/CVPR.2011.5995489 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 1921 - 1928 %I IEEE %@ 978-1-4577-0394-2
Kurz, C., Thormählen, T., and Seidel, H.-P. 2011a. Bundle Adjustment for Stereoscopic 3D. Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011), Springer.
Abstract
The recent resurgence of stereoscopic 3D films has triggered a high demand for post-processing tools for stereoscopic image sequences. Camera motion estimation, also known as structure-from-motion (SfM) or match-moving, is an essential step in the post-processing pipeline. In order to ensure a high accuracy of the estimated camera parameters, a bundle adjustment algorithm should be employed. We present a new stereo camera model for bundle adjustment. It is designed to be applicable to a wide range of cameras employed in today's movie productions. In addition, we describe how the model can be integrated efficiently into the sparse bundle adjustment framework, enabling the processing of stereoscopic image sequences with traditional efficiency and improved accuracy. Our camera model is validated by synthetic experiments, on rendered sequences, and on a variety of real-world video sequences.
Export
BibTeX
@inproceedings{Kurz2011, TITLE = {Bundle Adjustment for Stereoscopic {3D}}, AUTHOR = {Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-642-24135-2}, DOI = {10.1007/978-3-642-24136-9_1}, PUBLISHER = {Springer}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {The recent resurgence of stereoscopic 3D films has triggered a high demand for post-processing tools for stereoscopic image sequences. Camera motion estimation, also known as structure-from-motion (SfM) or match-moving, is an essential step in the post-processing pipeline. In order to ensure a high accuracy of the estimated camera parameters, a bundle adjustment algorithm should be employed. We present a new stereo camera model for bundle adjustment. It is designed to be applicable to a wide range of cameras employed in today's movie productions. In addition, we describe how the model can be integrated efficiently into the sparse bundle adjustment framework, enabling the processing of stereoscopic image sequences with traditional efficiency and improved accuracy. Our camera model is validated by synthetic experiments, on rendered sequences, and on a variety of real-world video sequences.}, BOOKTITLE = {Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011)}, EDITOR = {Gagalowicz, Andr{\'e} and Philips, Wilfried}, PAGES = {1--12}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {6930}, ADDRESS = {Rocquencourt, France}, }
Endnote
%0 Conference Proceedings %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Bundle Adjustment for Stereoscopic 3D : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-139D-1 %F EDOC: 618888 %R 10.1007/978-3-642-24136-9_1 %D 2011 %B 5th International Conference on Computer Vision / Computer Graphics Collaboration Techniques and Applications %Z date of event: 2011-10-10 - 2011-10-11 %C Rocquencourt, France %X The recent resurgence of stereoscopic 3D films has triggered a high demand for post-processing tools for stereoscopic image sequences. Camera motion estimation, also known as structure-from-motion (SfM) or match-moving, is an essential step in the post-processing pipeline. In order to ensure a high accuracy of the estimated camera parameters, a bundle adjustment algorithm should be employed. We present a new stereo camera model for bundle adjustment. It is designed to be applicable to a wide range of cameras employed in today's movie productions. In addition, we describe how the model can be integrated efficiently into the sparse bundle adjustment framework, enabling the processing of stereoscopic image sequences with traditional efficiency and improved accuracy. Our camera model is validated by synthetic experiments, on rendered sequences, and on a variety of real-world video sequences. %B Computer Vision / Computer Graphics Collaboration Techniques %E Gagalowicz, Andr&#233;; Philips, Wilfried %P 1 - 12 %I Springer %@ 978-3-642-24135-2 %B Lecture Notes in Computer Science %N 6930
Kurz, C., Thormählen, T., and Seidel, H.-P. 2011b. Visual Fixation for 3D Video Stabilization. Journal of Virtual Reality and Broadcasting 8, 2.
Abstract
Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. Furthermore, different methods of additional user interaction are investigated. It is shown that the stabilization process can easily be controlled and that it can be combined with state-of-the-art tracking techniques in order to obtain a powerful image stabilization tool. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes.
Export
BibTeX
@article{Kurz2010jvrb, TITLE = {Visual Fixation for {3D} Video Stabilization}, AUTHOR = {Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1860-2037}, URL = {urn:nbn:de:0009-6-28222}, YEAR = {2011}, ABSTRACT = {Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. Furthermore, different methods of additional user interaction are investigated. It is shown that the stabilization process can easily be controlled and that it can be combined with state-of-the-art tracking techniques in order to obtain a powerful image stabilization tool. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes.}, JOURNAL = {Journal of Virtual Reality and Broadcasting}, VOLUME = {8}, NUMBER = {2}, PAGES = {1--12}, }
Endnote
%0 Journal Article %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visual Fixation for 3D Video Stabilization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1419-0 %F EDOC: 618853 %U urn:nbn:de:0009-6-28222 %7 2011-01-31 %D 2011 %8 31.01.2011 %X Visual fixation is employed by humans and some animals to keep a specific 3D location at the center of the visual gaze. Inspired by this phenomenon in nature, this paper explores the idea to transfer this mechanism to the context of video stabilization for a hand-held video camera. A novel approach is presented that stabilizes a video by fixating on automatically extracted 3D target points. This approach is different from existing automatic solutions that stabilize the video by smoothing. To determine the 3D target points, the recorded scene is analyzed with a state-of-the-art structure-from-motion algorithm, which estimates camera motion and reconstructs a 3D point cloud of the static scene objects. Special algorithms are presented that search either virtual or real 3D target points, which back-project close to the center of the image for as long a period of time as possible. The stabilization algorithm then transforms the original images of the sequence so that these 3D target points are kept exactly in the center of the image, which, in case of real 3D target points, produces a perfectly stable result at the image center. Furthermore, different methods of additional user interaction are investigated. It is shown that the stabilization process can easily be controlled and that it can be combined with state-of-the-art tracking techniques in order to obtain a powerful image stabilization tool. The approach is evaluated on a variety of videos taken with a hand-held camera in natural scenes. 
%J Journal of Virtual Reality and Broadcasting %V 8 %N 2 %& 1 %P 1 - 12 %@ false
Klehm, O., Ritschel, T., Eisemann, E., and Seidel, H.-P. 2011. Bent Normals and Cones in Screen-space. Vision, Modeling, and Visualization (VMV 2011), Eurographics Association.
Abstract
Ambient occlusion (AO) is a popular technique for real-time as well as offline rendering. One of its benefits is a gain in efficiency due to the fact that occlusion and shading are decoupled which results in an average occlusion that modulates the surface shading. Its main drawback is a loss of realism due to the lack of directional occlusion and lighting. As a solution, the use of bent normals was proposed for offline rendering. This work describes how to compute bent normals and bent cones in combination with screen-space ambient occlusion. These extensions combine the speed and simplicity of AO with physically more plausible lighting.
Export
BibTeX
@inproceedings{SsbcVMVKlehm2011, TITLE = {Bent Normals and Cones in Screen-space}, AUTHOR = {Klehm, Oliver and Ritschel, Tobias and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905673-85-2}, DOI = {10.2312/PE/VMV/VMV11/177-182}, LOCALID = {Local-ID: C125675300671F7B-553297BEE234DBA3C1257922004C3F89-SsbcVMVKlehm2011}, PUBLISHER = {Eurographics Association}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {Ambient occlusion (AO) is a popular technique for real-time as well as offline rendering. One of its benefits is a gain in efficiency due to the fact that occlusion and shading are decoupled which results in an average occlusion that modulates the surface shading. Its main drawback is a loss of realism due to the lack of directional occlusion and lighting. As a solution, the use of bent normals was proposed for offline rendering. This work describes how to compute bent normals and bent cones in combination with screen-space ambient occlusion. These extensions combine the speed and simplicity of AO with physically more plausible lighting.}, BOOKTITLE = {Vision, Modeling, and Visualization (VMV 2011)}, EDITOR = {Eisert, Peter and Hornegger, Joachim and Polthier, Konrad}, PAGES = {177--182}, ADDRESS = {Berlin, Germany}, }
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Ritschel, Tobias %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Bent Normals and Cones in Screen-space : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-139B-5 %F EDOC: 618877 %R 10.2312/PE/VMV/VMV11/177-182 %F OTHER: Local-ID: C125675300671F7B-553297BEE234DBA3C1257922004C3F89-SsbcVMVKlehm2011 %D 2011 %B 16th International Workshop on Vision, Modeling and Visualization %Z date of event: 2011-10-04 - 2011-10-06 %C Berlin, Germany %X Ambient occlusion (AO) is a popular technique for real-time as well as offline rendering. One of its benefits is a gain in efficiency due to the fact that occlusion and shading are decoupled which results in an average occlusion that modulates the surface shading. Its main drawback is a loss of realism due to the lack of directional occlusion and lighting. As a solution, the use of bent normals was proposed for offline rendering. This work describes how to compute bent normals and bent cones in combination with screen-space ambient occlusion. These extensions combine the speed and simplicity of AO with physically more plausible lighting. %B Vision, Modeling, and Visualization %E Eisert, Peter; Hornegger, Joachim; Polthier, Konrad %P 177 - 182 %I Eurographics Association %@ 978-3-905673-85-2
Kerber, J., Wand, M., Krüger, J., and Seidel, H.-P. 2011. Partial Symmetry Detection in Volume Data. Vision, Modeling, and Visualization (VMV 2011), Eurographics Association.
Abstract
In this paper, we present an algorithm for detecting partial Euclidean symmetries in volume data. Our algorithm finds subsets in voxel data that map to each other approximately under translations, rotations, and reflections. We implement the search for partial symmetries efficiently and robustly using a feature-based approach: We first reduce the volume to salient line features and then create transformation candidates from matching only local configurations of these line networks. Afterwards, only a shortlist of transformation candidates need to be verified using expensive dense volume matching. We apply our technique on both synthetic test scenes as well as real CT scans and show that we can recover a large amount of partial symmetries for complexly structured volume data sets.
Export
BibTeX
@inproceedings{Kerber2011_1, TITLE = {Partial Symmetry Detection in Volume Data}, AUTHOR = {Kerber, Jens and Wand, Michael and Kr{\"u}ger, Jens and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905673-85-2}, DOI = {10.2312/PE/VMV/VMV11/041-048}, PUBLISHER = {Eurographics Association}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {In this paper, we present an algorithm for detecting partial Euclidean symmetries in volume data. Our algorithm finds subsets in voxel data that map to each other approximately under translations, rotations, and reflections. We implement the search for partial symmetries efficiently and robustly using a feature-based approach: We first reduce the volume to salient line features and then create transformation candidates from matching only local configurations of these line networks. Afterwards, only a shortlist of transformation candidates need to be verified using expensive dense volume matching. We apply our technique on both synthetic test scenes as well as real CT scans and show that we can recover a large amount of partial symmetries for complexly structured volume data sets.}, BOOKTITLE = {Vision, Modeling, and Visualization (VMV 2011)}, EDITOR = {Eisert, Peter and Hornegger, Joachim and Polthier, Konrad}, PAGES = {41--48}, ADDRESS = {Berlin, Germany}, }
Endnote
%0 Conference Proceedings %A Kerber, Jens %A Wand, Michael %A Kr&#252;ger, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Partial Symmetry Detection in Volume Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13E9-4 %F EDOC: 618878 %R 10.2312/PE/VMV/VMV11/041-048 %D 2011 %B 16th International Workshop on Vision, Modeling and Visualization %Z date of event: 2011-10-04 - 2011-10-06 %C Berlin, Germany %X In this paper, we present an algorithm for detecting partial Euclidean symmetries in volume data. Our algorithm finds subsets in voxel data that map to each other approximately under translations, rotations, and reflections. We implement the search for partial symmetries efficiently and robustly using a feature-based approach: We first reduce the volume to salient line features and then create transformation candidates from matching only local configurations of these line networks. Afterwards, only a shortlist of transformation candidates need to be verified using expensive dense volume matching. We apply our technique on both synthetic test scenes as well as real CT scans and show that we can recover a large amount of partial symmetries for complexly structured volume data sets. %B Vision, Modeling, and Visualization %E Eisert, Peter; Hornegger, Joachim; Polthier, Konrad %P 41 - 48 %I Eurographics Association %@ 978-3-905673-85-2
Hullin, M.B., Lensch, H.P.A., Raskar, R., Seidel, H.-P., and Ihrke, I. 2011. Dynamic Display of BRDFs. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Export
BibTeX
@article{Hullin2011, TITLE = {Dynamic Display of {BRDF}s}, AUTHOR = {Hullin, Matthias B. and Lensch, Hendrik P. A. and Raskar, Ramesh and Seidel, Hans-Peter and Ihrke, Ivo}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2011.01859.x}, PUBLISHER = {North Holland}, ADDRESS = {Amsterdam}, YEAR = {2011}, DATE = {2011}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {30}, NUMBER = {2}, PAGES = {475--483}, BOOKTITLE = {EUROGRAPHICS 2011}, }
Endnote
%0 Journal Article %A Hullin, Matthias B. %A Lensch, Hendrik P. A. %A Raskar, Ramesh %A Seidel, Hans-Peter %A Ihrke, Ivo %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Display of BRDFs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13B2-F %F EDOC: 618859 %R 10.1111/j.1467-8659.2011.01859.x %D 2011 %* Review method: peer-reviewed %J Computer Graphics Forum %V 30 %N 2 %& 475 %P 475 - 483 %I North Holland %C Amsterdam %@ false %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 EG 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011
Helten, T., Brock, H., Müller, M., and Seidel, H.-P. 2011a. Classification of Trampoline Jumps Using Inertial Sensors. Sports Engineering 14, 2-4.
Export
BibTeX
@article{HeltenBMS11_ClassificationTrampolineJumps_SE, TITLE = {Classification of Trampoline Jumps Using Inertial Sensors}, AUTHOR = {Helten, Thomas and Brock, Heike and M{\"u}ller, Meinard and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1007/s12283-011-0081-4}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2011}, DATE = {2011}, JOURNAL = {Sports Engineering}, VOLUME = {14}, NUMBER = {2-4}, PAGES = {155--164}, }
Endnote
%0 Journal Article %A Helten, Thomas %A Brock, Heike %A M&#252;ller, Meinard %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Classification of Trampoline Jumps Using Inertial Sensors : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13A3-2 %F EDOC: 618898 %R 10.1007/s12283-011-0081-4 %7 2011 %D 2011 %* Review method: peer-reviewed %J Sports Engineering %V 14 %N 2-4 %& 155 %P 155 - 164 %I Springer %C New York, NY
Helten, T., Müller, M., Tautges, J., Weber, A., and Seidel, H.-P. 2011b. Towards Cross-modal Comparison of Human Motion Data. Pattern Recognition (DAGM 2011), Springer.
Export
BibTeX
@inproceedings{HeltenMTWS11_Cross-modalComparison, TITLE = {Towards Cross-modal Comparison of Human Motion Data}, AUTHOR = {Helten, Thomas and M{\"u}ller, Meinard and Tautges, Jochen and Weber, Andreas and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-642-23122-3}, DOI = {10.1007/978-3-642-23123-0_7}, PUBLISHER = {Springer}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {Pattern Recognition (DAGM 2011)}, EDITOR = {Mester, Rudolf and Felsberg, Michael}, PAGES = {61--70}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {6835}, ADDRESS = {Frankfurt/Main, Germany}, }
Endnote
%0 Conference Proceedings %A Helten, Thomas %A M&#252;ller, Meinard %A Tautges, Jochen %A Weber, Andreas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Towards Cross-modal Comparison of Human Motion Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-140F-8 %F EDOC: 618900 %R 10.1007/978-3-642-23123-0_7 %D 2011 %B 33rd Annual Symposium of the German Association for Pattern Recognition %Z date of event: 2011-08-30 - 2011-09-02 %C Frankfurt/Main, Germany %B Pattern Recognition %E Mester, Rudolf; Felsberg, Michael %P 61 - 70 %I Springer %@ 978-3-642-23122-3 %B Lecture Notes in Computer Science %N 6835
Grochulla, M., Thormählen, T., and Seidel, H.-P. 2011. Using Spatially Distributed Patterns for Multiple View Camera Calibration. Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011), Springer.
Abstract
This paper presents an approach to intrinsic and extrinsic camera parameter calibration from a series of photographs or from video. For the reliable and accurate estimation of camera parameters it is common to use specially designed calibration patterns. However, using a single pattern, a globally consistent calibration is only possible from positions and viewing directions from where this single pattern is visible. To overcome this problem, the presented approach uses multiple coded patterns that can be distributed over a large area. A connection graph representing visible patterns in multiple views is generated, which is used to estimate globally consistent camera parameters for the complete scene. The approach is evaluated on synthetic and real-world ground truth examples. Furthermore, the approach is applied to calibrate the stereo-cameras of a robotic head on a moving platform.
Export
BibTeX
@inproceedings{Grochulla2011, TITLE = {Using Spatially Distributed Patterns for Multiple View Camera Calibration}, AUTHOR = {Grochulla, Martin and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-642-24135-2}, DOI = {10.1007/978-3-642-24136-9}, PUBLISHER = {Springer}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {This paper presents an approach to intrinsic and extrinsic camera parameter calibration from a series of photographs or from video. For the reliable and accurate estimation of camera parameters it is common to use specially designed calibration patterns. However, using a single pattern, a globally consistent calibration is only possible from positions and viewing directions from where this single pattern is visible. To overcome this problem, the presented approach uses multiple coded patterns that can be distributed over a large area. A connection graph representing visible patterns in multiple views is generated, which is used to estimate globally consistent camera parameters for the complete scene. The approach is evaluated on synthetic and real-world ground truth examples. Furthermore, the approach is applied to calibrate the stereo-cameras of a robotic head on a moving platform.}, BOOKTITLE = {Computer Vision / Computer Graphics Collaboration Techniques (MIRAGE 2011)}, EDITOR = {Gagalowicz, Andr{\'e} and Philips, Wilfried}, PAGES = {110--121}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {6930}, ADDRESS = {Rocquencourt, France}, }
Endnote
%0 Conference Proceedings %A Grochulla, Martin %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Spatially Distributed Patterns for Multiple View Camera Calibration : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1414-9 %F EDOC: 618889 %R 10.1007/978-3-642-24136-9 %D 2011 %B 5th International Conference on Computer Vision / Computer Graphics Collaboration Techniques and Applications %Z date of event: 2011-10-10 - 2011-10-11 %C Rocquencourt, France %X This paper presents an approach to intrinsic and extrinsic camera parameter calibration from a series of photographs or from video. For the reliable and accurate estimation of camera parameters it is common to use specially designed calibration patterns. However, using a single pattern, a globally consistent calibration is only possible from positions and viewing directions from where this single pattern is visible. To overcome this problem, the presented approach uses multiple coded patterns that can be distributed over a large area. A connection graph representing visible patterns in multiple views is generated, which is used to estimate globally consistent camera parameters for the complete scene. The approach is evaluated on synthetic and real-world ground truth examples. Furthermore, the approach is applied to calibrate the stereo-cameras of a robotic head on a moving platform. %B Computer Vision / Computer Graphics Collaboration Techniques %E Gagalowicz, Andr&#233;; Philips, Wilfried %P 110 - 121 %I Springer %@ 978-3-642-24135-2 %B Lecture Notes in Computer Science %N 6930
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2011. A Perceptual Model for Disparity. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2011) 30, 4.
Abstract
Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study.
Export
BibTeX
@article{DidykREMS2011,
  title     = {A Perceptual Model for Disparity},
  author    = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  doi       = {10.1145/2010324.1964991},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2011},
  date      = {2011},
  abstract  = {Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study.},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {30},
  number    = {4},
  pages     = {1--10},
  eid       = {96},
  booktitle = {Proceedings of ACM SIGGRAPH 2011},
}
Endnote
%0 Journal Article %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Model for Disparity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1388-F %F EDOC: 618890 %R 10.1145/2010324.1964991 %7 2011 %D 2011 %* Review method: peer-reviewed %X Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study. %J ACM Transactions on Graphics %V 30 %N 4 %& 1 %P 1 - 10 %Z sequence number: 96 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2011 %O ACM SIGGRAPH 2011 Vancouver, BC, Canada
Čadík, M., Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2011. On Evaluation of Video Quality Metrics: an HDR Dataset for Computer Graphics Applications. Human Vision and Electronic Imaging XVI (HVEI 2011), SPIE.
Export
BibTeX
@inproceedings{Cadik2011,
  title     = {On Evaluation of Video Quality Metrics: an {HDR} Dataset for Computer Graphics Applications},
  author    = {{\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-0-8194-8402-4},
  doi       = {10.1117/12.878875},
  publisher = {SPIE},
  year      = {2011},
  date      = {2011},
  booktitle = {Human Vision and Electronic Imaging XVI (HVEI 2011)},
  editor    = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.},
  pages     = {1--9},
  eid       = {78650R},
  series    = {Proceedings of SPIE},
  volume    = {7865},
  address   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A &#268;ad&#237;k, Martin %A Aydin, Tunc Ozan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On Evaluation of Video Quality Metrics: an HDR Dataset for Computer Graphics Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13DF-B %F EDOC: 618862 %R 10.1117/12.878875 %U http://dx.doi.org/10.1117/12.878875 %D 2011 %B Human Vision and Electronic Imaging XVI %Z date of event: 2011-01-24 - 2011-01-27 %C San Francisco, CA, USA %B Human Vision and Electronic Imaging XVI %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 1 - 9 %Z sequence number: 78650R %I SPIE %@ 978-0-8194-8402-4 %B Proceedings of SPIE %N 7865
Bokeloh, M., Wand, M., Koltun, V., and Seidel, H.-P. 2011. Pattern-aware Shape Deformation Using Sliding Dockers. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2011) 30, 6.
Export
BibTeX
@article{Bokeloh2011,
  title     = {Pattern-aware Shape Deformation Using Sliding Dockers},
  author    = {Bokeloh, Martin and Wand, Michael and Koltun, Vladlen and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  isbn      = {978-1-4503-0807-6},
  doi       = {10.1145/2070781.2024157},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2011},
  date      = {2011},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {30},
  number    = {6},
  pages     = {1--10},
  eid       = {123},
  booktitle = {Proceedings of the 2011 SIGGRAPH Asia Conference (ACM SIGGRAPH Asia 2011)},
}
Endnote
%0 Journal Article %A Bokeloh, Martin %A Wand, Michael %A Koltun, Vladlen %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Pattern-aware Shape Deformation Using Sliding Dockers : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13EB-F %F EDOC: 618887 %R 10.1145/2070781.2024157 %D 2011 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 30 %N 6 %& 1 %P 1 - 10 %Z sequence number: 123 %I ACM %C New York, NY %@ false %B Proceedings of the 2011 SIGGRAPH Asia Conference %O ACM SIGGRAPH Asia 2011 SA'11 ; Hong Kong, China SA 2011 %@ 978-1-4503-0807-6
Berner, A., Burghard, O., Wand, M., Mitra, N., Klein, R., and Seidel, H.-P. 2011a. A Morphable Part Model for Shape Manipulation. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We introduce morphable part models for smart shape manipulation using an assembly of deformable parts with appropriate boundary conditions. In an analysis phase, we characterize the continuous allowable variations both for the individual parts and their interconnections using Gaussian shape models with low rank covariance. The discrete aspect of how parts can be assembled is captured using a shape grammar. The parts and their interconnection rules are learned semi-automatically from symmetries within a single object or from semantically corresponding parts across a larger set of example models. The learned discrete and continuous structure is encoded as a graph. In the interaction phase, we obtain an interactive yet intuitive shape deformation framework producing realistic deformations on classes of objects that are difficult to edit using existing structure-aware deformation techniques. Unlike previous techniques, our method uses self-similarities from a single model as training input and allows the user to reassemble the identified parts in new configurations, thus exploiting both the discrete and continuous learned variations while ensuring appropriate boundary conditions across part boundaries.
Export
BibTeX
@techreport{BernerBurghardWandMitraKleinSeidel2011,
  title       = {A Morphable Part Model for Shape Manipulation},
  author      = {Berner, Alexander and Burghard, Oliver and Wand, Michael and Mitra, Niloy and Klein, Reinhard and Seidel, Hans-Peter},
  language    = {eng},
  issn        = {0946-011X},
  number      = {MPI-I-2011-4-005},
  institution = {Max-Planck-Institut f{\"u}r Informatik},
  address     = {Saarbr{\"u}cken},
  year        = {2011},
  date        = {2011},
  abstract    = {We introduce morphable part models for smart shape manipulation using an assembly of deformable parts with appropriate boundary conditions. In an analysis phase, we characterize the continuous allowable variations both for the individual parts and their interconnections using Gaussian shape models with low rank covariance. The discrete aspect of how parts can be assembled is captured using a shape grammar. The parts and their interconnection rules are learned semi-automatically from symmetries within a single object or from semantically corresponding parts across a larger set of example models. The learned discrete and continuous structure is encoded as a graph. In the interaction phase, we obtain an interactive yet intuitive shape deformation framework producing realistic deformations on classes of objects that are difficult to edit using existing structure-aware deformation techniques. Unlike previous techniques, our method uses self-similarities from a single model as training input and allows the user to reassemble the identified parts in new configurations, thus exploiting both the discrete and continuous learned variations while ensuring appropriate boundary conditions across part boundaries.},
  type        = {Research Report},
}
Endnote
%0 Report %A Berner, Alexander %A Burghard, Oliver %A Wand, Michael %A Mitra, Niloy %A Klein, Reinhard %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Morphable Part Model for Shape Manipulation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6972-0 %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2011 %P 33 p. %X We introduce morphable part models for smart shape manipulation using an assembly of deformable parts with appropriate boundary conditions. In an analysis phase, we characterize the continuous allowable variations both for the individual parts and their interconnections using Gaussian shape models with low rank covariance. The discrete aspect of how parts can be assembled is captured using a shape grammar. The parts and their interconnection rules are learned semi-automatically from symmetries within a single object or from semantically corresponding parts across a larger set of example models. The learned discrete and continuous structure is encoded as a graph. In the interaction phase, we obtain an interactive yet intuitive shape deformation framework producing realistic deformations on classes of objects that are difficult to edit using existing structure-aware deformation techniques. Unlike previous techniques, our method uses self-similarities from a single model as training input and allows the user to reassemble the identified parts in new configurations, thus exploiting both the discrete and continuous learned variations while ensuring appropriate boundary conditions across part boundaries. %B Research Report %@ false
Berner, A., Wand, M., Mitra, N.J., Mewes, D., and Seidel, H.-P. 2011b. Shape Analysis with Subspace Symmetries. Computer Graphics Forum (Proc. EUROGRAPHICS 2011) 30, 2.
Abstract
We address the problem of partial symmetry detection, i.e., the identification of building blocks a complex shape is composed of. Previous techniques identify parts that relate to each other by simple rigid mappings, similarity transforms, or, more recently, intrinsic isometries. Our approach generalizes the notion of partial symmetries to more general deformations. We introduce subspace symmetries whereby we characterize similarity by requiring the set of symmetric parts to form a low dimensional shape space. We present an algorithm to discover subspace symmetries based on detecting linearly correlated correspondences among graphs of invariant features. The detected subspace symmetries along with the modeled variations are useful for a variety of applications including shape completion, non-local and non-rigid denoising. We evaluate our technique on various data sets. We show that for models with pronounced surface features, subspace symmetries can be found fully automatically. For complicated cases, a small amount of user input is used to resolve ambiguities. Our technique computes dense correspondences that can subsequently be used in various applications, such as model repair and denoising.
Export
BibTeX
@article{Berner2011SubspaceSymmetry,
  title     = {Shape Analysis with Subspace Symmetries},
  author    = {Berner, Alexander and Wand, Michael and Mitra, Niloy J. and Mewes, Daniel and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  url       = {http://www.mpi-inf.mpg.de/%7Eaberner/subspace_symmetry_eg11.pdf},
  doi       = {10.1111/j.1467-8659.2011.01859.x},
  publisher = {North Holland},
  address   = {Amsterdam},
  year      = {2011},
  date      = {2011},
  abstract  = {We address the problem of partial symmetry detection, i.e., the identification of building blocks a complex shape is composed of. Previous techniques identify parts that relate to each other by simple rigid mappings, similarity transforms, or, more recently, intrinsic isometries. Our approach generalizes the notion of partial symmetries to more general deformations. We introduce subspace symmetries whereby we characterize similarity by requiring the set of symmetric parts to form a low dimensional shape space. We present an algorithm to discover subspace symmetries based on detecting linearly correlated correspondences among graphs of invariant features. The detected subspace symmetries along with the modeled variations are useful for a variety of applications including shape completion, non-local and non-rigid denoising. We evaluate our technique on various data sets. We show that for models with pronounced surface features, subspace symmetries can be found fully automatically. For complicated cases, a small amount of user input is used to resolve ambiguities. Our technique computes dense correspondences that can subsequently be used in various applications, such as model repair and denoising.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {30},
  number    = {2},
  pages     = {277--286},
  booktitle = {EUROGRAPHICS 2011},
}
Endnote
%0 Journal Article %A Berner, Alexander %A Wand, Michael %A Mitra, Niloy J. %A Mewes, Daniel %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shape Analysis with Subspace Symmetries : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13FA-D %F EDOC: 618857 %R 10.1111/j.1467-8659.2011.01859.x %U http://www.mpi-inf.mpg.de/%7Eaberner/subspace_symmetry_eg11.pdf %D 2011 %* Review method: peer-reviewed %X We address the problem of partial symmetry detection, i.e., the identification of building blocks a complex shape is composed of. Previous techniques identify parts that relate to each other by simple rigid mappings, similarity transforms, or, more recently, intrinsic isometries. Our approach generalizes the notion of partial symmetries to more general deformations. We introduce subspace symmetries whereby we characterize similarity by requiring the set of symmetric parts to form a low dimensional shape space. We present an algorithm to discover subspace symmetries based on detecting linearly correlated correspondences among graphs of invariant features. The detected subspace symmetries along with the modeled variations are useful for a variety of applications including shape completion, non-local and non-rigid denoising. We evaluate our technique on various data sets. We show that for models with pronounced surface features, subspace symmetries can be found fully automatically. For complicated cases, a small amount of user input is used to resolve ambiguities. Our technique computes dense correspondences that can subsequently be used in various applications, such as model repair and denoising. 
%J Computer Graphics Forum %V 30 %N 2 %& 277 %P 277 - 286 %I North Holland %C Amsterdam %@ false %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 EG 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011
Baboud, L., Čadík, M., Eisemann, E., and Seidel, H.-P. 2011. Automatic Photo-to-Terrain Alignment for the Annotation of Mountain Pictures. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011), IEEE.
Export
BibTeX
@inproceedings{Baboud2011,
  title     = {Automatic Photo-to-Terrain Alignment for the Annotation of Mountain Pictures},
  author    = {Baboud, Lionel and {\v C}ad{\'i}k, Martin and Eisemann, Elmar and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-1-4577-0394-2},
  doi       = {10.1109/CVPR.2011.5995727},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2011)},
  editor    = {Felzenszwalb, Pedro and Forsyth, David and Fua, Pascal},
  pages     = {41--48},
  address   = {Colorado Springs, CO, USA},
}
Endnote
%0 Conference Proceedings %A Baboud, Lionel %A &#268;ad&#237;k, Martin %A Eisemann, Elmar %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Photo-to-Terrain Alignment for the Annotation of Mountain Pictures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1393-6 %F EDOC: 618863 %R 10.1109/CVPR.2011.5995727 %D 2011 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2011-06-20 - 2011-06-25 %C Colorado Springs, CO, USA %B IEEE Conference on Computer Vision and Pattern Recognition %E Felzenszwalb, Pedro; Forsyth, David; Fua, Pascal %P 41 - 48 %I IEEE %@ 978-1-4577-0394-2
Baak, A., Müller, M., Bharaj, G., Seidel, H.-P., and Theobalt, C. 2011. A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera. IEEE International Conference on Computer Vision (ICCV 2011), IEEE.
Export
BibTeX
@inproceedings{BaakMuBhSeTh2011_DataDrivenDepthTracking_ICCV,
  title     = {A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera},
  author    = {Baak, Andreas and M{\"u}ller, Meinard and Bharaj, Gaurav and Seidel, Hans-Peter and Theobalt, Christian},
  language  = {eng},
  isbn      = {978-1-4577-1101-5},
  doi       = {10.1109/ICCV.2011.6126356},
  publisher = {IEEE},
  year      = {2011},
  date      = {2011},
  booktitle = {IEEE International Conference on Computer Vision (ICCV 2011)},
  pages     = {1092--1099},
  address   = {Barcelona, Spain},
}
Endnote
%0 Conference Proceedings %A Baak, Andreas %A M&#252;ller, Meinard %A Bharaj, Gaurav %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Data-driven Approach for Real-time Full Body Pose Reconstruction from a Depth Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-137E-7 %F EDOC: 618875 %R 10.1109/ICCV.2011.6126356 %D 2011 %B IEEE International Conference on Computer Vision %Z date of event: 2011-11-06 - 2011-11-13 %C Barcelona, Spain %B IEEE International Conference on Computer Vision %P 1092 - 1099 %I IEEE %@ 978-1-4577-1101-5
2010
Yang, B., Dong, Z., Feng, J., Seidel, H.-P., and Kautz, J. 2010. Variance Soft Shadow Mapping. Computer Graphics Forum 29, 7.
Abstract
We present variance soft shadow mapping (VSSM) for rendering plausible soft shadow in real-time. VSSM is based on the theoretical framework of percentage-closer soft shadows (PCSS) and exploits recent advances in variance shadow mapping (VSM). Our new formulation allows for the efficient computation of (average) blocker distances, a common bottleneck in PCSS-based methods. Furthermore, we avoid incorrectly lit pixels commonly encountered in VSM-based methods by appropriately subdividing the filter kernel. We demonstrate that VSSM renders high-quality soft shadows efficiently (usually over 100 fps) for complex scene settings. Its speed is at least one order of magnitude faster than PCSS for large penumbra.
Export
BibTeX
@article{Dong:2010:VSSM,
  title         = {Variance Soft Shadow Mapping},
  author        = {Yang, Baoguang and Dong, Zhao and Feng, Jieqing and Seidel, Hans-Peter and Kautz, Jan},
  language      = {eng},
  issn          = {1467-8659},
  internal-note = {NOTE(review): ISSN here is the online ISSN; sibling CGF entry Berner2011SubspaceSymmetry uses print ISSN 0167-7055 -- confirm which convention the database wants},
  doi           = {10.1111/j.1467-8659.2010.01800.x},
  publisher     = {Blackwell},
  address       = {Oxford, UK},
  year          = {2010},
  date          = {2010},
  abstract      = {We present variance soft shadow mapping (VSSM) for rendering plausible soft shadow in real-time. VSSM is based on the theoretical framework of percentage-closer soft shadows (PCSS) and exploits recent advances in variance shadow mapping (VSM). Our new formulation allows for the efficient computation of (average) blocker distances, a common bottleneck in PCSS-based methods. Furthermore, we avoid incorrectly lit pixels commonly encountered in VSM-based methods by appropriately subdividing the filter kernel. We demonstrate that VSSM renders high-quality soft shadows efficiently (usually over 100 fps) for complex scene settings. Its speed is at least one order of magnitude faster than PCSS for large penumbra.},
  journal       = {Computer Graphics Forum},
  volume        = {29},
  number        = {7},
  pages         = {2127--2134},
}
Endnote
%0 Journal Article %A Yang, Baoguang %A Dong, Zhao %A Feng, Jieqing %A Seidel, Hans-Peter %A Kautz, Jan %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Variance Soft Shadow Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1793-7 %F EDOC: 537317 %R 10.1111/j.1467-8659.2010.01800.x %7 2010 %D 2010 %* Review method: peer-reviewed %X We present variance soft shadow mapping (VSSM) for rendering plausible soft shadow in real-time. VSSM is based on the theoretical framework of percentage-closer soft shadows (PCSS) and exploits recent advances in variance shadow mapping (VSM). Our new formulation allows for the efficient computation of (average) blocker distances, a common bottleneck in PCSS-based methods. Furthermore, we avoid incorrectly lit pixels commonly encountered in VSM-based methods by appropriately subdividing the filter kernel. We demonstrate that VSSM renders highquality soft shadows efficiently (usually over 100 fps) for complex scene settings. Its speed is at least one order of magnitude faster than PCSS for large penumbra. %J Computer Graphics Forum %V 29 %N 7 %& 2127 %P 2127 - 2134 %I Blackwell %C Oxford, UK %@ false
Wang, O., Fuchs, M., Fuchs, C., Davis, J., Seidel, H.-P., and Lensch, H.P.A. 2010. A Context-aware Light Source. IEEE International Conference on Computational Photography (ICCP 2010), IEEE.
Abstract
We present a technique that combines the visual benefits of virtual enhancement with the intuitive interaction of the real world. We accomplish this by introducing the concept of a context-aware light source. This light source provides illumination based on scene context in real-time. This allows us to project feature enhancement in-place onto an object while it is being manipulated by the user. A separate proxy light source can be employed to enable freely programmable shading responses for interactive scene analysis. We created a prototype hardware setup and have implemented several applications that demonstrate the approach, such as a sharpening light, an edge highlighting light, an accumulation light, and a light with a programmable, nonlinear shading response.
Export
BibTeX
@inproceedings{WangICCP2010,
  title     = {A Context-aware Light Source},
  author    = {Wang, Oliver and Fuchs, Martin and Fuchs, Christian and Davis, James and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  language  = {eng},
  isbn      = {978-1-4244-7022-8},
  url       = {http://graphics.soe.ucsc.edu/publications/data/wango-context.pdf},
  doi       = {10.1109/ICCPHOT.2010.5585091},
  publisher = {IEEE},
  year      = {2010},
  date      = {2010},
  abstract  = {We present a technique that combines the visual benefits of virtual enhancement with the intuitive interaction of the real world. We accomplish this by introducing the concept of a context-aware light source. This light source provides illumination based on scene context in real-time. This allows us to project feature enhancement in-place onto an object while it is being manipulated by the user. A separate proxy light source can be employed to enable freely programmable shading responses for interactive scene analysis. We created a prototype hardware setup and have implemented several applications that demonstrate the approach, such as a sharpening light, an edge highlighting light, an accumulation light, and a light with a programmable, nonlinear shading response.},
  booktitle = {IEEE International Conference on Computational Photography (ICCP 2010)},
  pages     = {1--8},
  address   = {Cambridge, MA, USA},
}
Endnote
%0 Conference Proceedings %A Wang, Oliver %A Fuchs, Martin %A Fuchs, Christian %A Davis, James %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Context-aware Light Source : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1726-0 %F EDOC: 537298 %R 10.1109/ICCPHOT.2010.5585091 %U http://graphics.soe.ucsc.edu/publications/data/wango-context.pdf %D 2010 %B International Conference on Computational Photography %Z date of event: 2010-03-28 - 2010-03-30 %C Cambridge, MA, USA %X We present a technique that combines the visual benefits of virtual enhancement with the intuitive interaction of the real world. We accomplish this by introducing the concept of a context-aware light source. This light source provides illumination based on scene context in real-time. This allows us to project feature enhancement in-place onto an object while it is being manipulated by the user. A separate proxy light source can be employed to enable freely programmable shading responses for interactive scene analysis. We created a prototype hardware setup and have implemented several applications that demonstrate the approach, such as a sharpening light, an edge highlighting light, an accumulation light, and a light with a programmable, nonlinear shading response. %B IEEE International Conference on Computational Photography %P 1 - 8 %I IEEE %@ 978-1-4244-7022-8
Thormählen, T., Hasler, N., Wand, M., and Seidel, H.-P. 2010. Registration of Sub-Sequence and Multi-Camera Reconstructions for Camera Motion Estimation. Journal of Virtual Reality and Broadcasting 7, 2.
Export
BibTeX
@article{Thormahlen2010jvrb,
  title     = {Registration of Sub-Sequence and Multi-Camera Reconstructions for Camera Motion Estimation},
  author    = {Thorm{\"a}hlen, Thorsten and Hasler, Nils and Wand, Michael and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1860-2037},
  url       = {http://www.jvrb.org/archiv/2437/720102.pdf},
  publisher = {HBZ},
  address   = {K{\"o}ln},
  year      = {2010},
  date      = {2010},
  journal   = {Journal of Virtual Reality and Broadcasting},
  volume    = {7},
  number    = {2},
  pages     = {1--10},
}
Endnote
%0 Journal Article %A Thorm&#228;hlen, Thorsten %A Hasler, Nils %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Registration of Sub-Sequence and Multi-Camera Reconstructions for Camera Motion Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1785-9 %F EDOC: 537301 %U http://www.jvrb.org/archiv/2437/720102.pdf %D 2010 %J Journal of Virtual Reality and Broadcasting %V 7 %N 2 %& 1 %P 1 - 10 %I HBZ %C K&#246;ln %@ false
Theobalt, C., de Aguiar, E., Stoll, C., Seidel, H.-P., and Thrun, S. 2010. Performance Capture from Multi-view Video. In: Image and Geometry Processing for 3D-Cinematography. Springer, Berlin.
Export
BibTeX
@incollection{TheobaltAguiar2010,
  title     = {Performance Capture from Multi-view Video},
  author    = {Theobalt, Christian and de Aguiar, Edilson and Stoll, Carsten and Seidel, Hans-Peter and Thrun, Sebastian},
  language  = {eng},
  isbn      = {978-3-642-12391-7},
  doi       = {10.1007/978-3-642-12392-4_6},
  publisher = {Springer},
  address   = {Berlin},
  year      = {2010},
  date      = {2010},
  booktitle = {Image and Geometry Processing for 3D-Cinematography},
  editor    = {Ronfard, R{\'e}mi and Taubin, Gabriel},
  pages     = {127--149},
  series    = {Geometry and Computing},
  volume    = {5},
}
Endnote
%0 Book Section %A Theobalt, Christian %A de Aguiar, Edilson %A Stoll, Carsten %A Seidel, Hans-Peter %A Thrun, Sebastian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Performance Capture from Multi-view Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-177A-3 %F EDOC: 537270 %R 10.1007/978-3-642-12392-4_6 %D 2010 %B Image and Geometry Processing for 3D-Cinematography %E Ronfard, R&#233;mi; Taubin, Gabriel %P 127 - 149 %I Springer %C Berlin %@ 978-3-642-12391-7 %S Geometry and Computing %N 5
Tevs, A., Wand, M., Ihrke, I., and Seidel, H.-P. 2010. A Bayesian Approach to Manifold Topology Reconstruction. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this paper, we investigate the problem of statistical reconstruction of piecewise linear manifold topology. Given a noisy, probably undersampled point cloud from a one- or two-manifold, the algorithm reconstructs an approximated most likely mesh in a Bayesian sense from which the sample might have been taken. We incorporate statistical priors on the object geometry to improve the reconstruction quality if additional knowledge about the class of original shapes is available. The priors can be formulated analytically or learned from example geometry with known manifold tessellation. The statistical objective function is approximated by a linear programming / integer programming problem, for which a globally optimal solution is found. We apply the algorithm to a set of 2D and 3D reconstruction examples, demonstrating that a statistics-based manifold reconstruction is feasible, and still yields plausible results in situations where sampling conditions are violated.
Export
BibTeX
@techreport{TevsTechReport2009, TITLE = {A Bayesian Approach to Manifold Topology Reconstruction}, AUTHOR = {Tevs, Art and Wand, Michael and Ihrke, Ivo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0946-011X}, NUMBER = {MPI-I-2009-4-002}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {In this paper, we investigate the problem of statistical reconstruction of piecewise linear manifold topology. Given a noisy, probably undersampled point cloud from a one- or two-manifold, the algorithm reconstructs an approximated most likely mesh in a Bayesian sense from which the sample might have been taken. We incorporate statistical priors on the object geometry to improve the reconstruction quality if additional knowledge about the class of original shapes is available. The priors can be formulated analytically or learned from example geometry with known manifold tessellation. The statistical objective function is approximated by a linear programming / integer programming problem, for which a globally optimal solution is found. We apply the algorithm to a set of 2D and 3D reconstruction examples, demonstrating that a statistics-based manifold reconstruction is feasible, and still yields plausible results in situations where sampling conditions are violated.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Tevs, Art %A Wand, Michael %A Ihrke, Ivo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Bayesian Approach to Manifold Topology Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1722-7 %F EDOC: 537282 %@ 0946-011X %Y Max-Planck-Institut f&#252;r Informatik %C Saarbr&#252;cken %D 2010 %P 23 p. %X In this paper, we investigate the problem of statistical reconstruction of piecewise linear manifold topology. Given a noisy, probably undersampled point cloud from a one- or two-manifold, the algorithm reconstructs an approximated most likely mesh in a Bayesian sense from which the sample might have been taken. We incorporate statistical priors on the object geometry to improve the reconstruction quality if additional knowledge about the class of original shapes is available. The priors can be formulated analytically or learned from example geometry with known manifold tessellation. The statistical objective function is approximated by a linear programming / integer programming problem, for which a globally optimal solution is found. We apply the algorithm to a set of 2D and 3D reconstruction examples, demonstrating that a statistics-based manifold reconstruction is feasible, and still yields plausible results in situations where sampling conditions are violated. %B Research Report
Strzodka, R., Shaheen, M., Pajak, D., and Seidel, H.-P. 2010. Cache Oblivious Parallelograms in Iterative Stencil Computations. ICS’10, 24th ACM International Conference on Supercomputing, ACM.
Abstract
We present a new cache oblivious scheme for iterative stencil computations that performs beyond system bandwidth limitations as though gigabytes of data could reside in an enormous on-chip cache. We compare execution times for 2D and 3D spatial domains with up to 128 million double precision elements for constant and variable stencils against hand-optimized naive code and the automatic polyhedral parallelizer and locality optimizer PluTo and demonstrate the clear superiority of our results. The performance benefits stem from a tiling structure that caters for data locality, parallelism and vectorization simultaneously. Rather than tiling the iteration space from inside, we take an exterior approach with a predefined hierarchy, simple regular parallelogram tiles and a locality preserving parallelization. These advantages come at the cost of an irregular work-load distribution but a tightly integrated load-balancer ensures a high utilization of all resources.
Export
BibTeX
% NOTE(review): ADDRESS holds the event venue (Tsukuba) rather than the publisher's city, matching this file's convention for conference entries. "work-load" in the abstract may be a line-break hyphenation artifact -- verify against the published ICS'10 abstract before changing.
@inproceedings{StShPa_10CORALS, TITLE = {Cache Oblivious Parallelograms in Iterative Stencil Computations}, AUTHOR = {Strzodka, Robert and Shaheen, Mohammed and Pajak, Dawid and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-0018-6}, DOI = {10.1145/1810085.1810096}, PUBLISHER = {ACM}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {We present a new cache oblivious scheme for iterative stencil computations that performs beyond system bandwidth limitations as though gigabytes of data could reside in an enormous on-chip cache. We compare execution times for 2D and 3D spatial domains with up to 128 million double precision elements for constant and variable stencils against hand-optimized naive code and the automatic polyhedral parallelizer and locality optimizer PluTo and demonstrate the clear superiority of our results. The performance benefits stem from a tiling structure that caters for data locality, parallelism and vectorization simultaneously. Rather than tiling the iteration space from inside, we take an exterior approach with a predefined hierarchy, simple regular parallelogram tiles and a locality preserving parallelization. These advantages come at the cost of an irregular work-load distribution but a tightly integrated load-balancer ensures a high utilization of all resources.}, BOOKTITLE = {ICS'10, 24th ACM International Conference on Supercomputing}, PAGES = {49--59}, ADDRESS = {Tsukuba, Ibaraki, Japan}, }
Endnote
%0 Conference Proceedings %A Strzodka, Robert %A Shaheen, Mohammed %A Pajak, Dawid %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Graphics - Optics - Vision, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Cache Oblivious Parallelograms in Iterative Stencil Computations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1742-0 %F EDOC: 537274 %R 10.1145/1810085.1810096 %D 2010 %B 24th ACM International Conference on Supercomputing %Z date of event: 2010-06-01 - 2010-06-05 %C Tsukuba, Ibaraki, Japan %X We present a new cache oblivious scheme for iterative stencil computations that performs beyond system bandwidth limitations as though gigabytes of data could reside in an enormous on-chip cache. We compare execution times for 2D and 3D spatial domains with up to 128 million double precision elements for constant and variable stencils against hand-optimized naive code and the automatic polyhedral parallelizer and locality optimizer PluTo and demonstrate the clear superiority of our results. The performance benefits stem from a tiling structure that caters for data locality, parallelism and vectorization simultaneously. Rather than tiling the iteration space from inside, we take an exterior approach with a predefined hierarchy, simple regular parallelogram tiles and a locality preserving parallelization. These advantages come at the cost of an irregular work-load distribution but a tightly integrated load-balancer ensures a high utilization of all resources. %B ICS'10 %P 49 - 59 %I ACM %@ 978-1-4503-0018-6
Schultz, T., Theisel, H., and Seidel, H.-P. 2010. Crease Surfaces: From Theory to Extraction and Application to Diffusion Tensor MRI. IEEE Transactions on Visualization and Computer Graphics 16, 1.
Abstract
Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal) have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not, in general, orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that diffusion tensor magnetic resonance imaging (DT-MRI) stream surfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute.
Export
BibTeX
% Schultz/Theisel/Seidel, IEEE TVCG 16(1):109--119 (2010); DOI 10.1109/TVCG.2009.44. The braces in TITLE ({F}rom, {MRI}) protect capitalization from .bst sentence-casing.
@article{Schultz:TVCG10, TITLE = {Crease Surfaces: {F}rom Theory to Extraction and Application to Diffusion Tensor {MRI}}, AUTHOR = {Schultz, Thomas and Theisel, Holger and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2009.44}, PUBLISHER = {IEEE}, ADDRESS = {Piscataway, NJ}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal) have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not, in general, orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that diffusion tensor magnetic resonance imaging (DT-MRI) stream surfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute.}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {16}, NUMBER = {1}, PAGES = {109--119}, }
Endnote
%0 Journal Article %A Schultz, Thomas %A Theisel, Holger %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Crease Surfaces: From Theory to Extraction and Application to Diffusion Tensor MRI : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-174A-F %F EDOC: 537280 %R 10.1109/TVCG.2009.44 %D 2010 %* Review method: peer-reviewed %X Crease surfaces are two-dimensional manifolds along which a scalar field assumes a local maximum (ridge) or a local minimum (valley) in a constrained space. Unlike isosurfaces, they are able to capture extremal structures in the data. Creases have a long tradition in image processing and computer vision, and have recently become a popular tool for visualization. When extracting crease surfaces, degeneracies of the Hessian (i.e., lines along which two eigenvalues are equal) have so far been ignored. We show that these loci, however, have two important consequences for the topology of crease surfaces: First, creases are bounded not only by a side constraint on eigenvalue sign, but also by Hessian degeneracies. Second, crease surfaces are not, in general, orientable. We describe an efficient algorithm for the extraction of crease surfaces which takes these insights into account and demonstrate that it produces more accurate results than previous approaches. Finally, we show that diffusion tensor magnetic resonance imaging (DT-MRI) stream surfaces, which were previously used for the analysis of planar regions in diffusion tensor MRI data, are mathematically ill-defined. As an example application of our method, creases in a measure of planarity are presented as a viable substitute. %J IEEE Transactions on Visualization and Computer Graphics %V 16 %N 1 %& 109 %P 109 - 119 %I IEEE %C Piscataway, NJ %@ false
Ritschel, T., Thormählen, T., Dachsbacher, C., Kautz, J., and Seidel, H.-P. 2010. Interactive On-Surface Signal Deformation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010) 29, 4.
Export
BibTeX
@article{Ritschel2010SigDeform,
  TITLE     = {Interactive On-Surface Signal Deformation},
  AUTHOR    = {Ritschel, Tobias and Thorm{\"a}hlen, Thorsten and Dachsbacher, Carsten and Kautz, Jan and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0730-0301},
  DOI       = {10.1145/1778765.1778773},
  PUBLISHER = {ACM},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  JOURNAL   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME    = {29},
  NUMBER    = {4},
  PAGES     = {1--8},
  EID       = {36},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH 2010},
}
Endnote
%0 Journal Article %A Ritschel, Tobias %A Thorm&#228;hlen, Thorsten %A Dachsbacher, Carsten %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive On-Surface Signal Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1759-D %F EDOC: 537302 %R 10.1145/1778765.1778773 %7 2010 %D 2010 %* Review method: peer-reviewed %J ACM Transactions on Graphics %V 29 %N 4 %& 1 %P 1 - 8 %Z sequence number: 36 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA, [25-29 July 2010]
Pons-Moll, G., Baak, A., Helten, T., Müller, M., Seidel, H.-P., and Rosenhahn, B. 2010. Multisensor-Fusion for 3D Full-Body Human Motion Capture. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Export
BibTeX
@inproceedings{PonsBaHeMuSeRo10_MultisensorFusion_CVPR,
  TITLE     = {Multisensor-Fusion for {3D} Full-Body Human Motion Capture},
  AUTHOR    = {Pons-Moll, Gerard and Baak, Andreas and Helten, Thomas and M{\"u}ller, Meinard and Seidel, Hans-Peter and Rosenhahn, Bodo},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-6984-0},
  DOI       = {10.1109/CVPR.2010.5540153},
  PUBLISHER = {IEEE},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)},
  PAGES     = {663--670},
  ADDRESS   = {San Francisco, CA, USA},
}
Endnote
%0 Conference Proceedings %A Pons-Moll, Gerard %A Baak, Andreas %A Helten, Thomas %A M&#252;ller, Meinard %A Seidel, Hans-Peter %A Rosenhahn, Bodo %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Multisensor-Fusion for 3D Full-Body Human Motion Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-176E-0 %F EDOC: 537294 %R 10.1109/CVPR.2010.5540153 %U http://dx.doi.org/10.1109/CVPR.2010.5540153 %D 2010 %B IEEE Conference on Computer Vision on Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %B IEEE Conference on Computer Vision and Pattern Recognition %P 663 - 670 %I IEEE %@ 978-1-424-46984-0
Pajak, D., Čadík, M., Aydin, T.O., Okabe, M., Myszkowski, K., and Seidel, H.-P. 2010a. Contrast Prescription for Multiscale Image Editing. The Visual Computer 26, 6.
Export
BibTeX
@article{Cadik2010,
  TITLE     = {Contrast Prescription for Multiscale Image Editing},
  AUTHOR    = {Pajak, Dawid and {\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Okabe, Makoto and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISSN      = {0178-2789},
  DOI       = {10.1007/s00371-010-0485-3},
  PUBLISHER = {Springer},
  ADDRESS   = {New York, NY},
  YEAR      = {2010},
  DATE      = {2010},
  JOURNAL   = {The Visual Computer},
  VOLUME    = {26},
  NUMBER    = {6},
  PAGES     = {739--748},
}
Endnote
%0 Journal Article %A Pajak, Dawid %A &#268;ad&#237;k, Martin %A Aydin, Tunc Ozan %A Okabe, Makoto %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Contrast Prescription for Multiscale Image Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1748-4 %F EDOC: 537310 %R 10.1007/s00371-010-0485-3 %7 2010 %D 2010 %* Review method: peer-reviewed %J The Visual Computer %V 26 %N 6 %& 739 %P 739 - 748 %I Springer %C New York, NY %@ false
Pajak, D., Čadík, M., Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2010b. Visual Maladaptation in Contrast Domain. Human Vision and Electronic Imaging XV (HVEI 2010), SPIE.
Export
BibTeX
@inproceedings{Pajak2010,
  TITLE     = {Visual Maladaptation in Contrast Domain},
  AUTHOR    = {Pajak, Dawid and {\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Myszkowski, Karol and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {9780819479204},
  DOI       = {10.1117/12.844934},
  PUBLISHER = {SPIE},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {Human Vision and Electronic Imaging XV (HVEI 2010)},
  EDITOR    = {Rogowitz, Bernice and Pappas, Thrasyvoulos N.},
  PAGES     = {1--12},
  EID       = {752710},
  SERIES    = {Proceedings of SPIE},
  VOLUME    = {7527},
  ADDRESS   = {San Jose, CA, USA},
}
Endnote
%0 Conference Proceedings %A Pajak, Dawid %A &#268;ad&#237;k, Martin %A Aydin, Tunc Ozan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visual Maladaptation in Contrast Domain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-179C-6 %F EDOC: 537311 %R 10.1117/12.844934 %D 2010 %B Human Vision and Electronic Imaging XV %Z date of event: 2010-01-18 - 2010-01-21 %C San Jose, CA, USA %B Human Vision and Electronic Imaging XV %E Rogowitz, Bernice; Pappas, Thrasyvoulos N. %P 1 - 12 %Z sequence number: 752710 %I SPIE %@ 9780819479204 %B Proceedings of SPIE %N 7527
Lee, S., Eisemann, E., and Seidel, H.-P. 2010. Real-Time Lens Blur Effects and Focus Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010) 29, 4.
Abstract
We present a novel rendering system for defocus-blur and lens effects. It supports physically-based rendering and outperforms previous approaches by involving a novel GPU-based tracing method. Our solution achieves more precision than competing real-time solutions and our results are mostly indistinguishable from offline rendering. Our method is also more general and can integrate advanced simulations, such as simple geometric lens models enabling various lens aberration effects. These latter are crucial for realism, but are often employed in artistic contexts too. We show that available artistic lenses can be simulated by our method. In this spirit, our work introduces an intuitive control over depth-of-field effects. The physical basis is crucial as a starting point to enable new artistic renderings based on a generalized focal surface to emphasize particular elements in the scene while retaining a realistic look. Our real-time solution provides realistic, as well as plausible expressive results.
Export
BibTeX
@article{Lee2010lensblur, TITLE = {Real-Time Lens Blur Effects and Focus Control}, AUTHOR = {Lee, Sungkil and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/1778765.1778802}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {We present a novel rendering system for defocus-blur and lens effects. It supports physically-based rendering and outperforms previous approaches by involving a novel GPU-based tracing method. Our solution achieves more precision than competing real-time solutions and our results are mostly indistinguishable from offline rendering. Our method is also more general and can integrate advanced simulations, such as simple geometric lens models enabling various lens aberration effects. These latter are crucial for realism, but are often employed in artistic contexts too. We show that available artistic lenses can be simulated by our method. In this spirit, our work introduces an intuitive control over depth-of-field effects. The physical basis is crucial as a starting point to enable new artistic renderings based on a generalized focal surface to emphasize particular elements in the scene while retaining a realistic look. Our real-time solution provides realistic, as well as plausible expressive results.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {29}, NUMBER = {4}, PAGES = {1--7}, EID = {65}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2010}, }
Endnote
%0 Journal Article %A Lee, Sungkil %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Lens Blur Effects and Focus Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1781-2 %F EDOC: 537318 %R 10.1145/1778765.1778802 %7 2010 %D 2010 %X We present a novel rendering system for defocus-blur and lens effects. It supports physically-based rendering and outperforms previous approaches by involving a novel GPU-based tracing method. Our solution achieves more precision than competing real-time solutions and our results are mostly indistinguishable from offline rendering. Our method is also more general and can integrate advanced simulations, such as simple geometric lens models enabling various lens aberration effects. These latter are crucial for realism, but are often employed in artistic contexts too. We show that available artistic lenses can be simulated by our method. In this spirit, our work introduces an intuitive control over depth-of-field effects. The physical basis is crucial as a starting point to enable new artistic renderings based on a generalized focal surface to emphasize particular elements in the scene while retaining a realistic look. Our real-time solution provides realistic, as well as plausible expressive results. %J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 7 %Z sequence number: 65 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %V 29 %N 4
Kurz, C., Thormählen, T., Seidel, H.-P., Ritschel, T., and Eisemann, E. 2010. Camera Motion Style Transfer. Conference on Visual Media Production (CVMP 2010), IEEE Computer Society.
Export
BibTeX
@inproceedings{Kurz2010cvmp,
  TITLE     = {Camera Motion Style Transfer},
  AUTHOR    = {Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter and Ritschel, Tobias and Eisemann, Elmar},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-8872-8},
  DOI       = {10.1109/CVMP.2010.9},
  LOCALID   = {Local-ID: C125675300671F7B-ABA8A88A84C6FB60C12577A1004736D6-Kurz2010cvmp},
  PUBLISHER = {IEEE Computer Society},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {Conference on Visual Media Production (CVMP 2010)},
  PAGES     = {9--16},
  ADDRESS   = {London, UK},
}
Endnote
%0 Conference Proceedings %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %A Ritschel, Tobias %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Camera Motion Style Transfer : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1744-C %F EDOC: 537304 %R 10.1109/CVMP.2010.9 %F OTHER: Local-ID: C125675300671F7B-ABA8A88A84C6FB60C12577A1004736D6-Kurz2010cvmp %D 2010 %B Conference on Visual Media Production %Z date of event: 2010-11-17 - 2010-11-18 %C London, UK %B Conference on Visual Media Production %P 9 - 16 %I IEEE Computer Society %@ 978-1-4244-8872-8
Kosov, S., Thormählen, T., and Seidel, H.-P. 2010. Rapid Stereo-Vision Enhanced Face Recognition. ICIP 2010, IEEE International Conference on Image Processing, IEEE.
Export
BibTeX
@inproceedings{Kosov2010recog,
  TITLE     = {Rapid Stereo-Vision Enhanced Face Recognition},
  AUTHOR    = {Kosov, Sergey and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter},
  LANGUAGE  = {eng},
  ISBN      = {978-1-4244-7992-4},
  DOI       = {10.1109/ICIP.2010.5652010},
  LOCALID   = {Local-ID: C125675300671F7B-E01CE6CCFB4BA3C3C12577A100369981-Kosov2010recog},
  PUBLISHER = {IEEE},
  YEAR      = {2010},
  DATE      = {2010},
  BOOKTITLE = {ICIP 2010, IEEE International Conference on Image Processing},
  PAGES     = {2437--2440},
  ADDRESS   = {Hong Kong},
}
Endnote
%0 Conference Proceedings %A Kosov, Sergey %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Rapid Stereo-Vision Enhanced Face Recognition : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-177C-0 %F EDOC: 537303 %R 10.1109/ICIP.2010.5652010 %F OTHER: Local-ID: C125675300671F7B-E01CE6CCFB4BA3C3C12577A100369981-Kosov2010recog %D 2010 %B IEEE International Conference on Image Processing %Z date of event: 2010-09-26 - 2010-09-29 %C Hong Kong %B ICIP 2010 %P 2437 - 2440 %I IEEE %@ 978-1-4244-7992-4
Kerber, J., Tevs, A., Zayer, R., Belyaev, A., and Seidel, H.-P. 2010a. Real-time Generation of Digital Bas-Reliefs. Computer-Aided Design and Applications 7, 4.
Abstract
Bas-relief is a form of sculpture where carved or chiseled forms protrude partially and shallowly from the background. Occupying an intermediate place between painting and full 3D sculpture, bas-relief sculpture exploits properties of human visual perception in order to maintain perceptually salient 3D information. In this paper, we present two methods for automatic bas-relief generation from 3D digital shapes. Both methods are inspired by techniques developed for high dynamic range image compression and have the bilateral filter as the main ingredient. We demonstrate that the methods are capable of preserving fine shape features and achieving good compression without compromising the quality of surface details. For artists, bas-relief generation starts from managing the viewer's point of view and compositing the scene. Therefore we strive in our work to streamline this process by focusing on easy and intuitive user interaction which is paramount to artistic applications. Our algorithms allow for real time computation thanks to our implementation on graphics hardware. Besides interactive production of stills, this work offers the possibility for generating bas-relief animations. Last but not least, we explore the generation of artistic reliefs that mimic cubism in painting.
Export
BibTeX
% NOTE(review): this record has YEAR but, unlike its sibling entries in this file, no DATE field -- confirm whether DATE = {2010} should be added.
@article{Kerber2010_1, TITLE = {Real-time Generation of Digital Bas-Reliefs}, AUTHOR = {Kerber, Jens and Tevs, Art and Zayer, Rhaleb and Belyaev, Alexander and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1686-4360}, DOI = {10.3722/cadaps.2010.465-478}, LOCALID = {Local-ID: C125675300671F7B-0DCEE3E97CF972D1C12577F400453F06-Kerber2010_1}, PUBLISHER = {Taylor \& Francis}, ADDRESS = {London}, YEAR = {2010}, ABSTRACT = {Bas-relief is a form of sculpture where carved or chiseled forms protrude partially and shallowly from the background. Occupying an intermediate place between painting and full 3D sculpture, bas-relief sculpture exploits properties of human visual perception in order to maintain perceptually salient 3D information. In this paper, we present two methods for automatic bas-relief generation from 3D digital shapes. Both methods are inspired by techniques developed for high dynamic range image compression and have the bilateral filter as the main ingredient. We demonstrate that the methods are capable of preserving fine shape features and achieving good compression without compromising the quality of surface details. For artists, bas-relief generation starts from managing the viewer's point of view and compositing the scene. Therefore we strive in our work to streamline this process by focusing on easy and intuitive user interaction which is paramount to artistic applications. Our algorithms allow for real time computation thanks to our implementation on graphics hardware. Besides interactive production of stills, this work offers the possibility for generating bas-relief animations. Last but not least, we explore the generation of artistic reliefs that mimic cubism in painting.}, JOURNAL = {Computer-Aided Design and Applications}, VOLUME = {7}, NUMBER = {4}, PAGES = {465--478}, }
Endnote
%0 Journal Article %A Kerber, Jens %A Tevs, Art %A Zayer, Rhaleb %A Belyaev, Alexander %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Generation of Digital Bas-Reliefs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-177F-A %F EDOC: 537313 %R 10.3722/cadaps.2010.465-478 %F OTHER: Local-ID: C125675300671F7B-0DCEE3E97CF972D1C12577F400453F06-Kerber2010_1 %7 2010 %D 2010 %* Review method: peer-reviewed %X Bas-relief is a form of sculpture where carved or chiseled forms protrude partially and shallowly from the background. Occupying an intermediate place between painting and full 3D sculpture, bas-relief sculpture exploits properties of human visual perception in order to maintain perceptually salient 3D information. In this paper, we present two methods for automatic bas-relief generation from 3D digital shapes. Both methods are inspired by techniques developed for high dynamic range image compression and have the bilateral filter as the main ingredient. We demonstrate that the methods are capable of preserving fine shape features and achieving good compression without compromising the quality of surface details. For artists, bas-relief generation starts from managing the viewer's point of view and compositing the scene. Therefore we strive in our work to streamline this process by focusing on easy and intuitive user interaction which is paramount to artistic applications. Our algorithms allow for real time computation thanks to our implementation on graphics hardware. Besides interactive production of stills, this work offers the possibility for generating bas-relief animations. Last but not least, we explore the generation of artistic reliefs that mimic cubism in painting. 
%J Computer-Aided Design and Applications %V 7 %N 4 %& 465 %P 465 - 478 %I Taylor & Francis %C London %@ false
Kerber, J., Bokeloh, M., Wand, M., Krüger, J., and Seidel, H.-P. 2010b. Feature Preserving Sketching of Volume Data. Vision, Modeling & Visualization (VMV 2010), Eurographics Association.
Abstract
In this paper, we present a novel method for extracting feature lines from volume data sets. This leads to a reduction of visual complexity and provides an abstraction of the original data to important structural features. We employ a new iteratively reweighted least-squares approach that allows us to detect sharp creases and to preserve important features such as corners or intersection of feature lines accurately. Traditional least-squares methods This is important for both visual quality as well as reliable further processing in feature detection algorithms. Our algorithm is efficient and easy to implement, and nevertheless effective and robust to noise. We show results for a number of different data sets.
Export
BibTeX
@inproceedings{Kerber2010_2, TITLE = {Feature Preserving Sketching of Volume Data}, AUTHOR = {Kerber, Jens and Bokeloh, Martin and Wand, Michael and Kr{\"u}ger, Jens and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905673-79-1}, DOI = {10.2312/PE/VMV/VMV10/195-202}, PUBLISHER = {Eurographics Association}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {In this paper, we present a novel method for extracting feature lines from volume data sets. This leads to a reduction of visual complexity and provides an abstraction of the original data to important structural features. We employ a new iteratively reweighted least-squares approach that allows us to detect sharp creases and to preserve important features such as corners or intersection of feature lines accurately. Traditional least-squares methods tend to smooth out such features. This is important for both visual quality as well as reliable further processing in feature detection algorithms. Our algorithm is efficient and easy to implement, and nevertheless effective and robust to noise. We show results for a number of different data sets.}, BOOKTITLE = {Vision, Modeling \& Visualization (VMV 2010)}, EDITOR = {Koch, Reinhard and Kolb, Andreas and Rezk-Salama, Christof}, PAGES = {195--202}, ADDRESS = {Siegen, Germany}, }
Endnote
%0 Conference Proceedings %A Kerber, Jens %A Bokeloh, Martin %A Wand, Michael %A Kr&#252;ger, Jens %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Feature Preserving Sketching of Volume Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1752-C %F EDOC: 537316 %R 10.2312/PE/VMV/VMV10/195-202 %D 2010 %B 15th International Workshop on Vision, Modeling and Visualization %Z date of event: 2010-11-15 - 2010-11-17 %C Siegen, Germany %X In this paper, we present a novel method for extracting feature lines from volume data sets. This leads to a reduction of visual complexity and provides an abstraction of the original data to important structural features. We employ a new iteratively reweighted least-squares approach that allows us to detect sharp creases and to preserve important features such as corners or intersection of feature lines accurately. Traditional least-squares methods tend to smooth out such features. This is important for both visual quality as well as reliable further processing in feature detection algorithms. Our algorithm is efficient and easy to implement, and nevertheless effective and robust to noise. We show results for a number of different data sets. %B Vision, Modeling & Visualization %E Koch, Reinhard; Kolb, Andreas; Rezk-Salama, Christof %P 195 - 202 %I Eurographics Association %@ 978-3-905673-79-1
Jain, A., Kurz, C., Thormählen, T., and Seidel, H.-P. 2010a. Exploiting Global Connectivity Constraints for Reconstruction of 3D Line Segment from Images. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Export
BibTeX
@inproceedings{Jain2010Lines, TITLE = {Exploiting Global Connectivity Constraints for Reconstruction of {3D} Line Segment from Images}, AUTHOR = {Jain, Arjun and Kurz, Christian and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-424-46984-0}, DOI = {10.1109/CVPR.2010.5539781}, PUBLISHER = {IEEE}, YEAR = {2010}, DATE = {2010}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)}, PAGES = {1586--1593}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Jain, Arjun %A Kurz, Christian %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Global Connectivity Constraints for Reconstruction of 3D Line Segment from Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-174F-5 %F EDOC: 537268 %R 10.1109/CVPR.2010.5539781 %D 2010 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %B IEEE Conference on Computer Vision and Pattern Recognition %P 1586 - 1593 %I IEEE %@ 978-1-424-46984-0
Jain, A., Thormählen, T., Seidel, H.-P., and Theobalt, C. 2010b. MovieReshape: Tracking and Reshaping of Humans in Videos. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2010) 29, 6.
Export
BibTeX
@article{Jain2010MovieReshape, TITLE = {{MovieReshape}: Tracking and Reshaping of Humans in Videos}, AUTHOR = {Jain, Arjun and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/1866158.1866174}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2010}, DATE = {2010}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {29}, NUMBER = {6}, PAGES = {1--10}, EID = {148}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2010}, EDITOR = {Drettakis, George}, }
Endnote
%0 Journal Article %A Jain, Arjun %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MovieReshape: Tracking and Reshaping of Humans in Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1769-9 %F EDOC: 537305 %R 10.1145/1866158.1866174 %7 2010 %D 2010 %J ACM Transactions on Graphics %V 29 %N 6 %& 1 %P 1 - 10 %Z sequence number: 148 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2010 %O ACM SIGGRAPH Asia 2010 Seoul, South Korea ; [December 15 - 18, 2010] SA'10
Hu, W., Dong, Z., Ihrke, I., Grosch, T., Yuan, G., and Seidel, H.-P. 2010. Interactive Volume Caustics in Single-Scattering Media. Proceedings I3D 2010, ACM.
Abstract
Volume caustics are intricate illumination patterns formed by light first interacting with a specular surface and subsequently being scattered inside a participating medium. Although this phenomenon can be simulated by existing techniques, image synthesis is usually non-trivial and time-consuming. Motivated by interactive applications, we propose a novel volume caustics rendering method for single-scattering participating media. Our method is based on the observation that line rendering of illumination rays into the screen buffer establishes a direct light path between the viewer and the light source. This connection is introduced via a single scattering event for every pixel affected by the line primitive. Since the GPU is a parallel processor, the radiance contributions of these light paths to each of the pixels can be computed and accumulated independently. The implementation of our method is straightforward and we show that it can be seamlessly integrated with existing methods for rendering participating media. We achieve high-quality results at real-time frame rates for large and dynamic scenes containing homogeneous participating media. For inhomogeneous media, our method achieves interactive performance that is close to real-time. Our method is based on a simplified physical model and can thus be used for generating physically plausible previews of expensive lighting simulations quickly.
Export
BibTeX
@inproceedings{HDI:2010:VolumeCaustics, TITLE = {Interactive Volume Caustics in Single-Scattering Media}, AUTHOR = {Hu, Wei and Dong, Zhao and Ihrke, Ivo and Grosch, Thorsten and Yuan, Guodong and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-60558-939-8}, DOI = {10.1145/1730804.1730822}, PUBLISHER = {ACM}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {Volume caustics are intricate illumination patterns formed by light first interacting with a specular surface and subsequently being scattered inside a participating medium. Although this phenomenon can be simulated by existing techniques, image synthesis is usually non-trivial and time-consuming. Motivated by interactive applications, we propose a novel volume caustics rendering method for single-scattering participating media. Our method is based on the observation that line rendering of illumination rays into the screen buffer establishes a direct light path between the viewer and the light source. This connection is introduced via a single scattering event for every pixel affected by the line primitive. Since the GPU is a parallel processor, the radiance contributions of these light paths to each of the pixels can be computed and accumulated independently. The implementation of our method is straightforward and we show that it can be seamlessly integrated with existing methods for rendering participating media. We achieve high-quality results at real-time frame rates for large and dynamic scenes containing homogeneous participating media. For inhomogeneous media, our method achieves interactive performance that is close to real-time. Our method is based on a simplified physical model and can thus be used for generating physically plausible previews of expensive lighting simulations quickly.}, BOOKTITLE = {Proceedings I3D 2010}, EDITOR = {Varshney, Amitabh and Wyman, Chris and Aliaga, Daniel and Oliveira, Manuel M.}, PAGES = {109--117}, ADDRESS = {Washington, DC, US}, }
Endnote
%0 Conference Proceedings %A Hu, Wei %A Dong, Zhao %A Ihrke, Ivo %A Grosch, Thorsten %A Yuan, Guodong %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Volume Caustics in Single-Scattering Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-175B-9 %F EDOC: 537283 %R 10.1145/1730804.1730822 %D 2010 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2010-02-19 - 2010-02-21 %C Washington, DC, US %X Volume caustics are intricate illumination patterns formed by light first interacting with a specular surface and subsequently being scattered inside a participating medium. Although this phenomenon can be simulated by existing techniques, image synthesis is usually non-trivial and time-consuming. Motivated by interactive applications, we propose a novel volume caustics rendering method for single-scattering participating media. Our method is based on the observation that line rendering of illumination rays into the screen buffer establishes a direct light path between the viewer and the light source. This connection is introduced via a single scattering event for every pixel affected by the line primitive. Since the GPU is a parallel processor, the radiance contributions of these light paths to each of the pixels can be computed and accumulated independently. The implementation of our method is straightforward and we show that it can be seamlessly integrated with existing methods for rendering participating media. We achieve high-quality results at real-time frame rates for large and dynamic scenes containing homogeneous participating media. For inhomogeneous media, our method achieves interactive performance that is close to real-time. 
Our method is based on a simplified physical model and can thus be used for generating physically plausible previews of expensive lighting simulations quickly. %B Proceedings I3D 2010 %E Varshney, Amitabh; Wyman, Chris; Aliaga, Daniel; Oliveira, Manuel M. %P 109 - 117 %I ACM %@ 978-1-60558-939-8
Hullin, M.B., Hanika, J., Lensch, H.P.A., Kautz, J., and Seidel, H.-P. 2010. Acquisition and Analysis of Bispectral Bidirectional Reflectance Distribution Functions. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010) 29, 4.
Abstract
In fluorescent materials, light from a certain band of incident wavelengths is reradiated at longer wavelengths, i.e., with a reduced per-photon energy. While fluorescent materials are common in everyday life, they have received little attention in computer graphics. Especially, no bidirectional reradiation measurements of fluorescent materials have been available so far. In this paper, we extend the well-known concept of the bidirectional reflectance distribution function (BRDF) to account for energy transfer between wavelengths, resulting in a Bispectral Bidirectional Reflectance and Reradiation Distribution Function (bispectral BRRDF). Using a bidirectional and bispectral measurement setup, we acquire reflectance and reradiation data of a variety of fluorescent materials, including vehicle paints, paper and fabric, and compare their renderings with RGB, RGBxRGB, and spectral BRDFs. Our acquisition is guided by a principal component analysis on complete bispectral data taken under a sparse set of angles. We show that in order to faithfully reproduce the full bispectral information for all other angles, only a very small number of wavelength pairs needs to be measured at a high angular resolution.
Export
BibTeX
@article{Hullin2010, TITLE = {Acquisition and Analysis of Bispectral Bidirectional Reflectance Distribution Functions}, AUTHOR = {Hullin, Matthias B. and Hanika, Johannes and Lensch, Hendrik P. A. and Kautz, Jan and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/1833349.1778834}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {In fluorescent materials, light from a certain band of incident wavelengths is reradiated at longer wavelengths, i.e., with a reduced per-photon energy. While fluorescent materials are common in everyday life, they have received little attention in computer graphics. Especially, no bidirectional reradiation measurements of fluorescent materials have been available so far. In this paper, we extend the well-known concept of the bidirectional reflectance distribution function (BRDF) to account for energy transfer between wavelengths, resulting in a Bispectral Bidirectional Reflectance and Reradiation Distribution Function (bispectral BRRDF). Using a bidirectional and bispectral measurement setup, we acquire reflectance and reradiation data of a variety of fluorescent materials, including vehicle paints, paper and fabric, and compare their renderings with RGB, RGBxRGB, and spectral BRDFs. Our acquisition is guided by a principal component analysis on complete bispectral data taken under a sparse set of angles. We show that in order to faithfully reproduce the full bispectral information for all other angles, only a very small number of wavelength pairs needs to be measured at a high angular resolution.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {29}, NUMBER = {4}, PAGES = {1--7}, EID = {97}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2010}, }
Endnote
%0 Journal Article %A Hullin, Matthias B. %A Hanika, Johannes %A Lensch, Hendrik P. A. %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Acquisition and Analysis of Bispectral Bidirectional Reflectance Distribution Functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1729-A %F EDOC: 537321 %R 10.1145/1833349.1778834 %7 2010 %D 2010 %X In fluorescent materials, light from a certain band of incident wavelengths is reradiated at longer wavelengths, i.e., with a reduced per-photon energy. While fluorescent materials are common in everyday life, they have received little attention in computer graphics. Especially, no bidirectional reradiation measurements of fluorescent materials have been available so far. In this paper, we extend the well-known concept of the bidirectional reflectance distribution function (BRDF) to account for energy transfer between wavelengths, resulting in a Bispectral Bidirectional Reflectance and Reradiation Distribution Function (bispectral BRRDF). Using a bidirectional and bispectral measurement setup, we acquire reflectance and reradiation data of a variety of fluorescent materials, including vehicle paints, paper and fabric, and compare their renderings with RGB, RGBxRGB, and spectral BRDFs. Our acquisition is guided by a principal component analysis on complete bispectral data taken under a sparse set of angles. We show that in order to faithfully reproduce the full bispectral information for all other angles, only a very small number of wavelength pairs needs to be measured at a high angular resolution. 
%J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 7 %Z sequence number: 97 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %@ false
Herzog, R., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2010. Spatio-Temporal Upsampling on the GPU. Proceedings I3D 2010, ACM.
Abstract
Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result.
Export
BibTeX
@inproceedings{HerzogI3D2010, TITLE = {Spatio-Temporal Upsampling on the {GPU}}, AUTHOR = {Herzog, Robert and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-60558-939-8}, DOI = {10.1145/1730804.1730819}, PUBLISHER = {ACM}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result.}, BOOKTITLE = {Proceedings I3D 2010}, EDITOR = {Varshney, Amitabh and Wyman, Chris and Aliaga, Daniel and Oliveira, Manuel M.}, PAGES = {91--98}, ADDRESS = {Washington DC, USA}, }
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-Temporal Upsampling on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-178C-C %F EDOC: 537285 %R 10.1145/1730804.1730819 %D 2010 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2010-02-19 - 2010-02-21 %C Washington DC, USA %X Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result. %B Proceedings I3D 2010 %E Varshney, Amitabh; Wyman, Chris; Aliaga, Daniel; Oliveira, Manuel M. %P 91 - 98 %I ACM %@ 978-1-60558-939-8
Hasler, N., Thormählen, T., Rosenhahn, B., and Seidel, H.-P. 2010a. Learning Skeletons for Shape and Pose. Proceedings I3D 2010, ACM.
Export
BibTeX
@inproceedings{HasThoRosSei10Skeleton, TITLE = {Learning Skeletons for Shape and Pose}, AUTHOR = {Hasler, Nils and Thorm{\"a}hlen, Thorsten and Rosenhahn, Bodo and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-60558-939-8}, DOI = {10.1145/1730804.1730809}, PUBLISHER = {ACM}, YEAR = {2010}, DATE = {2010}, BOOKTITLE = {Proceedings I3D 2010}, EDITOR = {Varshney, Amitabh and Wyman, Chris and Aliaga, Daniel and Oliveira, Manuel M.}, PAGES = {23--30}, ADDRESS = {Washington, DC, USA}, }
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Thorm&#228;hlen, Thorsten %A Rosenhahn, Bodo %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Skeletons for Shape and Pose : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1763-6 %F EDOC: 537281 %R 10.1145/1730804.1730809 %D 2010 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2010-02-19 - 2010-02-21 %C Washington, DC, USA %B Proceedings I3D 2010 %E Varshney, Amitabh; Wyman, Chris; Aliaga, Daniel; Oliveira, Manuel M. %P 23 - 30 %I ACM %@ 978-1-60558-939-8
Hasler, N., Ackermann, H., Rosenhahn, B., Thormählen, T., and Seidel, H.-P. 2010b. Multilinear Pose and Body Shape Estimation of Dressed Subjects from Image Sets. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Export
BibTeX
@inproceedings{Hasler2010Multilinear, TITLE = {Multilinear Pose and Body Shape Estimation of Dressed Subjects from Image Sets}, AUTHOR = {Hasler, Nils and Ackermann, Hanno and Rosenhahn, Bodo and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-424-46984-0}, DOI = {10.1109/CVPR.2010.5539853}, PUBLISHER = {IEEE}, YEAR = {2010}, DATE = {2010}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)}, PAGES = {1823--1830}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Hasler, Nils %A Ackermann, Hanno %A Rosenhahn, Bodo %A Thorm&#228;hlen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Multilinear Pose and Body Shape Estimation of Dressed Subjects from Image Sets : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-176B-5 %F EDOC: 537300 %R 10.1109/CVPR.2010.5539853 %D 2010 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %B IEEE Conference on Computer Vision and Pattern Recognition %P 1823 - 1830 %I IEEE %@ 978-1-424-46984-0
Granados, M., Ajdin, B., Wand, M., Theobalt, C., Seidel, H.-P., and Lensch, H.P.A. 2010. Optimal HDR Reconstruction with Linear Digital Cameras. IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010), IEEE.
Abstract
Given a multi-exposure sequence of a scene, our aim is to recover the absolute irradiance falling onto a linear camera sensor. The established approach is to perform a weighted average of the scaled input exposures. However, there is no clear consensus on the appropriate weighting to use. We propose a weighting function that produces statistically optimal estimates under the assumption of compound-Gaussian noise. Our weighting is based on a calibrated camera model that accounts for all noise sources. This model also allows us to simultaneously estimate the irradiance and its uncertainty. We evaluate our method on simulated and real world photographs, and show that we consistently improve the signal-to-noise ratio over previous approaches. Finally, we show the effectiveness of our model for optimal exposure sequence selection and HDR image denoising.
Export
BibTeX
@inproceedings{Granados2010, TITLE = {Optimal {HDR} Reconstruction with Linear Digital Cameras}, AUTHOR = {Granados, Miguel and Ajdin, Boris and Wand, Michael and Theobalt, Christian and Seidel, Hans-Peter and Lensch, Hendrik P. A.}, LANGUAGE = {eng}, ISBN = {978-1-4244-6983-3}, DOI = {10.1109/CVPR.2010.5540208}, PUBLISHER = {IEEE}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {Given a multi-exposure sequence of a scene, our aim is to recover the absolute irradiance falling onto a linear camera sensor. The established approach is to perform a weighted average of the scaled input exposures. However, there is no clear consensus on the appropriate weighting to use. We propose a weighting function that produces statistically optimal estimates under the assumption of compound-Gaussian noise. Our weighting is based on a calibrated camera model that accounts for all noise sources. This model also allows us to simultaneously estimate the irradiance and its uncertainty. We evaluate our method on simulated and real world photographs, and show that we consistently improve the signal-to-noise ratio over previous approaches. Finally, we show the effectiveness of our model for optimal exposure sequence selection and HDR image denoising.}, BOOKTITLE = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2010)}, PAGES = {215--222}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Granados, Miguel %A Ajdin, Boris %A Wand, Michael %A Theobalt, Christian %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimal HDR Reconstruction with Linear Digital Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1771-6 %F EDOC: 537323 %R 10.1109/CVPR.2010.5540208 %D 2010 %B IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2010-06-13 - 2010-06-18 %C San Francisco, CA, USA %X Given a multi-exposure sequence of a scene, our aim is to recover the absolute irradiance falling onto a linear camera sensor. The established approach is to perform a weighted average of the scaled input exposures. However, there is no clear consensus on the appropriate weighting to use. We propose a weighting function that produces statistically optimal estimates under the assumption of compound-Gaussian noise. Our weighting is based on a calibrated camera model that accounts for all noise sources. This model also allows us to simultaneously estimate the irradiance and its uncertainty. We evaluate our method on simulated and real world photographs, and show that we consistently improve the signal-to-noise ratio over previous approaches. Finally, we show the effectiveness of our model for optimal exposure sequence selection and HDR image denoising. %B IEEE Conference on Computer Vision and Pattern Recognition %P 215 - 222 %I IEEE %@ 978-1-4244-6983-3
Gall, J., Rosenhahn, B., Brox, T., and Seidel, H.-P. 2010. Optimization and Filtering for Human Motion Capture : A Multi-Layer Framework. International Journal of Computer Vision 87, 1-2.
Abstract
Local optimization and filtering have been widely applied to model-based 3D human motion capture. Global stochastic optimization has recently been proposed as promising alternative solution for tracking and initialization. In order to benefit from optimization and filtering, we introduce a multi-layer framework that combines stochastic optimization, filtering, and local optimization. While the first layer relies on interacting simulated annealing and some weak prior information on physical constraints, the second layer refines the estimates by filtering and local optimization such that the accuracy is increased and ambiguities are resolved over time without imposing restrictions on the dynamics. In our experimental evaluation, we demonstrate the significant improvements of the multi-layer framework and provide quantitative 3D pose tracking results for the complete HumanEva-II dataset. The paper further comprises a comparison of global stochastic optimization with particle filtering, annealed particle filtering, and local optimization.
Export
BibTeX
@article{Gall2008c,
  title     = {Optimization and Filtering for Human Motion Capture : A Multi-Layer Framework},
  author    = {Gall, J{\"u}rgen and Rosenhahn, Bodo and Brox, Thomas and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0920-5691},
  url       = {http://www.springerlink.com/content/21410805552725x4/fulltext.pdf},
  doi       = {10.1007/s11263-008-0173-1},
  publisher = {Springer},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  abstract  = {Local optimization and filtering have been widely applied to model-based 3D human motion capture. Global stochastic optimization has recently been proposed as promising alternative solution for tracking and initialization. In order to benefit from optimization and filtering, we introduce a multi-layer framework that combines stochastic optimization, filtering, and local optimization. While the first layer relies on interacting simulated annealing and some weak prior information on physical constraints, the second layer refines the estimates by filtering and local optimization such that the accuracy is increased and ambiguities are resolved over time without imposing restrictions on the dynamics. In our experimental evaluation, we demonstrate the significant improvements of the multi-layer framework and provide quantitative 3D pose tracking results for the complete \texttt{HumanEva-II} dataset. The paper further comprises a comparison of global stochastic optimization with particle filtering, annealed particle filtering, and local optimization.},
  journal   = {International Journal of Computer Vision},
  volume    = {87},
  number    = {1-2},
  pages     = {75--92},
}
Endnote
%0 Journal Article %A Gall, J&#252;rgen %A Rosenhahn, Bodo %A Brox, Thomas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Optimization and Filtering for Human Motion Capture : A Multi-Layer Framework : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1773-2 %F EDOC: 537278 %R 10.1007/s11263-008-0173-1 %U http://www.springerlink.com/content/21410805552725x4/fulltext.pdf %7 2008-11-15 %D 2010 %* Review method: peer-reviewed %X Local optimization and filtering have been widely applied to model-based 3D human motion capture. Global stochastic optimization has recently been proposed as promising alternative solution for tracking and initialization. In order to benefit from optimization and filtering, we introduce a multi-layer framework that combines stochastic optimization, filtering, and local optimization. While the first layer relies on interacting simulated annealing and some weak prior information on physical constraints, the second layer refines the estimates by filtering and local optimization such that the accuracy is increased and ambiguities are resolved over time without imposing restrictions on the dynamics. In our experimental evaluation, we demonstrate the significant improvements of the multi-layer framework and provide quantitative 3D pose tracking results for the complete \texttt{HumanEva-II} dataset. The paper further comprises a comparison of global stochastic optimization with particle filtering, annealed particle filtering, and local optimization. %J International Journal of Computer Vision %V 87 %N 1-2 %& 75 %P 75 - 92 %I Springer %C New York, NY %@ false %U http://www.springerlink.com/content/21410805552725x4/fulltext.pdf
Fuchs, M., Chen, T., Wang, O., Raskar, R., Seidel, H.-P., and Lensch, H.P.A. 2010. Real-Time Temporal Shaping of High-Speed Video Streams. Computers & Graphics (Proc. SBIM 2009) 34, 5.
Abstract
Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We study the properties of both contemporary and idealized display devices and demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion.
Export
BibTeX
@article{Fuchs2009,
  title     = {Real-Time Temporal Shaping of High-Speed Video Streams},
  author    = {Fuchs, Martin and Chen, Tongbo and Wang, Oliver and Raskar, Ramesh and Seidel, Hans-Peter and Lensch, Hendrik P. A.},
  language  = {eng},
  issn      = {0097-8493},
  doi       = {10.1016/j.cag.2010.05.017},
  publisher = {Elsevier},
  address   = {Amsterdam},
  year      = {2010},
  date      = {2010},
  abstract  = {Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We study the properties of both contemporary and idealized display devices and demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion.},
  journal   = {Computers \& Graphics (Proc. SBIM)},
  volume    = {34},
  number    = {5},
  pages     = {575--584},
  booktitle = {Extended papers from the 2009 Sketch-Based Interfaces and Modeling Conference Vision, Modeling \& Visualization (SBIM 2009)},
}
Endnote
%0 Journal Article %A Fuchs, Martin %A Chen, Tongbo %A Wang, Oliver %A Raskar, Ramesh %A Seidel, Hans-Peter %A Lensch, Hendrik P. A. %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Real-Time Temporal Shaping of High-Speed Video Streams : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1783-D %F EDOC: 537299 %R 10.1016/j.cag.2010.05.017 %7 2010 %D 2010 %* Review method: peer-reviewed %X Digital movie cameras only perform a discrete sampling of real-world imagery. While spatial sampling effects are well studied in the literature, there has not been as much work in regards to temporal sampling. As cameras get faster and faster, the need for conventional frame-rate video that matches the abilities of human perception remains. In this article, we introduce a system with controlled temporal sampling behavior. It transforms a high fps input stream into a conventional speed output video in real-time. We investigate the effect of different temporal sampling kernels and demonstrate that extended, overlapping kernels can mitigate aliasing artifacts. Furthermore, NPR effects, such as enhanced motion blur, can be achieved. By applying Fourier transforms in the temporal domain, we can also obtain novel tools for analyzing and visualizing time dependent effects. We study the properties of both contemporary and idealized display devices and demonstrate the effect of different sampling kernels in creating enhanced movies and stills of fast motion. %J Computers & Graphics %V 34 %N 5 %& 575 %P 575 - 584 %I Elsevier %C Amsterdam %@ false %B Extended papers from the 2009 Sketch-Based Interfaces and Modeling Conference Vision, Modeling & Visualization %O SBIM 2009 Sixth Eurographics/ACM Symposium on Sketch-Based Interfaces and Modeling
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2010a. Adaptive Image-space Stereo View Synthesis. Vision, Modeling & Visualization (VMV 2010), Eurographics Association.
Export
BibTeX
@inproceedings{Didyk2010b,
  title     = {Adaptive Image-space Stereo View Synthesis},
  author    = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  isbn      = {978-3-905673-79-1},
  doi       = {10.2312/PE/VMV/VMV10/299-306},
  publisher = {Eurographics Association},
  year      = {2010},
  date      = {2010},
  booktitle = {Vision, Modeling \& Visualization (VMV 2010)},
  editor    = {Koch, Reinhard and Kolb, Andreas and Rezk-Salama, Christof},
  pages     = {299--306},
  address   = {Siegen, Germany},
}
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Adaptive Image-space Stereo View Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-172C-4 %F EDOC: 537308 %R 10.2312/PE/VMV/VMV10/299-306 %D 2010 %B 15th International Workshop on Vision, Modeling, and Visualization %Z date of event: 2010-11-15 - 2010-11-17 %C Siegen, Germany %B Vision, Modeling & Visualization %E Koch, Reinhard; Kolb, Andreas; Rezk-Salama, Christof %P 299 - 306 %I Eurographics Association %@ 978-3-905673-79-1
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2010b. Apparent Display Resolution Enhancement for Moving Images. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010) 29, 4.
Export
BibTeX
@article{Didyk2010a,
  title     = {Apparent Display Resolution Enhancement for Moving Images},
  author    = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  isbn      = {978-1-4503-0210-4},
  doi       = {10.1145/1833349.1778850},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {29},
  number    = {4},
  pages     = {1--8},
  eid       = {113},
  booktitle = {Proceedings of ACM SIGGRAPH 2010},
  editor    = {Hoppe, Hugues},
}
Endnote
%0 Journal Article %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Display Resolution Enhancement for Moving Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1734-0 %F EDOC: 537269 %R 10.1145/1833349.1778850 %7 2010 %D 2010 %J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 8 %Z sequence number: 113 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %@ 978-1-4503-0210-4
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2010c. Perceptually-motivated Real-time Temporal Upsampling of 3D Content for High-refresh-rate Displays. Computer Graphics Forum (Proc. EUROGRAPHICS 2010) 29, 2.
Abstract
High-refresh-rate displays (e.\,g.,~120\,Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.\,e.~120\,Hz). Some devices, such as LCD~TVs, solve this problem by converting low-refresh-rate content (i.\,e.~50\,Hz~PAL) into a higher temporal resolution (i.\,e.~200\,Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays.
Export
BibTeX
@article{Didyk2010,
  title     = {Perceptually-motivated Real-time Temporal Upsampling of {3D} Content for High-refresh-rate Displays},
  author    = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0167-7055},
  doi       = {10.1111/j.1467-8659.2009.01641.x},
  publisher = {Blackwell},
  address   = {Oxford, UK},
  year      = {2010},
  date      = {2010},
  abstract  = {High-refresh-rate displays (e.\,g.,~120\,Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.\,e.~120\,Hz). Some devices, such as LCD~TVs, solve this problem by converting low-refresh-rate content (i.\,e.~50\,Hz~PAL) into a higher temporal resolution (i.\,e.~200\,Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays.},
  journal   = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  volume    = {29},
  number    = {2},
  pages     = {713--722},
  booktitle = {EUROGRAPHICS 2010},
  editor    = {Akenine-M{\"o}ller, Tomas and Zwicker, Matthias},
}
Endnote
%0 Journal Article %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually-motivated Real-time Temporal Upsampling of 3D Content for High-refresh-rate Displays : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1778-7 %F EDOC: 537284 %R 10.1111/j.1467-8659.2009.01641.x %7 2010 %D 2010 %X High-refresh-rate displays (e.\,g.,~120\,Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.\,e.~120\,Hz). Some devices, such as LCD~TVs, solve this problem by converting low-refresh-rate content (i.\,e.~50\,Hz~PAL) into a higher temporal resolution (i.\,e.~200\,Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays. 
%J Computer Graphics Forum %V 29 %N 2 %& 713 %P 713 - 722 %I Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2010 %O EUROGRAPHICS 2010 The European Association for Computer Graphics 31st Annual Conference ; Norrk&#246;ping, Sweden, May 3rd - 7th, 2010 EG 2010
Bokeloh, M., Wand, M., and Seidel, H.-P. 2010. A Connection between Partial Symmetry and Inverse Procedural Modeling. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010) 29, 4.
Export
BibTeX
@article{Bokeloh2010,
  title     = {A Connection between Partial Symmetry and Inverse Procedural Modeling},
  author    = {Bokeloh, Martin and Wand, Michael and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  isbn      = {978-1-4503-0210-4},
  doi       = {10.1145/1778765.1778841},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  volume    = {29},
  number    = {4},
  pages     = {1--10},
  eid       = {104},
  booktitle = {Proceedings of ACM SIGGRAPH 2010},
  editor    = {Hoppe, Hugues},
}
Endnote
%0 Journal Article %A Bokeloh, Martin %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Connection between Partial Symmetry and Inverse Procedural Modeling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1724-3 %F EDOC: 537324 %R 10.1145/1778765.1778841 %7 2010 %D 2010 %J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 10 %Z sequence number: 104 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %@ 978-1-4503-0210-4
Aydin, T.O., Čadík, M., Myszkowski, K., and Seidel, H.-P. 2010a. Video Quality Assessment for Computer Graphics Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2010) 29, 6.
Export
BibTeX
@article{TuncSGAsia2010,
  title     = {Video Quality Assessment for Computer Graphics Applications},
  author    = {Aydin, Tunc Ozan and {\v C}ad{\'\i}k, Martin and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {0730-0301},
  isbn      = {978-1-4503-0439-9},
  doi       = {10.1145/1866158.1866187},
  localid   = {Local-ID: C125675300671F7B-0ED72325CD8F187FC12577CF005BA5C5-TuncSGAsia2010},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  journal   = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  volume    = {29},
  number    = {6},
  pages     = {1--12},
  eid       = {161},
  booktitle = {Proceedings of ACM SIGGRAPH Asia 2010},
  editor    = {Drettakis, George},
}
Endnote
%0 Journal Article %A Aydin, Tunc Ozan %A &#268;ad&#237;k, Martin %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Quality Assessment for Computer Graphics Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1797-0 %F EDOC: 537307 %R 10.1145/1866158.1866187 %F OTHER: Local-ID: C125675300671F7B-0ED72325CD8F187FC12577CF005BA5C5-TuncSGAsia2010 %D 2010 %J ACM Transactions on Graphics %V 29 %N 6 %& 1 %P 1 - 12 %Z sequence number: 161 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2010 %O ACM SIGGRAPH Asia 2010 Seoul, South Korea %@ 978-1-4503-0439-9
Aydin, T.O., Čadík, M., Myszkowski, K., and Seidel, H.-P. 2010b. Visually Significant Edges. ACM Transactions on Applied Perception 7, 4.
Abstract
Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting "visual significance" values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework, which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden.
Export
BibTeX
@article{TuncTAP2010,
  title     = {Visually Significant Edges},
  author    = {Aydin, Tunc Ozan and {\v C}ad{\'\i}k, Martin and Myszkowski, Karol and Seidel, Hans-Peter},
  language  = {eng},
  issn      = {1544-3558},
  doi       = {10.1145/1823738.1823745},
  publisher = {ACM},
  address   = {New York, NY},
  year      = {2010},
  date      = {2010},
  abstract  = {Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting ``visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework, which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden.},
  journal   = {ACM Transactions on Applied Perception},
  volume    = {7},
  number    = {4},
  pages     = {1--14},
  eid       = {27},
}
Endnote
%0 Journal Article %A Aydin, Tunc Ozan %A &#268;ad&#237;k, Martin %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visually Significant Edges : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-179A-A %F EDOC: 537306 %R 10.1145/1823738.1823745 %7 2010 %D 2010 %* Review method: peer-reviewed %X Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework,which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden. %J ACM Transactions on Applied Perception %V 7 %N 4 %& 1 %P 1 - 14 %Z sequence number: 27 %I ACM %C New York, NY %@ false
Adams, B., Wicke, M., Ovsjanikov, M., Wand, M., Seidel, H.-P., and Guibas, L. 2010. Meshless Shape and Motion Design for Multiple Deformable Objects. Computer Graphics Forum 29, 1.
Export
BibTeX
@article{Adams2010z, TITLE = {Meshless Shape and Motion Design for Multiple Deformable Objects}, AUTHOR = {Adams, Bart and Wicke, Martin and Ovsjanikov, Maks and Wand, Michael and Seidel, Hans-Peter and Guibas, Leonidas}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2009.01536.x}, PUBLISHER = {Blackwell}, ADDRESS = {Oxford}, YEAR = {2010}, DATE = {2010}, JOURNAL = {Computer Graphics Forum}, VOLUME = {29}, NUMBER = {1}, PAGES = {43--