Last Year

Article
Alvarez-Cortez, S., Kunkel, T., and Masia, B. 2016. Practical Low-Cost Recovery of Spectral Power Distributions. Computer Graphics Forum 35, 1.
BibTeX
@article{MasiaCGF2016, TITLE = {Practical Low-Cost Recovery of Spectral Power Distributions}, AUTHOR = {Alvarez-Cortez, Sara and Kunkel, Timo and Masia, Belen}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12717}, PUBLISHER = {Wiley}, ADDRESS = {Chichester}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum}, VOLUME = {35}, NUMBER = {1}, PAGES = {166--178}, }
Endnote
%0 Journal Article %A Alvarez-Cortez, Sara %A Kunkel, Timo %A Masia, Belen %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Practical Low-Cost Recovery of Spectral Power Distributions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-1A2F-4 %R 10.1111/cgf.12717 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 1 %& 166 %P 166 - 178 %I Wiley %C Chichester %@ false
Boechat, P., Dokter, M., Kenzel, M., Seidel, H.-P., Schmalstieg, D., and Steinberger, M. 2016. Representing and Scheduling Procedural Generation using Operator Graphs. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
BibTeX
@article{BoaechatSIGGRAPHAsia2016, TITLE = {Representing and Scheduling Procedural Generation using Operator Graphs}, AUTHOR = {Boechat, Pedro and Dokter, Mark and Kenzel, Michael and Seidel, Hans-Peter and Schmalstieg, Dieter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980227}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {183}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Boechat, Pedro %A Dokter, Mark %A Kenzel, Michael %A Seidel, Hans-Peter %A Schmalstieg, Dieter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Representing and Scheduling Procedural Generation using Operator Graphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98BB-0 %R 10.1145/2980179.2980227 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 183 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Brandt, C., von Tycowicz, C., and Hildebrandt, K. 2016. Geometric Flows of Curves in Shape Space for Processing Motion of Deformable Objects. Computer Graphics Forum (Proc. EUROGRAPHICS 2016) 35, 2.
BibTeX
@article{Hildebrandt_EG2016, TITLE = {Geometric Flows of Curves in Shape Space for Processing Motion of Deformable Objects}, AUTHOR = {Brandt, Christopher and von Tycowicz, Christoph and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12832}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {35}, NUMBER = {2}, PAGES = {295--305}, BOOKTITLE = {The European Association for Computer Graphics 37th Annual Conference (EUROGRAPHICS 2016)}, }
Endnote
%0 Journal Article %A Brandt, Christopher %A von Tycowicz, Christoph %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Geometric Flows of Curves in Shape Space for Processing Motion of Deformable Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-D22B-8 %R 10.1111/cgf.12832 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 2 %& 295 %P 295 - 305 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 37th Annual Conference %O EUROGRAPHICS 2016 Lisbon, Portugal, 9th-13th May 2016 EG 2016
Calagari, K., Elgamal, T., Diab, K., et al. 2016. Depth Personalization and Streaming of Stereoscopic Sports Videos. ACM Transactions on Multimedia Computing, Communications, and Applications 12, 3.
BibTeX
@article{CalagariTMC2016, TITLE = {Depth Personalization and Streaming of Stereoscopic Sports Videos}, AUTHOR = {Calagari, Kiana and Elgamal, Tarek and Diab, Khaled and Templin, Krzysztof and Didyk, Piotr and Matusik, Wojciech and Hefeeda, Mohamed}, LANGUAGE = {eng}, DOI = {10.1145/2890103}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Multimedia Computing, Communications, and Applications}, VOLUME = {12}, NUMBER = {3}, EID = {41}, }
Endnote
%0 Journal Article %A Calagari, Kiana %A Elgamal, Tarek %A Diab, Khaled %A Templin, Krzysztof %A Didyk, Piotr %A Matusik, Wojciech %A Hefeeda, Mohamed %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Depth Personalization and Streaming of Stereoscopic Sports Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-079A-B %R 10.1145/2890103 %7 2016 %D 2016 %J ACM Transactions on Multimedia Computing, Communications, and Applications %O TOMM %V 12 %N 3 %Z sequence number: 41 %I ACM %C New York, NY
Chen, R. and Gotsman, C. 2016a. Complex Transfinite Barycentric Mappings with Similarity Kernels. Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing 2016) 35, 5.
BibTeX
@article{ChenSGP2016, TITLE = {Complex Transfinite Barycentric Mappings with Similarity Kernels}, AUTHOR = {Chen, Renjie and Gotsman, Craig}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.1296}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Chichester}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing)}, VOLUME = {35}, NUMBER = {5}, PAGES = {51--53}, BOOKTITLE = {Symposium on Geometry Processing 2016 (Eurographics Symposium on Geometric Processing 2016)}, EDITOR = {Ovsjanikov, Maks and Panozzo, Daniele}, }
Endnote
%0 Journal Article %A Chen, Renjie %A Gotsman, Craig %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Complex Transfinite Barycentric Mappings with Similarity Kernels : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-430B-5 %R 10.1111/cgf.1296 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 5 %& 51 %P 51 - 53 %I Wiley-Blackwell %C Chichester %@ false %B Symposium on Geometry Processing 2016 %O Berlin, Germany ; June 20 - 24, 2016 SGP 2016 Eurographics Symposium on Geometric Processing 2016
Chen, R. and Gotsman, C. 2016b. On Pseudo-harmonic Barycentric Coordinates. Computer Aided Geometric Design 44.
BibTeX
@article{Chen_Gotsman2016, TITLE = {On Pseudo-harmonic Barycentric Coordinates}, AUTHOR = {Chen, Renjie and Gotsman, Craig}, LANGUAGE = {eng}, ISSN = {0167-8396}, DOI = {10.1016/j.cagd.2016.04.005}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Aided Geometric Design}, VOLUME = {44}, PAGES = {15--35}, }
Endnote
%0 Journal Article %A Chen, Renjie %A Gotsman, Craig %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T On Pseudo-harmonic Barycentric Coordinates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-05AD-6 %R 10.1016/j.cagd.2016.04.005 %7 2016 %D 2016 %J Computer Aided Geometric Design %V 44 %& 15 %P 15 - 35 %I Elsevier %C Amsterdam %@ false
Chen, R. and Gotsman, C. 2016c. Generalized As-Similar-As-Possible Warping with Applications in Digital Photography. Computer Graphics Forum (Proc. EUROGRAPHICS 2016) 35, 2.
BibTeX
@article{ChenEG2016, TITLE = {Generalized As-Similar-As-Possible Warping with Applications in Digital Photography}, AUTHOR = {Chen, Renjie and Gotsman, Craig}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12813}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {35}, NUMBER = {2}, PAGES = {81--92}, BOOKTITLE = {The European Association for Computer Graphics 37th Annual Conference (EUROGRAPHICS 2016)}, }
Endnote
%0 Journal Article %A Chen, Renjie %A Gotsman, Craig %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Generalized As-Similar-As-Possible Warping with Applications in Digital Photography : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-8BBD-4 %R 10.1111/cgf.12813 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 2 %& 81 %P 81 - 92 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 37th Annual Conference %O EUROGRAPHICS 2016 Lisbon, Portugal, 9th-13th May 2016 EG 2016
Chien, E., Chen, R., and Weber, O. 2016. Bounded Distortion Harmonic Shape Interpolation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
BibTeX
@article{ChienSIGGRAPH2016, TITLE = {Bounded Distortion Harmonic Shape Interpolation}, AUTHOR = {Chien, Edward and Chen, Renjie and Weber, Ofir}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925926}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {105}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Chien, Edward %A Chen, Renjie %A Weber, Ofir %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Bounded Distortion Harmonic Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0793-A %R 10.1145/2897824.2925926 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 105 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Dąbała, Ł., Ziegler, M., Didyk, P., et al. 2016. Efficient Multi-Image Correspondences for Online Light Field Video Processing. Computer Graphics Forum (Proc. Pacific Graphics 2016) 35, 7.
BibTeX
@article{DabalaPG2016, TITLE = {Efficient Multi-Image Correspondences for Online Light Field Video Processing}, AUTHOR = {D{\k a}ba{\l}a, {\L}ukasz and Ziegler, Matthias and Didyk, Piotr and Zilly, Frederik and Keinert, Joachim and Myszkowski, Karol and Rokita, Przemyslaw and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13037}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {35}, NUMBER = {7}, PAGES = {401--410}, BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2016)}, }
Endnote
%0 Journal Article %A Dąbała, Łukasz %A Ziegler, Matthias %A Didyk, Piotr %A Zilly, Frederik %A Keinert, Joachim %A Myszkowski, Karol %A Rokita, Przemyslaw %A Ritschel, Tobias %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Multi-Image Correspondences for Online Light Field Video Processing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82BA-5 %R 10.1111/cgf.13037 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 7 %& 401 %P 401 - 410 %I Wiley-Blackwell %C Oxford, UK %@ false %B The 24th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2016 PG 2016
Efrat, N., Didyk, P., Foshey, M., Matusik, W., and Levin, A. 2016. Cinema 3D: Large Scale Automultiscopic Display. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
BibTeX
@article{EfratSIGGRAPH2016, TITLE = {Cinema {3D}: {L}arge Scale Automultiscopic Display}, AUTHOR = {Efrat, Netalee and Didyk, Piotr and Foshey, Mike and Matusik, Wojciech and Levin, Anat}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925921}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {59}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Efrat, Netalee %A Didyk, Piotr %A Foshey, Mike %A Matusik, Wojciech %A Levin, Anat %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Cinema 3D: Large Scale Automultiscopic Display : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0189-5 %R 10.1145/2897824.2925921 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 59 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Garrido, P., Zollhöfer, M., Wu, C., et al. 2016a. Corrective 3D Reconstruction of Lips from Monocular Video. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
BibTeX
@article{Garrido2016SGA, TITLE = {Corrective {3D} Reconstruction of Lips from Monocular Video}, AUTHOR = {Garrido, Pablo and Zollh{\"o}fer, Michael and Wu, Chenglei and Bradley, Derek and Perez, Patrick and Beeler, Thabo and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {219}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Garrido, Pablo %A Zollhöfer, Michael %A Wu, Chenglei %A Bradley, Derek %A Perez, Patrick %A Beeler, Thabo %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Corrective 3D Reconstruction of Lips from Monocular Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-23CE-F %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 219 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Garrido, P., Zollhöfer, M., Casas, D., et al. 2016b. Reconstruction of Personalized 3D Face Rigs from Monocular Video. ACM Transactions on Graphics 35, 3.
BibTeX
@article{GarridoTOG2016, TITLE = {Reconstruction of Personalized 3{D} Face Rigs from Monocular Video}, AUTHOR = {Garrido, Pablo and Zollh{\"o}fer, Michael and Casas, Dan and Valgaerts, Levi and Varanasi, Kiran and P{\'e}rez, Patrick and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2890493}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {35}, NUMBER = {3}, EID = {28}, }
Endnote
%0 Journal Article %A Garrido, Pablo %A Zollhöfer, Michael %A Casas, Dan %A Valgaerts, Levi %A Varanasi, Kiran %A Pérez, Patrick %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Reconstruction of Personalized 3D Face Rigs from Monocular Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-F544-D %R 10.1145/2890493 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 3 %Z sequence number: 28 %I Association for Computing Machinery %C New York, NY %@ false
Havran, V., Filip, J., and Myszkowski, K. 2016. Perceptually Motivated BRDF Comparison using Single Image. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2016) 35, 4.
BibTeX
@article{havran2016perceptually, TITLE = {Perceptually Motivated {BRDF} Comparison using Single Image}, AUTHOR = {Havran, Vlastimil and Filip, Jiri and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12944}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {35}, NUMBER = {4}, PAGES = {1--12}, BOOKTITLE = {Eurographics Symposium on Rendering 2016}, EDITOR = {Eisemann, Elmar and Fiume, Eugene}, }
Endnote
%0 Journal Article %A Havran, Vlastimil %A Filip, Jiri %A Myszkowski, Karol %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually Motivated BRDF Comparison using Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82C0-6 %R 10.1111/cgf.12944 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 4 %& 1 %P 1 - 12 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2016 %O Eurographics Symposium on Rendering 2016 EGSR 2016 Dublin, Ireland, 22-24 June 2016
Kellnhofer, P., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2016a. GazeStereo3D: Seamless Disparity Manipulations. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
BibTeX
@article{KellnhoferSIGGRAPH2016, TITLE = {{GazeStereo3D}: {S}eamless Disparity Manipulations}, AUTHOR = {Kellnhofer, Petr and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925866}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {68}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T GazeStereo3D: Seamless Disparity Manipulations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0190-4 %R 10.1145/2897824.2925866 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 68 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Kellnhofer, P., Didyk, P., Ritschel, T., Masia, B., Myszkowski, K., and Seidel, H.-P. 2016b. Motion Parallax in Stereo 3D: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
BibTeX
@article{Kellnhofer2016SGA, TITLE = {Motion Parallax in Stereo {3D}: {M}odel and Applications}, AUTHOR = {Kellnhofer, Petr and Didyk, Piotr and Ritschel, Tobias and Masia, Belen and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980230}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {176}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Ritschel, Tobias %A Masia, Belen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Parallax in Stereo 3D: Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B6-D %R 10.1145/2980179.2980230 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 176 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016c. Transformation-aware Perceptual Image Metric. Journal of Electronic Imaging 25, 5.
BibTeX
@article{Kellnhofer2016jei, TITLE = {Transformation-aware Perceptual Image Metric}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1017-9909}, DOI = {10.1117/1.JEI.25.5.053014}, PUBLISHER = {SPIE}, ADDRESS = {Bellingham, WA}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Journal of Electronic Imaging}, VOLUME = {25}, NUMBER = {5}, EID = {053014}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B3-4 %R 10.1117/1.JEI.25.5.053014 %7 2016 %D 2016 %J Journal of Electronic Imaging %V 25 %N 5 %Z sequence number: 053014 %I SPIE %C Bellingham, WA %@ false
Kerbl, B., Kenzel, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. 2016. Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU. Computer Graphics Forum Early View.
BibTeX
@article{Seidel_Steinberger2016, TITLE = {Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the {GPU}}, AUTHOR = {Kerbl, Bernhard and Kenzel, Michael and Schmalstieg, Dieter and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13075}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, JOURNAL = {Computer Graphics Forum}, VOLUME = {Early View}, }
Endnote
%0 Journal Article %A Kerbl, Bernhard %A Kenzel, Michael %A Schmalstieg, Dieter %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-1823-8 %R 10.1111/cgf.13075 %7 2016-12-05 %D 2016 %8 05.12.2016 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V Early View %I Wiley-Blackwell %C Oxford %@ false
Kol, T.R., Klehm, O., Seidel, H.-P., and Eisemann, E. 2016. Expressive Single Scattering for Light Shaft Stylization. IEEE Transactions on Visualization and Computer Graphics Online First.
BibTeX
@article{kol2016expressive, TITLE = {Expressive Single Scattering for Light Shaft Stylization}, AUTHOR = {Kol, Timothy R. and Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2016.2554114}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {Online First}, }
Endnote
%0 Journal Article %A Kol, Timothy R. %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Expressive Single Scattering for Light Shaft Stylization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64E7-2 %R 10.1109/TVCG.2016.2554114 %7 2016-04-14 %D 2016 %8 14.04.2016 %J IEEE Transactions on Visualization and Computer Graphics %V Online First %I IEEE Computer Society %C New York, NY %@ false
Lavoué, G., Liu, H., Myszkowski, K., and Lin, W. 2016. Quality Assessment and Perception in Computer Graphics. IEEE Computer Graphics and Applications 36, 4.
BibTeX
@article{Lavoue2016, TITLE = {Quality Assessment and Perception in Computer Graphics}, AUTHOR = {Lavou{\'e}, Guillaume and Liu, Hantao and Myszkowski, Karol and Lin, Weisi}, LANGUAGE = {eng}, ISSN = {0272-1716}, DOI = {10.1109/MCG.2016.72}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {IEEE Computer Graphics and Applications}, VOLUME = {36}, NUMBER = {4}, PAGES = {21--22}, }
Endnote
%0 Journal Article %A Lavoué, Guillaume %A Liu, Hantao %A Myszkowski, Karol %A Lin, Weisi %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Quality Assessment and Perception in Computer Graphics : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-8411-2 %R 10.1109/MCG.2016.72 %7 2016-07-29 %D 2016 %J IEEE Computer Graphics and Applications %V 36 %N 4 %& 21 %P 21 - 22 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Lurie, K.L., Angst, R., Seibel, E.J., Liao, J.C., and Ellerbee Bowden, A.K. 2016. Registration of Free-hand OCT Daughter Endoscopy to 3D Organ Reconstruction. Biomedical Optics Express 7, 12.
BibTeX
@article{Lurie2016, TITLE = {Registration of Free-hand {OCT} Daughter Endoscopy to {3D} Organ Reconstruction}, AUTHOR = {Lurie, Kristen L. and Angst, Roland and Seibel, Eric J. and Liao, Joseph C. and Ellerbee Bowden, Audrey K.}, LANGUAGE = {eng}, ISSN = {2156-7085}, DOI = {10.1364/BOE.7.004995}, PUBLISHER = {Optical Society of America}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Biomedical Optics Express}, VOLUME = {7}, NUMBER = {12}, PAGES = {4995--5009}, }
Endnote
%0 Journal Article %A Lurie, Kristen L. %A Angst, Roland %A Seibel, Eric J. %A Liao, Joseph C. %A Ellerbee Bowden, Audrey K. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Registration of Free-hand OCT Daughter Endoscopy to 3D Organ Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2F94-F %R 10.1364/BOE.7.004995 %7 2016 %D 2016 %J Biomedical Optics Express %V 7 %N 12 %& 4995 %P 4995 - 5009 %I Optical Society of America %@ false
Meka, A., Zollhöfer, M., Richardt, C., and Theobalt, C. 2016. Live Intrinsic Video. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
BibTeX
@article{MekaSIGGRAPH2016, TITLE = {Live Intrinsic Video}, AUTHOR = {Meka, Abhimitra and Zollh{\"o}fer, Michael and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925907}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {109}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Meka, Abhimitra %A Zollhöfer, Michael %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Live Intrinsic Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-07C8-3 %R 10.1145/2897824.2925907 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 109 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Piovarči, M., Levin, D.I.W., Rebello, J., et al. 2016. An Interaction-Aware, Perceptual Model for Non-Linear Elastic Objects. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
BibTeX
@article{PiovarciSIGGRAPH2016, TITLE = {An Interaction-Aware, Perceptual Model for Non-Linear Elastic Objects}, AUTHOR = {Piovar{\v c}i, Michal and Levin, David I. W. and Rebello, Jason and Chen, Desai and {\v D}urikovi{\v c}, Roman and Pfister, Hanspeter and Matusik, Wojciech and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925885}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {55}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Piovarči, Michal %A Levin, David I. W. %A Rebello, Jason %A Chen, Desai %A Ďurikovič, Roman %A Pfister, Hanspeter %A Matusik, Wojciech %A Didyk, Piotr %+ External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T An Interaction-Aware, Perceptual Model for Non-Linear Elastic Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0187-9 %R 10.1145/2897824.2925885 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 55 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Reinert, B., Kopf, J., Ritschel, T., Cuervo, E., Chu, D., and Seidel, H.-P. 2016a. Proxy-guided Image-based Rendering for Mobile Devices. Computer Graphics Forum (Proc. Pacific Graphics 2016) 35, 7.
BibTeX
@article{ReinertPG2016, TITLE = {Proxy-guided Image-based Rendering for Mobile Devices}, AUTHOR = {Reinert, Bernhard and Kopf, Johannes and Ritschel, Tobias and Cuervo, Eduardo and Chu, David and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13032}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {35}, NUMBER = {7}, PAGES = {353--362}, BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2016)}, }
Endnote
%0 Journal Article %A Reinert, Bernhard %A Kopf, Johannes %A Ritschel, Tobias %A Cuervo, Eduardo %A Chu, David %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Proxy-guided Image-based Rendering for Mobile Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2DD8-7 %R 10.1111/cgf.13032 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 7 %& 353 %P 353 - 362 %I Wiley-Blackwell %C Oxford %@ false %B The 24th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2016 PG 2016
Reinert, B., Ritschel, T., Seidel, H.-P., and Georgiev, I. 2016b. Projective Blue-Noise Sampling. Computer Graphics Forum 35, 1.
BibTeX
@article{ReinertCGF2016, TITLE = {Projective Blue-Noise Sampling}, AUTHOR = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter and Georgiev, Iliyan}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12725}, PUBLISHER = {Wiley}, ADDRESS = {Chichester}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum}, VOLUME = {35}, NUMBER = {1}, PAGES = {285--295}, }
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %A Georgiev, Iliyan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Projective Blue-Noise Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-1A31-D %R 10.1111/cgf.12725 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 1 %& 285 %P 285 - 295 %I Wiley %C Chichester %@ false
Rematas, K., Nguyen, C., Ritschel, T., Fritz, M., and Tuytelaars, T. 2016. Novel Views of Objects from a Single Image. IEEE Transactions on Pattern Analysis and Machine Intelligence.
BibTeX
@article{rematas16tpami, TITLE = {Novel Views of Objects from a Single Image}, AUTHOR = {Rematas, Konstantinos and Nguyen, Chuong and Ritschel, Tobias and Fritz, Mario and Tuytelaars, Tinne}, LANGUAGE = {eng}, ISSN = {0162-8828}, DOI = {10.1109/TPAMI.2016.2601093}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, JOURNAL = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, }
Endnote
%0 Journal Article %A Rematas, Konstantinos %A Nguyen, Chuong %A Ritschel, Tobias %A Fritz, Mario %A Tuytelaars, Tinne %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society External Organizations %T Novel Views of Objects from a Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-058A-1 %R 10.1109/TPAMI.2016.2601093 %7 2016 %D 2016 %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR %J IEEE Transactions on Pattern Analysis and Machine Intelligence %O IEEE Trans. Pattern Anal. Mach. Intell. %I IEEE Computer Society %C Los Alamitos, CA %@ false
Rhodin, H., Richardt, C., Casas, D., et al. 2016a. EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
BibTeX
@article{Rhodin2016SGA, TITLE = {{EgoCap}: {E}gocentric Marker-less Motion Capture with Two Fisheye Cameras}, AUTHOR = {Rhodin, Helge and Richardt, Christian and Casas, Dan and Insafutdinov, Eldar and Shafiei, Mohammad and Seidel, Hans-Peter and Schiele, Bernt and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {162}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Rhodin, Helge %A Richardt, Christian %A Casas, Dan %A Insafutdinov, Eldar %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Schiele, Bernt %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-8321-6 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 162 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Serrano, A., Heide, F., Gutierrez, D., Wetzstein, G., and Masia, B. 2016a. Convolutional Sparse Coding for High Dynamic Range Imaging. Computer Graphics Forum (Proc. EUROGRAPHICS 2016) 35, 2.
BibTeX
@article{CSHDR_EG2016, TITLE = {Convolutional Sparse Coding for High Dynamic Range Imaging}, AUTHOR = {Serrano, Ana and Heide, Felix and Gutierrez, Diego and Wetzstein, Gordon and Masia, Belen}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12819}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {35}, NUMBER = {2}, PAGES = {153--163}, BOOKTITLE = {The European Association for Computer Graphics 37th Annual Conference (EUROGRAPHICS 2016)}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Heide, Felix %A Gutierrez, Diego %A Wetzstein, Gordon %A Masia, Belen %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Convolutional Sparse Coding for High Dynamic Range Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-78E5-3 %R 10.1111/cgf.12819 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 2 %& 153 %P 153 - 163 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 37th Annual Conference %O EUROGRAPHICS 2016 Lisbon, Portugal, 9th-13th May 2016 EG 2016
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016b. An Intuitive Control Space for Material Appearance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
BibTeX
@article{Serrano_MaterialAppearance_2016, TITLE = {An Intuitive Control Space for Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980242}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {186}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B8-9 %R 10.1145/2980179.2980242 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 186 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Emulating Displays with Continuously Varying Frame Rates. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
BibTeX
@article{TemplinSIGGRAPH2016, TITLE = {Emulating Displays with Continuously Varying Frame Rates}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925879}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {67}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Emulating Displays with Continuously Varying Frame Rates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-018D-E %R 10.1145/2897824.2925879 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 67 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Velten, A., Wu, D., Masia, B., et al. 2016. Imaging the Propagation of Light through Scenes at Picosecond Resolution. Communications of the ACM 59, 9.
BibTeX
@article{Velten2016, TITLE = {Imaging the Propagation of Light through Scenes at Picosecond Resolution}, AUTHOR = {Velten, Andreas and Wu, Di and Masia, Belen and Jarabo, Adrian and Barsi, Christopher and Joshi, Chinmaya and Lawson, Everett and Bawendi, Moungi and Gutierrez, Diego and Raskar, Ramesh}, LANGUAGE = {eng}, ISSN = {0001-0782}, DOI = {10.1145/2975165}, PUBLISHER = {Association for Computing Machinery, Inc.}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Communications of the ACM}, VOLUME = {59}, NUMBER = {9}, PAGES = {79--86}, }
Endnote
%0 Journal Article %A Velten, Andreas %A Wu, Di %A Masia, Belen %A Jarabo, Adrian %A Barsi, Christopher %A Joshi, Chinmaya %A Lawson, Everett %A Bawendi, Moungi %A Gutierrez, Diego %A Raskar, Ramesh %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations %T Imaging the Propagation of Light through Scenes at Picosecond Resolution : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-7E47-4 %R 10.1145/2975165 %7 2016 %D 2016 %J Communications of the ACM %V 59 %N 9 %& 79 %P 79 - 86 %I Association for Computing Machinery, Inc. %C New York, NY %@ false
Von Radziewsky, P., Eisemann, E., Seidel, H.-P., and Hildebrandt, K. 2016. Optimized Subspaces for Deformation-based Modeling and Shape Interpolation. Computers and Graphics (Proc. SMI 2016) 58.
BibTeX
@article{Radziewsky2016, TITLE = {Optimized Subspaces for Deformation-based Modeling and Shape Interpolation}, AUTHOR = {von Radziewsky, Philipp and Eisemann, Elmar and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2016.05.016}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {Computers and Graphics (Proc. SMI)}, VOLUME = {58}, PAGES = {128--138}, BOOKTITLE = {Shape Modeling International 2016 (SMI 2016)}, }
Endnote
%0 Journal Article %A von Radziewsky, Philipp %A Eisemann, Elmar %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Optimized Subspaces for Deformation-based Modeling and Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0144-0 %R 10.1016/j.cag.2016.05.016 %7 2016 %D 2016 %J Computers and Graphics %V 58 %& 128 %P 128 - 138 %I Elsevier %C Amsterdam %@ false %B Shape Modeling International 2016 %O SMI 2016
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2016a. Stream Line–Based Pattern Search in Flows. Computer Graphics Forum Early View.
BibTeX
@article{Wang:Esturo:Seidel:Weinkauf2016, TITLE = {Stream Line--Based Pattern Search in Flows}, AUTHOR = {Wang, Zhongjie and Martinez Esturo, Janick and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12990}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, JOURNAL = {Computer Graphics Forum}, VOLUME = {Early View}, }
Endnote
%0 Journal Article %A Wang, Zhongjie %A Martinez Esturo, Janick %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stream Line–Based Pattern Search in Flows : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-4301-A %R 10.1111/cgf.12990 %7 2016 %D 2016 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V Early View %I Wiley-Blackwell %C Oxford %@ false
Wang, Z., Seidel, H.-P., and Weinkauf, T. 2016b. Multi-field Pattern Matching Based on Sparse Feature Sampling. IEEE Transactions on Visualization and Computer Graphics 22, 1.
BibTeX
@article{Wang2015, TITLE = {Multi-field Pattern Matching Based on Sparse Feature Sampling}, AUTHOR = {Wang, Zhongjie and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2015.2467292}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {22}, NUMBER = {1}, PAGES = {807--816}, }
Endnote
%0 Journal Article %A Wang, Zhongjie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Multi-field Pattern Matching Based on Sparse Feature Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-1A76-6 %R 10.1109/TVCG.2015.2467292 %7 2015 %D 2016 %J IEEE Transactions on Visualization and Computer Graphics %V 22 %N 1 %& 807 %P 807 - 816 %I IEEE Computer Society %C New York, NY %@ false
Wu, C., Bradley, D., Garrido, P., et al. 2016. Model-Based Teeth Reconstruction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
BibTeX
@article{Wu2016SGA, TITLE = {Model-Based Teeth Reconstruction}, AUTHOR = {Wu, Chenglei and Bradley, Derek and Garrido, Pablo and Zollh{\"o}fer, Michael and Theobalt, Christian and Gross, Markus and Beeler, Thabo}, LANGUAGE = {eng}, ISSN = {0730-0301}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {220}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Wu, Chenglei %A Bradley, Derek %A Garrido, Pablo %A Zollhöfer, Michael %A Theobalt, Christian %A Gross, Markus %A Beeler, Thabo %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Model-Based Teeth Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-23D0-7 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 220 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Book Item
Mantiuk, R.K. and Myszkowski, K. 2016. Perception-Inspired High Dynamic Range Video Coding and Compression. In: CHIPS 2020 VOL. 2. Springer, New York, NY.
BibTeX
@incollection{Mantiuk_Chips2020, TITLE = {Perception-Inspired High Dynamic Range Video Coding and Compression}, AUTHOR = {Mantiuk, Rafa{\l} K. and Myszkowski, Karol}, LANGUAGE = {eng}, ISBN = {978-3-319-22092-5}, DOI = {10.1007/978-3-319-22093-2_14}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {CHIPS 2020 VOL. 2}, EDITOR = {Hoefflinger, Bernd}, PAGES = {211--220}, SERIES = {The Frontiers Collection}, }
Endnote
%0 Book Section %A Mantiuk, Rafał K. %A Myszkowski, Karol %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-Inspired High Dynamic Range Video Coding and Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2DE8-3 %R 10.1007/978-3-319-22093-2_14 %D 2016 %B CHIPS 2020 VOL. 2 %E Hoefflinger, Bernd %P 211 - 220 %I Springer %C New York, NY %@ 978-3-319-22092-5 %S The Frontiers Collection
Conference Paper
Groeger, D., Chong Loo, E., and Steimle, J. 2016. HotFlex: Post-print Customization of 3D Prints Using Embedded State Change. CHI 2016, 34th Annual ACM Conference on Human Factors in Computing Systems, ACM.
BibTeX
@inproceedings{Groeger_chi2016, TITLE = {{HotFlex}: {P}ost-print Customization of {3D} Prints Using Embedded State Change}, AUTHOR = {Groeger, Daniel and Chong Loo, Elena and Steimle, J{\"u}rgen}, LANGUAGE = {eng}, ISBN = {978-1-4503-3362-7}, DOI = {10.1145/2858036.2858191}, PUBLISHER = {ACM}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {CHI 2016, 34th Annual ACM Conference on Human Factors in Computing Systems}, PAGES = {420--432}, ADDRESS = {San Jose, CA, USA}, }
Endnote
%0 Conference Proceedings %A Groeger, Daniel %A Chong Loo, Elena %A Steimle, Jürgen %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T HotFlex: Post-print Customization of 3D Prints Using Embedded State Change : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-07BA-3 %R 10.1145/2858036.2858191 %D 2016 %B 34th Annual ACM Conference on Human Factors in Computing Systems %Z date of event: 2016-05-07 - 2016-05-12 %C San Jose, CA, USA %B CHI 2016 %P 420 - 432 %I ACM %@ 978-1-4503-3362-7
Gryaditskaya, Y., Masia, B., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Gloss Editing in Light Fields. VMV 2016 Vision, Modeling and Visualization, Eurographics Association.
BibTeX
@inproceedings{jgryadit2016, TITLE = {Gloss Editing in Light Fields}, AUTHOR = {Gryaditskaya, Yulia and Masia, Belen and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-03868-025-3}, DOI = {10.2312/vmv.20161351}, PUBLISHER = {Eurographics Association}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {VMV 2016 Vision, Modeling and Visualization}, EDITOR = {Hullin, Matthias and Stamminger, Marc and Weinkauf, Tino}, PAGES = {127--135}, ADDRESS = {Bayreuth, Germany}, }
Endnote
%0 Conference Proceedings %A Gryaditskaya, Yulia %A Masia, Belen %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Gloss Editing in Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82C5-B %R 10.2312/vmv.20161351 %D 2016 %B 21st International Symposium on Vision, Modeling and Visualization %Z date of event: 2016-10-10 - 2016-10-12 %C Bayreuth, Germany %B VMV 2016 Vision, Modeling and Visualization %E Hullin, Matthias; Stamminger, Marc; Weinkauf, Tino %P 127 - 135 %I Eurographics Association %@ 978-3-03868-025-3
Innmann, M., Zollhöfer, M., Nießner, M., Theobalt, C., and Stamminger, M. 2016a. VolumeDeform: Real-Time Volumetric Non-rigid Reconstruction. Computer Vision – ECCV 2016, Springer.
BibTeX
@inproceedings{InnmannECCV2016, TITLE = {{VolumeDeform}: {R}eal-Time Volumetric Non-rigid Reconstruction}, AUTHOR = {Innmann, Matthias and Zollh{\"o}fer, Michael and Nie{\ss}ner, Matthias and Theobalt, Christian and Stamminger, Marc}, LANGUAGE = {eng}, ISBN = {978-3-319-46483-1}, DOI = {10.1007/978-3-319-46484-8_22}, PUBLISHER = {Springer}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Computer Vision -- ECCV 2016}, EDITOR = {Leibe, Bastian and Matas, Jiri and Sebe, Nicu and Welling, Max}, PAGES = {362--379}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {9912}, ADDRESS = {Amsterdam, The Netherlands}, }
Endnote
%0 Conference Proceedings %A Innmann, Matthias %A Zollhöfer, Michael %A Nießner, Matthias %A Theobalt, Christian %A Stamminger, Marc %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T VolumeDeform: Real-Time Volumetric Non-rigid Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A41-0 %R 10.1007/978-3-319-46484-8_22 %D 2016 %B 14th European Conference on Computer Vision %Z date of event: 2016-10-11 - 2016-10-14 %C Amsterdam, The Netherlands %B Computer Vision -- ECCV 2016 %E Leibe, Bastian; Matas, Jiri; Sebe, Nicu; Welling, Max %P 362 - 379 %I Springer %@ 978-3-319-46483-1 %B Lecture Notes in Computer Science %N 9912
Kim, H., Richardt, C., and Theobalt, C. 2016a. Video Depth-from-Defocus. Fourth International Conference on 3D Vision, IEEE Computer Society.
BibTeX
@inproceedings{Kim3DV2016, TITLE = {Video Depth-from-Defocus}, AUTHOR = {Kim, Hyeongwoo and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5090-5407-7}, DOI = {10.1109/3DV.2016.46}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Fourth International Conference on 3D Vision}, PAGES = {370--379}, ADDRESS = {Stanford, CA, USA}, }
Endnote
%0 Conference Proceedings %A Kim, Hyeongwoo %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society %T Video Depth-from-Defocus : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-557E-5 %R 10.1109/3DV.2016.46 %D 2016 %B Fourth International Conference on 3D Vision %Z date of event: 2016-10-25 - 2016-10-28 %C Stanford, CA, USA %B Fourth International Conference on 3D Vision %P 370 - 379 %I IEEE Computer Society %@ 978-1-5090-5407-7
Krafka, K., Khosla, A., Kellnhofer, P., et al. 2016. Eye Tracking for Everyone. 29th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2016), IEEE Computer Society.
BibTeX
@inproceedings{KrafkaCVPR2016, TITLE = {Eye Tracking for Everyone}, AUTHOR = {Krafka, Kyle and Khosla, Aditya and Kellnhofer, Petr and Kannan, Harini and Bhandarkar, Suchendra and Matusik, Wojciech and Torralba, Antonio}, LANGUAGE = {eng}, DOI = {10.1109/CVPR.2016.239}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {29th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2016)}, PAGES = {2176--2184}, ADDRESS = {Las Vegas, NV, USA}, }
Endnote
%0 Conference Proceedings %A Krafka, Kyle %A Khosla, Aditya %A Kellnhofer, Petr %A Kannan, Harini %A Bhandarkar, Suchendra %A Matusik, Wojciech %A Torralba, Antonio %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations %T Eye Tracking for Everyone : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-8245-D %R 10.1109/CVPR.2016.239 %D 2016 %B 29th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2016-06-26 - 2016-07-01 %C Las Vegas, NV, USA %B 29th IEEE Conference on Computer Vision and Pattern Recognition %P 2176 - 2184 %I IEEE Computer Society %U http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Krafka_Eye_Tracking_for_CVPR_2016_paper.pdf
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
BibTeX
@inproceedings{LeimkuehlerGI2016, TITLE = {Perceptual Real-Time {2D}-to-{3D} Conversion Using Cue Fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-9947868-1-4}, DOI = {10.20380/GI2016.02}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference}, EDITOR = {Popa, Tiberiu and Moffatt, Karyn}, PAGES = {5--12}, ADDRESS = {Victoria, Canada}, }
Endnote
%0 Conference Proceedings %A Leimkühler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-823D-1 %R 10.20380/GI2016.02 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 5 - 12 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Lochmann, G., Reinert, B., Buchacher, A., and Ritschel, T. 2016. Real-time Novel-view Synthesis for Volume Rendering Using a Piecewise-analytic Representation. VMV 2016 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{Lochmann:2016:vmv, TITLE = {Real-time Novel-view Synthesis for Volume Rendering Using a Piecewise-analytic Representation}, AUTHOR = {Lochmann, Gerrit and Reinert, Bernhard and Buchacher, Arend and Ritschel, Tobias}, LANGUAGE = {eng}, ISBN = {978-3-03868-025-3}, DOI = {10.2312/vmv.20161346}, PUBLISHER = {Eurographics Association}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {VMV 2016 Vision, Modeling and Visualization}, EDITOR = {Hullin, Matthias and Stamminger, Marc and Weinkauf, Tino}, PAGES = {85--92}, ADDRESS = {Bayreuth, Germany}, }
Endnote
%0 Conference Proceedings %A Lochmann, Gerrit %A Reinert, Bernhard %A Buchacher, Arend %A Ritschel, Tobias %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Real-time Novel-view Synthesis for Volume Rendering Using a Piecewise-analytic Representation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64EA-B %R 10.2312/vmv.20161346 %D 2016 %B 21st International Symposium on Vision, Modeling and Visualization %Z date of event: 2016-10-10 - 2016-10-12 %C Bayreuth, Germany %B VMV 2016 Vision, Modeling and Visualization %E Hullin, Matthias; Stamminger, Marc; Weinkauf, Tino %P 85 - 92 %I Eurographics Association %@ 978-3-03868-025-3
Moran, S. and Rashtchian, C. 2016. Shattered Sets and the Hilbert Function. 41st International Symposium on Mathematical Foundations of Computer Science (MFCS 2016), Schloss Dagstuhl.
Export
BibTeX
@inproceedings{MoranMFCS2016, TITLE = {Shattered Sets and the {H}ilbert Function}, AUTHOR = {Moran, Shay and Rashtchian, Cyrus}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-016-3}, URL = {urn:nbn:de:0030-drops-64814}, DOI = {10.4230/LIPIcs.MFCS.2016.70}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {41st International Symposium on Mathematical Foundations of Computer Science (MFCS 2016)}, EDITOR = {Sankowski, Piotr and Muscholl, Anca and Niedermeier, Rolf}, PAGES = {1--14}, EID = {70}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {58}, ADDRESS = {Krak{\'o}w, Poland}, }
Endnote
%0 Conference Proceedings %A Moran, Shay %A Rashtchian, Cyrus %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Shattered Sets and the Hilbert Function : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-51D6-E %U urn:nbn:de:0030-drops-64814 %R 10.4230/LIPIcs.MFCS.2016.70 %D 2016 %B 41st International Symposium on Mathematical Foundations of Computer Science %Z date of event: 2016-08-22 - 2016-08-26 %C Kraków, Poland %B 41st International Symposium on Mathematical Foundations of Computer Science %E Sankowski, Piotr; Muscholl, Anca; Niedermeier, Rolf %P 1 - 14 %Z sequence number: 70 %I Schloss Dagstuhl %@ 978-3-95977-016-3 %B Leibniz International Proceedings in Informatics %N 58 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2016/6481/
Nittala, A.S. and Steimle, J. 2016. Digital Fabrication Pipeline for On-Body Sensors: Design Goals and Challenges. UbiComp’16 Adjunct, ACM.
Export
BibTeX
@inproceedings{NittalaUbiComp2016, TITLE = {Digital Fabrication Pipeline for On-Body Sensors: {D}esign Goals and Challenges}, AUTHOR = {Nittala, Aditya Shekhar and Steimle, J{\"u}rgen}, LANGUAGE = {eng}, ISBN = {978-1-4503-4462-3}, DOI = {10.1145/2968219.2979140}, PUBLISHER = {ACM}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {UbiComp'16 Adjunct}, PAGES = {950--953}, ADDRESS = {Heidelberg, Germany}, }
Endnote
%0 Conference Proceedings %A Nittala, Aditya Shekhar %A Steimle, Jürgen %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Digital Fabrication Pipeline for On-Body Sensors: Design Goals and Challenges : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-989E-1 %R 10.1145/2968219.2979140 %D 2016 %B ACM International Joint Conference on Pervasive and Ubiquitous Computing %Z date of event: 2016-09-12 - 2016-09-16 %C Heidelberg, Germany %B UbiComp'16 Adjunct %P 950 - 953 %I ACM %@ 978-1-4503-4462-3
Pandey, A., Saxena, N., and Sinhababu, A. 2016. Algebraic Independence over Positive Characteristic: New Criterion and Applications to Locally Low Algebraic Rank Circuits. 41st International Symposium on Mathematical Foundations of Computer Science (MFCS 2016), Schloss Dagstuhl.
Export
BibTeX
@inproceedings{pandey_et_al:LIPIcs:2016:6505, TITLE = {Algebraic Independence over Positive Characteristic: {N}ew Criterion and Applications to Locally Low Algebraic Rank Circuits}, AUTHOR = {Pandey, Anurag and Saxena, Nitin and Sinhababu, Amit}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-016-3}, URL = {urn:nbn:de:0030-drops-65057}, DOI = {10.4230/LIPIcs.MFCS.2016.74}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {41st International Symposium on Mathematical Foundations of Computer Science (MFCS 2016)}, EDITOR = {Sankowski, Piotr and Muscholl, Anca and Niedermeier, Rolf}, PAGES = {1--15}, EID = {74}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {58}, ADDRESS = {Krak{\'o}w, Poland}, }
Endnote
%0 Conference Proceedings %A Pandey, Anurag %A Saxena, Nitin %A Sinhababu, Amit %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Algebraic Independence over Positive Characteristic: New Criterion and Applications to Locally Low Algebraic Rank Circuits : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5895-6 %U urn:nbn:de:0030-drops-65057 %R 10.4230/LIPIcs.MFCS.2016.74 %D 2016 %B 41st International Symposium on Mathematical Foundations of Computer Science %Z date of event: 2016-08-22 - 2016-08-26 %C Kraków, Poland %B 41st International Symposium on Mathematical Foundations of Computer Science %E Sankowski, Piotr; Muscholl, Anca; Niedermeier, Rolf %P 1 - 15 %Z sequence number: 74 %I Schloss Dagstuhl %@ 978-3-95977-016-3 %B Leibniz International Proceedings in Informatics %N 58 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2016/6505/
Reinert, B., Ritschel, T., and Seidel, H.-P. 2016c. Animated 3D Creatures from Single-view Video by Skeletal Sketching. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{Reinert:2016:AnimatedCreatures, TITLE = {Animated {3D} Creatures from Single-view Video by Skeletal Sketching}, AUTHOR = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-9947868-1-4}, DOI = {10.20380/GI2016.17}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference}, EDITOR = {Popa, Tiberiu and Moffatt, Karyn}, PAGES = {133--143}, ADDRESS = {Victoria, BC, Canada}, }
Endnote
%0 Conference Proceedings %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Animated 3D Creatures from Single-view Video by Skeletal Sketching : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64EC-7 %R 10.20380/GI2016.17 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, BC, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 133 - 143 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016b. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. Computer Vision -- ECCV 2016, Springer.
Export
BibTeX
@inproceedings{RhodinECCV2016, TITLE = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-3-319-46453-4}, DOI = {10.1007/978-3-319-46454-1_31}, PUBLISHER = {Springer}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Computer Vision -- ECCV 2016}, EDITOR = {Leibe, Bastian and Matas, Jiri and Sebe, Nicu and Welling, Max}, PAGES = {509--526}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {9909}, ADDRESS = {Amsterdam, The Netherlands}, }
Endnote
%0 Conference Proceedings %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-986D-F %R 10.1007/978-3-319-46454-1_31 %D 2016 %B 14th European Conference on Computer Vision %Z date of event: 2016-10-11 - 2016-10-14 %C Amsterdam, The Netherlands %B Computer Vision -- ECCV 2016 %E Leibe, Bastian; Matas, Jiri; Sebe, Nicu; Welling, Max %P 509 - 526 %I Springer %@ 978-3-319-46453-4 %B Lecture Notes in Computer Science %N 9909
Richardt, C., Kim, H., Valgaerts, L., and Theobalt, C. 2016a. Dense Wide-Baseline Scene Flow from Two Handheld Video Cameras. Fourth International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Richardt3DV2016, TITLE = {Dense Wide-Baseline Scene Flow from Two Handheld Video Cameras}, AUTHOR = {Richardt, Christian and Kim, Hyeongwoo and Valgaerts, Levi and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5090-5407-7}, DOI = {10.1109/3DV.2016.36}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Fourth International Conference on 3D Vision}, PAGES = {276--285}, ADDRESS = {Stanford, CA, USA}, }
Endnote
%0 Conference Proceedings %A Richardt, Christian %A Kim, Hyeongwoo %A Valgaerts, Levi %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dense Wide-Baseline Scene Flow from Two Handheld Video Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-557C-9 %R 10.1109/3DV.2016.36 %D 2016 %B Fourth International Conference on 3D Vision %Z date of event: 2016-10-25 - 2016-10-28 %C Stanford, CA, USA %B Fourth International Conference on 3D Vision %P 276 - 285 %I IEEE Computer Society %@ 978-1-5090-5407-7
Robertini, N., Casas, D., Rhodin, H., Seidel, H.-P., and Theobalt, C. 2016a. Model-Based Outdoor Performance Capture. Fourth International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Robertini:2016, TITLE = {Model-Based Outdoor Performance Capture}, AUTHOR = {Robertini, Nadia and Casas, Dan and Rhodin, Helge and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5090-5407-7}, URL = {http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/}, DOI = {10.1109/3DV.2016.25}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Fourth International Conference on 3D Vision}, PAGES = {166--175}, ADDRESS = {Stanford, CA, USA}, }
Endnote
%0 Conference Proceedings %A Robertini, Nadia %A Casas, Dan %A Rhodin, Helge %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Model-Based Outdoor Performance Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A6D-2 %R 10.1109/3DV.2016.25 %U http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/ %D 2016 %B Fourth International Conference on 3D Vision %Z date of event: 2016-10-25 - 2016-10-28 %C Stanford, CA, USA %B Fourth International Conference on 3D Vision %P 166 - 175 %I IEEE Computer Society %@ 978-1-5090-5407-7
Sridhar, S., Müller, F., Zollhöfer, M., Casas, D., Oulasvirta, A., and Theobalt, C. 2016a. Real-Time Joint Tracking of a Hand Manipulating an Object from RGB-D Input. Computer Vision -- ECCV 2016, Springer.
Export
BibTeX
@inproceedings{SridharECCV2016, TITLE = {Real-Time Joint Tracking of a Hand Manipulating an Object from {RGB}-{D} Input}, AUTHOR = {Sridhar, Srinath and M{\"u}ller, Franziska and Zollh{\"o}fer, Michael and Casas, Dan and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-3-319-46474-9}, DOI = {10.1007/978-3-319-46475-6_19}, PUBLISHER = {Springer}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Computer Vision -- ECCV 2016}, EDITOR = {Leibe, Bastian and Matas, Jiri and Sebe, Nicu and Welling, Max}, PAGES = {294--310}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {9906}, ADDRESS = {Amsterdam, The Netherlands}, }
Endnote
%0 Conference Proceedings %A Sridhar, Srinath %A Müller, Franziska %A Zollhöfer, Michael %A Casas, Dan %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Joint Tracking of a Hand Manipulating an Object from RGB-D Input : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A3D-B %R 10.1007/978-3-319-46475-6_19 %D 2016 %B 14th European Conference on Computer Vision %Z date of event: 2016-10-11 - 2016-10-14 %C Amsterdam, The Netherlands %B Computer Vision -- ECCV 2016 %E Leibe, Bastian; Matas, Jiri; Sebe, Nicu; Welling, Max %P 294 - 310 %I Springer %@ 978-3-319-46474-9 %B Lecture Notes in Computer Science %N 9906
Steinberger, M., Derler, A., Zayer, R., and Seidel, H.-P. 2016. How Naive is Naive SpMV on the GPU? IEEE High Performance Extreme Computing Conference (HPEC 2016), IEEE.
Export
BibTeX
@inproceedings{SteinbergerHPEC2016, TITLE = {How naive is naive {SpMV} on the {GPU}?}, AUTHOR = {Steinberger, Markus and Derler, Andreas and Zayer, Rhaleb and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-5090-3525-0}, DOI = {10.1109/HPEC.2016.7761634}, PUBLISHER = {IEEE}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2016)}, PAGES = {1--8}, ADDRESS = {Waltham, MA, USA}, }
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Derler, Andreas %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T How Naive is Naive SpMV on the GPU? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98A5-F %R 10.1109/HPEC.2016.7761634 %D 2016 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2016-09-13 - 2016-09-15 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5090-3525-0
Thies, J., Zollhöfer, M., Stamminger, M., Theobalt, C., and Nießner, M. 2016a. Face2Face: Real-Time Face Capture and Reenactment of RGB Videos. 29th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2016), IEEE Computer Society.
Export
BibTeX
@inproceedings{thies2016face, TITLE = {{Face2Face}: {R}eal-Time Face Capture and Reenactment of {RGB} Videos}, AUTHOR = {Thies, Justus and Zollh{\"o}fer, Michael and Stamminger, Marc and Theobalt, Christian and Nie{\ss}ner, Matthias}, LANGUAGE = {eng}, ISBN = {978-1-4673-8852-8}, DOI = {10.1109/CVPR.2016.262}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {29th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2016)}, PAGES = {2387--2395}, ADDRESS = {Las Vegas, NV, USA}, }
Endnote
%0 Conference Proceedings %A Thies, Justus %A Zollhöfer, Michael %A Stamminger, Marc %A Theobalt, Christian %A Nießner, Matthias %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Face2Face: Real-Time Face Capture and Reenactment of RGB Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A43-B %R 10.1109/CVPR.2016.262 %D 2016 %B 29th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2016-06-26 - 2016-07-01 %C Las Vegas, NV, USA %B 29th IEEE Conference on Computer Vision and Pattern Recognition %P 2387 - 2395 %I IEEE Computer Society %@ 978-1-4673-8852-8
Thies, J., Zollhöfer, M., Stamminger, M., Theobalt, C., and Nießner, M. 2016b. Demo of Face2Face: Real-time Face Capture and Reenactment of RGB Videos. ACM SIGGRAPH 2016 Emerging Technologies, ACM.
Export
BibTeX
@inproceedings{ThiesSIGGRAPH2016, TITLE = {Demo of {Face2Face}: {R}eal-time face capture and reenactment of {RGB} videos}, AUTHOR = {Thies, Justus and Zollh{\"o}fer, Michael and Stamminger, Marc and Theobalt, Christian and Nie{\ss}ner, Matthias}, LANGUAGE = {eng}, ISBN = {978-1-4503-4372-5}, DOI = {10.1145/2929464.2929475}, PUBLISHER = {ACM}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {ACM SIGGRAPH 2016 Emerging Technologies}, EID = {5}, ADDRESS = {Anaheim, CA, USA}, }
Endnote
%0 Conference Proceedings %A Thies, Justus %A Zollhöfer, Michael %A Stamminger, Marc %A Theobalt, Christian %A Nießner, Matthias %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Demo of Face2Face: Real-time Face Capture and Reenactment of RGB Videos : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A4C-9 %R 10.1145/2929464.2929475 %D 2016 %B 43rd International Conference and Exhibition on Computer Graphics & Interactive Techniques %Z date of event: 2016-07-24 - 2016-07-28 %C Anaheim, CA, USA %B ACM SIGGRAPH 2016 Emerging Technologies %Z sequence number: 5 %I ACM %@ 978-1-4503-4372-5
Thies, L., Zollhöfer, M., Richardt, C., Theobalt, C., and Greiner, G. 2016c. Real-time Halfway Domain Reconstruction of Motion and Geometry. Fourth International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Thies3DV2016, TITLE = {Real-time Halfway Domain Reconstruction of Motion and Geometry}, AUTHOR = {Thies, Lucas and Zollh{\"o}fer, Michael and Richardt, Christian and Theobalt, Christian and Greiner, G{\"u}nther}, LANGUAGE = {eng}, ISBN = {978-1-5090-5407-7}, DOI = {10.1109/3DV.2016.55}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Fourth International Conference on 3D Vision}, PAGES = {450--459}, ADDRESS = {Stanford, CA, USA}, }
Endnote
%0 Conference Proceedings %A Thies, Lucas %A Zollhöfer, Michael %A Richardt, Christian %A Theobalt, Christian %A Greiner, Günther %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Real-time Halfway Domain Reconstruction of Motion and Geometry : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-B033-8 %R 10.1109/3DV.2016.55 %D 2016 %B Fourth International Conference on 3D Vision %Z date of event: 2016-10-25 - 2016-10-28 %C Stanford, CA, USA %B Fourth International Conference on 3D Vision %P 450 - 459 %I IEEE Computer Society %@ 978-1-5090-5407-7
Voglreiter, P., Hofmann, M., Ebner, C., et al. 2016. Visualization-Guided Evaluation of Simulated Minimally Invasive Cancer Treatment. Eurographics Workshop on Visual Computing for Biology and Medicine (EG VCBM 2016), Eurographics Association.
Export
BibTeX
@inproceedings{Voglreiter:VES:20161284, TITLE = {Visualization-Guided Evaluation of Simulated Minimally Invasive Cancer Treatment}, AUTHOR = {Voglreiter, Philip and Hofmann, Michael and Ebner, Christoph and Blanco Sequeiros, Roberto and Portugaller, Horst Rupert and F{\"u}tterer, J{\"u}rgen and Moche, Michael and Steinberger, Markus and Schmalstieg, Dieter}, LANGUAGE = {eng}, DOI = {10.2312/vcbm.20161284}, PUBLISHER = {Eurographics Association}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {Eurographics Workshop on Visual Computing for Biology and Medicine (EG VCBM 2016)}, EDITOR = {Bruckner, Stefan and Preim, Bernhard and Vilanova, Anna}, PAGES = {163--172}, ADDRESS = {Bergen, Norway}, }
Endnote
%0 Conference Proceedings %A Voglreiter, Philip %A Hofmann, Michael %A Ebner, Christoph %A Blanco Sequeiros, Roberto %A Portugaller, Horst Rupert %A Fütterer, Jürgen %A Moche, Michael %A Steinberger, Markus %A Schmalstieg, Dieter %+ External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Visualization-Guided Evaluation of Simulated Minimally Invasive Cancer Treatment : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98CD-8 %R 10.2312/vcbm.20161284 %D 2016 %B Eurographics Workshop on Visual Computing for Biology and Medicine %Z date of event: 2016-09-07 - 2016-09-09 %C Bergen, Norway %B Eurographics Workshop on Visual Computing for Biology and Medicine %E Bruckner, Stefan; Preim, Bernhard; Vilanova, Anna %P 163 - 172 %I Eurographics Association
Conference Poster
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016c. Intuitive Editing of Material Appearance. ACM SIGGRAPH 2016 Posters, ACM.
Export
BibTeX
@inproceedings{SerranoSIGGRAPH2016, TITLE = {Intuitive Editing of Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, ISBN = {978-1-4503-4371-8}, DOI = {10.1145/2945078.2945141}, PUBLISHER = {ACM}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, BOOKTITLE = {ACM SIGGRAPH 2016 Posters}, PAGES = {1--2}, EID = {63}, ADDRESS = {Anaheim, CA, USA}, }
Endnote
%0 Generic %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Intuitive Editing of Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0170-C %R 10.1145/2945078.2945141 %D 2016 %Z name of event: the 43rd International Conference and Exhibition on Computer Graphics & Interactive Techniques %Z date of event: 2016-07-24 - 2016-07-28 %Z place of event: Anaheim, CA, USA %B ACM SIGGRAPH 2016 Posters %P 1 - 2 %Z sequence number: 63 %I ACM %@ 978-1-4503-4371-8
Paper
Dai, A., Nießner, M., Zollhöfer, M., Izadi, S., and Theobalt, C. 2016. BundleFusion: Real-time Globally Consistent 3D Reconstruction using On-the-fly Surface Re-integration. http://arxiv.org/abs/1604.01093.
(arXiv: 1604.01093)
Abstract
Real-time, high-quality, 3D scanning of large-scale scenes is key to mixed reality and robotic applications. However, scalability brings challenges of drift in pose estimation, introducing significant errors in the accumulated model. Approaches often require hours of offline processing to globally correct model errors. Recent online methods demonstrate compelling results, but suffer from: (1) needing minutes to perform online correction preventing true real-time use; (2) brittle frame-to-frame (or frame-to-model) pose estimation resulting in many tracking failures; or (3) supporting only unstructured point-based representations, which limit scan quality and applicability. We systematically address these issues with a novel, real-time, end-to-end reconstruction framework. At its core is a robust pose estimation strategy, optimizing per frame for a global set of camera poses by considering the complete history of RGB-D input with an efficient hierarchical approach. We remove the heavy reliance on temporal tracking, and continually localize to the globally optimized frames instead. We contribute a parallelizable optimization framework, which employs correspondences based on sparse features and dense geometric and photometric matching. Our approach estimates globally optimized (i.e., bundle adjusted) poses in real-time, supports robust tracking with recovery from gross tracking failures (i.e., relocalization), and re-estimates the 3D model in real-time to ensure global consistency; all within a single framework. Our approach outperforms state-of-the-art online systems with quality on par to offline methods, but with unprecedented speed and scan completeness. Our framework leads to a comprehensive online scanning solution for large indoor environments, enabling ease of use and high-quality results.
Export
BibTeX
@online{DaiarXiv1604.01093, TITLE = {{BundleFusion}: {R}eal-time Globally Consistent {3D} Reconstruction using On-the-fly Surface Re-integration}, AUTHOR = {Dai, Angela and Nie{\ss}ner, Matthias and Zollh{\"o}fer, Michael and Izadi, Shahram and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1604.01093}, EPRINT = {1604.01093}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Real-time, high-quality, 3D scanning of large-scale scenes is key to mixed reality and robotic applications. However, scalability brings challenges of drift in pose estimation, introducing significant errors in the accumulated model. Approaches often require hours of offline processing to globally correct model errors. Recent online methods demonstrate compelling results, but suffer from: (1) needing minutes to perform online correction preventing true real-time use; (2) brittle frame-to-frame (or frame-to-model) pose estimation resulting in many tracking failures; or (3) supporting only unstructured point-based representations, which limit scan quality and applicability. We systematically address these issues with a novel, real-time, end-to-end reconstruction framework. At its core is a robust pose estimation strategy, optimizing per frame for a global set of camera poses by considering the complete history of RGB-D input with an efficient hierarchical approach. We remove the heavy reliance on temporal tracking, and continually localize to the globally optimized frames instead. We contribute a parallelizable optimization framework, which employs correspondences based on sparse features and dense geometric and photometric matching. Our approach estimates globally optimized (i.e., bundle adjusted) poses in real-time, supports robust tracking with recovery from gross tracking failures (i.e., relocalization), and re-estimates the 3D model in real-time to ensure global consistency; all within a single framework. Our approach outperforms state-of-the-art online systems with quality on par to offline methods, but with unprecedented speed and scan completeness. Our framework leads to a comprehensive online scanning solution for large indoor environments, enabling ease of use and high-quality results.}, }
Endnote
%0 Report %A Dai, Angela %A Nießner, Matthias %A Zollhöfer, Michael %A Izadi, Shahram %A Theobalt, Christian %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T BundleFusion: Real-time Globally Consistent 3D Reconstruction using On-the-fly Surface Re-integration : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A9F-2 %U http://arxiv.org/abs/1604.01093 %D 2016 %X Real-time, high-quality, 3D scanning of large-scale scenes is key to mixed reality and robotic applications. However, scalability brings challenges of drift in pose estimation, introducing significant errors in the accumulated model. Approaches often require hours of offline processing to globally correct model errors. Recent online methods demonstrate compelling results, but suffer from: (1) needing minutes to perform online correction preventing true real-time use; (2) brittle frame-to-frame (or frame-to-model) pose estimation resulting in many tracking failures; or (3) supporting only unstructured point-based representations, which limit scan quality and applicability. We systematically address these issues with a novel, real-time, end-to-end reconstruction framework. At its core is a robust pose estimation strategy, optimizing per frame for a global set of camera poses by considering the complete history of RGB-D input with an efficient hierarchical approach. We remove the heavy reliance on temporal tracking, and continually localize to the globally optimized frames instead. We contribute a parallelizable optimization framework, which employs correspondences based on sparse features and dense geometric and photometric matching. Our approach estimates globally optimized (i.e., bundle adjusted) poses in real-time, supports robust tracking with recovery from gross tracking failures (i.e., relocalization), and re-estimates the 3D model in real-time to ensure global consistency; all within a single framework. Our approach outperforms state-of-the-art online systems with quality on par to offline methods, but with unprecedented speed and scan completeness. Our framework leads to a comprehensive online scanning solution for large indoor environments, enabling ease of use and high-quality results. %K Computer Science, Graphics, cs.GR,Computer Science, Computer Vision and Pattern Recognition, cs.CV
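The globally optimized ("bundle adjusted") pose estimation described in the BundleFusion abstract above can be illustrated, under generic assumptions, by a textbook-style global alignment energy; this is only a sketch of the problem class, not the paper's exact formulation, which per the abstract also combines sparse feature and dense geometric and photometric terms. With rigid camera poses $T_1, \dots, T_N$ and a set $\mathcal{C}_{ij}$ of matched 3D point pairs between frames $i$ and $j$:

$E(T_1, \dots, T_N) \;=\; \sum_{(i,j)} \; \sum_{(\mathbf{p}, \mathbf{q}) \in \mathcal{C}_{ij}} \big\lVert T_i\,\mathbf{p} - T_j\,\mathbf{q} \big\rVert_2^2$

Solving for all poses jointly against the complete input history, rather than chaining frame-to-frame estimates, is what removes the reliance on temporal tracking that the abstract identifies as a failure mode.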
DeVito, Z., Mara, M., Zollhöfer, M., et al. 2016. Opt: A Domain Specific Language for Non-linear Least Squares Optimization in Graphics and Imaging. http://arxiv.org/abs/1604.06525.
(arXiv: 1604.06525)
Abstract
Many graphics and vision problems are naturally expressed as optimizations with either linear or non-linear least squares objective functions over visual data, such as images and meshes. The mathematical descriptions of these functions are extremely concise, but their implementation in real code is tedious, especially when optimized for real-time performance in interactive applications. We propose a new language, Opt (available under http://optlang.org), in which a user simply writes energy functions over image- or graph-structured unknowns, and a compiler automatically generates state-of-the-art GPU optimization kernels. The end result is a system in which real-world energy functions in graphics and vision applications are expressible in tens of lines of code. They compile directly into highly-optimized GPU solver implementations with performance competitive with the best published hand-tuned, application-specific GPU solvers, and 1-2 orders of magnitude beyond a general-purpose auto-generated solver.
Export
BibTeX
@online{DeVito1604.06525, TITLE = {Opt: A Domain Specific Language for Non-linear Least Squares Optimization in Graphics and Imaging}, AUTHOR = {DeVito, Zachary and Mara, Michael and Zollh{\"o}fer, Michael and Bernstein, Gilbert and Ragan-Kelley, Jonathan and Theobalt, Christian and Hanrahan, Pat and Fisher, Matthew and Nie{\ss}ner, Matthias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1604.06525}, EPRINT = {1604.06525}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Many graphics and vision problems are naturally expressed as optimizations with either linear or non-linear least squares objective functions over visual data, such as images and meshes. The mathematical descriptions of these functions are extremely concise, but their implementation in real code is tedious, especially when optimized for real-time performance in interactive applications. We propose a new language, Opt (available under http://optlang.org), in which a user simply writes energy functions over image- or graph-structured unknowns, and a compiler automatically generates state-of-the-art GPU optimization kernels. The end result is a system in which real-world energy functions in graphics and vision applications are expressible in tens of lines of code. They compile directly into highly-optimized GPU solver implementations with performance competitive with the best published hand-tuned, application-specific GPU solvers, and 1-2 orders of magnitude beyond a general-purpose auto-generated solver.}, }
Endnote
%0 Report %A DeVito, Zachary %A Mara, Michael %A Zollhöfer, Michael %A Bernstein, Gilbert %A Ragan-Kelley, Jonathan %A Theobalt, Christian %A Hanrahan, Pat %A Fisher, Matthew %A Nießner, Matthias %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Opt: A Domain Specific Language for Non-linear Least Squares Optimization in Graphics and Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9AA6-0 %U http://arxiv.org/abs/1604.06525 %D 2016 %X Many graphics and vision problems are naturally expressed as optimizations with either linear or non-linear least squares objective functions over visual data, such as images and meshes. The mathematical descriptions of these functions are extremely concise, but their implementation in real code is tedious, especially when optimized for real-time performance in interactive applications. We propose a new language, Opt (available under http://optlang.org), in which a user simply writes energy functions over image- or graph-structured unknowns, and a compiler automatically generates state-of-the-art GPU optimization kernels. The end result is a system in which real-world energy functions in graphics and vision applications are expressible in tens of lines of code. They compile directly into highly-optimized GPU solver implementations with performance competitive with the best published hand-tuned, application-specific GPU solvers, and 1-2 orders of magnitude beyond a general-purpose auto-generated solver. %K Computer Science, Graphics, cs.GR,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Programming Languages, cs.PL
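The Opt abstract above describes compiling user-written energy functions into GPU solver kernels. As a rough illustration of the underlying problem class only (plain NumPy, not Opt's actual language, whose syntax is not reproduced here; the toy residual is a made-up example), one Gauss-Newton iteration for a small non-linear least squares fit looks like this:

import numpy as np

# Toy residual: fit y = a * exp(b * t); the unknowns are (a, b).
def residuals(x, t, y):
    a, b = x
    return a * np.exp(b * t) - y

def jacobian(x, t):
    a, b = x
    J = np.empty((t.size, 2))
    J[:, 0] = np.exp(b * t)          # d r / d a
    J[:, 1] = a * t * np.exp(b * t)  # d r / d b
    return J

def gauss_newton(x, t, y, iters=20):
    for _ in range(iters):
        r = residuals(x, t, y)
        J = jacobian(x, t)
        # Normal equations (J^T J) dx = -J^T r give the Gauss-Newton step.
        x = x + np.linalg.solve(J.T @ J, -J.T @ r)
    return x

t = np.linspace(0.0, 1.0, 50)
y = 2.0 * np.exp(1.5 * t)
print(gauss_newton(np.array([1.0, 1.0]), t, y))  # converges to ~[2.0, 1.5]

Hand-deriving Jacobians and tuning such solvers per energy function and per GPU is exactly the tedium the abstract says the DSL removes.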
Garrido, P., Valgaerts, L., Rehmsen, O., Thormählen, T., Perez, P., and Theobalt, C. 2016c. Automatic Face Reenactment. http://arxiv.org/abs/1602.02651.
(arXiv: 1602.02651)
Abstract
We propose an image-based, facial reenactment system that replaces the face of an actor in an existing target video with the face of a user from a source video, while preserving the original target performance. Our system is fully automatic and does not require a database of source expressions. Instead, it is able to produce convincing reenactment results from a short source video captured with an off-the-shelf camera, such as a webcam, where the user performs arbitrary facial gestures. Our reenactment pipeline is conceived as part image retrieval and part face transfer: The image retrieval is based on temporal clustering of target frames and a novel image matching metric that combines appearance and motion to select candidate frames from the source video, while the face transfer uses a 2D warping strategy that preserves the user's identity. Our system excels in simplicity as it does not rely on a 3D face model, it is robust under head motion and does not require the source and target performance to be similar. We show convincing reenactment results for videos that we recorded ourselves and for low-quality footage taken from the Internet.
Export
BibTeX
@online{GarridoarXiv1602.02651, TITLE = {Automatic Face Reenactment}, AUTHOR = {Garrido, Pablo and Valgaerts, Levi and Rehmsen, Ole and Thorm{\"a}hlen, Thorsten and Perez, Patrick and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.02651}, EPRINT = {1602.02651}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We propose an image-based, facial reenactment system that replaces the face of an actor in an existing target video with the face of a user from a source video, while preserving the original target performance. Our system is fully automatic and does not require a database of source expressions. Instead, it is able to produce convincing reenactment results from a short source video captured with an off-the-shelf camera, such as a webcam, where the user performs arbitrary facial gestures. Our reenactment pipeline is conceived as part image retrieval and part face transfer: The image retrieval is based on temporal clustering of target frames and a novel image matching metric that combines appearance and motion to select candidate frames from the source video, while the face transfer uses a 2D warping strategy that preserves the user's identity. Our system excels in simplicity as it does not rely on a 3D face model, it is robust under head motion and does not require the source and target performance to be similar. We show convincing reenactment results for videos that we recorded ourselves and for low-quality footage taken from the Internet.}, }
Endnote
%0 Report %A Garrido, Pablo %A Valgaerts, Levi %A Rehmsen, Ole %A Thormählen, Thorsten %A Perez, Patrick %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Automatic Face Reenactment : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A53-8 %U http://arxiv.org/abs/1602.02651 %D 2016 %X We propose an image-based, facial reenactment system that replaces the face of an actor in an existing target video with the face of a user from a source video, while preserving the original target performance. Our system is fully automatic and does not require a database of source expressions. Instead, it is able to produce convincing reenactment results from a short source video captured with an off-the-shelf camera, such as a webcam, where the user performs arbitrary facial gestures. Our reenactment pipeline is conceived as part image retrieval and part face transfer: The image retrieval is based on temporal clustering of target frames and a novel image matching metric that combines appearance and motion to select candidate frames from the source video, while the face transfer uses a 2D warping strategy that preserves the user's identity. Our system excels in simplicity as it does not rely on a 3D face model, it is robust under head motion and does not require the source and target performance to be similar. We show convincing reenactment results for videos that we recorded ourselves and for low-quality footage taken from the Internet. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Georgoulis, S., Rematas, K., Ritschel, T., Fritz, M., Tuytelaars, T., and Van Gool, L. 2016. Natural Illumination from Multiple Materials Using Deep Learning. http://arxiv.org/abs/1611.09325.
(arXiv: 1611.09325)
Abstract
Recovering natural illumination from a single Low-Dynamic Range (LDR) image is a challenging task. To remedy this situation we exploit two properties often found in everyday images. First, images rarely show a single material, but rather multiple ones that all reflect the same illumination. However, the appearance of each material is observed only for some surface orientations, not all. Second, parts of the illumination are often directly observed in the background, without being affected by reflection. Typically, this directly observed part of the illumination is even smaller. We propose a deep Convolutional Neural Network (CNN) that combines prior knowledge about the statistics of illumination and reflectance with an input that makes explicit use of these two observations. Our approach maps multiple partial LDR material observations represented as reflectance maps and a background image to a spherical High-Dynamic Range (HDR) illumination map. For training and testing we propose a new data set comprising of synthetic and real images with multiple materials observed under the same illumination. Qualitative and quantitative evidence shows how both multi-material and using a background are essential to improve illumination estimations.
Export
BibTeX
@online{Fritzarxiv16, TITLE = {Natural Illumination from Multiple Materials Using Deep Learning}, AUTHOR = {Georgoulis, Stamatios and Rematas, Konstantinos and Ritschel, Tobias and Fritz, Mario and Tuytelaars, Tinne and Van Gool, Luc}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1611.09325}, EPRINT = {1611.09325}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Recovering natural illumination from a single Low-Dynamic Range (LDR) image is a challenging task. To remedy this situation we exploit two properties often found in everyday images. First, images rarely show a single material, but rather multiple ones that all reflect the same illumination. However, the appearance of each material is observed only for some surface orientations, not all. Second, parts of the illumination are often directly observed in the background, without being affected by reflection. Typically, this directly observed part of the illumination is even smaller. We propose a deep Convolutional Neural Network (CNN) that combines prior knowledge about the statistics of illumination and reflectance with an input that makes explicit use of these two observations. Our approach maps multiple partial LDR material observations represented as reflectance maps and a background image to a spherical High-Dynamic Range (HDR) illumination map. For training and testing we propose a new data set comprising of synthetic and real images with multiple materials observed under the same illumination. Qualitative and quantitative evidence shows how both multi-material and using a background are essential to improve illumination estimations.}, }
Endnote
%0 Report %A Georgoulis, Stamatios %A Rematas, Konstantinos %A Ritschel, Tobias %A Fritz, Mario %A Tuytelaars, Tinne %A Van Gool, Luc %+ External Organizations Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Natural Illumination from Multiple Materials Using Deep Learning : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-270F-0 %U http://arxiv.org/abs/1611.09325 %D 2016 %X Recovering natural illumination from a single Low-Dynamic Range (LDR) image is a challenging task. To remedy this situation we exploit two properties often found in everyday images. First, images rarely show a single material, but rather multiple ones that all reflect the same illumination. However, the appearance of each material is observed only for some surface orientations, not all. Second, parts of the illumination are often directly observed in the background, without being affected by reflection. Typically, this directly observed part of the illumination is even smaller. We propose a deep Convolutional Neural Network (CNN) that combines prior knowledge about the statistics of illumination and reflectance with an input that makes explicit use of these two observations. Our approach maps multiple partial LDR material observations represented as reflectance maps and a background image to a spherical High-Dynamic Range (HDR) illumination map. For training and testing we propose a new data set comprising of synthetic and real images with multiple materials observed under the same illumination. Qualitative and quantitative evidence shows how both multi-material and using a background are essential to improve illumination estimations. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Innmann, M., Zollhöfer, M., Nießner, M., Theobalt, C., and Stamminger, M. 2016b. VolumeDeform: Real-time Volumetric Non-rigid Reconstruction. http://arxiv.org/abs/1603.08161.
(arXiv: 1603.08161)
Abstract
We present a novel approach for the reconstruction of dynamic geometric shapes using a single hand-held consumer-grade RGB-D sensor at real-time rates. Our method does not require a pre-defined shape template to start with and builds up the scene model from scratch during the scanning process. Geometry and motion are parameterized in a unified manner by a volumetric representation that encodes a distance field of the surface geometry as well as the non-rigid space deformation. Motion tracking is based on a set of extracted sparse color features in combination with a dense depth-based constraint formulation. This enables accurate tracking and drastically reduces drift inherent to standard model-to-depth alignment. We cast finding the optimal deformation of space as a non-linear regularized variational optimization problem by enforcing local smoothness and proximity to the input constraints. The problem is tackled in real-time at the camera's capture rate using a data-parallel flip-flop optimization strategy. Our results demonstrate robust tracking even for fast motion and scenes that lack geometric features.
Export
BibTeX
@online{InnmannarXiv1603.08161, TITLE = {{VolumeDeform}: Real-time Volumetric Non-rigid Reconstruction}, AUTHOR = {Innmann, Matthias and Zollh{\"o}fer, Michael and Nie{\ss}ner, Matthias and Theobalt, Christian and Stamminger, Marc}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1603.08161}, EPRINT = {1603.08161}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present a novel approach for the reconstruction of dynamic geometric shapes using a single hand-held consumer-grade RGB-D sensor at real-time rates. Our method does not require a pre-defined shape template to start with and builds up the scene model from scratch during the scanning process. Geometry and motion are parameterized in a unified manner by a volumetric representation that encodes a distance field of the surface geometry as well as the non-rigid space deformation. Motion tracking is based on a set of extracted sparse color features in combination with a dense depth-based constraint formulation. This enables accurate tracking and drastically reduces drift inherent to standard model-to-depth alignment. We cast finding the optimal deformation of space as a non-linear regularized variational optimization problem by enforcing local smoothness and proximity to the input constraints. The problem is tackled in real-time at the camera's capture rate using a data-parallel flip-flop optimization strategy. Our results demonstrate robust tracking even for fast motion and scenes that lack geometric features.}, }
Endnote
%0 Report %A Innmann, Matthias %A Zollhöfer, Michael %A Nießner, Matthias %A Theobalt, Christian %A Stamminger, Marc %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T VolumeDeform: Real-time Volumetric Non-rigid Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A8E-6 %U http://arxiv.org/abs/1603.08161 %D 2016 %X We present a novel approach for the reconstruction of dynamic geometric shapes using a single hand-held consumer-grade RGB-D sensor at real-time rates. Our method does not require a pre-defined shape template to start with and builds up the scene model from scratch during the scanning process. Geometry and motion are parameterized in a unified manner by a volumetric representation that encodes a distance field of the surface geometry as well as the non-rigid space deformation. Motion tracking is based on a set of extracted sparse color features in combination with a dense depth-based constraint formulation. This enables accurate tracking and drastically reduces drift inherent to standard model-to-depth alignment. We cast finding the optimal deformation of space as a non-linear regularized variational optimization problem by enforcing local smoothness and proximity to the input constraints. The problem is tackled in real-time at the camera's capture rate using a data-parallel flip-flop optimization strategy. Our results demonstrate robust tracking even for fast motion and scenes that lack geometric features. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
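The volumetric distance-field representation mentioned in the VolumeDeform abstract above builds on the standard weighted fusion of truncated signed distances (Curless-Levoy style); the update below is general background on that representation family, not the paper's deformation-aware variant. Each voxel $\mathbf{v}$ blends the new frame's measured distance $d(\mathbf{v})$, with weight $w(\mathbf{v})$, into the accumulated field $D$:

$D'(\mathbf{v}) = \dfrac{W(\mathbf{v})\,D(\mathbf{v}) + w(\mathbf{v})\,d(\mathbf{v})}{W(\mathbf{v}) + w(\mathbf{v})}, \qquad W'(\mathbf{v}) = W(\mathbf{v}) + w(\mathbf{v})$

VolumeDeform's contribution, per the abstract, is to encode the non-rigid space deformation in this same unified volumetric representation.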
Kim, H., Richardt, C., and Theobalt, C. 2016b. Video Depth-From-Defocus. http://arxiv.org/abs/1610.03782.
(arXiv: 1610.03782)
Abstract
Many compelling video post-processing effects, in particular aesthetic focus editing and refocusing effects, are feasible if per-frame depth information is available. Existing computational methods to capture RGB and depth either purposefully modify the optics (coded aperture, light-field imaging), or employ active RGB-D cameras. Since these methods are less practical for users with normal cameras, we present an algorithm to capture all-in-focus RGB-D video of dynamic scenes with an unmodified commodity video camera. Our algorithm turns the often unwanted defocus blur into a valuable signal. The input to our method is a video in which the focus plane is continuously moving back and forth during capture, and thus defocus blur is provoked and strongly visible. This can be achieved by manually turning the focus ring of the lens during recording. The core algorithmic ingredient is a new video-based depth-from-defocus algorithm that computes space-time-coherent depth maps, deblurred all-in-focus video, and the focus distance for each frame. We extensively evaluate our approach, and show that it enables compelling video post-processing effects, such as different types of refocusing.
Export
BibTeX
@online{Kim1610.03782, TITLE = {Video Depth-From-Defocus}, AUTHOR = {Kim, Hyeongwoo and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1610.03782}, EPRINT = {1610.03782}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Many compelling video post-processing effects, in particular aesthetic focus editing and refocusing effects, are feasible if per-frame depth information is available. Existing computational methods to capture RGB and depth either purposefully modify the optics (coded aperture, light-field imaging), or employ active RGB-D cameras. Since these methods are less practical for users with normal cameras, we present an algorithm to capture all-in-focus RGB-D video of dynamic scenes with an unmodified commodity video camera. Our algorithm turns the often unwanted defocus blur into a valuable signal. The input to our method is a video in which the focus plane is continuously moving back and forth during capture, and thus defocus blur is provoked and strongly visible. This can be achieved by manually turning the focus ring of the lens during recording. The core algorithmic ingredient is a new video-based depth-from-defocus algorithm that computes space-time-coherent depth maps, deblurred all-in-focus video, and the focus distance for each frame. We extensively evaluate our approach, and show that it enables compelling video post-processing effects, such as different types of refocusing.}, }
Endnote
%0 Report %A Kim, Hyeongwoo %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society %T Video Depth-From-Defocus : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-B02D-7 %U http://arxiv.org/abs/1610.03782 %D 2016 %X Many compelling video post-processing effects, in particular aesthetic focus editing and refocusing effects, are feasible if per-frame depth information is available. Existing computational methods to capture RGB and depth either purposefully modify the optics (coded aperture, light-field imaging), or employ active RGB-D cameras. Since these methods are less practical for users with normal cameras, we present an algorithm to capture all-in-focus RGB-D video of dynamic scenes with an unmodified commodity video camera. Our algorithm turns the often unwanted defocus blur into a valuable signal. The input to our method is a video in which the focus plane is continuously moving back and forth during capture, and thus defocus blur is provoked and strongly visible. This can be achieved by manually turning the focus ring of the lens during recording. The core algorithmic ingredient is a new video-based depth-from-defocus algorithm that computes space-time-coherent depth maps, deblurred all-in-focus video, and the focus distance for each frame. We extensively evaluate our approach, and show that it enables compelling video post-processing effects, such as different types of refocusing. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
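The depth signal exploited in the Video Depth-From-Defocus abstract above is the depth dependence of defocus blur. Under the standard thin-lens model (general optics background, not the paper's specific image-formation calibration), a lens of focal length $f$ and aperture diameter $A$ focused at distance $S_1$ images a point at depth $S_2$ as a circle of confusion of diameter

$c \;=\; A\,\dfrac{\lvert S_2 - S_1 \rvert}{S_2}\,\dfrac{f}{S_1 - f}$

Blur thus grows with distance from the focal plane, so continuously sweeping $S_1$ by turning the focus ring modulates the blur of every scene point over time, which is the signal the per-frame depth-from-defocus estimation inverts.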
Kim, K.I., Tompkin, J., Pfister, H., and Theobalt, C. 2016c. Local High-order Regularization on Data Manifolds. http://arxiv.org/abs/1602.03805.
(arXiv: 1602.03805)
Abstract
The common graph Laplacian regularizer is well-established in semi-supervised learning and spectral dimensionality reduction. However, as a first-order regularizer, it can lead to degenerate functions in high-dimensional manifolds. The iterated graph Laplacian enables high-order regularization, but it has a high computational complexity and so cannot be applied to large problems. We introduce a new regularizer which is globally high order and so does not suffer from the degeneracy of the graph Laplacian regularizer, but is also sparse for efficient computation in semi-supervised learning applications. We reduce computational complexity by building a local first-order approximation of the manifold as a surrogate geometry, and construct our high-order regularizer based on local derivative evaluations therein. Experiments on human body shape and pose analysis demonstrate the effectiveness and efficiency of our method.
Export
BibTeX
@online{Kim1602.03805, TITLE = {Local High-order Regularization on Data Manifolds}, AUTHOR = {Kim, Kwang In and Tompkin, James and Pfister, Hanspeter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.03805}, EPRINT = {1602.03805}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {The common graph Laplacian regularizer is well-established in semi-supervised learning and spectral dimensionality reduction. However, as a first-order regularizer, it can lead to degenerate functions in high-dimensional manifolds. The iterated graph Laplacian enables high-order regularization, but it has a high computational complexity and so cannot be applied to large problems. We introduce a new regularizer which is globally high order and so does not suffer from the degeneracy of the graph Laplacian regularizer, but is also sparse for efficient computation in semi-supervised learning applications. We reduce computational complexity by building a local first-order approximation of the manifold as a surrogate geometry, and construct our high-order regularizer based on local derivative evaluations therein. Experiments on human body shape and pose analysis demonstrate the effectiveness and efficiency of our method.}, }
Endnote
%0 Report %A Kim, Kwang In %A Tompkin, James %A Pfister, Hanspeter %A Theobalt, Christian %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Local High-order Regularization on Data Manifolds : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2428-A %U http://arxiv.org/abs/1602.03805 %D 2016 %X The common graph Laplacian regularizer is well-established in semi-supervised learning and spectral dimensionality reduction. However, as a first-order regularizer, it can lead to degenerate functions in high-dimensional manifolds. The iterated graph Laplacian enables high-order regularization, but it has a high computational complexity and so cannot be applied to large problems. We introduce a new regularizer which is globally high order and so does not suffer from the degeneracy of the graph Laplacian regularizer, but is also sparse for efficient computation in semi-supervised learning applications. We reduce computational complexity by building a local first-order approximation of the manifold as a surrogate geometry, and construct our high-order regularizer based on local derivative evaluations therein. Experiments on human body shape and pose analysis demonstrate the effectiveness and efficiency of our method. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
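The first-order regularizer whose degeneracy the abstract above addresses is the standard graph Laplacian quadratic form (a textbook definition, not notation taken from the paper). For edge weights $w_{ij}$ and function values $f_i$ on the graph nodes:

$\mathbf{f}^{\top} L\,\mathbf{f} \;=\; \tfrac{1}{2} \sum_{i,j} w_{ij}\,(f_i - f_j)^2, \qquad L = D - W, \quad D_{ii} = \sum_j w_{ij}$

Because this penalizes only first differences, minimizers on high-dimensional manifolds can collapse toward near-constant (degenerate) functions; the iterated form $\mathbf{f}^{\top} L^{m}\,\mathbf{f}$ raises the regularization order but densifies the operator, which is the computational cost the paper's sparse local construction avoids.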
Kim, K.I., Tompkin, J., Pfister, H., and Theobalt, C. 2016d. Context-guided Diffusion for Label Propagation on Graphs. http://arxiv.org/abs/1602.06439.
(arXiv: 1602.06439)
Abstract
Existing approaches for diffusion on graphs, e.g., for label propagation, are mainly focused on isotropic diffusion, which is induced by the commonly used graph Laplacian regularizer. Inspired by the success of diffusivity tensors for anisotropic diffusion in image processing, we present anisotropic diffusion on graphs and the corresponding label propagation algorithm. We develop positive definite diffusivity operators on the vector bundles of Riemannian manifolds, and discretize them to diffusivity operators on graphs. This enables us to easily define new robust diffusivity operators which significantly improve semi-supervised learning performance over existing diffusion algorithms.
Export
BibTeX
@online{KimarXiv1602.06439, TITLE = {Context-guided Diffusion for Label Propagation on Graphs}, AUTHOR = {Kim, Kwang In and Tompkin, James and Pfister, Hanspeter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.06439}, EPRINT = {1602.06439}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Existing approaches for diffusion on graphs, e.g., for label propagation, are mainly focused on isotropic diffusion, which is induced by the commonly-used graph Laplacian regularizer. Inspired by the success of diffusivity tensors for anisotropic diffusion in image processing, we presents anisotropic diffusion on graphs and the corresponding label propagation algorithm. We develop positive definite diffusivity operators on the vector bundles of Riemannian manifolds, and discretize them to diffusivity operators on graphs. This enables us to easily define new robust diffusivity operators which significantly improve semi-supervised learning performance over existing diffusion algorithms.}, }
Endnote
%0 Report %A Kim, Kwang In %A Tompkin, James %A Pfister, Hanspeter %A Theobalt, Christian %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Context-guided Diffusion for Label Propagation on Graphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A84-9 %U http://arxiv.org/abs/1602.06439 %D 2016 %X Existing approaches for diffusion on graphs, e.g., for label propagation, are mainly focused on isotropic diffusion, which is induced by the commonly-used graph Laplacian regularizer. Inspired by the success of diffusivity tensors for anisotropic diffusion in image processing, we presents anisotropic diffusion on graphs and the corresponding label propagation algorithm. We develop positive definite diffusivity operators on the vector bundles of Riemannian manifolds, and discretize them to diffusivity operators on graphs. This enables us to easily define new robust diffusivity operators which significantly improve semi-supervised learning performance over existing diffusion algorithms. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
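To make the contrast drawn in this abstract concrete, the sketch below implements plain isotropic label propagation by explicit graph diffusion; the paper's context-guided, anisotropic diffusivity would replace the fixed edge weights. The diffusivity stub is an invented placeholder, not the paper's operator.

import numpy as np

def propagate_labels(W, y, labeled, tau=0.1, iters=200):
    # W: (n, n) symmetric nonnegative edge weights; y: (n, k) rows are one-hot
    # on labeled points. tau must be < 2 / lambda_max(L) for a stable scheme.
    L = np.diag(W.sum(axis=1)) - W        # graph Laplacian induced by W
    f = y.astype(float).copy()
    for _ in range(iters):
        f = f - tau * (L @ f)             # forward-Euler isotropic diffusion
        f[labeled] = y[labeled]           # clamp the known labels
    return f.argmax(axis=1)

def context_diffusivity(W, context):
    # Placeholder for a context-guided reweighting: shrink edge weights across
    # suspected label boundaries, as diffusivity tensors do in image processing.
    return W * np.exp(-context)           # illustrative only

# toy usage: 3-node chain; the middle node takes the label of its more
# strongly connected neighbour
W = np.array([[0, 2, 0], [2, 0, 1], [0, 1, 0]], float)
y = np.array([[1, 0], [0, 0], [0, 1]], float)
print(propagate_labels(W, y, np.array([True, False, True])))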
Kim, K.I., Tompkin, J., Pfister, H., and Theobalt, C. 2016e. Semi-supervised Learning with Explicit Relationship Regularization. http://arxiv.org/abs/1602.03808.
(arXiv: 1602.03808)
Abstract
In many learning tasks, the structure of the target space of a function holds rich information about the relationships between evaluations of functions on different data points. Existing approaches attempt to exploit this relationship information implicitly by enforcing smoothness on function evaluations only. However, what happens if we explicitly regularize the relationships between function evaluations? Inspired by homophily, we regularize based on a smooth relationship function, either defined from the data or with labels. In experiments, we demonstrate that this significantly improves the performance of state-of-the-art algorithms in semi-supervised classification and in spectral data embedding for constrained clustering and dimensionality reduction.
Export
BibTeX
@online{KimarXiv1602.03808, TITLE = {Semi-supervised Learning with Explicit Relationship Regularization}, AUTHOR = {Kim, Kwang In and Tompkin, James and Pfister, Hanspeter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.03808}, EPRINT = {1602.03808}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {In many learning tasks, the structure of the target space of a function holds rich information about the relationships between evaluations of functions on different data points. Existing approaches attempt to exploit this relationship information implicitly by enforcing smoothness on function evaluations only. However, what happens if we explicitly regularize the relationships between function evaluations? Inspired by homophily, we regularize based on a smooth relationship function, either defined from the data or with labels. In experiments, we demonstrate that this significantly improves the performance of state-of-the-art algorithms in semi-supervised classification and in spectral data embedding for constrained clustering and dimensionality reduction.}, }
Endnote
%0 Report %A Kim, Kwang In %A Tompkin, James %A Pfister, Hanspeter %A Theobalt, Christian %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Semi-supervised Learning with Explicit Relationship Regularization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A62-6 %U http://arxiv.org/abs/1602.03808 %D 2016 %X In many learning tasks, the structure of the target space of a function holds rich information about the relationships between evaluations of functions on different data points. Existing approaches attempt to exploit this relationship information implicitly by enforcing smoothness on function evaluations only. However, what happens if we explicitly regularize the relationships between function evaluations? Inspired by homophily, we regularize based on a smooth relationship function, either defined from the data or with labels. In experiments, we demonstrate that this significantly improves the performance of state-of-the-art algorithms in semi-supervised classification and in spectral data embedding for constrained clustering and dimensionality reduction. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Learning, cs.LG
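One concrete, simplified reading of "explicitly regularizing relationships", sketched under assumptions that may differ from the paper's formulation: penalize deviations of the pairwise differences f_i - f_j from a target relationship function r_ij. With symmetric weights and an antisymmetric R, this least-squares objective has a closed-form solution.

import numpy as np

def relationship_ssl(W, R, y, labeled, lam=1.0):
    # W: (n, n) symmetric nonnegative weights; R: antisymmetric targets with
    # R[i, j] = -R[j, i]. Minimizes
    #   sum_labeled (f_i - y_i)^2 + (lam/2) * sum_ij w_ij (f_i - f_j - r_ij)^2,
    # whose normal equations are (S + lam*L) f = S y + lam * b.
    # Each connected component needs a labeled point for a unique solution.
    L = np.diag(W.sum(axis=1)) - W
    b = (W * R).sum(axis=1)                  # b_k = sum_j w_kj r_kj
    S = np.diag(labeled.astype(float))
    return np.linalg.solve(S + lam * L, S @ y + lam * b)

# toy usage: ask f to rise by ~1 along each edge of a 3-node chain
W = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], float)
R = np.array([[0, -1, 0], [1, 0, -1], [0, 1, 0]], float)   # targets for f_i - f_j
print(relationship_ssl(W, R, y=np.zeros(3), labeled=np.array([True, False, False])))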
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2016. Deep Shading: Convolutional Neural Networks for Screen-Space Shading. http://arxiv.org/abs/1603.06078.
(arXiv: 1603.06078)
Abstract
In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals, or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals, or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper, we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed, learned from example images rather than programmed by human experts.
Export
BibTeX
@online{NalbacharXiv2016, TITLE = {Deep Shading: Convolutional Neural Networks for Screen-Space Shading}, AUTHOR = {Nalbach, Oliver and Arabadzhiyska, Elena and Mehta, Dushyant and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1603.06078}, EPRINT = {1603.06078}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images.}, }
Endnote
%0 Report %A Nalbach, Oliver %A Arabadzhiyska, Elena %A Mehta, Dushyant %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Shading: Convolutional Neural Networks for Screen-Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0174-4 %U http://arxiv.org/abs/1603.06078 %D 2016 %X In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images. %K Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
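A hedged PyTorch sketch of the input/output convention described in this abstract: a CNN maps per-pixel scene attributes (a deferred-shading G-buffer) to RGB appearance. The real Deep Shading network is a deeper U-Net-style architecture trained on rendered data; this tiny stand-in, with invented channel counts, only illustrates the setup.

import torch
import torch.nn as nn

class TinyShadingNet(nn.Module):
    def __init__(self, attrib_channels=9):    # e.g. normal(3) + position(3) + albedo(3)
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(attrib_channels, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 3, 3, padding=1),    # RGB out
        )

    def forward(self, gbuffer):                # (B, C, H, W) -> (B, 3, H, W)
        return self.net(gbuffer)

# training-step sketch: regress rendered ground truth with an L2 loss
model = TinyShadingNet()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
gbuf, target = torch.randn(4, 9, 64, 64), torch.rand(4, 3, 64, 64)
loss = nn.functional.mse_loss(model(gbuf), target)
opt.zero_grad(); loss.backward(); opt.step()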
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016c. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. http://arxiv.org/abs/1607.08659.
(arXiv: 1607.08659)
Abstract
Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimensions and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and an analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as a Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.
Export
BibTeX
@online{Rhodin2016arXiv1607.08659, TITLE = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1607.08659}, EPRINT = {1607.08659}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation -- skeleton, volumetric shape, appearance, and optionally a body surface -- and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.}, }
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9883-C %U http://arxiv.org/abs/1607.08659 %D 2016 %X Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
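A minimal sketch of the sum-of-Gaussians volumetric representation this line of work builds on: body shape as a smooth density that is high inside the volume and falls off at the contour. Blob centers, scales, and weights below are invented placeholders, not the paper's statistical body model.

import numpy as np

def gaussian_density(x, mu, sigma, w):
    # Density of a sum-of-isotropic-Gaussians body model at query points.
    # x: (m, 3); mu: (k, 3) blob centers; sigma, w: (k,) scales and weights.
    d2 = ((x[:, None, :] - mu[None, :, :]) ** 2).sum(-1)       # (m, k)
    return (w * np.exp(-d2 / (2.0 * sigma**2))).sum(axis=1)    # (m,)

# two-blob "limb": dense between the blobs, near zero far away
mu = np.array([[0.0, 0.0, 0.0], [0.0, 0.3, 0.0]])
sigma, w = np.array([0.1, 0.1]), np.array([1.0, 1.0])
queries = np.array([[0.0, 0.15, 0.0], [1.0, 0.0, 0.0]])
print(gaussian_density(queries, mu, sigma, w))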
Rhodin, H., Robertini, N., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016d. A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation. http://arxiv.org/abs/1602.03725.
(arXiv: 1602.03725)
Abstract
Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient-to-optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution, which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.
Export
BibTeX
@online{Rhodin2016arXiv1602.03725, TITLE = {A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.03725}, EPRINT = {1602.03725}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.}, }
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9875-C %U http://arxiv.org/abs/1602.03725 %D 2016 %X Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
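The smoothness claim at the heart of this abstract can be illustrated in a few lines: along a ray through an isotropic Gaussian density, the absorption integral has a closed form in the error function, so the transmittance, and hence visibility, is smooth and differentiable in all parameters. The single-blob sketch below uses invented constants and is not the paper's full image formation model.

import numpy as np
from scipy.special import erf

def transmittance(o, d, s, mu, sigma, rho):
    # Light surviving from t=0 to t=s along the unit-direction ray o + t*d
    # through one absorbing Gaussian blob (center mu, scale sigma, density rho).
    tc = np.dot(mu - o, d)                      # closest approach along the ray
    b2 = np.dot(mu - o, mu - o) - tc**2         # squared perpendicular distance
    # integral_0^s rho * exp(-((t - tc)^2 + b2) / (2 sigma^2)) dt, in closed form:
    amp = rho * np.exp(-b2 / (2.0 * sigma**2)) * sigma * np.sqrt(np.pi / 2.0)
    integral = amp * (erf((s - tc) / (np.sqrt(2.0) * sigma))
                      - erf(-tc / (np.sqrt(2.0) * sigma)))
    return np.exp(-integral)                    # Beer-Lambert absorption

o, d = np.zeros(3), np.array([0.0, 0.0, 1.0])
print(transmittance(o, d, s=5.0, mu=np.array([0.05, 0.0, 2.0]), sigma=0.2, rho=8.0))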
Richardt, C., Kim, H., Valgaerts, L., and Theobalt, C. 2016b. Dense Wide-Baseline Scene Flow From Two Handheld Video Cameras. http://arxiv.org/abs/1609.05115.
(arXiv: 1609.05115)
Abstract
We propose a new technique for computing dense scene flow from two handheld videos with wide camera baselines and different photometric properties due to different sensors or camera settings like exposure and white balance. Our technique innovates in two ways over existing methods: (1) it supports independently moving cameras, and (2) it computes dense scene flow for wide-baseline scenarios. We achieve this by combining state-of-the-art wide-baseline correspondence finding with a variational scene flow formulation. First, we compute dense, wide-baseline correspondences using DAISY descriptors for matching between cameras and over time. We then detect and replace occluded pixels in the correspondence fields using a novel edge-preserving Laplacian correspondence completion technique. We finally refine the computed correspondence fields in a variational scene flow formulation. We show dense scene flow results computed from challenging datasets with independently moving, handheld cameras of varying camera settings.
Export
BibTeX
@online{RichardtarXiv1609.05115, TITLE = {Dense Wide-Baseline Scene Flow From Two Handheld Video Cameras}, AUTHOR = {Richardt, Christian and Kim, Hyeongwoo and Valgaerts, Levi and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1609.05115}, EPRINT = {1609.05115}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We propose a new technique for computing dense scene flow from two handheld videos with wide camera baselines and different photometric properties due to different sensors or camera settings like exposure and white balance. Our technique innovates in two ways over existing methods: (1) it supports independently moving cameras, and (2) it computes dense scene flow for wide-baseline scenarios.We achieve this by combining state-of-the-art wide-baseline correspondence finding with a variational scene flow formulation. First, we compute dense, wide-baseline correspondences using DAISY descriptors for matching between cameras and over time. We then detect and replace occluded pixels in the correspondence fields using a novel edge-preserving Laplacian correspondence completion technique. We finally refine the computed correspondence fields in a variational scene flow formulation. We show dense scene flow results computed from challenging datasets with independently moving, handheld cameras of varying camera settings.}, }
Endnote
%0 Report %A Richardt, Christian %A Kim, Hyeongwoo %A Valgaerts, Levi %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dense Wide-Baseline Scene Flow From Two Handheld Video Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9AAF-D %U http://arxiv.org/abs/1609.05115 %D 2016 %X We propose a new technique for computing dense scene flow from two handheld videos with wide camera baselines and different photometric properties due to different sensors or camera settings like exposure and white balance. Our technique innovates in two ways over existing methods: (1) it supports independently moving cameras, and (2) it computes dense scene flow for wide-baseline scenarios.We achieve this by combining state-of-the-art wide-baseline correspondence finding with a variational scene flow formulation. First, we compute dense, wide-baseline correspondences using DAISY descriptors for matching between cameras and over time. We then detect and replace occluded pixels in the correspondence fields using a novel edge-preserving Laplacian correspondence completion technique. We finally refine the computed correspondence fields in a variational scene flow formulation. We show dense scene flow results computed from challenging datasets with independently moving, handheld cameras of varying camera settings. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
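A hedged sketch of the first stage only: dense DAISY descriptors (via scikit-image) matched by brute-force nearest neighbour between two views. The paper's occlusion-aware Laplacian completion and variational refinement are not reproduced here, and all parameters are illustrative.

import numpy as np
from skimage.feature import daisy

def dense_daisy_matches(img_a, img_b, step=8):
    # img_a, img_b: float grayscale images of equal size. Returns, for each
    # descriptor location in img_a, the index of its best match in img_b.
    da = daisy(img_a, step=step, radius=15, rings=2, histograms=6, orientations=4)
    db = daisy(img_b, step=step, radius=15, rings=2, histograms=6, orientations=4)
    fa, fb = da.reshape(-1, da.shape[-1]), db.reshape(-1, db.shape[-1])
    d2 = ((fa[:, None, :] - fb[None, :, :]) ** 2).sum(-1)   # brute-force NN
    return d2.argmin(axis=1), da.shape[:2]

rng = np.random.default_rng(0)
a = rng.random((96, 96))
b = np.roll(a, 4, axis=1)            # synthetic horizontal camera shift
matches, grid_shape = dense_daisy_matches(a, b)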
Robertini, N., de Aguiar, E., Helten, T., and Theobalt, C. 2016b. Efficient Multi-view Performance Capture of Fine-Scale Surface Detail. http://arxiv.org/abs/1602.02023.
(arXiv: 1602.02023)
Abstract
We present a new effective way for performance capture of deforming meshes with fine-scale time-varying surface detail from multi-view video. Our method builds on coarse 4D surface reconstructions, as obtained with commonly used template-based methods. As these only capture models of coarse-to-medium scale detail, fine-scale deformation detail is often recovered in a second pass using stereo constraints, features, or shading-based refinement. In this paper, we propose a new effective and stable solution to this second step. Our framework creates an implicit representation of the deformable mesh using a dense collection of 3D Gaussian functions on the surface, and a set of 2D Gaussians for the images. The fine-scale deformation of all mesh vertices that maximizes photo-consistency can be efficiently found by densely optimizing a new model-to-image consistency energy on all vertex positions. A principal advantage is that our problem formulation yields a smooth closed-form energy with implicit occlusion handling and analytic derivatives. Error-prone correspondence finding and discrete sampling of surface displacement values are also not needed. We show several reconstructions of human subjects wearing loose clothing, and we qualitatively and quantitatively show that we robustly capture more detail than related methods.
Export
BibTeX
@online{Robertini_arXiv2016, TITLE = {Efficient Multi-view Performance Capture of Fine-Scale Surface Detail}, AUTHOR = {Robertini, Nadia and de Aguiar, Edilson and Helten, Thomas and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.02023}, DOI = {10.1109/3DV.2014.46}, EPRINT = {1602.02023}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present a new effective way for performance capture of deforming meshes with fine-scale time-varying surface detail from multi-view video. Our method builds up on coarse 4D surface reconstructions, as obtained with commonly used template-based methods. As they only capture models of coarse-to-medium scale detail, fine scale deformation detail is often done in a second pass by using stereo constraints, features, or shading-based refinement. In this paper, we propose a new effective and stable solution to this second step. Our framework creates an implicit representation of the deformable mesh using a dense collection of 3D Gaussian functions on the surface, and a set of 2D Gaussians for the images. The fine scale deformation of all mesh vertices that maximizes photo-consistency can be efficiently found by densely optimizing a new model-to-image consistency energy on all vertex positions. A principal advantage is that our problem formulation yields a smooth closed form energy with implicit occlusion handling and analytic derivatives. Error-prone correspondence finding, or discrete sampling of surface displacement values are also not needed. We show several reconstructions of human subjects wearing loose clothing, and we qualitatively and quantitatively show that we robustly capture more detail than related methods.}, }
Endnote
%0 Report %A Robertini, Nadia %A de Aguiar, Edilson %A Helten, Thomas %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Multi-view Performance Capture of Fine-Scale Surface Detail : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-07CD-A %R 10.1109/3DV.2014.46 %U http://arxiv.org/abs/1602.02023 %D 2016 %X We present a new effective way for performance capture of deforming meshes with fine-scale time-varying surface detail from multi-view video. Our method builds up on coarse 4D surface reconstructions, as obtained with commonly used template-based methods. As they only capture models of coarse-to-medium scale detail, fine scale deformation detail is often done in a second pass by using stereo constraints, features, or shading-based refinement. In this paper, we propose a new effective and stable solution to this second step. Our framework creates an implicit representation of the deformable mesh using a dense collection of 3D Gaussian functions on the surface, and a set of 2D Gaussians for the images. The fine scale deformation of all mesh vertices that maximizes photo-consistency can be efficiently found by densely optimizing a new model-to-image consistency energy on all vertex positions. A principal advantage is that our problem formulation yields a smooth closed form energy with implicit occlusion handling and analytic derivatives. Error-prone correspondence finding, or discrete sampling of surface displacement values are also not needed. We show several reconstructions of human subjects wearing loose clothing, and we qualitatively and quantitatively show that we robustly capture more detail than related methods. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
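An assumed, simplified illustration of a model-to-image Gaussian consistency term in the spirit of this abstract: an isotropic 3D surface Gaussian is projected to a 2D image-plane Gaussian under a pinhole camera, and its agreement with a 2D image Gaussian is scored by their closed-form overlap integral. All constants are invented.

import numpy as np

def project_gaussian(mu3, sigma3, f=500.0):
    # Pinhole projection of an isotropic 3D Gaussian (camera at the origin,
    # looking down +z): the mean projects perspectively, the scale shrinks
    # with depth (an isotropic approximation of the projected covariance).
    p = f * mu3[:2] / mu3[2]
    return p, f * sigma3 / mu3[2]

def overlap(p, s1, q, s2):
    # Closed-form integral of the product of two isotropic 2D Gaussians.
    v = s1**2 + s2**2
    return np.exp(-((p - q) ** 2).sum() / (2.0 * v)) / (2.0 * np.pi * v)

p, s = project_gaussian(np.array([0.1, 0.0, 2.0]), 0.02)
print(overlap(p, s, q=np.array([25.0, 0.0]), s2=6.0))   # larger = more consistent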
Sridhar, S., Rhodin, H., Seidel, H.-P., Oulasvirta, A., and Theobalt, C. 2016b. Real-Time Hand Tracking Using a Sum of Anisotropic Gaussians Model. http://arxiv.org/abs/1602.03860.
(arXiv: 1602.03860)
Abstract
Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on a Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable, making fast gradient-based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from the literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets.
Export
BibTeX
@online{Sridhar2016arXiv1602.03860, TITLE = {Real-Time Hand Tracking Using a Sum of Anisotropic {Gaussians} Model}, AUTHOR = {Sridhar, Srinath and Rhodin, Helge and Seidel, Hans-Peter and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.03860}, EPRINT = {1602.03860}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real-time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable making fast gradient based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets.}, }
Endnote
%0 Report %A Sridhar, Srinath %A Rhodin, Helge %A Seidel, Hans-Peter %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Hand Tracking Using a Sum of Anisotropic Gaussians Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9878-6 %U http://arxiv.org/abs/1602.03860 %D 2016 %X Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real-time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable making fast gradient based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
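A hedged sketch of the mathematical convenience behind Sum-of-Gaussians representations: the overlap of a model mixture with an image mixture is a closed-form double sum that is smooth in the model parameters, and its gradient is equally closed form, which is what makes fast gradient-based pose optimization possible. The 2D toy below is illustrative, not the paper's energy.

import numpy as np

def sag_energy_and_grad(mu, sm, wm, nu, si, wi):
    # mu: (M, 2) model means; sm, wm: (M,) scales/weights.
    # nu: (N, 2) image means;  si, wi: (N,) scales/weights.
    # E = sum_ij wm_i wi_j N(mu_i - nu_j; 0, (sm_i^2 + si_j^2) I)
    d = mu[:, None, :] - nu[None, :, :]                  # (M, N, 2)
    v = sm[:, None] ** 2 + si[None, :] ** 2              # (M, N)
    g = np.exp(-(d ** 2).sum(-1) / (2.0 * v)) / (2.0 * np.pi * v)
    E = (wm[:, None] * wi[None, :] * g).sum()
    # dE/dmu_i = -sum_j wm_i wi_j g_ij (mu_i - nu_j) / v_ij  (analytic gradient)
    grad = -((wm[:, None] * wi[None, :] * g / v)[..., None] * d).sum(axis=1)
    return E, grad

mu, sm, wm = np.array([[0.0, 0.0], [1.0, 0.0]]), np.array([0.3, 0.3]), np.ones(2)
nu, si, wi = np.array([[0.1, 0.0]]), np.array([0.3]), np.ones(1)
E, dE = sag_energy_and_grad(mu, sm, wm, nu, si, wi)   # dE points toward overlap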
Sridhar, S., Müller, F., Oulasvirta, A., and Theobalt, C. 2016c. Fast and Robust Hand Tracking Using Detection-Guided Optimization. http://arxiv.org/abs/1602.04124.
(arXiv: 1602.04124)
Abstract
Markerless tracking of hands and fingers is a promising enabler for human-computer interaction. However, adoption has been limited because of tracking inaccuracies, incomplete coverage of motions, low framerate, complex camera setups, and high computational requirements. In this paper, we present a fast method for accurately tracking rapid and complex articulations of the hand using a single depth camera. Our algorithm uses a novel detection-guided optimization strategy that increases the robustness and speed of pose estimation. In the detection step, a randomized decision forest classifies pixels into parts of the hand. In the optimization step, a novel objective function combines the detected part labels and a Gaussian mixture representation of the depth to estimate a pose that best fits the depth. Our approach needs comparably fewer computational resources, which makes it extremely fast (50 fps without GPU support). The approach also supports varying static, or moving, camera-to-scene arrangements. We show the benefits of our method by evaluating on public datasets and comparing against previous work.
Export
BibTeX
@online{SridhararXiv1602.04124, TITLE = {Fast and Robust Hand Tracking Using Detection-Guided Optimization}, AUTHOR = {Sridhar, Srinath and M{\"u}ller, Franziska and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.04124}, EPRINT = {1602.04124}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Markerless tracking of hands and fingers is a promising enabler for human-computer interaction. However, adoption has been limited because of tracking inaccuracies, incomplete coverage of motions, low framerate, complex camera setups, and high computational requirements. In this paper, we present a fast method for accurately tracking rapid and complex articulations of the hand using a single depth camera. Our algorithm uses a novel detection-guided optimization strategy that increases the robustness and speed of pose estimation. In the detection step, a randomized decision forest classifies pixels into parts of the hand. In the optimization step, a novel objective function combines the detected part labels and a Gaussian mixture representation of the depth to estimate a pose that best fits the depth. Our approach needs comparably less computational resources which makes it extremely fast (50 fps without GPU support). The approach also supports varying static, or moving, camera-to-scene arrangements. We show the benefits of our method by evaluating on public datasets and comparing against previous work.}, }
Endnote
%0 Report %A Sridhar, Srinath %A Müller, Franziska %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast and Robust Hand Tracking Using Detection-Guided Optimization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9A76-9 %U http://arxiv.org/abs/1602.04124 %D 2016 %X Markerless tracking of hands and fingers is a promising enabler for human-computer interaction. However, adoption has been limited because of tracking inaccuracies, incomplete coverage of motions, low framerate, complex camera setups, and high computational requirements. In this paper, we present a fast method for accurately tracking rapid and complex articulations of the hand using a single depth camera. Our algorithm uses a novel detection-guided optimization strategy that increases the robustness and speed of pose estimation. In the detection step, a randomized decision forest classifies pixels into parts of the hand. In the optimization step, a novel objective function combines the detected part labels and a Gaussian mixture representation of the depth to estimate a pose that best fits the depth. Our approach needs comparably less computational resources which makes it extremely fast (50 fps without GPU support). The approach also supports varying static, or moving, camera-to-scene arrangements. We show the benefits of our method by evaluating on public datasets and comparing against previous work. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
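A minimal sketch of the detection step only: a random forest classifies per-pixel features into hand parts, and its class posteriors then guide the subsequent pose optimization as soft labels. Features and labels below are synthetic stand-ins (in the spirit of depth-difference features), not the paper's training data.

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(1)
n_pixels, n_features, n_parts = 2000, 32, 6
X = rng.normal(size=(n_pixels, n_features))   # per-pixel depth-difference features
y = rng.integers(0, n_parts, size=n_pixels)   # hand-part labels (synthetic)

forest = RandomForestClassifier(n_estimators=20, max_depth=12).fit(X, y)
part_posteriors = forest.predict_proba(X[:5])  # soft part labels that feed the
                                               # detection-guided optimization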
Thies, J., Zollhöfer, M., Stamminger, M., Theobalt, C., and Nießner, M. 2016d. FaceVR: Real-Time Facial Reenactment and Eye Gaze Control in Virtual Reality. http://arxiv.org/abs/1610.03151.
(arXiv: 1610.03151)
Abstract
We introduce FaceVR, a novel method for gaze-aware facial reenactment in the Virtual Reality (VR) context. The key component of FaceVR is a robust algorithm to perform real-time facial motion capture of an actor who is wearing a head-mounted display (HMD), as well as a new data-driven approach for eye tracking from monocular videos. In addition to these face reconstruction components, FaceVR incorporates photo-realistic re-rendering in real time, thus allowing artificial modifications of face and eye appearances. For instance, we can alter facial expressions, change gaze directions, or remove the VR goggles in realistic re-renderings. In a live setup with a source and a target actor, we apply these newly introduced algorithmic components. We assume that the source actor is wearing a VR device, and we capture his facial expressions and eye movements in real time. For the target video, we mimic a similar tracking process; however, we use the source input to drive the animations of the target video, thus enabling gaze-aware facial reenactment. To render the modified target video on a stereo display, we augment our capture and reconstruction process with stereo data. In the end, FaceVR produces compelling results for a variety of applications, such as gaze-aware facial reenactment, reenactment in virtual reality, removal of VR goggles, and re-targeting of somebody's gaze direction in a video conferencing call.
Export
BibTeX
@online{thies16FaceVR, TITLE = {{FaceVR}: Real-Time Facial Reenactment and Eye Gaze Control in Virtual Reality}, AUTHOR = {Thies, Justus and Zollh{\"o}fer, Michael and Stamminger, Marc and Theobalt, Christian and Nie{\ss}ner, Matthias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1610.03151}, EPRINT = {1610.03151}, EPRINTTYPE = {arXiv}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We introduce FaceVR, a novel method for gaze-aware facial reenactment in the Virtual Reality (VR) context. The key component of FaceVR is a robust algorithm to perform real-time facial motion capture of an actor who is wearing a head-mounted display (HMD), as well as a new data-driven approach for eye tracking from monocular videos. In addition to these face reconstruction components, FaceVR incorporates photo-realistic re-rendering in real time, thus allowing artificial modifications of face and eye appearances. For instance, we can alter facial expressions, change gaze directions, or remove the VR goggles in realistic re-renderings. In a live setup with a source and a target actor, we apply these newly-introduced algorithmic components. We assume that the source actor is wearing a VR device, and we capture his facial expressions and eye movement in real-time. For the target video, we mimic a similar tracking process; however, we use the source input to drive the animations of the target video, thus enabling gaze-aware facial reenactment. To render the modified target video on a stereo display, we augment our capture and reconstruction process with stereo data. In the end, FaceVR produces compelling results for a variety of applications, such as gaze-aware facial reenactment, reenactment in virtual reality, removal of VR goggles, and re-targeting of somebody's gaze direction in a video conferencing call.}, }
Endnote
%0 Report %A Thies, Justus %A Zollhöfer, Michael %A Stamminger, Marc %A Theobalt, Christian %A Nießner, Matthias %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T FaceVR: Real-Time Facial Reenactment and Eye Gaze Control in Virtual Reality : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A40-2 %U http://arxiv.org/abs/1610.03151 %D 2016 %X We introduce FaceVR, a novel method for gaze-aware facial reenactment in the Virtual Reality (VR) context. The key component of FaceVR is a robust algorithm to perform real-time facial motion capture of an actor who is wearing a head-mounted display (HMD), as well as a new data-driven approach for eye tracking from monocular videos. In addition to these face reconstruction components, FaceVR incorporates photo-realistic re-rendering in real time, thus allowing artificial modifications of face and eye appearances. For instance, we can alter facial expressions, change gaze directions, or remove the VR goggles in realistic re-renderings. In a live setup with a source and a target actor, we apply these newly-introduced algorithmic components. We assume that the source actor is wearing a VR device, and we capture his facial expressions and eye movement in real-time. For the target video, we mimic a similar tracking process; however, we use the source input to drive the animations of the target video, thus enabling gaze-aware facial reenactment. To render the modified target video on a stereo display, we augment our capture and reconstruction process with stereo data. In the end, FaceVR produces compelling results for a variety of applications, such as gaze-aware facial reenactment, reenactment in virtual reality, removal of VR goggles, and re-targeting of somebody's gaze direction in a video conferencing call. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Report
Sridhar, S., Müller, F., Zollhöfer, M., Casas, D., Oulasvirta, A., and Theobalt, C. 2016d. Real-time Joint Tracking of a Hand Manipulating an Object from RGB-D Input. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Real-time simultaneous tracking of hands manipulating and interacting with external objects has many potential applications in augmented reality, tangible computing, and wearable computing. However, due to difficult occlusions, fast motions, and uniform hand appearance, jointly tracking hand and object pose is more challenging than tracking either of the two separately. Many previous approaches resort to complex multi-camera setups to remedy the occlusion problem and often employ expensive segmentation and optimization steps, which make real-time tracking impossible. In this paper, we propose a real-time solution that uses a single commodity RGB-D camera. The core of our approach is a 3D articulated Gaussian mixture alignment strategy tailored to hand-object tracking that allows fast pose optimization. The alignment energy uses novel regularizers to address occlusions and hand-object contacts. For added robustness, we guide the optimization with discriminative part classification of the hand and segmentation of the object. We conduct extensive experiments on several existing datasets and introduce a new annotated hand-object dataset. Quantitative and qualitative results show the key advantages of our method: speed, accuracy, and robustness.
Export
BibTeX
@techreport{Report2016-4-001, TITLE = {Real-time Joint Tracking of a Hand Manipulating an Object from {RGB-D} Input}, AUTHOR = {Sridhar, Srinath and M{\"u}ller, Franziska and Zollh{\"o}fer, Michael and Casas, Dan and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0946-011X}, NUMBER = {MPI-I-2016-4-001}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Real-time simultaneous tracking of hands manipulating and interacting with external objects has many potential applications in augmented reality, tangible computing, and wearable computing. However, due to dicult occlusions, fast motions, and uniform hand appearance, jointly tracking hand and object pose is more challenging than tracking either of the two separately. Many previous approaches resort to complex multi-camera setups to remedy the occlusion problem and often employ expensive segmentation and optimization steps which makes real-time tracking impossible. In this paper, we propose a real-time solution that uses a single commodity RGB-D camera. The core of our approach is a 3D articulated Gaussian mixture alignment strategy tailored to hand-object tracking that allows fast pose optimization. The alignment energy uses novel regularizers to address occlusions and hand-object contacts. For added robustness, we guide the optimization with discriminative part classication of the hand and segmentation of the object. We conducted extensive experiments on several existing datasets and introduce a new annotated hand-object dataset. Quantitative and qualitative results show the key advantages of our method: speed, accuracy, and robustness.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Sridhar, Srinath %A Müller, Franziska %A Zollhöfer, Michael %A Casas, Dan %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Joint Tracking of a Hand Manipulating an Object from RGB-D Input : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-5510-A %Y Max-Planck-Institut für Informatik %C Saarbrücken %D 2016 %P 31 p. %X Real-time simultaneous tracking of hands manipulating and interacting with external objects has many potential applications in augmented reality, tangible computing, and wearable computing. However, due to dicult occlusions, fast motions, and uniform hand appearance, jointly tracking hand and object pose is more challenging than tracking either of the two separately. Many previous approaches resort to complex multi-camera setups to remedy the occlusion problem and often employ expensive segmentation and optimization steps which makes real-time tracking impossible. In this paper, we propose a real-time solution that uses a single commodity RGB-D camera. The core of our approach is a 3D articulated Gaussian mixture alignment strategy tailored to hand-object tracking that allows fast pose optimization. The alignment energy uses novel regularizers to address occlusions and hand-object contacts. For added robustness, we guide the optimization with discriminative part classication of the hand and segmentation of the object. We conducted extensive experiments on several existing datasets and introduce a new annotated hand-object dataset. Quantitative and qualitative results show the key advantages of our method: speed, accuracy, and robustness. %B Research Report %@ false
Sridhar, S., Bailly, G., Heydrich, E., Oulasvirta, A., and Theobalt, C. 2016e. FullHand: Markerless Skeleton-based Tracking for Free-Hand Interaction. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
This paper advances a novel markerless hand tracking method for interactive applications. FullHand uses input from RGB and depth cameras in a desktop setting. It combines, in a voting scheme, a discriminative, part-based pose retrieval with a generative pose estimation method based on local optimization. We develop this approach to enable: (1) capturing hand articulations with a high number of degrees of freedom, including the motion of all fingers, (2) sufficient precision, shown in a dataset of user-generated gestures, and (3) a high framerate of 50 fps for one hand. We discuss the design of free-hand interactions with the tracker and present several demonstrations ranging from simple (few DOFs) to complex (finger individuation plus global hand motion), including mouse operation, a first-person shooter, and virtual globe navigation. A user study on the latter shows that free-hand interactions implemented for the tracker can equal mouse-based interactions in user performance.
Export
BibTeX
@techreport{Report2016-4-002, TITLE = {{FullHand}: {M}arkerless Skeleton-based Tracking for Free-Hand Interaction}, AUTHOR = {Sridhar, Srinath and Bailly, Gilles and Heydrich, Elias and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0946-011X}, NUMBER = {MPI-I-2016-4-002}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, ABSTRACT = {This paper advances a novel markerless hand tracking method for interactive applications. FullHand uses input from RGB and depth cameras in a desktop setting. It combines, in a voting scheme, a discriminative, part-based pose retrieval with a generative pose estimation method based on local optimization. We develop this approach to enable: (1) capturing hand articulations with high number of degrees of freedom, including the motion of all fingers, (2) sufficient precision, shown in a dataset of user-generated gestures, and (3) a high framerate of 50 fps for one hand. We discuss the design of free-hand interactions with the tracker and present several demonstrations ranging from simple (few DOFs) to complex (finger individuation plus global hand motion), including mouse operation, a first-person shooter and virtual globe navigation. A user study on the latter shows that free-hand interactions implemented for the tracker can equal mouse-based interactions in user performance.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Sridhar, Srinath %A Bailly, Gilles %A Heydrich, Elias %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T FullHand: Markerless Skeleton-based Tracking for Free-Hand Interaction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-7456-7 %Y Max-Planck-Institut für Informatik %C Saarbrücken %D 2016 %P 11 p. %X This paper advances a novel markerless hand tracking method for interactive applications. FullHand uses input from RGB and depth cameras in a desktop setting. It combines, in a voting scheme, a discriminative, part-based pose retrieval with a generative pose estimation method based on local optimization. We develop this approach to enable: (1) capturing hand articulations with high number of degrees of freedom, including the motion of all fingers, (2) sufficient precision, shown in a dataset of user-generated gestures, and (3) a high framerate of 50 fps for one hand. We discuss the design of free-hand interactions with the tracker and present several demonstrations ranging from simple (few DOFs) to complex (finger individuation plus global hand motion), including mouse operation, a first-person shooter and virtual globe navigation. A user study on the latter shows that free-hand interactions implemented for the tracker can equal mouse-based interactions in user performance. %B Research Report %@ false
Thesis
Bachynskyi, M. 2016. Biomechanical Models for Human-Computer Interaction. urn:nbn:de:bsz:291-scidok-66888.
Abstract
Post-desktop user interfaces, such as smartphones, tablets, interactive tabletops, public displays, and mid-air interfaces, are already a ubiquitous part of everyday human life, or have the potential to be. One of the key features of these interfaces is the reduced number or even absence of input movement constraints imposed by a device form factor. This freedom is advantageous for users, allowing them to interact with computers using more natural limb movements; however, it is a source of four issues for the research and design of post-desktop interfaces that make traditional analysis methods inefficient: the new movement space is orders of magnitude larger than the one analyzed for traditional desktops; the existing knowledge on post-desktop input methods is sparse and sporadic; the movement space is non-uniform with respect to performance; and traditional methods are ineffective or inefficient in tackling physical ergonomics pitfalls in post-desktop interfaces. These issues lead to the research problem of efficient assessment, analysis, and design methods for high-throughput ergonomic post-desktop interfaces. To solve this research problem and support researchers and designers, this thesis proposes efficient experiment- and model-based assessment methods for post-desktop user interfaces. We achieve this through the following contributions: we (1) adopt optical motion capture and biomechanical simulation for HCI experiments as a versatile source of both performance and ergonomics data describing an input method; (2) identify applicability limits of the method for a range of HCI tasks; (3) validate the method outputs against ground-truth recordings in typical HCI settings; (4) demonstrate the added value of the method in the analysis of performance and ergonomics of touchscreen devices; and (5) summarize the performance and ergonomics of a movement space through a clustering of physiological data. The proposed method successfully deals with the four above-mentioned issues of post-desktop input. The efficiency of the methods makes it possible to effectively tackle the issue of large post-desktop movement spaces both at early design stages (through a generic model of a movement space) and at later design stages (through user studies). The method provides rich data on physical ergonomics (joint angles and moments, muscle forces and activations, energy expenditure and fatigue), making it possible to solve the issue of ergonomics pitfalls. Additionally, the method provides performance data (speed, accuracy, and throughput), which can be related to the physiological data to solve the issue of non-uniformity of the movement space. In our adaptation, the method does not require experimenters to have specialized expertise, thus making it accessible to a wide range of researchers and designers and contributing towards the solution of the issue of post-desktop knowledge sparsity.
Export
BibTeX
@phdthesis{Bachyphd16, TITLE = {Biomechanical Models for Human-Computer Interaction}, AUTHOR = {Bachynskyi, Myroslav}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-66888}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, ABSTRACT = {Post-desktop user interfaces, such as smartphones, tablets, interactive tabletops, public displays and mid-air interfaces, already are a ubiquitous part of everyday human life, or have the potential to be. One of the key features of these interfaces is the reduced number or even absence of input movement constraints imposed by a device form-factor. This freedom is advantageous for users, allowing them to interact with computers using more natural limb movements; however, it is a source of 4 issues for research and design of post-desktop interfaces which make traditional analysis methods inefficient: the new movement space is orders of magnitude larger than the one analyzed for traditional desktops; the existing knowledge on post-desktop input methods is sparse and sporadic; the movement space is non-uniform with respect to performance; and traditional methods are ineffective or inefficient in tackling physical ergonomics pitfalls in post-desktop interfaces. These issues lead to the research problem of efficient assessment, analysis and design methods for high-throughput ergonomic post-desktop interfaces. To solve this research problem and support researchers and designers, this thesis proposes efficient experiment- and model-based assessment methods for post-desktop user interfaces. We achieve this through the following contributions: - adopt optical motion capture and biomechanical simulation for HCI experiments as a versatile source of both performance and ergonomics data describing an input method; - identify applicability limits of the method for a range of HCI tasks; - validate the method outputs against ground truth recordings in typical HCI setting; - demonstrate the added value of the method in analysis of performance and ergonomics of touchscreen devices; and - summarize performance and ergonomics of a movement space through a clustering of physiological data. The proposed method successfully deals with the 4 above-mentioned issues of post-desktop input. The efficiency of the methods makes it possible to effectively tackle the issue of large post-desktop movement spaces both at early design stages (through a generic model of a movement space) as well as at later design stages (through user studies). The method provides rich data on physical ergonomics (joint angles and moments, muscle forces and activations, energy expenditure and fatigue), making it possible to solve the issue of ergonomics pitfalls. Additionally, the method provides performance data (speed, accuracy and throughput) which can be related to the physiological data to solve the issue of non-uniformity of movement space. In our adaptation the method does not require experimenters to have specialized expertise, thus making it accessible to a wide range of researchers and designers and contributing towards the solution of the issue of post-desktop knowledge sparsity.}, }
Endnote
%0 Thesis %A Bachynskyi, Myroslav %Y Steimle, Jürgen %A referee: Schmidt, Albrecht %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Biomechanical Models for Human-Computer Interaction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-0FD4-9 %U urn:nbn:de:bsz:291-scidok-66888 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P xiv, 206 p. %V phd %9 phd %X Post-desktop user interfaces, such as smartphones, tablets, interactive tabletops, public displays and mid-air interfaces, already are a ubiquitous part of everyday human life, or have the potential to be. One of the key features of these interfaces is the reduced number or even absence of input movement constraints imposed by a device form-factor. This freedom is advantageous for users, allowing them to interact with computers using more natural limb movements; however, it is a source of 4 issues for research and design of post-desktop interfaces which make traditional analysis methods inefficient: the new movement space is orders of magnitude larger than the one analyzed for traditional desktops; the existing knowledge on post-desktop input methods is sparse and sporadic; the movement space is non-uniform with respect to performance; and traditional methods are ineffective or inefficient in tackling physical ergonomics pitfalls in post-desktop interfaces. These issues lead to the research problem of efficient assessment, analysis and design methods for high-throughput ergonomic post-desktop interfaces. To solve this research problem and support researchers and designers, this thesis proposes efficient experiment- and model-based assessment methods for post-desktop user interfaces. We achieve this through the following contributions: - adopt optical motion capture and biomechanical simulation for HCI experiments as a versatile source of both performance and ergonomics data describing an input method; - identify applicability limits of the method for a range of HCI tasks; - validate the method outputs against ground truth recordings in typical HCI setting; - demonstrate the added value of the method in analysis of performance and ergonomics of touchscreen devices; and - summarize performance and ergonomics of a movement space through a clustering of physiological data. The proposed method successfully deals with the 4 above-mentioned issues of post-desktop input. The efficiency of the methods makes it possible to effectively tackle the issue of large post-desktop movement spaces both at early design stages (through a generic model of a movement space) as well as at later design stages (through user studies). The method provides rich data on physical ergonomics (joint angles and moments, muscle forces and activations, energy expenditure and fatigue), making it possible to solve the issue of ergonomics pitfalls. Additionally, the method provides performance data (speed, accuracy and throughput) which can be related to the physiological data to solve the issue of non-uniformity of movement space. In our adaptation the method does not require experimenters to have specialized expertise, thus making it accessible to a wide range of researchers and designers and contributing towards the solution of the issue of post-desktop knowledge sparsity. 
%U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de %U http://scidok.sulb.uni-saarland.de/volltexte/2016/6688/
Elek, O. 2016. Efficient Methods for Physically-based Rendering of Participating Media. urn:nbn:de:bsz:291-scidok-65357.
Export
BibTeX
@phdthesis{ElekPhD2016, TITLE = {Efficient Methods for Physically-based Rendering of Participating Media}, AUTHOR = {Elek, Oskar}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-65357}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, }
Endnote
%0 Thesis %A Elek, Oskar %Y Seidel, Hans-Peter %A referee: Ritschel, Tobias %A referee: Dachsbacher, Karsten %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Efficient Methods for Physically-based Rendering of Participating Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-F94D-E %U urn:nbn:de:bsz:291-scidok-65357 %I Universität des Saarlandes %C Saarbrücken %D 2016 %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/volltexte/2016/6535/ %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de
Hatefi Ardakani, H. 2016. Finite Horizon Analysis of Markov Automata. urn:nbn:de:bsz:291-scidok-67438.
Abstract
Markov automata constitute an expressive continuous-time compositional modelling formalism, featuring stochastic timing and nondeterministic as well as probabilistic branching, all supported in one model. They span, as special cases, the models of discrete- and continuous-time Markov chains, as well as interactive Markov chains and probabilistic automata. Moreover, they can be equipped with reward and resource structures in order to be used for analysing quantitative aspects of systems, like performance metrics, energy consumption, and repair and maintenance costs. Due to their expressive nature, they serve as semantic backbones of engineering frameworks, control applications and safety-critical systems; the Architecture Analysis and Design Language (AADL), Dynamic Fault Trees (DFT) and Generalised Stochastic Petri Nets (GSPN) are just some examples. Their expressiveness has thus far prevented their efficient analysis by stochastic solvers and probabilistic model checkers. A major problem context of this thesis lies in their analysis under budget constraints, i.e., when only a finite budget of resources can be spent by the model. We study the mathematical foundations of Markov automata, since these are essential for the analysis addressed in this thesis. This includes, in particular, understanding their measurability and establishing their probability measure. Furthermore, we address the analysis of Markov automata in the presence of both reward acquisition and resource consumption within a finite budget of resources. More specifically, we focus on the problem of computing the optimal expected resource-bounded reward. In our general setting, we support transient, instantaneous and final reward collection as well as transient resource consumption. Our general formulation of the problem encompasses, in particular, optimal time-bounded reward and reachability as well as resource-bounded reachability. We develop a sound theory together with a stable approximation scheme with a strict error bound to solve the problem in an efficient way. We report on an implementation of our approach in a supporting tool and also demonstrate its effectiveness and usability over an extensive collection of industrial and academic case studies.
Export
BibTeX
@phdthesis{Hatefiphd17, TITLE = {Finite Horizon Analysis of {M}arkov Automata}, AUTHOR = {Hatefi Ardakani, Hassan}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-67438}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, ABSTRACT = {Markov automata constitute an expressive continuous-time compositional modelling formalism, featuring stochastic timing and nondeterministic as well as probabilistic branching, all supported in one model. They span as special cases, the models of discrete and continuous-time Markov chains, as well as interactive Markov chains and probabilistic automata. Moreover, they might be equipped with reward and resource structures in order to be used for analysing quantitative aspects of systems, like performance metrics, energy consumption, repair and maintenance costs. Due to their expressive nature, they serve as semantic backbones of engineering frameworks, control applications and safety critical systems. The Architecture Analysis and Design Language (AADL), Dynamic Fault Trees (DFT) and Generalised Stochastic Petri Nets (GSPN) are just some examples. Their expressiveness thus far prevents them from efficient analysis by stochastic solvers and probabilistic model checkers. A major problem context of this thesis lies in their analysis under some budget constraints, i.e. when only a finite budget of resources can be spent by the model. We study mathematical foundations of Markov automata since these are essential for the analysis addressed in this thesis. This includes, in particular, understanding their measurability and establishing their probability measure. Furthermore, we address the analysis of Markov automata in the presence of both reward acquisition and resource consumption within a finite budget of resources. More specifically, we put the problem of computing the optimal expected resource-bounded reward in our focus. In our general setting, we support transient, instantaneous and final reward collection as well as transient resource consumption. Our general formulation of the problem encompasses in particular the optimal time-bound reward and reachability as well as resource-bounded reachability. We develop a sound theory together with a stable approximation scheme with a strict error bound to solve the problem in an efficient way. We report on an implementation of our approach in a supporting tool and also demonstrate its effectiveness and usability over an extensive collection of industrial and academic case studies.}, }
Endnote
%0 Thesis %A Hatefi Ardakani, Hassan %Y Hermanns, Holger %A referee: Buchholz, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Finite Horizon Analysis of Markov Automata : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-9E81-C %U urn:nbn:de:bsz:291-scidok-67438 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P X, 175 p. %V phd %9 phd %X Markov automata constitute an expressive continuous-time compositional modelling formalism, featuring stochastic timing and nondeterministic as well as probabilistic branching, all supported in one model. They span as special cases, the models of discrete and continuous-time Markov chains, as well as interactive Markov chains and probabilistic automata. Moreover, they might be equipped with reward and resource structures in order to be used for analysing quantitative aspects of systems, like performance metrics, energy consumption, repair and maintenance costs. Due to their expressive nature, they serve as semantic backbones of engineering frameworks, control applications and safety critical systems. The Architecture Analysis and Design Language (AADL), Dynamic Fault Trees (DFT) and Generalised Stochastic Petri Nets (GSPN) are just some examples. Their expressiveness thus far prevents them from efficient analysis by stochastic solvers and probabilistic model checkers. A major problem context of this thesis lies in their analysis under some budget constraints, i.e. when only a finite budget of resources can be spent by the model. We study mathematical foundations of Markov automata since these are essential for the analysis addressed in this thesis. This includes, in particular, understanding their measurability and establishing their probability measure. Furthermore, we address the analysis of Markov automata in the presence of both reward acquisition and resource consumption within a finite budget of resources. More specifically, we put the problem of computing the optimal expected resource-bounded reward in our focus. In our general setting, we support transient, instantaneous and final reward collection as well as transient resource consumption. Our general formulation of the problem encompasses in particular the optimal time-bound reward and reachability as well as resource-bounded reachability. We develop a sound theory together with a stable approximation scheme with a strict error bound to solve the problem in an efficient way. We report on an implementation of our approach in a supporting tool and also demonstrate its effectiveness and usability over an extensive collection of industrial and academic case studies. %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6743/
Kellnhofer, P. 2016. Perceptual Modeling for Stereoscopic 3D. urn:nbn:de:bsz:291-scidok-66813.
Abstract
Virtual and Augmented Reality applications typically rely on stereoscopic presentation and involve intensive object and observer motion. The combination of high-dynamic-range and stereoscopic capabilities is becoming popular for consumer displays, and is a desirable functionality of head-mounted displays to come. This thesis focuses on the complex interactions between all of these visual cues on digital displays. The first part investigates the challenges of combining stereoscopic 3D with motion. We consider the interaction between continuous motion and its presentation as discrete frames. Then, we discuss disparity processing for the accurate reproduction of objects moving in the depth direction. Finally, we investigate depth perception as a function of motion parallax and of eye-fixation changes by means of saccadic motion. The second part focuses on the role of high-dynamic-range imaging for stereoscopic displays. We go beyond current display capabilities by considering the full perceivable luminance range, and we simulate the real-world experience in such adaptation conditions. In particular, we address the problems of disparity retargeting across such wide luminance ranges and of reflective/refractive surface rendering. The core of our research methodology is perceptual modeling, supported by our own experimental studies, to overcome the limitations of current display technologies and improve the viewer experience by enhancing perceived depth, reducing visual artifacts, or improving viewing comfort.
Export
BibTeX
@phdthesis{Kellnhoferphd2016, TITLE = {Perceptual Modeling for Stereoscopic {3D}}, AUTHOR = {Kellnhofer, Petr}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-66813}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, ABSTRACT = {Virtual and Augmented Reality applications typically rely on both stereoscopic presentation and involve intensive object and observer motion. A combination of high dynamic range and stereoscopic capabilities become popular for consumer displays, and is a desirable functionality of head mounted displays to come. The thesis is focused on complex interactions between all these visual cues on digital displays. The first part investigates challenges of the stereoscopic 3D and motion combination. We consider an interaction between the continuous motion presented as discrete frames. Then, we discuss a disparity processing for accurate reproduction of objects moving in the depth direction. Finally, we investigate the depth perception as a function of motion parallax and eye fixation changes by means of saccadic motion. The second part focuses on the role of high dynamic range imaging for stereoscopic displays. We go beyond the current display capabilities by considering the full perceivable luminance range and we simulate the real world experience in such adaptation conditions. In particular, we address the problems of disparity retargeting across such wide luminance ranges and reflective/refractive surface rendering. The core of our research methodology is perceptual modeling supported by our own experimental studies to overcome limitations of current display technologies and improve the viewer experience by enhancing perceived depth, reducing visual artifacts or improving viewing comfort.}, }
Endnote
%0 Thesis %A Kellnhofer, Petr %Y Myszkowski, Karol %A referee: Seidel, Hans-Peter %A referee: Masia, Belen %A referee: Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Perceptual Modeling for Stereoscopic 3D : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-BBA6-1 %U urn:nbn:de:bsz:291-scidok-66813 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P xxiv, 158 p. %V phd %9 phd %X Virtual and Augmented Reality applications typically rely on both stereoscopic presentation and involve intensive object and observer motion. A combination of high dynamic range and stereoscopic capabilities become popular for consumer displays, and is a desirable functionality of head mounted displays to come. The thesis is focused on complex interactions between all these visual cues on digital displays. The first part investigates challenges of the stereoscopic 3D and motion combination. We consider an interaction between the continuous motion presented as discrete frames. Then, we discuss a disparity processing for accurate reproduction of objects moving in the depth direction. Finally, we investigate the depth perception as a function of motion parallax and eye fixation changes by means of saccadic motion. The second part focuses on the role of high dynamic range imaging for stereoscopic displays. We go beyond the current display capabilities by considering the full perceivable luminance range and we simulate the real world experience in such adaptation conditions. In particular, we address the problems of disparity retargeting across such wide luminance ranges and reflective/refractive surface rendering. The core of our research methodology is perceptual modeling supported by our own experimental studies to overcome limitations of current display technologies and improve the viewer experience by enhancing perceived depth, reducing visual artifacts or improving viewing comfort. %U http://scidok.sulb.uni-saarland.de/volltexte/2016/6681/ %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de
Klehm, O. 2016. User-Guided Scene Stylization using Efficient Rendering Techniques. urn:nbn:de:bsz:291-scidok-65321.
Export
BibTeX
@phdthesis{Klehmphd2016, TITLE = {User-Guided Scene Stylization using Efficient Rendering Techniques}, AUTHOR = {Klehm, Oliver}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-65321}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, }
Endnote
%0 Thesis %A Klehm, Oliver %Y Seidel, Hans-Peter %A referee: Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T User-Guided Scene Stylization using Efficient Rendering Techniques : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-9C13-A %U urn:nbn:de:bsz:291-scidok-65321 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P XIII, 111 p. %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de %U http://scidok.sulb.uni-saarland.de/volltexte/2016/6532/
Reinert, B. 2016. Interactive, Example-driven Synthesis and Manipulation of Visual Media. urn:nbn:de:bsz:291-scidok-67660.
Export
BibTeX
@phdthesis{Reinertbphd17, TITLE = {Interactive, Example-driven Synthesis and Manipulation of Visual Media}, AUTHOR = {Reinert, Bernhard}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-67660}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, }
Endnote
%0 Thesis %A Reinert, Bernhard %Y Seidel, Hans-Peter %A referee: Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive, Example-driven Synthesis and Manipulation of Visual Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5A03-B %U urn:nbn:de:bsz:291-scidok-67660 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P XX, 116, XVII p. %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6766/ %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de
Rhodin, H. 2016. From Motion Capture to Interactive Virtual Worlds: Towards Unconstrained Motion-Capture Algorithms for Real-time Performance-Driven Character Animation. urn:nbn:de:bsz:291-scidok-67413.
Export
BibTeX
@phdthesis{RhodinPhD2016, TITLE = {From Motion Capture to Interactive Virtual Worlds: {T}owards Unconstrained Motion-Capture Algorithms for Real-time Performance-Driven Character Animation}, AUTHOR = {Rhodin, Helge}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-67413}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, }
Endnote
%0 Thesis %A Rhodin, Helge %Y Theobalt, Christian %A referee: Seidel, Hans-Peter %A referee: Bregler, Christoph %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T From Motion Capture to Interactive Virtual Worlds: Towards Unconstrained Motion-Capture Algorithms for Real-time Performance-Driven Character Animation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-6310-C %U urn:nbn:de:bsz:291-scidok-67413 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P 179 p. %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6741/
Sridhar, S. 2016. Tracking Hands in Action for Gesture-based Computer Input. urn:nbn:de:bsz:291-scidok-67712.
Export
BibTeX
@phdthesis{SridharPhD2016, TITLE = {Tracking Hands in Action for Gesture-based Computer Input}, AUTHOR = {Sridhar, Srinath}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-67712}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, }
Endnote
%0 Thesis %A Sridhar, Srinath %Y Theobalt, Christian %A referee: Oulasvirta, Antti %A referee: Schiele, Bernt %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society %T Tracking Hands in Action for Gesture-based Computer Input : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-631C-3 %U urn:nbn:de:bsz:291-scidok-67712 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P XXIII, 161 p. %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6771/
Templin, K. 2016. Depth, Shading, and Stylization in Stereoscopic Cinematography. urn:nbn:de:bsz:291-scidok-64390.
Export
BibTeX
@phdthesis{Templinphd15, TITLE = {Depth, Shading, and Stylization in Stereoscopic Cinematography}, AUTHOR = {Templin, Krzysztof}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-64390}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2016}, }
Endnote
%0 Thesis %A Templin, Krzysztof %Y Seidel, Hans-Peter %A referee: Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Depth, Shading, and Stylization in Stereoscopic Cinematography : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-19FA-2 %U urn:nbn:de:bsz:291-scidok-64390 %I Universität des Saarlandes %C Saarbrücken %D 2016 %P xii, 100 p. %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de %U http://scidok.sulb.uni-saarland.de/volltexte/2016/6439/