Last Year

Article
Arabadzhiyska, E., Tursun, O.T., Myszkowski, K., Seidel, H.-P., and Didyk, P. 2017. Saccade Landing Position Prediction for Gaze-Contingent Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Babaei, V., Vidimče, K., Foshey, M., Kaspar, A., Didyk, P., and Matusik, W. 2017a. Color Contoning for 3D Printing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Chen, R. and Gotsman, C. 2017. Approximating Planar Conformal Maps Using Regular Polygonal Meshes. Computer Graphics Forum 36, 8.
Chen, R. and Weber, O. 2017. GPU-Accelerated Locally Injective Shape Deformation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2017) 36, 6.
Dai, A., Nießner, M., Zollhöfer, M., Izadi, S., and Theobalt, C. 2017. BundleFusion: Real-time Globally Consistent 3D Reconstruction using On-the-fly Surface Re-integration. ACM Transactions on Graphics 36, 3.
DeVito, Z., Mara, M., Zollhöfer, M., et al. 2017. Opt: A Domain Specific Language for Non-linear Least Squares Optimization in Graphics and Imaging. ACM Transactions on Graphics 36, 5.
Dunn, D., Tippets, C., Torell, K., et al. 2017a. Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2017) 23, 4.
(Best Paper Award)
Elek, O., Sumin, D., Zhang, R., et al. 2017. Scattering-aware Texture Reproduction for 3D Printing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2017) 36, 6.
Elhayek, A., de Aguiar, E., Jain, A., et al. 2017. MARCOnI – ConvNet-Based MARker-Less Motion Capture in Outdoor and Indoor Scenes. IEEE Transactions on Pattern Analysis and Machine Intelligence 39, 3.
Haubenwallner, K., Seidel, H.-P., and Steinberger, M. 2017. ShapeGenetics: Using Genetic Algorithms for Procedural Modeling. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Jiang, C., Tang, C., Seidel, H.-P., and Wonka, P. 2017. Design and Volume Optimization of Space Structures. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Kellnhofer, P., Didyk, P., Wang, S.-P., et al. 2017. 3DTV at Home: Eulerian-Lagrangian Stereo-to-Multiview Conversion. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Kerbl, B., Kenzel, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. 2017. Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU. Computer Graphics Forum 36, 8.
Kol, T.R., Klehm, O., Seidel, H.-P., and Eisemann, E. 2017. Expressive Single Scattering for Light Shaft Stylization. IEEE Transactions on Visualization and Computer Graphics 23, 7.
Leimkühler, T., Seidel, H.-P., and Ritschel, T. 2017. Minimal Warping: Planning Incremental Novel-view Synthesis. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017) 36, 4.
Lurie, K.L., Angst, R., Zlatev, D.V., Liao, J.C., and Bowden, A.K.E. 2017. 3D Reconstruction of Cystoscopy Videos for Comprehensive Bladder Records. Biomedical Optics Express 8, 4.
Masia, B., Serrano, A., and Gutierrez, D. 2017. Dynamic Range Expansion Based on Image Statistics. Multimedia Tools and Applications 76, 1.
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017a. VNect: Real-Time 3D Human Pose Estimation With a Single RGB Camera. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Meka, A., Fox, G., Zollhöfer, M., Richardt, C., and Theobalt, C. 2017. Live User-guided Intrinsic Video For Static Scenes. IEEE Transactions on Visualization and Computer Graphics 23, 11.
Abstract
We present a novel real-time approach for user-guided intrinsic decomposition of static scenes captured by an RGB-D sensor. In the first step, we acquire a three-dimensional representation of the scene using a dense volumetric reconstruction framework. The obtained reconstruction serves as a proxy to densely fuse reflectance estimates and to store user-provided constraints in three-dimensional space. User constraints, in the form of constant shading and reflectance strokes, can be placed directly on the real-world geometry using an intuitive touch-based interaction metaphor, or using interactive mouse strokes. Fusing the decomposition results and constraints in three-dimensional space allows for robust propagation of this information to novel views by re-projection. We leverage this information to improve on the decomposition quality of existing intrinsic video decomposition techniques by further constraining the ill-posed decomposition problem. In addition to improved decomposition quality, we show a variety of live augmented reality applications such as recoloring of objects, relighting of scenes and editing of material appearance.
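The abstract above builds on the standard intrinsic-image model, in which each observed pixel is the product of a reflectance and a shading term. The short Python sketch below only illustrates that model and a crude stand-in for a constant-reflectance user stroke; the array shapes, the decompose helper, and the stroke handling are illustrative assumptions, not the authors' implementation.

import numpy as np

def decompose(image, shading, stroke_mask=None, eps=1e-6):
    # Intrinsic-image model: image = reflectance * shading,
    # so reflectance is recovered by a per-pixel division.
    reflectance = image / np.maximum(shading, eps)
    if stroke_mask is not None and stroke_mask.any():
        # Toy version of a constant-reflectance user stroke: force the
        # marked pixels to share their mean reflectance.
        reflectance[stroke_mask] = reflectance[stroke_mask].mean(axis=0)
    return reflectance

rng = np.random.default_rng(0)
shading = rng.uniform(0.2, 1.0, size=(4, 4, 1))         # toy shading layer
reflectance_gt = rng.uniform(0.1, 1.0, size=(4, 4, 3))  # toy reflectance layer
image = reflectance_gt * shading                         # forward model
stroke = np.zeros((4, 4), dtype=bool)
stroke[:2, :2] = True                                    # fake user stroke
print(decompose(image, shading, stroke).shape)           # -> (4, 4, 3)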
Molnos, S., Mamdouh, T., Petri, S., Nocke, T., Weinkauf, T., and Coumou, D. 2017. A Network-based Detection Scheme for the Jet Stream Core. Earth System Dynamics 8, 1.
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2017a. Deep Shading: Convolutional Neural Networks for Screen Space Shading. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017) 36, 4.
Nalbach, O., Seidel, H.-P., and Ritschel, T. 2017b. Practical Capture and Reproduction of Phosphorescent Appearance. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Pishchulin, L., Wuhrer, S., Helten, T., Theobalt, C., and Schiele, B. 2017. Building Statistical Shape Spaces for 3D Human Modeling. Pattern Recognition 67.
Rematas, K., Nguyen, C., Ritschel, T., Fritz, M., and Tuytelaars, T. 2017. Novel Views of Objects from a Single Image. IEEE Transactions on Pattern Analysis and Machine Intelligence 39, 8.
Robertini, N., Casas, D., de Aguiar, E., and Theobalt, C. 2017. Multi-view Performance Capture of Surface Details. International Journal of Computer Vision 124, 1.
Serrano, A., Garces, E., Masia, B., and Gutierrez, D. 2017. Convolutional Sparse Coding for Capturing High-Speed Video Content. Computer Graphics Forum 36, 8.
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2017. Stream Line–Based Pattern Search in Flows. Computer Graphics Forum 36, 8.
Weier, M., Stengel, M., Roth, T., et al. 2017. Perception-driven Accelerated Rendering. Computer Graphics Forum 36, 2.
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017a. A GPU-adapted Structure for Unstructured Grids. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Book Item
Akyüz, A.O., Tursun, O.T., Hasić-Telalović, J., and Karađuzović-Hadžiabdić, K. 2017. Ghosting in HDR Video. In: High Dynamic Range Video. Elsevier, Amsterdam.
Saikia, H., Seidel, H.-P., and Weinkauf, T. 2017. Fast Similarity Search in Scalar Fields using Merging Histograms. In: Topological Methods in Data Analysis and Visualization IV. Springer, Cham.
Conference Paper
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017a. Towards a Quality Metric for Dense Light Fields. 30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017), IEEE Computer Society.
Babaei, V., Vidimče, K., Foshey, M., Kaspar, A., Didyk, P., and Matusik, W. 2017b. 3D Color Contoning. Proceedings SCF 2017, ACM.
Derler, A., Zayer, R., Seidel, H.-P., and Steinberger, M. 2017. Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Dunn, D., Tippets, C., Torell, K., et al. 2017b. Membrane AR: Varifocal, Wide Field of View Augmented Reality Display from Deformable Membranes. ACM SIGGRAPH 2017 Emerging Technologies, ACM.
(Digital Content Association of Japan Award)
Georgoulis, S., Rematas, K., Ritschel, T., Fritz, M., Tuytelaars, T., and Van Gool, L. 2017. What Is Around The Camera? IEEE International Conference on Computer Vision (ICCV 2017), IEEE.
Jiang, C. and Chen, R. 2017. Polyhedral Meshes with Concave Faces. SIGGRAPH Asia 2017 Posters (ACM SIGGRAPH Asia 2017), ACM.
Mehta, D., Rhodin, H., Casas, D., et al. 2017b. Monocular 3D Human Pose Estimation in the Wild Using Improved CNN Supervision. 3DV 2017, International Conference on 3D Vision, IEEE.
Mueller, F., Mehta, D., Sotnychenko, O., Sridhar, S., Casas, D., and Theobalt, C. 2017a. Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor. IEEE International Conference on Computer Vision (ICCV 2017), IEEE.
Export
BibTeX
@inproceedings{Mueller_ICCV2017, TITLE = {Real-Time Hand Tracking under Occlusion from an Egocentric {RGB}-{D} Sensor}, AUTHOR = {Mueller, Franziska and Mehta, Dushyant and Sotnychenko, Oleksandr and Sridhar, Srinath and Casas, Dan and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5386-1032-9}, DOI = {10.1109/ICCV.2017.131}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE International Conference on Computer Vision (ICCV 2017)}, PAGES = {1163--1172}, ADDRESS = {Venice, Italy}, }
Endnote
%0 Conference Proceedings %A Mueller, Franziska %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Sridhar, Srinath %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor : %G eng %U http://hdl.handle.net/21.11116/0000-0000-611B-5 %R 10.1109/ICCV.2017.131 %D 2017 %B International Conference on Computer Vision %Z date of event: 2017-10-22 - 2017-10-29 %C Venice, Italy %B IEEE International Conference on Computer Vision %P 1163 - 1172 %I IEEE %@ 978-1-5386-1032-9
Mueller, F., Mehta, D., Sotnychenko, O., Sridhar, S., Casas, D., and Theobalt, C. 2017b. Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor. IEEE International Conference on Computer Vision Workshops (IMADO 2017), IEEE.
Export
BibTeX
@inproceedings{MuellerICCV_W2017, TITLE = {Real-Time Hand Tracking under Occlusion from an Egocentric {RGB}-{D} Sensor}, AUTHOR = {Mueller, Franziska and Mehta, Dushyant and Sotnychenko, Oleksandr and Sridhar, Srinath and Casas, Dan and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5386-1034-3}, DOI = {10.1109/ICCVW.2017.82}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE International Conference on Computer Vision Workshops (IMADO 2017)}, PAGES = {1284--1293}, ADDRESS = {Venice, Italy}, }
Endnote
%0 Conference Proceedings %A Mueller, Franziska %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Sridhar, Srinath %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor : %G eng %U http://hdl.handle.net/21.11116/0000-0000-60A6-8 %R 10.1109/ICCVW.2017.82 %D 2017 %B ICCV 2017 Workshop on Image-based Modeling of Articulated and Deformable Objects %Z date of event: 2017-10-28 - 2017-10-28 %C Venice, Italy %B IEEE International Conference on Computer Vision Workshops %P 1284 - 1293 %I IEEE %@ 978-1-5386-1034-3
Piovarči, M., Wessely, M., Jagielski, M., Alexa, M., Matusik, W., and Didyk, P. 2017. Directional Screens. Proceedings SCF 2017, ACM.
Export
BibTeX
@inproceedings{PiovarciSFC2017, TITLE = {Directional Screens}, AUTHOR = {Piovar{\v c}i, Michal and Wessely, Michael and Jagielski, Micha{\l} and Alexa, Marc and Matusik, Wojciech and Didyk, Piotr}, LANGUAGE = {eng}, ISBN = {978-1-4503-4999-4}, DOI = {10.1145/3083157.3083162}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {Proceedings SCF 2017}, EID = {1}, ADDRESS = {Cambridge, MA, USA}, }
Endnote
%0 Conference Proceedings %A Piovarči, Michal %A Wessely, Michael %A Jagielski, Michał %A Alexa, Marc %A Matusik, Wojciech %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Directional Screens : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-05AA-3 %R 10.1145/3083157.3083162 %D 2017 %B ACM Symposium On Computational Fabrication %Z date of event: 2017-06-12 - 2017-06-13 %C Cambridge, MA, USA %B Proceedings SCF 2017 %Z sequence number: 1 %I ACM %@ 978-1-4503-4999-4
Sridhar, S., Markussen, A., Oulasvirta, A., Theobalt, C., and Boring, S. 2017a. WatchSense: On- and Above-Skin Input Sensing through a Wearable Depth Sensor. CHI’17, 35th Annual ACM Conference on Human Factors in Computing Systems, ACM.
Export
BibTeX
@inproceedings{WatchSense_CHI2017, TITLE = {{WatchSense}: {O}n- and Above-Skin Input Sensing through a Wearable Depth Sensor}, AUTHOR = {Sridhar, Srinath and Markussen, Anders and Oulasvirta, Antti and Theobalt, Christian and Boring, Sebastian}, LANGUAGE = {eng}, ISBN = {978-1-4503-4655-9}, DOI = {10.1145/3025453.3026005}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {CHI'17, 35th Annual ACM Conference on Human Factors in Computing Systems}, PAGES = {3891--3902}, ADDRESS = {Denver, CO, USA}, }
Endnote
%0 Conference Proceedings %A Sridhar, Srinath %A Markussen, Anders %A Oulasvirta, Antti %A Theobalt, Christian %A Boring, Sebastian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T WatchSense: On- and Above-Skin Input Sensing through a Wearable Depth Sensor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-6517-8 %R 10.1145/3025453.3026005 %D 2017 %B 35th Annual ACM Conference on Human Factors in Computing Systems %Z date of event: 2017-05-06 - 2017-05-11 %C Denver, CO, USA %B CHI'17 %P 3891 - 3902 %I ACM %@ 978-1-4503-4655-9
Steinberger, M., Zayer, R., and Seidel, H.-P. 2017. Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Export
BibTeX
@inproceedings{SteinbergerICS2017, TITLE = {Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the {GPU}}, AUTHOR = {Steinberger, Markus and Zayer, Rhaleb and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-5020-4}, DOI = {10.1145/3079079.3079086}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {ICS 2017, International Conference on Supercomputing}, EID = {13}, ADDRESS = {Chicago, IL, USA}, }
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D71-2 %R 10.1145/3079079.3079086 %D 2017 %B International Conference on Supercomputing %Z date of event: 2017-06-13 - 2017-06-16 %C Chicago, IL, USA %B ICS 2017 %Z sequence number: 13 %I ACM %@ 978-1-4503-5020-4
Tewari, A., Zollhöfer, M., Kim, H., et al. 2017a. MoFA: Model-Based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction. IEEE International Conference on Computer Vision (ICCV 2017), IEEE.
Export
BibTeX
@inproceedings{TewariICCV2017, TITLE = {{MoFA}: Model-Based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction}, AUTHOR = {Tewari, Ayush and Zollh{\"o}fer, Michael and Kim, Hyeongwoo and Garrido, Pablo and Bernard, Florian and P{\'e}rez, Patrick and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5386-1032-9}, DOI = {10.1109/ICCV.2017.401}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE International Conference on Computer Vision (ICCV 2017)}, PAGES = {3735--3744}, ADDRESS = {Venice, Italy}, }
Endnote
%0 Conference Proceedings %A Tewari, Ayush %A Zollhöfer, Michael %A Kim, Hyeongwoo %A Garrido, Pablo %A Bernard, Florian %A Pérez, Patrick %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T MoFA: Model-Based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-0000-6102-0 %R 10.1109/ICCV.2017.401 %D 2017 %B International Conference on Computer Vision %Z date of event: 2017-10-22 - 2017-10-29 %C Venice, Italy %B IEEE International Conference on Computer Vision %P 3735 - 3744 %I IEEE %@ 978-1-5386-1032-9
Tewari, A., Zollhöfer, M., Kim, H., et al. 2017b. MoFA: Model-based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction. IEEE International Conference on Computer Vision Workshops (IMADO 2017), IEEE.
Export
BibTeX
@inproceedings{tewari17MoFA, TITLE = {{MoFA}: Model-based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction}, AUTHOR = {Tewari, Ayush and Zollh{\"o}fer, Michael and Kim, Hyeongwoo and Garrido, Pablo and Bernard, Florian and P{\'e}rez, Patrick and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5386-1034-3}, DOI = {10.1109/ICCVW.2017.153}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE International Conference on Computer Vision Workshops (IMADO 2017)}, PAGES = {1274--1283}, ADDRESS = {Venice, Italy}, }
Endnote
%0 Conference Proceedings %A Tewari, Ayush %A Zollhöfer, Michael %A Kim, Hyeongwoo %A Garrido, Pablo %A Bernard, Florian %A Pérez, Patrick %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T MoFA: Model-based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-C2F7-4 %R 10.1109/ICCVW.2017.153 %D 2017 %B ICCV 2017 Workshop on Image-based Modeling of Articulated and Deformable Objects %Z date of event: 2017-10-28 - 2017-10-28 %C Venice, Italy %B IEEE International Conference on Computer Vision Workshops %P 1274 - 1283 %I IEEE %@ 978-1-5386-1034-3
Thies, J., Zollhöfer, M., Stamminger, M., Theobalt, C., and Nießner, M. 2017. Demo of FaceVR: Real-Time Facial Reenactment and Eye Gaze Control in Virtual Reality. ACM SIGGRAPH 2017 Emerging Technologies, ACM.
Export
BibTeX
@inproceedings{Thies2017, TITLE = {Demo of {FaceVR}: Real-Time Facial Reenactment and Eye Gaze Control in Virtual Reality}, AUTHOR = {Thies, Justus and Zollh{\"o}fer, Michael and Stamminger, Marc and Theobalt, Christian and Nie{\ss}ner, Matthias}, LANGUAGE = {eng}, ISBN = {978-1-4503-5012-9}, DOI = {10.1145/3084822.3084841}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {ACM SIGGRAPH 2017 Emerging Technologies}, PAGES = {1--2}, EID = {7}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Thies, Justus %A Zollhöfer, Michael %A Stamminger, Marc %A Theobalt, Christian %A Nießner, Matthias %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Demo of FaceVR: Real-Time Facial Reenactment and Eye Gaze Control in Virtual Reality : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-05CA-A %R 10.1145/3084822.3084841 %D 2017 %B 44th SIGGRAPH Conference on Computer Graphics and Interactive Techniques %Z date of event: 2017-07-30 - 2017-08-03 %C Los Angeles, CA, USA %B ACM SIGGRAPH 2017 Emerging Technologies %P 1 - 2 %Z sequence number: 7 %I ACM %@ 978-1-4503-5012-9
Thunberg, J., Bernard, F., and Goncalves, J. 2017. Distributed Synchronization of Euclidean Transformations with Guaranteed Convergence. IEEE 56th Annual Conference on Decision and Control (CDC 2017), IEEE.
Export
BibTeX
@inproceedings{ThunbergCDC2017, TITLE = {Distributed Synchronization of {E}uclidean Transformations with Guaranteed Convergence}, AUTHOR = {Thunberg, Johan and Bernard, Florian and Goncalves, Jorge}, LANGUAGE = {eng}, DOI = {10.1109/CDC.2017.8264211}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE 56th Annual Conference on Decision and Control (CDC 2017)}, PAGES = {3757--3762}, ADDRESS = {Melbourne, Australia}, }
Endnote
%0 Conference Proceedings %A Thunberg, Johan %A Bernard, Florian %A Goncalves, Jorge %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Distributed Synchronization of Euclidean Transformations with Guaranteed Convergence : %G eng %U http://hdl.handle.net/21.11116/0000-0000-B5C5-5 %R 10.1109/CDC.2017.8264211 %D 2017 %B IEEE 56th Annual Conference on Decision and Control %Z date of event: 2017-12-12 - 2017-12-15 %C Melbourne, Australia %B IEEE 56th Annual Conference on Decision and Control %P 3757 - 3762 %I IEEE
Tompkin, J., Kim, K.I., Pfister, H., and Theobalt, C. 2017a. Criteria Sliders: Learning Continuous Database Criteria via Interactive Ranking. Proceedings of the British Machine Vision Conference (BMVC 2017).
Export
BibTeX
@inproceedings{Tompkin_BMVC2017, TITLE = {Criteria Sliders: {L}earning Continuous Database Criteria via Interactive Ranking}, AUTHOR = {Tompkin, James and Kim, Kwang In and Pfister, Hanspeter and Theobalt, Christian}, LANGUAGE = {eng}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Proceedings of the British Machine Vision Conference (BMVC 2017)}, EID = {0170}, ADDRESS = {London, UK}, }
Endnote
%0 Conference Proceedings %A Tompkin, James %A Kim, Kwang In %A Pfister, Hanspeter %A Theobalt, Christian %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Criteria Sliders: Learning Continuous Database Criteria via Interactive Ranking : %G eng %U http://hdl.handle.net/21.11116/0000-0002-5E03-2 %D 2017 %B 28th British Machine Vision Conference %Z date of event: 2017-09-04 - 2017-09-07 %C London, UK %B Proceedings of the British Machine Vision Conference %Z sequence number: 0170
Winter, M., Zayer, R., and Steinberger, M. 2017. Autonomous, Independent Management of Dynamic Graphs on GPUs. IEEE High Performance Extreme Computing Conference (HPEC 2017), IEEE.
Export
BibTeX
@inproceedings{Winter_HPEC2017, TITLE = {Autonomous, Independent Management of Dynamic Graphs on {GPUs}}, AUTHOR = {Winter, Martin and Zayer, Rhaleb and Steinberger, Markus}, LANGUAGE = {eng}, ISBN = {978-1-5386-3472-1}, DOI = {10.1109/HPEC.2017.8091058}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2017)}, PAGES = {1--7}, ADDRESS = {Waltham, MA, USA}, }
Endnote
%0 Conference Proceedings %A Winter, Martin %A Zayer, Rhaleb %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Autonomous, Independent Management of Dynamic Graphs on GPUs : %G eng %U http://hdl.handle.net/21.11116/0000-0001-2DBC-A %R 10.1109/HPEC.2017.8091058 %D 2017 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2017-09-12 - 2017-09-14 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 7 %I IEEE %@ 978-1-5386-3472-1
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017b. Sparse Matrix Assembly on the GPU Through Multiplication Patterns. IEEE High Performance Extreme Computing Conference (HPEC 2017), IEEE.
Export
BibTeX
@inproceedings{Zayer_HPEC2017, TITLE = {Sparse Matrix Assembly on the {GPU} Through Multiplication Patterns}, AUTHOR = {Zayer, Rhaleb and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-5386-3472-1}, DOI = {10.1109/HPEC.2017.8091057}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2017)}, PAGES = {1--8}, ADDRESS = {Waltham, MA, USA}, }
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Sparse Matrix Assembly on the GPU Through Multiplication Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3B33-5 %R 10.1109/HPEC.2017.8091057 %D 2017 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2017-09-12 - 2017-09-14 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5386-3472-1
Paper
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017b. Towards a Quality Metric for Dense Light Fields. http://arxiv.org/abs/1704.07576.
(arXiv: 1704.07576)
Abstract
Light fields are becoming a popular representation of three-dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores, which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in the light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light fields for optimal performance. For the more complex task of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.
Export
BibTeX
@online{AdhikarlaArXiv17, TITLE = {Towards a Quality Metric for Dense Light Fields}, AUTHOR = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} K. and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr}, URL = {http://arxiv.org/abs/1704.07576}, EPRINT = {1704.07576}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.}, }
Endnote
%0 Report %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafał K. %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %U http://hdl.handle.net/11858/00-001M-0000-002D-2C2C-1 %U http://arxiv.org/abs/1704.07576 %D 2017 %X Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
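The study above applies existing 2D objective metrics to dense light fields. As a purely illustrative baseline (not the authors' code; the view-major array layout, image size, and function name are assumptions of this sketch), such a metric can be applied view by view and the scores averaged:

import numpy as np
from skimage.metrics import structural_similarity

def mean_view_ssim(reference_lf, distorted_lf):
    # Apply a standard full-reference 2D metric (SSIM) to each view of the
    # light field independently and average the scores; arrays are assumed
    # to be shaped (num_views, height, width) with values in [0, 1].
    scores = [structural_similarity(ref, dist, data_range=1.0)
              for ref, dist in zip(reference_lf, distorted_lf)]
    return float(np.mean(scores))

# Example with synthetic data standing in for a real light field.
rng = np.random.default_rng(0)
reference = rng.random((64, 128, 128))
distorted = np.clip(reference + 0.05 * rng.standard_normal(reference.shape), 0.0, 1.0)
print(mean_view_ssim(reference, distorted))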
Bernard, F., Theobalt, C., and Moeller, M. 2017. Tighter Lifting-Free Convex Relaxations for Quadratic Matching Problems. http://arxiv.org/abs/1711.10733.
(arXiv: 1711.10733)
Abstract
In this work we study convex relaxations of quadratic optimisation problems over permutation matrices. While existing semidefinite programming approaches can achieve remarkably tight relaxations, they have the strong disadvantage that they lift the original $n {\times} n$-dimensional variable to an $n^2 {\times} n^2$-dimensional variable, which limits their practical applicability. In contrast, here we present a lifting-free convex relaxation that is provably at least as tight as existing (lifting-free) convex relaxations. We demonstrate experimentally that our approach is superior to existing convex and non-convex methods for various problems, including image arrangement and multi-graph matching.
Export
BibTeX
@online{Bernard2017, TITLE = {Tighter Lifting-Free Convex Relaxations for Quadratic Matching Problems}, AUTHOR = {Bernard, Florian and Theobalt, Christian and Moeller, Michael}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1711.10733}, EPRINT = {1711.10733}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {In this work we study convex relaxations of quadratic optimisation problems over permutation matrices. While existing semidefinite programming approaches can achieve remarkably tight relaxations, they have the strong disadvantage that they lift the original $n {\times} n$-dimensional variable to an $n^2 {\times} n^2$-dimensional variable, which limits their practical applicability. In contrast, here we present a lifting-free convex relaxation that is provably at least as tight as existing (lifting-free) convex relaxations. We demonstrate experimentally that our approach is superior to existing convex and non-convex methods for various problems, including image arrangement and multi-graph matching.}, }
Endnote
%0 Report %A Bernard, Florian %A Theobalt, Christian %A Moeller, Michael %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Tighter Lifting-Free Convex Relaxations for Quadratic Matching Problems : %G eng %U http://hdl.handle.net/21.11116/0000-0000-6143-7 %U http://arxiv.org/abs/1711.10733 %D 2017 %X In this work we study convex relaxations of quadratic optimisation problems over permutation matrices. While existing semidefinite programming approaches can achieve remarkably tight relaxations, they have the strong disadvantage that they lift the original $n {\times} n$-dimensional variable to an $n^2 {\times} n^2$-dimensional variable, which limits their practical applicability. In contrast, here we present a lifting-free convex relaxation that is provably at least as tight as existing (lifting-free) convex relaxations. We demonstrate experimentally that our approach is superior to existing convex and non-convex methods for various problems, including image arrangement and multi-graph matching. %K Mathematics, Optimization and Control, math.OC,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Statistics, Machine Learning, stat.ML
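For orientation, the problem class the abstract refers to can be written as a quadratic assignment over permutation matrices (the notation below is generic, not taken from the paper):

\[
\min_{\mathbf{X}\in\mathcal{P}_n}\ \operatorname{vec}(\mathbf{X})^{\top}\,\mathbf{W}\,\operatorname{vec}(\mathbf{X}),
\qquad
\mathcal{P}_n=\bigl\{\mathbf{X}\in\{0,1\}^{n\times n}\,:\,\mathbf{X}\mathbf{1}=\mathbf{X}^{\top}\mathbf{1}=\mathbf{1}\bigr\}.
\]

Lifted semidefinite relaxations replace the rank-one product $\operatorname{vec}(\mathbf{X})\operatorname{vec}(\mathbf{X})^{\top}$ by an $n^{2}\times n^{2}$ positive-semidefinite variable, which is what limits their scalability; a lifting-free relaxation instead keeps the original $n\times n$ variable and relaxes $\mathcal{P}_n$ to a convex set such as the doubly stochastic matrices.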
Chen, R., Gotsman, C., and Hormann, K. 2017a. Path Planning with Divergence-Based Distance Functions. http://arxiv.org/abs/1708.02845.
(arXiv: 1708.02845)
Abstract
Distance functions between points in a domain are sometimes used to automatically plan a gradient-descent path towards a given target point in the domain, avoiding obstacles that may be present. A key requirement from such distance functions is the absence of spurious local minima, which may foil such an approach, and this has led to the common use of harmonic potential functions. Based on the planar Laplace operator, the potential function guarantees the absence of spurious minima, but is well known to be slow to numerically compute and prone to numerical precision issues. To alleviate the first of these problems, we propose a family of novel divergence distances. These are based on f-divergence of the Poisson kernel of the domain. We define the divergence distances and compare them to the harmonic potential function and other related distance functions. Our first result is theoretical: We show that the family of divergence distances are equivalent to the harmonic potential function on simply-connected domains, namely generate paths which are identical to those generated by the potential function. The proof is based on the concept of conformal invariance. Our other results are more practical and relate to two special cases of divergence distances, one based on the Kullback-Leibler divergence and one based on the total variation divergence. We show that using divergence distances instead of the potential function and other distances has a significant computational advantage, as, following a pre-processing stage, they may be computed up to an order of magnitude faster than the others when taking advantage of certain sparsity properties of the Poisson kernel. Furthermore, the computation is "embarrassingly parallel", so may be implemented on a GPU with up to three orders of magnitude speedup.
Export
BibTeX
@online{chen_arXiv1708.02845, TITLE = {Path Planning with Divergence-Based Distance Functions}, AUTHOR = {Chen, Renjie and Gotsman, Craig and Hormann, Kai}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1708.02845}, EPRINT = {1708.02845}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Distance functions between points in a domain are sometimes used to automatically plan a gradient-descent path towards a given target point in the domain, avoiding obstacles that may be present. A key requirement from such distance functions is the absence of spurious local minima, which may foil such an approach, and this has led to the common use of harmonic potential functions. Based on the planar Laplace operator, the potential function guarantees the absence of spurious minima, but is well known to be slow to numerically compute and prone to numerical precision issues. To alleviate the first of these problems, we propose a family of novel divergence distances. These are based on f-divergence of the Poisson kernel of the domain. We define the divergence distances and compare them to the harmonic potential function and other related distance functions. Our first result is theoretical: We show that the family of divergence distances are equivalent to the harmonic potential function on simply-connected domains, namely generate paths which are identical to those generated by the potential function. The proof is based on the concept of conformal invariance. Our other results are more practical and relate to two special cases of divergence distances, one based on the Kullback-Leibler divergence and one based on the total variation divergence. We show that using divergence distances instead of the potential function and other distances has a significant computational advantage, as, following a pre-processing stage, they may be computed up to an order of magnitude faster than the others when taking advantage of certain sparsity properties of the Poisson kernel. Furthermore, the computation is "embarrassingly parallel", so may be implemented on a GPU with up to three orders of magnitude speedup.}, }
Endnote
%0 Report %A Chen, Renjie %A Gotsman, Craig %A Hormann, Kai %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Path Planning with Divergence-Based Distance Functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-063A-8 %U http://arxiv.org/abs/1708.02845 %D 2017 %X Distance functions between points in a domain are sometimes used to automatically plan a gradient-descent path towards a given target point in the domain, avoiding obstacles that may be present. A key requirement from such distance functions is the absence of spurious local minima, which may foil such an approach, and this has led to the common use of harmonic potential functions. Based on the planar Laplace operator, the potential function guarantees the absence of spurious minima, but is well known to be slow to numerically compute and prone to numerical precision issues. To alleviate the first of these problems, we propose a family of novel divergence distances. These are based on f-divergence of the Poisson kernel of the domain. We define the divergence distances and compare them to the harmonic potential function and other related distance functions. Our first result is theoretical: We show that the family of divergence distances are equivalent to the harmonic potential function on simply-connected domains, namely generate paths which are identical to those generated by the potential function. The proof is based on the concept of conformal invariance. Our other results are more practical and relate to two special cases of divergence distances, one based on the Kullback-Leibler divergence and one based on the total variation divergence. We show that using divergence distances instead of the potential function and other distances has a significant computational advantage, as, following a pre-processing stage, they may be computed up to an order of magnitude faster than the others when taking advantage of certain sparsity properties of the Poisson kernel. Furthermore, the computation is "embarrassingly parallel", so may be implemented on a GPU with up to three orders of magnitude speedup. %K Computer Science, Robotics, cs.RO
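To make the construction in the abstract concrete, on the unit disk the Poisson kernel and the resulting Kullback-Leibler divergence distance between interior points $x$ and $y$ take the following form (the unit-disk specialisation is chosen here only for illustration; general domains use their own kernel):

\[
P(x,\theta)=\frac{1-\lVert x\rVert^{2}}{2\pi\,\lVert x-e^{i\theta}\rVert^{2}},
\qquad
d_{\mathrm{KL}}(x,y)=\int_{0}^{2\pi}P(x,\theta)\,\log\frac{P(x,\theta)}{P(y,\theta)}\,d\theta,
\]

i.e. the KL divergence between the harmonic measures seen from $x$ and from $y$; other members of the family are obtained by replacing the KL divergence with a different $f$-divergence, such as total variation.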
Chen, R., Gotsman, C., and Hormann, K. 2017b. Practical Distance Functions for Path-Planning in Planar Domains. http://arxiv.org/abs/1708.05855.
(arXiv: 1708.05855)
Abstract
Path planning is an important problem in robotics. One way to plan a path between two points $x,y$ within a (not necessarily simply-connected) planar domain $\Omega$ is to define a non-negative distance function $d(x,y)$ on $\Omega\times\Omega$ such that following the (descending) gradient of this distance function traces such a path. This presents two equally important challenges: a mathematical challenge -- to define $d$ such that $d(x,y)$ has a single minimum for any fixed $y$ (namely at $x=y$), since a local minimum is in effect a "dead end"; and a computational challenge -- to define $d$ such that it may be computed efficiently. In this paper, given a description of $\Omega$, we show how to assign coordinates to each point of $\Omega$ and define a family of distance functions between points using these coordinates, such that both the mathematical and the computational challenges are met. This is done using the concepts of \emph{harmonic measure} and \emph{$f$-divergences}. In practice, path planning is done on a discrete network defined on a finite set of \emph{sites} sampled from $\Omega$, so any method that works well on the continuous domain must be adapted so that it still works well on the discrete domain. Given a set of sites sampled from $\Omega$, we show how to define a network connecting these sites such that a \emph{greedy routing} algorithm (which is the discrete equivalent of continuous gradient descent) based on the distance function mentioned above is guaranteed to generate a path in the network between any two such sites. In many cases, this network is close to a (desirable) planar graph, especially if the set of sites is dense.
Export
BibTeX
@online{ChenarXiv2017, TITLE = {Practical Distance Functions for Path-Planning in Planar Domains}, AUTHOR = {Chen, Renjie and Gotsman, Craig and Hormann, Kai}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1708.05855}, EPRINT = {1708.05855}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Path planning is an important problem in robotics. One way to plan a path between two points $x,y$ within a (not necessarily simply-connected) planar domain $\Omega$, is to define a non-negative distance function $d(x,y)$ on $\Omega\times\Omega$ such that following the (descending) gradient of this distance function traces such a path. This presents two equally important challenges: A mathematical challenge -- to define $d$ such that $d(x,y)$ has a single minimum for any fixed $y$ (and this is when $x=y$), since a local minimum is in effect a "dead end", A computational challenge -- to define $d$ such that it may be computed efficiently. In this paper, given a description of $\Omega$, we show how to assign coordinates to each point of $\Omega$ and define a family of distance functions between points using these coordinates, such that both the mathematical and the computational challenges are met. This is done using the concepts of \emph{harmonic measure} and \emph{$f$-divergences}. In practice, path planning is done on a discrete network defined on a finite set of \emph{sites} sampled from $\Omega$, so any method that works well on the continuous domain must be adapted so that it still works well on the discrete domain. Given a set of sites sampled from $\Omega$, we show how to define a network connecting these sites such that a \emph{greedy routing} algorithm (which is the discrete equivalent of continuous gradient descent) based on the distance function mentioned above is guaranteed to generate a path in the network between any two such sites. In many cases, this network is close to a (desirable) planar graph, especially if the set of sites is dense.}, }
Endnote
%0 Report %A Chen, Renjie %A Gotsman, Craig %A Hormann, Kai %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Practical Distance Functions for Path-Planning in Planar Domains : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-0634-3 %U http://arxiv.org/abs/1708.05855 %D 2017 %X Path planning is an important problem in robotics. One way to plan a path between two points $x,y$ within a (not necessarily simply-connected) planar domain $\Omega$, is to define a non-negative distance function $d(x,y)$ on $\Omega\times\Omega$ such that following the (descending) gradient of this distance function traces such a path. This presents two equally important challenges: A mathematical challenge -- to define $d$ such that $d(x,y)$ has a single minimum for any fixed $y$ (and this is when $x=y$), since a local minimum is in effect a "dead end", A computational challenge -- to define $d$ such that it may be computed efficiently. In this paper, given a description of $\Omega$, we show how to assign coordinates to each point of $\Omega$ and define a family of distance functions between points using these coordinates, such that both the mathematical and the computational challenges are met. This is done using the concepts of \emph{harmonic measure} and \emph{$f$-divergences}. In practice, path planning is done on a discrete network defined on a finite set of \emph{sites} sampled from $\Omega$, so any method that works well on the continuous domain must be adapted so that it still works well on the discrete domain. Given a set of sites sampled from $\Omega$, we show how to define a network connecting these sites such that a \emph{greedy routing} algorithm (which is the discrete equivalent of continuous gradient descent) based on the distance function mentioned above is guaranteed to generate a path in the network between any two such sites. In many cases, this network is close to a (desirable) planar graph, especially if the set of sites is dense. %K Computer Science, Robotics, cs.RO,
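The greedy routing mentioned in the abstract is the discrete counterpart of gradient descent on the distance function. A minimal, generic sketch of such a router (not the authors' implementation; the graph and the distance function are assumed inputs) is:

def greedy_route(graph, d, source, target):
    # graph: dict mapping each site to an iterable of neighbouring sites
    # d:     distance function d(x, y); assumed free of spurious local minima
    # Repeatedly hop to the neighbour that is closest to the target.
    path = [source]
    current = source
    while current != target:
        neighbours = list(graph[current])
        if not neighbours:
            break  # isolated site: routing fails
        best = min(neighbours, key=lambda n: d(n, target))
        if d(best, target) >= d(current, target):
            break  # "dead end": no neighbour improves on the current site
        path.append(best)
        current = best
    return path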
Kim, H., Zollhöfer, M., Tewari, A., Thies, J., Richardt, C., and Theobalt, C. 2017. InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image. http://arxiv.org/abs/1703.10956.
(arXiv: 1703.10956)
Abstract
We introduce InverseFaceNet, a deep convolutional inverse rendering framework for faces that jointly estimates facial pose, shape, expression, reflectance and illumination from a single input image in a single shot. By estimating all these parameters from just a single image, advanced editing possibilities on a single face image, such as appearance editing and relighting, become feasible. Previous learning-based face reconstruction approaches do not jointly recover all dimensions, or are severely limited in terms of visual quality. In contrast, we propose to recover high-quality facial pose, shape, expression, reflectance and illumination using a deep neural network that is trained using a large, synthetically created dataset. Our approach builds on a novel loss function that measures model-space similarity directly in parameter space and significantly improves reconstruction accuracy. In addition, we propose an analysis-by-synthesis breeding approach which iteratively updates the synthetic training corpus based on the distribution of real-world images, and we demonstrate that this strategy outperforms completely synthetically trained networks. Finally, we show high-quality reconstructions and compare our approach to several state-of-the-art approaches.
Export
BibTeX
@online{DBLP:journals/corr/KimZTTRT17, TITLE = {{InverseFaceNet}: {D}eep Single-Shot Inverse Face Rendering From A Single Image}, AUTHOR = {Kim, Hyeongwoo and Zollh{\"o}fer, Michael and Tewari, Ayush and Thies, Justus and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1703.10956}, EPRINT = {1703.10956}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We introduce InverseFaceNet, a deep convolutional inverse rendering framework for faces that jointly estimates facial pose, shape, expression, reflectance and illumination from a single input image in a single shot. By estimating all these parameters from just a single image, advanced editing possibilities on a single face image, such as appearance editing and relighting, become feasible. Previous learning-based face reconstruction approaches do not jointly recover all dimensions, or are severely limited in terms of visual quality. In contrast, we propose to recover high-quality facial pose, shape, expression, reflectance and illumination using a deep neural network that is trained using a large, synthetically created dataset. Our approach builds on a novel loss function that measures model-space similarity directly in parameter space and significantly improves reconstruction accuracy. In addition, we propose an analysis-by-synthesis breeding approach which iteratively updates the synthetic training corpus based on the distribution of real-world images, and we demonstrate that this strategy outperforms completely synthetically trained networks. Finally, we show high-quality reconstructions and compare our approach to several state-of-the-art approaches.}, }
Endnote
%0 Report %A Kim, Hyeongwoo %A Zollhöfer, Michael %A Tewari, Ayush %A Thies, Justus %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T InverseFaceNet: Deep Single-Shot Inverse Face Rendering From A Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8BCD-B %U http://arxiv.org/abs/1703.10956 %D 2017 %X We introduce InverseFaceNet, a deep convolutional inverse rendering framework for faces that jointly estimates facial pose, shape, expression, reflectance and illumination from a single input image in a single shot. By estimating all these parameters from just a single image, advanced editing possibilities on a single face image, such as appearance editing and relighting, become feasible. Previous learning-based face reconstruction approaches do not jointly recover all dimensions, or are severely limited in terms of visual quality. In contrast, we propose to recover high-quality facial pose, shape, expression, reflectance and illumination using a deep neural network that is trained using a large, synthetically created dataset. Our approach builds on a novel loss function that measures model-space similarity directly in parameter space and significantly improves reconstruction accuracy. In addition, we propose an analysis-by-synthesis breeding approach which iteratively updates the synthetic training corpus based on the distribution of real-world images, and we demonstrate that this strategy outperforms completely synthetically trained networks. Finally, we show high-quality reconstructions and compare our approach to several state-of-the-art approaches. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Malireddi, S.R., Mueller, F., Oberweger, M., et al. 2017. HandSeg: A Dataset for Hand Segmentation from Depth Images. http://arxiv.org/abs/1711.05944.
(arXiv: 1711.05944)
Abstract
We introduce a large-scale RGBD hand segmentation dataset, with detailed and automatically generated high-quality ground-truth annotations. Existing real-world datasets are limited in quantity due to the difficulty in manually annotating ground-truth labels. By leveraging a pair of brightly colored gloves and an RGBD camera, we propose an acquisition pipeline that eases the task of annotating very large datasets with minimal human intervention. We then quantify the importance of a large annotated dataset in this domain, and compare the performance of existing datasets in the training of deep-learning architectures. Finally, we propose a novel architecture employing strided convolution/deconvolutions in place of max-pooling and unpooling layers. Our variant outperforms baseline architectures while remaining computationally efficient at inference time. Source and datasets will be made publicly available.
Export
BibTeX
@online{Malireddi2017, TITLE = {{HandSeg}: A Dataset for Hand Segmentation from Depth Images}, AUTHOR = {Malireddi, Sri Raghu and Mueller, Franziska and Oberweger, Markus and Bojja, Abhishake Kumar and Lepetit, Vincent and Theobalt, Christian and Tagliasacchi, Andrea}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1711.05944}, EPRINT = {1711.05944}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We introduce a large-scale RGBD hand segmentation dataset, with detailed and automatically generated high-quality ground-truth annotations. Existing real-world datasets are limited in quantity due to the difficulty in manually annotating ground-truth labels. By leveraging a pair of brightly colored gloves and an RGBD camera, we propose an acquisition pipeline that eases the task of annotating very large datasets with minimal human intervention. We then quantify the importance of a large annotated dataset in this domain, and compare the performance of existing datasets in the training of deep-learning architectures. Finally, we propose a novel architecture employing strided convolution/deconvolutions in place of max-pooling and unpooling layers. Our variant outperforms baseline architectures while remaining computationally efficient at inference time. Source and datasets will be made publicly available.}, }
Endnote
%0 Report %A Malireddi, Sri Raghu %A Mueller, Franziska %A Oberweger, Markus %A Bojja, Abhishake Kumar %A Lepetit, Vincent %A Theobalt, Christian %A Tagliasacchi, Andrea %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T HandSeg: A Dataset for Hand Segmentation from Depth Images : %G eng %U http://hdl.handle.net/21.11116/0000-0000-6132-A %U http://arxiv.org/abs/1711.05944 %D 2017 %X We introduce a large-scale RGBD hand segmentation dataset, with detailed and automatically generated high-quality ground-truth annotations. Existing real-world datasets are limited in quantity due to the difficulty in manually annotating ground-truth labels. By leveraging a pair of brightly colored gloves and an RGBD camera, we propose an acquisition pipeline that eases the task of annotating very large datasets with minimal human intervention. We then quantify the importance of a large annotated dataset in this domain, and compare the performance of existing datasets in the training of deep-learning architectures. Finally, we propose a novel architecture employing strided convolution/deconvolutions in place of max-pooling and unpooling layers. Our variant outperforms baseline architectures while remaining computationally efficient at inference time. Source and datasets will be made publicly available. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
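The architectural variant described above replaces max-pooling/unpooling by strided convolutions/deconvolutions. A minimal PyTorch sketch of that substitution (layer widths are placeholders, not the paper's architecture) looks as follows:

import torch.nn as nn

# Baseline down-sampling block: convolution followed by max-pooling.
pool_down = nn.Sequential(
    nn.Conv2d(32, 64, kernel_size=3, padding=1),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(kernel_size=2),
)

# Variant from the abstract: the stride-2 convolution performs the
# down-sampling itself, so no pooling layer is needed.
strided_down = nn.Sequential(
    nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
    nn.ReLU(inplace=True),
)

# Likewise, a stride-2 transposed convolution replaces the unpooling layer
# on the up-sampling path.
strided_up = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)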
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2017c. Single-Shot Multi-Person 3D Body Pose Estimation From Monocular RGB Input. http://arxiv.org/abs/1712.03453.
(arXiv: 1712.03453)
Abstract
We propose a new efficient single-shot method for multi-person 3D pose estimation in general scenes from a monocular RGB camera. Our fully convolutional DNN-based approach jointly infers 2D and 3D joint locations on the basis of an extended 3D location map supported by body part associations. This new formulation enables the readout of full body poses at a subset of visible joints without the need for explicit bounding box tracking. It therefore succeeds even under strong partial body occlusions by other people and objects in the scene. We also contribute the first training data set showing real images of sophisticated multi-person interactions and occlusions. To this end, we leverage multi-view video-based performance capture of individual people for ground truth annotation and a new image compositing for user-controlled synthesis of large corpora of real multi-person images. We also propose a new video-recorded multi-person test set with ground truth 3D annotations. Our method achieves state-of-the-art performance on challenging multi-person scenes.
Export
BibTeX
@online{Mehta1712.03453, TITLE = {Single-Shot Multi-Person {3D} Body Pose Estimation From Monocular {RGB} Input}, AUTHOR = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Sridhar, Srinath and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1712.03453}, EPRINT = {1712.03453}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We propose a new efficient single-shot method for multi-person 3D pose estimation in general scenes from a monocular RGB camera. Our fully convolutional DNN-based approach jointly infers 2D and 3D joint locations on the basis of an extended 3D location map supported by body part associations. This new formulation enables the readout of full body poses at a subset of visible joints without the need for explicit bounding box tracking. It therefore succeeds even under strong partial body occlusions by other people and objects in the scene. We also contribute the first training data set showing real images of sophisticated multi-person interactions and occlusions. To this end, we leverage multi-view video-based performance capture of individual people for ground truth annotation and a new image compositing for user-controlled synthesis of large corpora of real multi-person images. We also propose a new video-recorded multi-person test set with ground truth 3D annotations. Our method achieves state-of-the-art performance on challenging multi-person scenes.}, }
Endnote
%0 Report %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Sridhar, Srinath %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Single-Shot Multi-Person 3D Body Pose Estimation From Monocular RGB Input : %G eng %U http://hdl.handle.net/21.11116/0000-0000-438F-4 %U http://arxiv.org/abs/1712.03453 %D 2017 %X We propose a new efficient single-shot method for multi-person 3D pose estimation in general scenes from a monocular RGB camera. Our fully convolutional DNN-based approach jointly infers 2D and 3D joint locations on the basis of an extended 3D location map supported by body part associations. This new formulation enables the readout of full body poses at a subset of visible joints without the need for explicit bounding box tracking. It therefore succeeds even under strong partial body occlusions by other people and objects in the scene. We also contribute the first training data set showing real images of sophisticated multi-person interactions and occlusions. To this end, we leverage multi-view video-based performance capture of individual people for ground truth annotation and a new image compositing for user-controlled synthesis of large corpora of real multi-person images. We also propose a new video-recorded multi-person test set with ground truth 3D annotations. Our method achieves state-of-the-art performance on challenging multi-person scenes. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017d. VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera. http://arxiv.org/abs/1705.01583.
(arXiv: 1705.01583)
Abstract
We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras.
Export
BibTeX
@online{MehtaArXiv2017, TITLE = {{VNect}: Real-time {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sridhar, Srinath and Sotnychenko, Oleksandr and Rhodin, Helge and Shafiei, Mohammad and Seidel, Hans-Peter and Xu, Weipeng and Casas, Dan and Theobalt, Christian}, URL = {http://arxiv.org/abs/1705.01583}, DOI = {10.1145/3072959.3073596}, EPRINT = {1705.01583}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras.}, }
Endnote
%0 Report %A Mehta, Dushyant %A Sridhar, Srinath %A Sotnychenko, Oleksandr %A Rhodin, Helge %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Xu, Weipeng %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera : %U http://hdl.handle.net/11858/00-001M-0000-002D-7D78-3 %R 10.1145/3072959.3073596 %U http://arxiv.org/abs/1705.01583 %D 2017 %X We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
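The combination of the CNN regressor with kinematic skeleton fitting can be pictured as minimising, per frame, an energy over the skeleton pose parameters $\theta$ (this decomposition is an illustrative reading of the abstract, not the paper's exact objective):

\[
E(\theta)=E_{\mathrm{2D}}(\theta)+E_{\mathrm{3D}}(\theta)+E_{\mathrm{smooth}}(\theta),
\]

where $E_{\mathrm{2D}}$ penalises the reprojection error of the skeleton joints against the CNN's 2D joint predictions, $E_{\mathrm{3D}}$ penalises deviation from its 3D joint predictions, and $E_{\mathrm{smooth}}$ enforces temporal stability of the recovered global pose across frames.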
Mueller, F., Bernard, F., Sotnychenko, O., et al. 2017c. GANerated Hands for Real-time 3D Hand Tracking from Monocular RGB. http://arxiv.org/abs/1712.01057.
(arXiv: 1712.01057)
Abstract
We address the highly challenging problem of real-time 3D hand tracking based on a monocular RGB-only sequence. Our tracking method combines a convolutional neural network with a kinematic 3D hand model, such that it generalizes well to unseen data, is robust to occlusions and varying camera viewpoints, and leads to anatomically plausible as well as temporally smooth hand motions. For training our CNN we propose a novel approach for the synthetic generation of training data that is based on a geometrically consistent image-to-image translation network. To be more specific, we use a neural network that translates synthetic images to "real" images, such that the so-generated images follow the same statistical distribution as real-world hand images. For training this translation network we combine an adversarial loss and a cycle-consistency loss with a geometric consistency loss in order to preserve geometric properties (such as hand pose) during translation. We demonstrate that our hand tracking system outperforms the current state-of-the-art on challenging RGB-only footage.
Export
BibTeX
@online{Mueller_arXiv1712.01057, TITLE = {{GANerated} Hands for Real-time {3D} Hand Tracking from Monocular {RGB}}, AUTHOR = {Mueller, Franziska and Bernard, Florian and Sotnychenko, Oleksandr and Mehta, Dushyant and Sridhar, Srinath and Casas, Dan and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1712.01057}, EPRINT = {1712.01057}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We address the highly challenging problem of real-time 3D hand tracking based on a monocular RGB-only sequence. Our tracking method combines a convolutional neural network with a kinematic 3D hand model, such that it generalizes well to unseen data, is robust to occlusions and varying camera viewpoints, and leads to anatomically plausible as well as temporally smooth hand motions. For training our CNN we propose a novel approach for the synthetic generation of training data that is based on a geometrically consistent image-to-image translation network. To be more specific, we use a neural network that translates synthetic images to "real" images, such that the so-generated images follow the same statistical distribution as real-world hand images. For training this translation network we combine an adversarial loss and a cycle-consistency loss with a geometric consistency loss in order to preserve geometric properties (such as hand pose) during translation. We demonstrate that our hand tracking system outperforms the current state-of-the-art on challenging RGB-only footage.}, }
Endnote
%0 Report %A Mueller, Franziska %A Bernard, Florian %A Sotnychenko, Oleksandr %A Mehta, Dushyant %A Sridhar, Srinath %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T GANerated Hands for Real-time 3D Hand Tracking from Monocular RGB : %G eng %U http://hdl.handle.net/21.11116/0000-0000-6156-2 %U http://arxiv.org/abs/1712.01057 %D 2017 %X We address the highly challenging problem of real-time 3D hand tracking based on a monocular RGB-only sequence. Our tracking method combines a convolutional neural network with a kinematic 3D hand model, such that it generalizes well to unseen data, is robust to occlusions and varying camera viewpoints, and leads to anatomically plausible as well as temporally smooth hand motions. For training our CNN we propose a novel approach for the synthetic generation of training data that is based on a geometrically consistent image-to-image translation network. To be more specific, we use a neural network that translates synthetic images to "real" images, such that the so-generated images follow the same statistical distribution as real-world hand images. For training this translation network we combine an adversarial loss and a cycle-consistency loss with a geometric consistency loss in order to preserve geometric properties (such as hand pose) during translation. We demonstrate that our hand tracking system outperforms the current state-of-the-art on challenging RGB-only footage. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
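The abstract above trains the synthetic-to-real translation network with an adversarial loss, a cycle-consistency loss and a geometric consistency loss. The numpy sketch below only illustrates how such a weighted combination could look; the least-squares adversarial form, the L1 terms, the use of silhouettes as the geometric quantity and the weights are assumptions, not the authors' implementation.

import numpy as np

def translation_loss(d_score_fake, synth, cycled, geom_synth, geom_translated,
                     w_adv=1.0, w_cyc=10.0, w_geo=10.0):
    """Weighted sum of adversarial, cycle-consistency and geometric-consistency terms."""
    l_adv = np.mean((d_score_fake - 1.0) ** 2)              # least-squares GAN style: fool the discriminator
    l_cyc = np.mean(np.abs(synth - cycled))                 # synth -> "real" -> synth should be the identity
    l_geo = np.mean(np.abs(geom_synth - geom_translated))   # e.g. hand silhouette preserved by translation
    return w_adv * l_adv + w_cyc * l_cyc + w_geo * l_geo

synth = np.random.rand(4, 64, 64, 3)                        # toy batch of synthetic hand images
print(translation_loss(np.random.rand(4), synth, synth + 0.01,
                       np.random.rand(4, 64, 64), np.random.rand(4, 64, 64)))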
Mueller, F., Mehta, D., Sotnychenko, O., Sridhar, S., Casas, D., and Theobalt, C. 2017d. Real-time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor. http://arxiv.org/abs/1704.02201.
(arXiv: 1704.02201)
Abstract
We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints, common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.
Export
BibTeX
@online{DBLP:journals/corr/MuellerMS0CT17, TITLE = {Real-time Hand Tracking under Occlusion from an Egocentric {RGB}-D Sensor}, AUTHOR = {Mueller, Franziska and Mehta, Dushyant and Sotnychenko, Oleksandr and Sridhar, Srinath and Casas, Dan and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1704.02201}, EPRINT = {1704.02201}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints, common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.}, }
Endnote
%0 Report %A Mueller, Franziska %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Sridhar, Srinath %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8BBD-F %U http://arxiv.org/abs/1704.02201 %D 2017 %X We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints, common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
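To make the two-stage pipeline in the abstract above concrete, the sketch below wires together a stub localization network, a depth-based normalized crop and a stub joint-regression network. Everything named here (the stub functions, crop size, joint count and the pinhole back-projection) is a hypothetical stand-in for illustration only, not the authors' code.

import numpy as np

def localization_cnn(rgbd):
    """Stub for the first CNN: returns the (u, v) pixel of the hand centre."""
    return rgbd.shape[1] // 2, rgbd.shape[0] // 2

def regression_cnn(crop):
    """Stub for the second CNN: returns 21 root-relative 3D joints (metres)."""
    return np.zeros((21, 3))

def backproject(u, v, z, f=600.0, cx=320.0, cy=240.0):
    """Assumed pinhole back-projection of the hand centre into camera space."""
    return np.array([(u - cx) * z / f, (v - cy) * z / f, z])

def track_frame(rgbd, crop_size=128):
    u, v = localization_cnn(rgbd)
    z = rgbd[v, u, 3]                                   # depth under the hand centre
    half = crop_size // 2
    crop = rgbd[max(v - half, 0):v + half, max(u - half, 0):u + half, :3]
    joints_rel = regression_cnn(crop)                   # root-relative 3D joints
    return joints_rel + backproject(u, v, z)            # rough global 3D estimate

frame = np.random.rand(480, 640, 4)                     # toy RGB-D input
print(track_frame(frame).shape)                         # (21, 3)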
Tewari, A., Zollhöfer, M., Kim, H., et al. 2017c. MoFA: Model-based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction. http://arxiv.org/abs/1703.10580.
(arXiv: 1703.10580)
Abstract
In this work we propose a novel model-based deep convolutional autoencoder that addresses the highly challenging problem of reconstructing a 3D human face from a single in-the-wild color image. To this end, we combine a convolutional encoder network with an expert-designed generative model that serves as decoder. The core innovation is our new differentiable parametric decoder that encapsulates image formation analytically based on a generative model. Our decoder takes as input a code vector with exactly defined semantic meaning that encodes detailed face pose, shape, expression, skin reflectance and scene illumination. Due to this new way of combining CNN-based with model-based face reconstruction, the CNN-based encoder learns to extract semantically meaningful parameters from a single monocular input image. For the first time, a CNN encoder and an expert-designed generative model can be trained end-to-end in an unsupervised manner, which renders training on very large (unlabeled) real world data feasible. The obtained reconstructions compare favorably to current state-of-the-art approaches in terms of quality and richness of representation.
Export
BibTeX
@online{DBLP:journals/corr/TewariZK0BPT17, TITLE = {{MoFA}: Model-based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction}, AUTHOR = {Tewari, Ayush and Zollh{\"o}fer, Michael and Kim, Hyeongwoo and Garrido, Pablo and Bernard, Florian and P{\'e}rez, Patrick and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1703.10580}, EPRINT = {1703.10580}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {In this work we propose a novel model-based deep convolutional autoencoder that addresses the highly challenging problem of reconstructing a 3D human face from a single in-the-wild color image. To this end, we combine a convolutional encoder network with an expert-designed generative model that serves as decoder. The core innovation is our new differentiable parametric decoder that encapsulates image formation analytically based on a generative model. Our decoder takes as input a code vector with exactly defined semantic meaning that encodes detailed face pose, shape, expression, skin reflectance and scene illumination. Due to this new way of combining CNN-based with model-based face reconstruction, the CNN-based encoder learns to extract semantically meaningful parameters from a single monocular input image. For the first time, a CNN encoder and an expert-designed generative model can be trained end-to-end in an unsupervised manner, which renders training on very large (unlabeled) real world data feasible. The obtained reconstructions compare favorably to current state-of-the-art approaches in terms of quality and richness of representation.}, }
Endnote
%0 Report %A Tewari, Ayush %A Zollhöfer, Michael %A Kim, Hyeongwoo %A Garrido, Pablo %A Bernard, Florian %A Pérez, Patrick %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T MoFA: Model-based Deep Convolutional Face Autoencoder for Unsupervised Monocular Reconstruction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8BEA-9 %U http://arxiv.org/abs/1703.10580 %D 2017 %X In this work we propose a novel model-based deep convolutional autoencoder that addresses the highly challenging problem of reconstructing a 3D human face from a single in-the-wild color image. To this end, we combine a convolutional encoder network with an expert-designed generative model that serves as decoder. The core innovation is our new differentiable parametric decoder that encapsulates image formation analytically based on a generative model. Our decoder takes as input a code vector with exactly defined semantic meaning that encodes detailed face pose, shape, expression, skin reflectance and scene illumination. Due to this new way of combining CNN-based with model-based face reconstruction, the CNN-based encoder learns to extract semantically meaningful parameters from a single monocular input image. For the first time, a CNN encoder and an expert-designed generative model can be trained end-to-end in an unsupervised manner, which renders training on very large (unlabeled) real world data feasible. The obtained reconstructions compare favorably to current state-of-the-art approaches in terms of quality and richness of representation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
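The sketch below shows the overall wiring described in the abstract above: a CNN encoder produces a semantic code vector, a differentiable parametric decoder renders an image from it, and a photometric reconstruction loss enables unsupervised training. The stub encoder and decoder, the 257-dimensional code and the plain MSE loss are assumptions for illustration, not the actual MoFA model.

import numpy as np

def encoder(image):
    """Stub CNN encoder: image -> semantic code (pose, shape, expression, reflectance, lighting)."""
    return np.zeros(257)                              # assumed code dimensionality

def parametric_decoder(code):
    """Stub differentiable decoder: code -> synthesized face image."""
    return np.full((224, 224, 3), code.mean())

def self_supervised_loss(image):
    code = encoder(image)                             # semantically meaningful parameters
    rendering = parametric_decoder(code)              # analytic image formation
    return np.mean((rendering - image) ** 2)          # photometric loss, no 3D labels needed

print(self_supervised_loss(np.random.rand(224, 224, 3)))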
Tewari, A., Zollhöfer, M., Garrido, P., et al. 2017d. Self-supervised Multi-level Face Model Learning for Monocular Reconstruction at over 250 Hz. http://arxiv.org/abs/1712.02859.
(arXiv: 1712.02859)
Abstract
The reconstruction of dense 3D models of face geometry and appearance from a single image is highly challenging and ill-posed. To constrain the problem, many approaches rely on strong priors, such as parametric face models learned from limited 3D scan data. However, prior models restrict generalization of the true diversity in facial geometry, skin reflectance and illumination. To alleviate this problem, we present the first approach that jointly learns 1) a regressor for face shape, expression, reflectance and illumination on the basis of 2) a concurrently learned parametric face model. Our multi-level face model combines the advantage of 3D Morphable Models for regularization with the out-of-space generalization of a learned corrective space. We train end-to-end on in-the-wild images without dense annotations by fusing a convolutional encoder with a differentiable expert-designed renderer and a self-supervised training loss, both defined at multiple detail levels. Our approach compares favorably to the state-of-the-art in terms of reconstruction quality, better generalizes to real world faces, and runs at over 250 Hz.
Export
BibTeX
@online{tewari2017, TITLE = {Self-supervised Multi-level Face Model Learning for Monocular Reconstruction at over 250 Hz}, AUTHOR = {Tewari, Ayush and Zollh{\"o}fer, Michael and Garrido, Pablo and Bernard, Florian and Kim, Hyeongwoo and P{\'e}rez, Patrick and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1712.02859}, EPRINT = {1712.02859}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {The reconstruction of dense 3D models of face geometry and appearance from a single image is highly challenging and ill-posed. To constrain the problem, many approaches rely on strong priors, such as parametric face models learned from limited 3D scan data. However, prior models restrict generalization of the true diversity in facial geometry, skin reflectance and illumination. To alleviate this problem, we present the first approach that jointly learns 1) a regressor for face shape, expression, reflectance and illumination on the basis of 2) a concurrently learned parametric face model. Our multi-level face model combines the advantage of 3D Morphable Models for regularization with the out-of-space generalization of a learned corrective space. We train end-to-end on in-the-wild images without dense annotations by fusing a convolutional encoder with a differentiable expert-designed renderer and a self-supervised training loss, both defined at multiple detail levels. Our approach compares favorably to the state-of-the-art in terms of reconstruction quality, better generalizes to real world faces, and runs at over 250 Hz.}, }
Endnote
%0 Report %A Tewari, Ayush %A Zollhöfer, Michael %A Garrido, Pablo %A Bernard, Florian %A Kim, Hyeongwoo %A Pérez, Patrick %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Self-supervised Multi-level Face Model Learning for Monocular Reconstruction at over 250 Hz : %G eng %U http://hdl.handle.net/21.11116/0000-0000-615E-A %U http://arxiv.org/abs/1712.02859 %D 2017 %X The reconstruction of dense 3D models of face geometry and appearance from a single image is highly challenging and ill-posed. To constrain the problem, many approaches rely on strong priors, such as parametric face models learned from limited 3D scan data. However, prior models restrict generalization of the true diversity in facial geometry, skin reflectance and illumination. To alleviate this problem, we present the first approach that jointly learns 1) a regressor for face shape, expression, reflectance and illumination on the basis of 2) a concurrently learned parametric face model. Our multi-level face model combines the advantage of 3D Morphable Models for regularization with the out-of-space generalization of a learned corrective space. We train end-to-end on in-the-wild images without dense annotations by fusing a convolutional encoder with a differentiable expert-designed renderer and a self-supervised training loss, both defined at multiple detail levels. Our approach compares favorably to the state-of-the-art in terms of reconstruction quality, better generalizes to real world faces, and runs at over 250 Hz. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
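As a rough illustration of the multi-level idea in the abstract above, the sketch below evaluates face geometry as a coarse morphable-model term plus offsets from a concurrently learned corrective basis. The linear form of the corrective level and all dimensions are assumptions, not the paper's parameterization.

import numpy as np

N_VERTS, K_MODEL, K_CORR = 5000, 80, 40
mean_shape = np.zeros((N_VERTS, 3))
model_basis = np.random.randn(K_MODEL, N_VERTS, 3) * 1e-3   # stands in for a fixed 3DMM basis
corr_basis = np.random.randn(K_CORR, N_VERTS, 3) * 1e-4     # stands in for the learned corrective space

def face_geometry(alpha, delta):
    """Model-level shape (regularizing) plus learned corrective offsets (detail)."""
    coarse = mean_shape + np.tensordot(alpha, model_basis, axes=1)
    return coarse + np.tensordot(delta, corr_basis, axes=1)

verts = face_geometry(np.random.randn(K_MODEL), np.random.randn(K_CORR))
print(verts.shape)                                          # (5000, 3)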
Tompkin, J., Kim, K.I., Pfister, H., and Theobalt, C. 2017b. Criteria Sliders: Learning Continuous Database Criteria via Interactive Ranking. http://arxiv.org/abs/1706.03863.
(arXiv: 1706.03863)
Abstract
Large databases are often organized by hand-labeled metadata, or criteria, which are expensive to collect. We can use unsupervised learning to model database variation, but these models are often high dimensional, complex to parameterize, or require expert knowledge. We learn low-dimensional continuous criteria via interactive ranking, so that the novice user need only describe the relative ordering of examples. This is formed as semi-supervised label propagation in which we maximize the information gained from a limited number of examples. Further, we actively suggest data points to the user to rank in a more informative way than existing work. Our efficient approach allows users to interactively organize thousands of data points along 1D and 2D continuous sliders. We experiment with datasets of imagery and geometry to demonstrate that our tool is useful for quickly assessing and organizing the content of large databases.
Export
BibTeX
@online{DBLP:journals/corr/TompkinKPT17, TITLE = {Criteria Sliders: Learning Continuous Database Criteria via Interactive Ranking}, AUTHOR = {Tompkin, James and Kim, Kwang In and Pfister, Hanspeter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1706.03863}, EPRINT = {1706.03863}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Large databases are often organized by hand-labeled metadata, or criteria, which are expensive to collect. We can use unsupervised learning to model database variation, but these models are often high dimensional, complex to parameterize, or require expert knowledge. We learn low-dimensional continuous criteria via interactive ranking, so that the novice user need only describe the relative ordering of examples. This is formed as semi-supervised label propagation in which we maximize the information gained from a limited number of examples. Further, we actively suggest data points to the user to rank in a more informative way than existing work. Our efficient approach allows users to interactively organize thousands of data points along 1D and 2D continuous sliders. We experiment with datasets of imagery and geometry to demonstrate that our tool is useful for quickly assessing and organizing the content of large databases.}, }
Endnote
%0 Report %A Tompkin, James %A Kim, Kwang In %A Pfister, Hanspeter %A Theobalt, Christian %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Criteria Sliders: Learning Continuous Database Criteria via Interactive Ranking : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8BB4-1 %U http://arxiv.org/abs/1706.03863 %D 2017 %X Large databases are often organized by hand-labeled metadata, or criteria, which are expensive to collect. We can use unsupervised learning to model database variation, but these models are often high dimensional, complex to parameterize, or require expert knowledge. We learn low-dimensional continuous criteria via interactive ranking, so that the novice user need only describe the relative ordering of examples. This is formed as semi-supervised label propagation in which we maximize the information gained from a limited number of examples. Further, we actively suggest data points to the user to rank in a more informative way than existing work. Our efficient approach allows users to interactively organize thousands of data points along 1D and 2D continuous sliders. We experiment with datasets of imagery and geometry to demonstrate that our tool is useful for quickly assessing and organizing the content of large databases. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
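Semi-supervised propagation of a continuous criterion from a few user-provided examples to a whole database can be written as a small graph-Laplacian least-squares problem, as sketched below. This is a generic formulation for illustration; the RBF affinity, the regularization weight and the toy features are assumptions, not necessarily the paper's exact objective or its active suggestion strategy.

import numpy as np

def propagate_criterion(features, labeled_idx, labeled_values, sigma=1.0, lam=1.0):
    """Spread a scalar criterion from a few labeled items to all items on a similarity graph."""
    n = features.shape[0]
    d2 = np.sum((features[:, None, :] - features[None, :, :]) ** 2, axis=-1)
    W = np.exp(-d2 / (2.0 * sigma ** 2))             # dense RBF affinity
    np.fill_diagonal(W, 0.0)
    L = np.diag(W.sum(axis=1)) - W                   # unnormalized graph Laplacian
    M = np.zeros((n, n))
    M[labeled_idx, labeled_idx] = 1.0                # fidelity only on labeled items
    y = np.zeros(n)
    y[labeled_idx] = labeled_values
    # closed-form minimizer of ||M (f - y)||^2 + lam * f^T L f
    return np.linalg.solve(M + lam * L, M @ y)

X = np.random.rand(200, 16)                          # toy feature vectors for 200 database items
scores = propagate_criterion(X, np.array([0, 50, 199]), np.array([0.0, 0.5, 1.0]))
print(scores.min(), scores.max())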
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2017. MonoPerfCap: Human Performance Capture from Monocular Video. http://arxiv.org/abs/1708.02136.
(arXiv: 1708.02136)
Abstract
We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows us to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.
Export
BibTeX
@online{Xu2017, TITLE = {{MonoPerfCap}: Human Performance Capture from Monocular Video}, AUTHOR = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Mehta, Dushyant and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1708.02136}, EPRINT = {1708.02136}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.}, }
Endnote
%0 Report %A Xu, Weipeng %A Chatterjee, Avishek %A Zollhöfer, Michael %A Rhodin, Helge %A Mehta, Dushyant %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MonoPerfCap: Human Performance Capture from Monocular Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-05C2-9 %U http://arxiv.org/abs/1708.02136 %D 2017 %X We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
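The abstract above resolves monocular ambiguities by recovering motion jointly over a batch of frames in a low-dimensional trajectory subspace. The sketch below illustrates the general idea with a truncated DCT trajectory basis fitted to noisy per-frame 3D detections by linear least squares; the DCT basis, the number of coefficients and the plain least-squares fit are assumptions for illustration, not the paper's energy.

import numpy as np

def dct_basis(num_frames, num_coeffs):
    """Truncated DCT basis spanning a low-dimensional space of smooth trajectories."""
    t = np.arange(num_frames)
    k = np.arange(num_coeffs)
    return np.cos(np.pi * (t[:, None] + 0.5) * k[None, :] / num_frames)   # (F, K)

def fit_batch(detections, num_coeffs=5):
    """detections: (F, J, 3) noisy per-frame joints -> trajectories constrained to the subspace."""
    F, J, _ = detections.shape
    B = dct_basis(F, num_coeffs)
    coeffs, *_ = np.linalg.lstsq(B, detections.reshape(F, J * 3), rcond=None)
    return (B @ coeffs).reshape(F, J, 3)

noisy = np.cumsum(np.random.randn(40, 17, 3) * 0.01, axis=0) + np.random.randn(40, 17, 3) * 0.05
print(fit_batch(noisy).shape)                        # (40, 17, 3)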
Report
Fox, G., Meka, A., Zollhöfer, M., Richardt, C., and Theobalt, C. 2017. Live User-guided Intrinsic Video For Static Scenes. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
We present a novel real-time approach for user-guided intrinsic decomposition of static scenes captured by an RGB-D sensor. In the first step, we acquire a three-dimensional representation of the scene using a dense volumetric reconstruction framework. The obtained reconstruction serves as a proxy to densely fuse reflectance estimates and to store user-provided constraints in three-dimensional space. User constraints, in the form of constant shading and reflectance strokes, can be placed directly on the real-world geometry using an intuitive touch-based interaction metaphor, or using interactive mouse strokes. Fusing the decomposition results and constraints in three-dimensional space allows for robust propagation of this information to novel views by re-projection. We leverage this information to improve on the decomposition quality of existing intrinsic video decomposition techniques by further constraining the ill-posed decomposition problem. In addition to improved decomposition quality, we show a variety of live augmented reality applications such as recoloring of objects, relighting of scenes and editing of material appearance.
Export
BibTeX
@techreport{Report2017-4-001, TITLE = {Live User-guided Intrinsic Video For Static Scenes}, AUTHOR = {Fox, Gereon and Meka, Abhimitra and Zollh{\"o}fer, Michael and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0946-011X}, NUMBER = {MPI-I-2017-4-001}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present a novel real-time approach for user-guided intrinsic decomposition of static scenes captured by an RGB-D sensor. In the first step, we acquire a three-dimensional representation of the scene using a dense volumetric reconstruction framework. The obtained reconstruction serves as a proxy to densely fuse reflectance estimates and to store user-provided constraints in three-dimensional space. User constraints, in the form of constant shading and reflectance strokes, can be placed directly on the real-world geometry using an intuitive touch-based interaction metaphor, or using interactive mouse strokes. Fusing the decomposition results and constraints in three-dimensional space allows for robust propagation of this information to novel views by re-projection.We leverage this information to improve on the decomposition quality of existing intrinsic video decomposition techniques by further constraining the ill-posed decomposition problem. In addition to improved decomposition quality, we show a variety of live augmented reality applications such as recoloring of objects, relighting of scenes and editing of material appearance.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Fox, Gereon %A Meka, Abhimitra %A Zollhöfer, Michael %A Richardt, Christian %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Live User-guided Intrinsic Video For Static Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5DA7-3 %Y Max-Planck-Institut für Informatik %C Saarbrücken %D 2017 %P 12 p. %X We present a novel real-time approach for user-guided intrinsic decomposition of static scenes captured by an RGB-D sensor. In the first step, we acquire a three-dimensional representation of the scene using a dense volumetric reconstruction framework. The obtained reconstruction serves as a proxy to densely fuse reflectance estimates and to store user-provided constraints in three-dimensional space. User constraints, in the form of constant shading and reflectance strokes, can be placed directly on the real-world geometry using an intuitive touch-based interaction metaphor, or using interactive mouse strokes. Fusing the decomposition results and constraints in three-dimensional space allows for robust propagation of this information to novel views by re-projection.We leverage this information to improve on the decomposition quality of existing intrinsic video decomposition techniques by further constraining the ill-posed decomposition problem. In addition to improved decomposition quality, we show a variety of live augmented reality applications such as recoloring of objects, relighting of scenes and editing of material appearance. %B Research Report %@ false
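The report above constrains an intrinsic decomposition (image = reflectance × shading) with user strokes. The sketch below shows the flavour of such a constrained solve on a single scanline in the log domain, where a constant-reflectance stroke ties reflectance values together and a smoothness term regularizes shading; the 1D setting, the weights and the least-squares form are assumptions, not the report's real-time volumetric system.

import numpy as np

def decompose_scanline(log_image, stroke_pixels, w_stroke=10.0, w_smooth=1.0):
    """Least-squares intrinsic split of one scanline; unknowns are [log R, log S]."""
    n = log_image.size
    rows, rhs = [], []
    for i in range(n):                               # data term: r_i + s_i = log I_i
        row = np.zeros(2 * n); row[i] = 1.0; row[n + i] = 1.0
        rows.append(row); rhs.append(log_image[i])
    for a, b in zip(stroke_pixels[:-1], stroke_pixels[1:]):   # user stroke: constant reflectance
        row = np.zeros(2 * n); row[a] = w_stroke; row[b] = -w_stroke
        rows.append(row); rhs.append(0.0)
    for i in range(n - 1):                           # shading smoothness between neighbours
        row = np.zeros(2 * n); row[n + i] = w_smooth; row[n + i + 1] = -w_smooth
        rows.append(row); rhs.append(0.0)
    x, *_ = np.linalg.lstsq(np.array(rows), np.array(rhs), rcond=None)
    return np.exp(x[:n]), np.exp(x[n:])              # reflectance, shading

image = np.concatenate([np.full(8, 0.6), np.full(8, 0.3)]) * np.linspace(0.5, 1.0, 16)
R, S = decompose_scanline(np.log(image), stroke_pixels=np.arange(8))
print(R.round(2), S.round(2))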
Sridhar, S., Markussen, A., Oulasvirta, A., Theobalt, C., and Boring, S. 2017b. WatchSense: On- and Above-Skin Input Sensing through a Wearable Depth Sensor. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
This paper contributes a novel sensing approach to support on- and above-skin finger input for interaction on the move. WatchSense uses a depth sensor embedded in a wearable device to expand the input space to neighboring areas of skin and the space above it. Our approach addresses challenging camera-based tracking conditions, such as oblique viewing angles and occlusions. It can accurately detect fingertips, their locations, and whether they are touching the skin or hovering above it. It extends previous work that supported either mid-air or multitouch input by simultaneously supporting both. We demonstrate feasibility with a compact, wearable prototype attached to a user's forearm (simulating an integrated depth sensor). Our prototype---which runs in real-time on consumer mobile devices---enables a 3D input space on the back of the hand. We evaluated the accuracy and robustness of the approach in a user study. We also show how WatchSense increases the expressiveness of input by interweaving mid-air and multitouch for several interactive applications.
Export
BibTeX
@techreport{sridharwatch17, TITLE = {{WatchSense}: On- and Above-Skin Input Sensing through a Wearable Depth Sensor}, AUTHOR = {Sridhar, Srinath and Markussen, Anders and Oulasvirta, Antti and Theobalt, Christian and Boring, Sebastian}, LANGUAGE = {eng}, ISSN = {0946-011X}, NUMBER = {MPI-I-2016-4-003}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {This paper contributes a novel sensing approach to support on- and above-skin finger input for interaction on the move. WatchSense uses a depth sensor embedded in a wearable device to expand the input space to neighboring areas of skin and the space above it. Our approach addresses challenging camera-based tracking conditions, such as oblique viewing angles and occlusions. It can accurately detect fingertips, their locations, and whether they are touching the skin or hovering above it. It extends previous work that supported either mid-air or multitouch input by simultaneously supporting both. We demonstrate feasibility with a compact, wearable prototype attached to a user's forearm (simulating an integrated depth sensor). Our prototype---which runs in real-time on consumer mobile devices---enables a 3D input space on the back of the hand. We evaluated the accuracy and robustness of the approach in a user study. We also show how WatchSense increases the expressiveness of input by interweaving mid-air and multitouch for several interactive applications.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Sridhar, Srinath %A Markussen, Anders %A Oulasvirta, Antti %A Theobalt, Christian %A Boring, Sebastian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T WatchSense: On- and Above-Skin Input Sensing through a Wearable Depth Sensor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-402E-D %Y Max-Planck-Institut für Informatik %C Saarbrücken %D 2017 %P 17 p. %X This paper contributes a novel sensing approach to support on- and above-skin finger input for interaction on the move. WatchSense uses a depth sensor embedded in a wearable device to expand the input space to neighboring areas of skin and the space above it. Our approach addresses challenging camera-based tracking conditions, such as oblique viewing angles and occlusions. It can accurately detect fingertips, their locations, and whether they are touching the skin or hovering above it. It extends previous work that supported either mid-air or multitouch input by simultaneously supporting both. We demonstrate feasibility with a compact, wearable prototype attached to a user's forearm (simulating an integrated depth sensor). Our prototype---which runs in real-time on consumer mobile devices---enables a 3D input space on the back of the hand. We evaluated the accuracy and robustness of the approach in a user study. We also show how WatchSense increases the expressiveness of input by interweaving mid-air and multitouch for several interactive applications. %B Research Report %@ false
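A core decision in the sensing approach above is whether a detected fingertip is touching the skin or hovering above it. The sketch below illustrates one simple way such a decision could be made from a depth map; the median-of-a-patch surface estimate and the 8 mm threshold are assumptions for illustration, not the prototype's actual classifier.

import numpy as np

def classify_fingertip(depth_map, tip_uv, touch_threshold_mm=8.0, ring=6):
    """Label a detected fingertip as touching or hovering from its height above the skin."""
    u, v = tip_uv
    tip_depth = depth_map[v, u]
    patch = depth_map[v - ring:v + ring + 1, u - ring:u + ring + 1]
    surface_depth = np.median(patch)                 # crude local estimate of the skin surface
    hover_height = surface_depth - tip_depth         # how far the tip floats above the skin (mm)
    return ("touch" if hover_height < touch_threshold_mm else "hover", hover_height)

depth = np.full((240, 320), 180.0)                   # forearm roughly 18 cm from the sensor
depth[120, 160] = 165.0                              # fingertip 15 mm above the skin
print(classify_fingertip(depth, (160, 120)))         # ('hover', 15.0)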
Thesis
Garrido, P. 2017. High-quality face capture, animation and editing from monocular video. urn:nbn:de:bsz:291-scidok-69419.
Export
BibTeX
@phdthesis{Garridophd17, TITLE = {High-quality face capture, animation and editing from monocular video}, AUTHOR = {Garrido, Pablo}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-69419}, DOI = {10.22028/D291-26785}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, }
Endnote
%0 Thesis %A Garrido, Pablo %Y Theobalt, Christian %A referee: Perez, Patrick %A referee: Pauly, Mark %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T High-quality face capture, animation and editing from monocular video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-D1BC-2 %U urn:nbn:de:bsz:291-scidok-69419 %R 10.22028/D291-26785 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P 185 p. %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6941/ %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de
Gryaditskaya, Y. 2017. High Dynamic Range Imaging: Problems of Video Exposure Bracketing, Luminance Calibration and Gloss Editing. urn:nbn:de:bsz:291-scidok-69296.
Abstract
Two-dimensional, conventional images are gradually losing their hegemony, leaving room for novel formats. Among these, 8-bit images are giving way to high dynamic range (HDR) image formats, which improve the colour gamut and the visibility of details in dark and bright areas, leading to a more immersive viewing experience. This opens up broad opportunities for post-processing, which can be useful for artistic rendering, enhancement of the viewing experience or medical applications. Simultaneously, light-field scene representation is also gaining importance, propelled by the recent reappearance of virtual reality and the improvement of both acquisition techniques and computational and storage capabilities. Light-field data likewise enables a broad range of effects in post-production: among others, it allows changing the camera position, the aperture or the focal length. It facilitates object insertions and simplifies the visual effects workflow by integrating the 3D nature of visual effects with the 3D nature of light fields. Content generation is one of the stumbling blocks in these realms. The sensor limitations of a conventional camera do not allow a wide dynamic range to be captured. This is especially the case for mobile devices, where small sensors are optimised for capturing at high resolution. The “HDR mode” often encountered on such devices relies on a technique called “exposure fusion” and partially overcomes the limited range of a sensor. HDR video, however, remains a challenging problem. We suggest a solution for HDR video capture on a mobile device. We analyse the dynamic range of motion regions, the regions most prone to reconstruction artefacts, and suggest a real-time exposure selection algorithm. Further, an HDR content visualization task often requires the input to be in absolute values. We address this problem by presenting a calibration algorithm that can be applied to existing imagery and does not require any additional measurement hardware. Finally, as the use of light fields becomes more common, a key challenge is the ability to edit or modify the appearance of the objects in a light field. To this end, we propose a multidimensional filtering approach in which the specular highlights are filtered in the spatial and angular domains to achieve a desired increase in material roughness.
Export
BibTeX
@phdthesis{Gryphd17, TITLE = {High Dynamic Range Imaging: Problems of Video Exposure Bracketing, Luminance Calibration and Gloss Editing}, AUTHOR = {Gryaditskaya, Yulia}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-69296}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, ABSTRACT = {Two-dimensional, conventional images are gradually losing their hegemony, leaving room for novel formats. Among these, 8 bit images give place to high dynamic range (HDR) image formats, allowing to improve colour gamut and visibility of details in dark and bright areas leading to a more immersive viewing experience. It opens wide opportunities for post-processing, which can be useful for artistic rendering, enhancement of viewing experience or medical applications. Simultaneously, light-field scene representation as well is gaining importance, propelled by the recent reappearance of virtual reality, the improvement of both acquisition techniques, and computational and storage capabilities. Light-field data as well allows to achieve a broad range of effects in post-production: among others, it enables a change of a camera position, an aperture or a focal length. It facilitates object insertions and simplifies visual effects workflow by integrating 3D nature of visual effects with 3D nature of light fields. Content generation is one of the stumbling blocks in these realms. Sensor limitations of a conventional camera do not allow to capture wide dynamic range. This especially is the case for mobile devices, where small sensors are optimised for capturing in high-resolution. The {\textquotedblleft}HDR mode{\textquotedblright} often encountered on such devices, relies on techniques called {\textquotedblleft}exposure fusion{\textquotedblright} and allows to partially overcome the limited range of a sensor. The HDR video at the same time remains a challenging problem. We suggest a solution for an HDR video capturing on a mobile device. We analyse dynamic range of motion regions, the regions which are the most prone to reconstruction artefacts, and suggest a real-time exposure selection algorithm. Further, an HDR content visualization task often requires an input to be in absolute values. We address this problem by presenting a calibration algorithm that can be applied to existent imagery and does not require any additional measurement hardware. Finally, as light fields use becomes more common, a key challenge is the ability to edit or modify the appearance of the objects in the light field. To this end, we propose a multidimensional filtering approach in which the specular highlights are filtered in the spatial and angular domains to target a desired increase of the material roughness.}, }
Endnote
%0 Thesis %A Gryaditskaya, Yulia %Y Seidel, Hans-Peter %A referee: Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Imaging: Problems of Video Exposure Bracketing, Luminance Calibration and Gloss Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-ABA6-3 %U urn:nbn:de:bsz:291-scidok-69296 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P 88 p. %V phd %9 phd %X Two-dimensional, conventional images are gradually losing their hegemony, leaving room for novel formats. Among these, 8 bit images give place to high dynamic range (HDR) image formats, allowing to improve colour gamut and visibility of details in dark and bright areas leading to a more immersive viewing experience. It opens wide opportunities for post-processing, which can be useful for artistic rendering, enhancement of viewing experience or medical applications. Simultaneously, light-field scene representation as well is gaining importance, propelled by the recent reappearance of virtual reality, the improvement of both acquisition techniques, and computational and storage capabilities. Light-field data as well allows to achieve a broad range of effects in post-production: among others, it enables a change of a camera position, an aperture or a focal length. It facilitates object insertions and simplifies visual effects workflow by integrating 3D nature of visual effects with 3D nature of light fields. Content generation is one of the stumbling blocks in these realms. Sensor limitations of a conventional camera do not allow to capture wide dynamic range. This especially is the case for mobile devices, where small sensors are optimised for capturing in high-resolution. The “HDR mode” often encountered on such devices, relies on techniques called “exposure fusion” and allows to partially overcome the limited range of a sensor. The HDR video at the same time remains a challenging problem. We suggest a solution for an HDR video capturing on a mobile device. We analyse dynamic range of motion regions, the regions which are the most prone to reconstruction artefacts, and suggest a real-time exposure selection algorithm. Further, an HDR content visualization task often requires an input to be in absolute values. We address this problem by presenting a calibration algorithm that can be applied to existent imagery and does not require any additional measurement hardware. Finally, as light fields use becomes more common, a key challenge is the ability to edit or modify the appearance of the objects in the light field. To this end, we propose a multidimensional filtering approach in which the specular highlights are filtered in the spatial and angular domains to target a desired increase of the material roughness. %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6929/
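For context on the exposure-bracketing setting the thesis builds on, the sketch below shows the standard weighted merge of differently exposed frames into a single relative radiance map. The hat-shaped confidence weight and the assumption of linear pixel values are illustrative defaults, not the thesis's exposure selection or calibration algorithms.

import numpy as np

def merge_exposures(frames, exposure_times):
    """frames: list of float images in [0, 1]; returns a relative radiance map."""
    num = np.zeros_like(frames[0])
    den = np.zeros_like(frames[0])
    for img, t in zip(frames, exposure_times):
        w = 1.0 - np.abs(2.0 * img - 1.0)            # trust mid-tones, distrust clipped pixels
        num += w * img / t
        den += w
    return num / np.maximum(den, 1e-6)

short = np.random.rand(4, 4) * 0.5                   # toy bracketed pair
long_ = np.clip(short * 4.0, 0.0, 1.0)
print(merge_exposures([short, long_], [0.01, 0.04]).round(2))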
Nalbach, O. 2017. Smarter Screen Space Shading. urn:nbn:de:bsz:291-scidok-ds-269289.
Abstract
This dissertation introduces a range of new methods to produce images of virtual scenes in a matter of milliseconds. Imposing as few constraints as possible on the set of scenes that can be handled, e.g., regarding geometric changes over time or lighting conditions, precludes pre-computations and makes this a particularly difficult problem. We first present a general approach, called deep screen space, using which a variety of light transport aspects can be simulated within the aforementioned setting. This approach is then further extended to additionally handle scenes containing participating media like clouds. We also show how to improve the correctness of deep screen space and related algorithms by accounting for mutual visibility of points in a scene. After that, we take a completely different point of view on image generation using a learning-based approach to approximate a rendering function. We show that neural networks can hallucinate shading effects which otherwise have to be computed using costly analytic computations. Finally, we contribute a holistic framework to deal with phosphorescent materials in computer graphics, covering all aspects from acquisition of real materials, to easy editing, to image synthesis.
Export
BibTeX
@phdthesis{Nalbachphd2017, TITLE = {Smarter Screen Space Shading}, AUTHOR = {Nalbach, Oliver}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-ds-269289}, DOI = {10.22028/D291-26928}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {This dissertation introduces a range of new methods to produce images of virtual scenes in a matter of milliseconds. Imposing as few constraints as possible on the set of scenes that can be handled, e.g., regarding geometric changes over time or lighting conditions, precludes pre-computations and makes this a particularly difficult problem. We first present a general approach, called deep screen space, using which a variety of light transport aspects can be simulated within the aforementioned setting. This approach is then further extended to additionally handle scenes containing participating media like clouds. We also show how to improve the correctness of deep screen space and related algorithms by accounting for mutual visibility of points in a scene. After that, we take a completely different point of view on image generation using a learning-based approach to approximate a rendering function. We show that neural networks can hallucinate shading effects which otherwise have to be computed using costly analytic computations. Finally, we contribute a holistic framework to deal with phosphorescent materials in computer graphics, covering all aspects from acquisition of real materials, to easy editing, to image synthesis.}, }
Endnote
%0 Thesis %A Nalbach, Oliver %Y Seidel, Hans-Peter %A referee: Ritschel, Tobias %A referee: Gutièrrez, Diego %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Smarter Screen Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-892C-7 %U urn:nbn:de:bsz:291-scidok-ds-269289 %R 10.22028/D291-26928 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P 123 p. %V phd %9 phd %X This dissertation introduces a range of new methods to produce images of virtual scenes in a matter of milliseconds. Imposing as few constraints as possible on the set of scenes that can be handled, e.g., regarding geometric changes over time or lighting conditions, precludes pre-computations and makes this a particularly difficult problem. We first present a general approach, called deep screen space, using which a variety of light transport aspects can be simulated within the aforementioned setting. This approach is then further extended to additionally handle scenes containing participating media like clouds. We also show how to improve the correctness of deep screen space and related algorithms by accounting for mutual visibility of points in a scene. After that, we take a completely different point of view on image generation using a learning-based approach to approximate a rendering function. We show that neural networks can hallucinate shading effects which otherwise have to be computed using costly analytic computations. Finally, we contribute a holistic framework to deal with phosphorescent materials in computer graphics, covering all aspects from acquisition of real materials, to easy editing, to image synthesis. %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/26896
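One thread of the dissertation above is that a neural network can approximate, per pixel, shading that would otherwise require costly analytic computation. The sketch below only illustrates the shape of such a screen-space regressor: a tiny random-weight MLP mapping assumed deferred-shading attributes (normal, depth, albedo) to a colour; a real system would of course train the weights on rendered examples, and the attribute layout and layer sizes are assumptions.

import numpy as np

rng = np.random.default_rng(0)
W1, b1 = rng.normal(size=(7, 32)) * 0.1, np.zeros(32)
W2, b2 = rng.normal(size=(32, 3)) * 0.1, np.zeros(3)

def shade_pixels(gbuffer):
    """gbuffer: (H, W, 7) = normal (3) + depth (1) + albedo (3) -> (H, W, 3) colour."""
    h = np.maximum(gbuffer @ W1 + b1, 0.0)           # ReLU hidden layer
    return 1.0 / (1.0 + np.exp(-(h @ W2 + b2)))      # sigmoid keeps colours in [0, 1]

attrs = rng.random((240, 320, 7))                    # toy per-pixel attributes
print(shade_pixels(attrs).shape)                     # (240, 320, 3)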
Weigel, M. 2017. Interactive On-Skin Devices for Expressive Touch-based Interactions. urn:nbn:de:bsz:291-scidok-68857.
Abstract
Skin has been proposed as a large, always-available, and easy to access input surface for mobile computing. However, it is fundamentally different than prior rigid devices: skin is elastic, highly curved, and provides tactile sensation. This thesis advances the understanding of skin as an input surface and contributes novel skin-worn devices and their interaction techniques. We present the findings from an elicitation study on how and where people interact on their skin. The findings show that participants use various body locations for on-skin interaction. Moreover, they show that skin allows for expressive interaction using multi-touch input and skin-specific modalities. We contribute three skin-worn device classes and their interaction techniques to enable expressive on-skin interactions: iSkin investigates multi-touch and pressure input on various body locations. SkinMarks supports touch, squeeze, and bend sensing with co-located visual output. The devices' conformality to skin enables interaction on highly challenging body locations. Finally, ExpressSkin investigates expressive interaction techniques using fluid combinations of high-resolution pressure, shear, and squeeze input. Taken together, this thesis contributes towards expressive on-skin interaction with multi-touch and skin-specific input modalities on various body locations.
Export
BibTeX
@phdthesis{Weigelphd17, TITLE = {Interactive On-Skin Devices for Expressive Touch-based Interactions}, AUTHOR = {Weigel, Martin}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-68857}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, ABSTRACT = {Skin has been proposed as a large, always-available, and easy to access input surface for mobile computing. However, it is fundamentally different than prior rigid devices: skin is elastic, highly curved, and provides tactile sensation. This thesis advances the understanding of skin as an input surface and contributes novel skin-worn devices and their interaction techniques. We present the findings from an elicitation study on how and where people interact on their skin. The findings show that participants use various body locations for on-skin interaction. Moreover, they show that skin allows for expressive interaction using multi-touch input and skin-specific modalities. We contribute three skin-worn device classes and their interaction techniques to enable expressive on-skin interactions: iSkin investigates multi-touch and pressure input on various body locations. SkinMarks supports touch, squeeze, and bend sensing with co-located visual output. The devices' conformality to skin enables interaction on highly challenging body locations. Finally, ExpressSkin investigates expressive interaction techniques using fluid combinations of high-resolution pressure, shear, and squeeze input. Taken together, this thesis contributes towards expressive on-skin interaction with multi-touch and skin-specific input modalities on various body locations.}, }
Endnote
%0 Thesis %A Weigel, Martin %Y Steimle, Jürgen %A referee: Olwal, Alex %A referee: Krüger, Antonio %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Interactive On-Skin Devices for Expressive Touch-based Interactions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-904F-D %U urn:nbn:de:bsz:291-scidok-68857 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P 153 p. %V phd %9 phd %X Skin has been proposed as a large, always-available, and easy to access input surface for mobile computing. However, it is fundamentally different than prior rigid devices: skin is elastic, highly curved, and provides tactile sensation. This thesis advances the understanding of skin as an input surface and contributes novel skin-worn devices and their interaction techniques. We present the findings from an elicitation study on how and where people interact on their skin. The findings show that participants use various body locations for on-skin interaction. Moreover, they show that skin allows for expressive interaction using multi-touch input and skin-specific modalities. We contribute three skin-worn device classes and their interaction techniques to enable expressive on-skin interactions: iSkin investigates multi-touch and pressure input on various body locations. SkinMarks supports touch, squeeze, and bend sensing with co-located visual output. The devices' conformality to skin enables interaction on highly challenging body locations. Finally, ExpressSkin investigates expressive interaction techniques using fluid combinations of high-resolution pressure, shear, and squeeze input. Taken together, this thesis contributes towards expressive on-skin interaction with multi-touch and skin-specific input modalities on various body locations. %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6885/ %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de
Wu, X. 2017. Structure-aware Content Creation. urn:nbn:de:bsz:291-scidok-67750.
Abstract
Nowadays, access to digital information has become ubiquitous, while three-dimensional visual representation is becoming indispensable to knowledge understanding and information retrieval. Three-dimensional digitization plays a natural role in bridging the real and virtual worlds, which prompts a huge demand for massive three-dimensional digital content. But reducing the effort required for three-dimensional modeling has been a practical problem and a long-standing challenge in computer graphics and related fields. In this thesis, we propose several techniques for easing the content creation process, which share the common theme of being structure-aware, i.e., maintaining global relations among the parts of a shape. We are especially interested in formulating our algorithms such that they make use of symmetry structures, because their concise yet highly abstract principles are universally applicable to most regular patterns. We introduce our work from three different aspects in this thesis. First, we characterized the space of symmetry-preserving deformations and developed a method to explore this space in real time, which significantly simplifies the generation of symmetry-preserving shape variants. Second, we empirically studied three-dimensional offset statistics and developed a fully automatic retargeting application based on the verified sparsity. Finally, we made a step forward in solving the approximate three-dimensional partial symmetry detection problem using a novel co-occurrence analysis method, which can serve as a foundation for high-level applications.
Export
BibTeX
@phdthesis{wuphd2017, TITLE = {Structure-aware Content Creation}, AUTHOR = {Wu, Xiaokun}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-67750}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, ABSTRACT = {Nowadays, access to digital information has become ubiquitous, while three-dimensional visual representation is becoming indispensable to knowledge understanding and information retrieval. Three-dimensional digitization plays a natural role in bridging the real and virtual worlds, which prompts a huge demand for massive three-dimensional digital content. But reducing the effort required for three-dimensional modeling has been a practical problem and a long-standing challenge in computer graphics and related fields. In this thesis, we propose several techniques for easing the content creation process, which share the common theme of being structure-aware, i.e., maintaining global relations among the parts of a shape. We are especially interested in formulating our algorithms such that they make use of symmetry structures, because their concise yet highly abstract principles are universally applicable to most regular patterns. We introduce our work from three different aspects in this thesis. First, we characterized the space of symmetry-preserving deformations and developed a method to explore this space in real time, which significantly simplifies the generation of symmetry-preserving shape variants. Second, we empirically studied three-dimensional offset statistics and developed a fully automatic retargeting application based on the verified sparsity. Finally, we made a step forward in solving the approximate three-dimensional partial symmetry detection problem using a novel co-occurrence analysis method, which can serve as a foundation for high-level applications.}, }
Endnote
%0 Thesis %A Wu, Xiaokun %Y Seidel, Hans-Peter %A referee: Wand, Michael %A referee: Hildebrandt, Klaus %A referee: Klein, Reinhard %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Structure-aware Content Creation : Detection, Retargeting and Deformation %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-8072-6 %U urn:nbn:de:bsz:291-scidok-67750 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P viii, 61 p. %V phd %9 phd %X Nowadays, access to digital information has become ubiquitous, while three-dimensional visual representation is becoming indispensable to knowledge understanding and information retrieval. Three-dimensional digitization plays a natural role in bridging the real and virtual worlds, which prompts a huge demand for massive three-dimensional digital content. But reducing the effort required for three-dimensional modeling has been a practical problem and a long-standing challenge in computer graphics and related fields. In this thesis, we propose several techniques for easing the content creation process, which share the common theme of being structure-aware, i.e., maintaining global relations among the parts of a shape. We are especially interested in formulating our algorithms such that they make use of symmetry structures, because their concise yet highly abstract principles are universally applicable to most regular patterns. We introduce our work from three different aspects in this thesis. First, we characterized the space of symmetry-preserving deformations and developed a method to explore this space in real time, which significantly simplifies the generation of symmetry-preserving shape variants. Second, we empirically studied three-dimensional offset statistics and developed a fully automatic retargeting application based on the verified sparsity. Finally, we made a step forward in solving the approximate three-dimensional partial symmetry detection problem using a novel co-occurrence analysis method, which can serve as a foundation for high-level applications. %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6775/ %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de