Philipp Müller (PhD student)

MSc Philipp Müller

Address
Max-Planck-Institut für Informatik
Saarland Informatics Campus
Campus E1 4
66123 Saarbrücken
Location
E1 4 - Room 623
Phone
+49 681 9325 2133
Fax
+49 681 9325 2099

Personal Information

For more information and a list of publications, visit my website.

Publications

Müller, P., & Bulling, A. (2019). Emergent Leadership Detection Across Datasets. In 2019 International Conference on Multimodal Interaction (ICMI 2019). Suzhou, China: ACM. doi:10.1145/3340555.3353721
(Accepted/in press)
Abstract
Automatic detection of emergent leaders in small groups from nonverbal behaviour is a growing research topic in social signal processing but existing methods were evaluated on single datasets -- an unrealistic assumption for real-world applications in which systems are required to also work in settings unseen at training time. It therefore remains unclear whether current methods for emergent leadership detection generalise to similar but new settings and to which extent. To overcome this limitation, we are the first to study a cross-dataset evaluation setting for the emergent leadership detection task. We provide evaluations for within- and cross-dataset prediction using two current datasets (PAVIS and MPIIGroupInteraction), as well as an investigation on the robustness of commonly used feature channels (visual focus of attention, body pose, facial action units, speaking activity) and online prediction in the cross-dataset setting. Our evaluations show that using pose and eye contact based features, cross-dataset prediction is possible with an accuracy of 0.68, as such providing another important piece of the puzzle towards emergent leadership detection in the real world.
Müller, P., Buschek, D., Huang, M. X., & Bulling, A. (2019). Reducing Calibration Drift in Mobile Eye Trackers by Exploiting Mobile Phone Usage. In Proceedings ETRA 2019. Denver, CO, USA: ACM. doi:10.1145/3314111.3319918
Müller, P., & Bulling, A. (2019). Emergent Leadership Detection Across Datasets. Retrieved from http://arxiv.org/abs/1905.02058
(arXiv: 1905.02058)
Abstract
Automatic detection of emergent leaders in small groups from nonverbal behaviour is a growing research topic in social signal processing but existing methods were evaluated on single datasets -- an unrealistic assumption for real-world applications in which systems are required to also work in settings unseen at training time. It therefore remains unclear whether current methods for emergent leadership detection generalise to similar but new settings and to which extent. To overcome this limitation, we are the first to study a cross-dataset evaluation setting for the emergent leadership detection task. We provide evaluations for within- and cross-dataset prediction using two current datasets (PAVIS and MPIIGroupInteraction), as well as an investigation on the robustness of commonly used feature channels (visual focus of attention, body pose, facial action units, speaking activity) and online prediction in the cross-dataset setting. Our evaluations show that using pose and eye contact based features, cross-dataset prediction is possible with an accuracy of 0.68, as such providing another important piece of the puzzle towards emergent leadership detection in the real world.
Müller, P., Huang, M. X., Zhang, X., & Bulling, A. (2018). Robust Eye Contact Detection in Natural Multi-Person Interactions Using Gaze and Speaking Behaviour. In Proceedings ETRA 2018. Warsaw, Poland: ACM. doi:10.1145/3204493.3204549
Müller, P., Huang, M. X., & Bulling, A. (2018). Detecting Low Rapport During Natural Interactions in Small Groups from Non-Verbal Behaviour. In IUI 2018, 23rd International Conference on Intelligent User Interfaces. Tokyo, Japan: ACM. doi:10.1145/3172944.3172969
Steil, J., Müller, P., Sugano, Y., & Bulling, A. (2018). Forecasting User Attention During Everyday Mobile Interactions Using Device-Integrated and Wearable Sensors. In MobileHCI 2018, 20th International Conference on Human-Computer Interaction with Mobile Devices and Services. Barcelona, Spain: ACM. doi:10.1145/3229434.3229439
Müller, P., Amin, S., Verma, P., Andriluka, M., & Bulling, A. (2015). Emotion Recognition from Embedded Bodily Expressions and Speech During Dyadic Interactions. In International Conference on Affective Computing and Intelligent Interaction (ACII 2015). Xi’an, China: IEEE Computer Society. doi:10.1109/ACII.2015.7344640
Müller, P. (2015). Learning to Choose Optimal Viewpoints for Pose Estimation of 3D Objects (Master's thesis). Universität des Saarlandes, Saarbrücken.
Amin, S., Müller, P., Bulling, A., & Andriluka, M. (2014). Test-time Adaptation for 3D Human Pose Estimation. In Pattern Recognition (GCPR 2014). Münster, Germany: Springer. doi:10.1007/978-3-319-11752-2_20