Last Year

Master
[1]
T.-P. Nguyen, “Advanced Semantics for Commonsense Knowledge Extraction,” Universität des Saarlandes, Saarbrücken, 2020.
Abstract
Commonsense knowledge (CSK) about concepts and their properties is useful for AI applications such as robust chatbots. Prior works like ConceptNet, TupleKB and others compiled large CSK collections, but are restricted in their expressiveness to subject-predicate-object (SPO) triples with simple concepts for S and monolithic strings for P and O. Also, these projects have either prioritized precision or recall, but hardly reconcile these complementary goals. This thesis presents a methodology, called Ascent, to automatically build a large-scale knowledge base (KB) of CSK assertions, with advanced expressiveness and both better precision and recall than prior works. Ascent goes beyond triples by capturing composite concepts with subgroups and aspects, and by refining assertions with semantic facets. The latter are important to express temporal and spatial validity of assertions and further qualifiers. Ascent combines open information extraction with judicious cleaning using language models. Intrinsic evaluation shows the superior size and quality of the Ascent KB, and an extrinsic evaluation for QA-support tasks underlines the benefits of Ascent.
Export
BibTeX
@mastersthesis{NguyenMSc2020,
  title        = {Advanced Semantics for Commonsense Knowledge Extraction},
  author       = {Nguyen, Tuan-Phong},
  language     = {eng},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
  abstract     = {Commonsense knowledge (CSK) about concepts and their properties is useful for AI applications such as robust chatbots. Prior works like ConceptNet, TupleKB and others compiled large CSK collections, but are restricted in their expressiveness to subject-predicate-object (SPO) triples with simple concepts for S and monolithic strings for P and O. Also, these projects have either prioritized precision or recall, but hardly reconcile these complementary goals. This thesis presents a methodology, called Ascent, to automatically build a large-scale knowledge base (KB) of CSK assertions, with advanced expressiveness and both better precision and recall than prior works. Ascent goes beyond triples by capturing composite concepts with subgroups and aspects, and by refining assertions with semantic facets. The latter are important to express temporal and spatial validity of assertions and further qualifiers. Ascent combines open information extraction with judicious cleaning using language models. Intrinsic evaluation shows the superior size and quality of the Ascent KB, and an extrinsic evaluation for QA-support tasks underlines the benefits of Ascent.},
}
Endnote
%0 Thesis %A Nguyen, Tuan-Phong %Y Razniewski, Simon %+ Databases and Information Systems, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Advanced Semantics for Commonsense Knowledge Extraction : %G eng %U http://hdl.handle.net/21.11116/0000-0007-FED0-0 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 67 p. %V master %9 master %X Commonsense knowledge (CSK) about concepts and their properties is useful for AI applications such as robust chatbots. Prior works like ConceptNet, TupleKB and others compiled large CSK collections, but are restricted in their expressiveness to subject-predicate-object (SPO) triples with simple concepts for S and monolithic strings for P and O. Also, these projects have either prioritized precision or recall, but hardly reconcile these complementary goals. This thesis presents a methodology, called Ascent, to automatically build a large-scale knowledge base (KB) of CSK assertions, with advanced expressiveness and both better precision and recall than prior works. Ascent goes beyond triples by capturing composite concepts with subgroups and aspects, and by refining assertions with semantic facets. The latter are important to express temporal and spatial validity of assertions and further qualifiers. Ascent combines open information extraction with judicious cleaning using language models. Intrinsic evaluation shows the superior size and quality of the Ascent KB, and an extrinsic evaluation for QA-support tasks underlines the benefits of Ascent.
[2]
V. Skripniuk, “Watermarking for generative adversarial networks,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@mastersthesis{SkriMaster2020,
  title        = {Watermarking for generative adversarial networks},
  author       = {Skripniuk, Vlasislav},
  language     = {eng},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
}
Endnote
%0 Thesis %A Skripniuk, Vlasislav %A referee: Fritz, Mario %A contributor: Zhang, Yang %+ International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations %T Watermarking for generative adversarial networks : %G eng %U http://hdl.handle.net/21.11116/0000-0008-45A1-4 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 69 p. %V master %9 master
PhD
[3]
F. Behjati Ardakani, “Computational models of gene expression regulation,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{Ardadiss_2019,
  title         = {Computational models of gene expression regulation},
  author        = {Behjati Ardakani, Fatemeh},
  language      = {eng},
  doi           = {10.22028/D291-30206},
  school        = {Universit{\"a}t des Saarlandes},
  address       = {Saarbr{\"u}cken},
  year          = {2020},
  marginalmark  = {$\bullet$},
  date          = {2020},
  internal-note = {DOI stripped of resolver prefix; NOTE(review): same DOI also appears on entry Meka_2019 in this file -- presumably one of the two records carries the wrong DOI, verify against the SULB repository},
}
Endnote
%0 Thesis %A Behjati Ardakani, Fatemeh %Y Schulz, Marcel %A referee: Marschall, Tobias %+ Computational Biology and Applied Algorithmics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computational Biology and Applied Algorithmics, MPI for Informatics, Max Planck Society Computational Biology and Applied Algorithmics, MPI for Informatics, Max Planck Society %T Computational models of gene expression regulation : %G eng %U http://hdl.handle.net/21.11116/0000-0007-7163-A %R http://dx.doi.org/10.22028/D291-30206 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 170 p. %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/29766
[4]
K. Budhathoki, “Causal Inference on Discrete Data,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{BudDiss_2020,
  title        = {Causal Inference on Discrete Data},
  author       = {Budhathoki, Kailash},
  language     = {eng},
  url          = {urn:nbn:de:bsz:291--ds-329528},
  doi          = {10.22028/D291-32952},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
}
Endnote
%0 Thesis %A Budhathoki, Kailash %Y Vreeken, Jilles %A referee: Weikum, Gerhard %A referee: Heskes, Tom %+ Databases and Information Systems, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T Causal Inference on Discrete Data : %G eng %U http://hdl.handle.net/21.11116/0000-0007-FE73-A %R 10.22028/D291-32952 %U urn:nbn:de:bsz:291--ds-329528 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 171 p. %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/30501
[5]
M. Fleury, “Formalization of Logical Calculi in Isabelle/HOL,” Universität des Saarlandes, Saarbrücken, 2020.
Abstract
I develop a formal framework for propositional satisfiability with the conflict-driven clause learning (CDCL) procedure using the Isabelle/HOL proof assistant. The framework offers a convenient way to prove metatheorems and experiment with variants, including the Davis-Putnam-Logemann-Loveland procedure. The most noteworthy aspects of my work are the inclusion of rules for forget and restart and the refinement approach. I use the formalization to develop three extensions: First, an incremental solving extension of CDCL. Second, I verify an optimizing CDCL (OCDCL): Given a cost function on literals, OCDCL derives an optimal model with minimum cost. Finally, I work on model covering. Thanks to the CDCL framework I can reuse, these extensions are easier to develop. Through a chain of refinements, I connect the abstract CDCL calculus first to a more concrete calculus, then to a SAT solver expressed in a simple functional programming language, and finally to a SAT solver in an imperative language, with total correctness guarantees. The imperative version relies on the two-watched-literal data structure and other optimizations found in modern solvers. I used the Isabelle Refinement Framework to automate the most tedious refinement steps. After that, I extend this work with further optimizations like blocking literals and the use of machine words as long as possible, before switching to unbounded integers to keep completeness.
Export
BibTeX
@phdthesis{Fleuryphd2019,
  title        = {Formalization of Logical Calculi in Isabelle/{HOL}},
  author       = {Fleury, Mathias},
  language     = {eng},
  doi          = {10.22028/D291-30179},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
  abstract     = {I develop a formal framework for propositional satisfiability with the conflict-driven clause learning (CDCL) procedure using the Isabelle/HOL proof assistant. The framework offers a convenient way to prove metatheorems and experiment with variants, including the Davis-Putnam-Logemann-Loveland procedure. The most noteworthy aspects of my work are the inclusion of rules for forget and restart and the refinement approach. I use the formalization to develop three extensions: First, an incremental solving extension of CDCL. Second, I verify an optimizing CDCL (OCDCL): Given a cost function on literals, OCDCL derives an optimal model with minimum cost. Finally, I work on model covering. Thanks to the CDCL framework I can reuse, these extensions are easier to develop. Through a chain of refinements, I connect the abstract CDCL calculus first to a more concrete calculus, then to a SAT solver expressed in a simple functional programming language, and finally to a SAT solver in an imperative language, with total correctness guarantees. The imperative version relies on the two-watched-literal data structure and other optimizations found in modern solvers. I used the Isabelle Refinement Framework to automate the most tedious refinement steps. After that, I extend this work with further optimizations like blocking literals and the use of machine words as long as possible, before switching to unbounded integers to keep completeness.},
}
Endnote
%0 Thesis %A Fleury, Mathias %Y Weidenbach, Christoph %A referee: Biere, Armin %+ Automation of Logic, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Automation of Logic, MPI for Informatics, Max Planck Society External Organizations %T Formalization of Logical Calculi in Isabelle/HOL : %G eng %U http://hdl.handle.net/21.11116/0000-0005-AE07-0 %R 10.22028/D291-30179 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 169 p. %V phd %9 phd %X I develop a formal framework for propositional satifisfiability with the conflict-driven clause learning (CDCL) procedure using the Isabelle/HOL proof assistant. The framework offers a convenient way to prove metatheorems and experiment with variants, including the Davis-Putnam-Logemann-Loveland procedure. The most noteworthy aspects of my work are the inclusion of rules for forget and restart and the refinement approach. I use the formalization to develop three extensions: First, an incremental solving extension of CDCL. Second, I verify an optimizing CDCL (OCDCL): Given a cost function on literals, OCDCL derives an optimal model with minimum cost. Finally, I work on model covering. Thanks to the CDCL framework I can reuse, these extensions are easier to develop. Through a chain of refinements, I connect the abstract CDCL calculus first to a more concrete calculus, then to a SAT solver expressed in a simple functional programming language, and finally to a SAT solver in an imperative language, with total correctness guarantees. The imperative version relies on the two-watched-literal data structure and other optimizations found in modern solvers. I used the Isabelle Refinement Framework to automate the most tedious refinement steps. After that, I extend this work with further optimizations like blocking literals and the use of machine words as long as possible, before switching to unbounded integers to keep completeness. 
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28722
[6]
Y. He, “Improved Methods and Analysis for Semantic Image Segmentation,” Universität des Saarlandes, Saarbrücken, 2020.
Abstract
Modern deep learning has enabled amazing developments of computer vision in recent years (Hinton and Salakhutdinov, 2006; Krizhevsky et al., 2012). As a fundamental task, semantic segmentation aims to predict class labels for each pixel of images, which empowers machines perception of the visual world. In spite of recent successes of fully convolutional networks (Long et al., 2015), several challenges remain to be addressed. In this thesis, we focus on this topic, under different kinds of input formats and various types of scenes. Specifically, our study contains two aspects: (1) Data-driven neural modules for improved performance. (2) Leverage of datasets w.r.t. training systems with higher performances and better data privacy guarantees. In the first part of this thesis, we improve semantic segmentation by designing new modules which are compatible with existing architectures. First, we develop a spatio-temporal data-driven pooling, which brings additional information of data (i.e. superpixels) into neural networks, benefiting the training of neural networks as well as the inference on novel data. We investigate our approach in RGB-D videos for segmenting indoor scenes, where depth provides complementary cues to colors and our model performs particularly well. Second, we design learnable dilated convolutions, which are the extension of standard dilated convolutions, whose dilation factors (Yu and Koltun, 2016) need to be carefully determined by hand to obtain decent performance. We present a method to learn dilation factors together with filter weights of convolutions to avoid a complicated search of dilation factors. We explore extensive studies on challenging street scenes, across various baselines with different complexity as well as several datasets at varying image resolutions. In the second part, we investigate how to utilize expensive training data.
First, we start from the generative modelling and study the network architectures and the learning pipeline for generating multiple examples. We aim to improve the diversity of generated examples but also to preserve the comparable quality of the examples. Second, we develop a generative model for synthesizing features of a network. With a mixture of real images and synthetic features, we are able to train a segmentation model with better generalization capability. Our approach is evaluated on different scene parsing tasks to demonstrate the effectiveness of the proposed method. Finally, we study membership inference on the semantic segmentation task. We propose the first membership inference attack system against black-box semantic segmentation models, that tries to infer if a data pair is used as training data or not. From our observations, information on training data is indeed leaking. To mitigate the leakage, we leverage our synthetic features to perform prediction obfuscations, reducing the posterior distribution gaps between a training and a testing set. Consequently, our study provides not only an approach for detecting illegal use of data, but also the foundations for a safer use of semantic segmentation models.
Export
BibTeX
@phdthesis{HEphd2019,
  title        = {Improved Methods and Analysis for Semantic Image Segmentation},
  author       = {He, Yang},
  language     = {eng},
  doi          = {10.22028/D291-30218},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
  abstract     = {Modern deep learning has enabled amazing developments of computer vision in recent years (Hinton and Salakhutdinov, 2006; Krizhevsky et al., 2012). As a fundamental task, semantic segmentation aims to predict class labels for each pixel of images, which empowers machines perception of the visual world. In spite of recent successes of fully convolutional networks (Long et al., 2015), several challenges remain to be addressed. In this thesis, we focus on this topic, under different kinds of input formats and various types of scenes. Specifically, our study contains two aspects: (1) Data-driven neural modules for improved performance. (2) Leverage of datasets w.r.t. training systems with higher performances and better data privacy guarantees. In the first part of this thesis, we improve semantic segmentation by designing new modules which are compatible with existing architectures. First, we develop a spatio-temporal data-driven pooling, which brings additional information of data (i.e. superpixels) into neural networks, benefiting the training of neural networks as well as the inference on novel data. We investigate our approach in RGB-D videos for segmenting indoor scenes, where depth provides complementary cues to colors and our model performs particularly well. Second, we design learnable dilated convolutions, which are the extension of standard dilated convolutions, whose dilation factors (Yu and Koltun, 2016) need to be carefully determined by hand to obtain decent performance. We present a method to learn dilation factors together with filter weights of convolutions to avoid a complicated search of dilation factors. We explore extensive studies on challenging street scenes, across various baselines with different complexity as well as several datasets at varying image resolutions. In the second part, we investigate how to utilize expensive training data. First, we start from the generative modelling and study the network architectures and the learning pipeline for generating multiple examples. We aim to improve the diversity of generated examples but also to preserve the comparable quality of the examples. Second, we develop a generative model for synthesizing features of a network. With a mixture of real images and synthetic features, we are able to train a segmentation model with better generalization capability. Our approach is evaluated on different scene parsing tasks to demonstrate the effectiveness of the proposed method. Finally, we study membership inference on the semantic segmentation task. We propose the first membership inference attack system against black-box semantic segmentation models, that tries to infer if a data pair is used as training data or not. From our observations, information on training data is indeed leaking. To mitigate the leakage, we leverage our synthetic features to perform prediction obfuscations, reducing the posterior distribution gaps between a training and a testing set. Consequently, our study provides not only an approach for detecting illegal use of data, but also the foundations for a safer use of semantic segmentation models.},
}
Endnote
%0 Thesis %A He, Yang %Y Fritz, Mario %A referee: Schiele, Bernt %A referee: Denzler, Joachim %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations %T Improved Methods and Analysis for Semantic Image Segmentation : %G eng %U http://hdl.handle.net/21.11116/0000-0005-C0DD-9 %R 10.22028/D291-30218 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 162 p. %V phd %9 phd %X Modern deep learning has enabled amazing developments of computer vision in recent years (Hinton and Salakhutdinov, 2006; Krizhevsky et al., 2012). As a fundamental task, semantic segmentation aims to predict class labels for each pixel of images, which empowers machines perception of the visual world. In spite of recent successes of fully convolutional networks (Long etal., 2015), several challenges remain to be addressed. In this thesis, we focus on this topic, under different kinds of input formats and various types of scenes. Specifically, our study contains two aspects: (1) Data-driven neural modules for improved performance. (2) Leverage of datasets w.r.t.training systems with higher performances and better data privacy guarantees. In the first part of this thesis, we improve semantic segmentation by designing new modules which are compatible with existing architectures. First, we develop a spatio-temporal data-driven pooling, which brings additional information of data (i.e. superpixels) into neural networks, benefiting the training of neural networks as well as the inference on novel data. We investigate our approach in RGB-D videos for segmenting indoor scenes, where depth provides complementary cues to colors and our model performs particularly well. 
Second, we design learnable dilated convolutions, which are the extension of standard dilated convolutions, whose dilation factors (Yu and Koltun, 2016) need to be carefully determined by hand to obtain decent performance. We present a method to learn dilation factors together with filter weights of convolutions to avoid a complicated search of dilation factors. We explore extensive studies on challenging street scenes, across various baselines with different complexity as well as several datasets at varying image resolutions. In the second part, we investigate how to utilize expensive training data. First, we start from the generative modelling and study the network architectures and the learning pipeline for generating multiple examples. We aim to improve the diversity of generated examples but also to preserve the comparable quality of the examples. Second, we develop a generative model for synthesizing features of a network. With a mixture of real images and synthetic features, we are able to train a segmentation model with better generalization capability. Our approach is evaluated on different scene parsing tasks to demonstrate the effectiveness of the proposed method. Finally, we study membership inference on the semantic segmentation task. We propose the first membership inference attack system against black-box semantic segmentation models, that tries to infer if a data pair is used as training data or not. From our observations, information on training data is indeed leaking. To mitigate the leakage, we leverage our synthetic features to perform prediction obfuscations, reducing the posterior distribution gaps between a training and a testing set. Consequently, our study provides not only an approach for detecting illegal use of data, but also the foundations for a safer use of semantic segmentation models. %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28784
[7]
E. Insafutdinov, “Towards Accurate Multi-Person Pose Estimation in the Wild,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{Insaphd2020,
  title        = {Towards Accurate Multi-Person Pose Estimation in the Wild},
  author       = {Insafutdinov, Eldar},
  language     = {eng},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
}
Endnote
%0 Thesis %A Insafutdinov, Eldar %Y Schiele, Bernt %A referee: Rosenhahn, Bodo %A referee: Denj, Jia %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Towards Accurate Multi-Person Pose Estimation in the Wild : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8173-4 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 161 p. %V phd %9 phd
[8]
M. John, “Of Keyboards and Beyond,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{John_2019,
  title         = {Of Keyboards and Beyond},
  author        = {John, Maximilian},
  language      = {eng},
  doi           = {10.22028/D291-30635},
  school        = {Universit{\"a}t des Saarlandes},
  address       = {Saarbr{\"u}cken},
  year          = {2020},
  marginalmark  = {$\bullet$},
  date          = {2020},
  internal-note = {language field added for consistency with sibling entries; NOTE(review): the Endnote record gives the fuller title "Of Keyboards and Beyond : Optimization in Human-Computer Interaction" -- consider adding the subtitle after verification},
}
Endnote
%0 Thesis %A John, Maximilian %Y Karrenbauer, Andreas %A referee: Mehlhorn, Kurt %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Of Keyboards and Beyond : Optimization in Human-Computer Interaction %U http://hdl.handle.net/21.11116/0000-0007-7152-D %R 10.22028/D291-30635 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 91 p. %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28954
[9]
A. Meka, “Live inverse rendering,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{Meka_2019,
  title         = {Live inverse rendering},
  author        = {Meka, Abhimitra},
  language      = {eng},
  doi           = {10.22028/D291-30206},
  school        = {Universit{\"a}t des Saarlandes},
  address       = {Saarbr{\"u}cken},
  year          = {2020},
  marginalmark  = {$\bullet$},
  date          = {2020},
  internal-note = {DOI stripped of resolver prefix; NOTE(review): same DOI also appears on entry Ardadiss_2019 in this file -- presumably one of the two records carries the wrong DOI, verify against the SULB repository},
}
Endnote
%0 Thesis %A Meka, Abhimitra %Y Theobalt, Christian %A referee: Drettakis, George %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Live inverse rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0007-715A-5 %R http://dx.doi.org/10.22028/D291-30206 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 189 p. %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28721
[10]
F. Mueller, “Real-time 3D Hand Reconstruction in Challenging Scenes from a Single Color or Depth Camera,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{MuellerFDiss_2020,
  title        = {Real-time 3{D} Hand Reconstruction in Challenging Scenes from a Single Color or Depth Camera},
  author       = {Mueller, Franziska},
  language     = {eng},
  doi          = {10.22028/D291-32846},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
}
Endnote
%0 Thesis %A Mueller, Franziska %Y Theobalt, Christian %A referee: Seidel, Hans-Peter %A referee: Izadi, Shahram %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Real-time 3D Hand Reconstruction in Challenging Scenes from a Single Color or Depth Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0007-D8C7-5 %R 10.22028/D291-32846 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 155 p. %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/30313
[11]
P. Müller, “Sensing, Interpreting, and Anticipating Human Social Behaviour in the Real World,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{MuellerDiss_2019,
  title        = {Sensing, Interpreting, and Anticipating Human Social Behaviour in the Real World},
  author       = {M{\"u}ller, Philipp},
  language     = {eng},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
}
Endnote
%0 Thesis %A Müller, Philipp %Y Bulling, Andreas %A referee: André, Elisabeth %A referee: Krüger, Antonio %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Sensing, Interpreting, and Anticipating Human Social Behaviour in the Real World : %G eng %U http://hdl.handle.net/21.11116/0000-0007-D8BB-3 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 224 p. %V phd %9 phd
[12]
T. Orekondy, “Understanding and Controlling Leakage in Machine Learning,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{Orekondy_PhD2020,
  title        = {Understanding and Controlling Leakage in Machine Learning},
  author       = {Orekondy, Tribhuvanesh},
  language     = {eng},
  url          = {urn:nbn:de:bsz:291--ds-335519},
  doi          = {10.22028/D291-33551},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
}
Endnote
%0 Thesis %A Orekondy, Tribhuvanesh %Y Schiele, Bernt %A referee: Fritz, Mario %A referee: Frahm, Jan-Michael %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Understanding and Controlling Leakage in Machine Learning : %G eng %U http://hdl.handle.net/21.11116/0000-0008-4ACC-0 %R 10.22028/D291-33551 %U urn:nbn:de:bsz:291--ds-335519 %I Universität des Saarlandes %C Saarbrücken %D 2020 %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/30989
[13]
D. Vaz, “Approximation Algorithms for Network Design and Cut Problems in Bounded-Treewidth,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{Vaz_2020,
  title        = {Approximation Algorithms for Network Design and Cut Problems in Bounded-Treewidth},
  author       = {Vaz, Daniel},
  language     = {eng},
  doi          = {10.22028/D291-32983},
  school       = {Universit{\"a}t des Saarlandes},
  address      = {Saarbr{\"u}cken},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
}
Endnote
%0 Thesis %A Vaz, Daniel %Y Mehlhorn, Kurt %A referee: Chalermsook, Parinya %A referee: Krauthgamer, Robert %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Approximation Algorithms for Network Design and Cut Problems in Bounded-Treewidth : %G eng %U http://hdl.handle.net/21.11116/0000-0007-D8D7-3 %R 10.22028/D291-32983 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 175 p. %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/30394