
PhD Theses
[1]
F. Behjati Ardakani, “Computational models of gene expression regulation,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{Ardadiss_2019,
  TITLE = {Computational models of gene expression regulation},
  AUTHOR = {Behjati Ardakani, Fatemeh},
  LANGUAGE = {eng},
  DOI = {10.22028/D291-30206},
  SCHOOL = {Universit{\"a}t des Saarlandes},
  ADDRESS = {Saarbr{\"u}cken},
  YEAR = {2020},
  DATE = {2020},
}
Endnote
%0 Thesis
%A Behjati Ardakani, Fatemeh
%Y Schulz, Marcel
%A referee: Marschall, Tobias
%+ Computational Biology and Applied Algorithmics, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Computational Biology and Applied Algorithmics, MPI for Informatics, Max Planck Society
Computational Biology and Applied Algorithmics, MPI for Informatics, Max Planck Society
%T Computational models of gene expression regulation
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-7163-A
%R 10.22028/D291-30206
%I Universität des Saarlandes
%C Saarbrücken
%D 2020
%P 170 p.
%V phd
%9 phd
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/29766
[2]
M. Fleury, “Formalization of Logical Calculi in Isabelle/HOL,” Universität des Saarlandes, Saarbrücken, 2020.
Abstract
I develop a formal framework for propositional satisfiability with the conflict-driven clause learning (CDCL) procedure using the Isabelle/HOL proof assistant. The framework offers a convenient way to prove metatheorems and experiment with variants, including the Davis-Putnam-Logemann-Loveland procedure. The most noteworthy aspects of my work are the inclusion of rules for forget and restart and the refinement approach. I use the formalization to develop three extensions: first, an incremental solving extension of CDCL; second, a verified optimizing CDCL (OCDCL) that, given a cost function on literals, derives an optimal model with minimum cost; and finally, model covering. Thanks to the reusable CDCL framework, these extensions are easier to develop. Through a chain of refinements, I connect the abstract CDCL calculus first to a more concrete calculus, then to a SAT solver expressed in a simple functional programming language, and finally to a SAT solver in an imperative language, with total correctness guarantees. The imperative version relies on the two-watched-literal data structure and other optimizations found in modern solvers. I used the Isabelle Refinement Framework to automate the most tedious refinement steps. I then extend this work with further optimizations, such as blocking literals and the use of machine words for as long as possible before switching to unbounded integers to preserve completeness.
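The chain from abstract calculus to imperative solver starts from ideas that are easiest to see in the DPLL procedure the abstract mentions. Below is a minimal, illustrative Python sketch of DPLL with unit propagation, offered only as a reading aid: it is not the verified Isabelle/HOL development, and it has none of CDCL's clause learning, restarts, or two-watched-literal machinery. The DIMACS-style clause encoding (signed integers) is an assumption of the sketch.

# Illustrative sketch only: toy DPLL with unit propagation, not the
# verified solver from the thesis. Clauses are lists of nonzero ints;
# a negative int denotes a negated variable (DIMACS-style encoding).

def dpll(clauses, assignment=None):
    """Return a satisfying assignment (dict: var -> bool) or None."""
    if assignment is None:
        assignment = {}

    def simplify(cls, lit):
        # Drop clauses satisfied by lit; remove the falsified literal -lit.
        out = []
        for c in cls:
            if lit in c:
                continue                      # clause already satisfied
            reduced = [l for l in c if l != -lit]
            if not reduced:
                return None                   # empty clause: conflict
            out.append(reduced)
        return out

    while True:                               # unit propagation to fixpoint
        units = [c[0] for c in clauses if len(c) == 1]
        if not units:
            break
        lit = units[0]
        assignment = {**assignment, abs(lit): lit > 0}
        clauses = simplify(clauses, lit)
        if clauses is None:
            return None                       # conflict during propagation

    if not clauses:
        return assignment                     # every clause satisfied
    lit = clauses[0][0]                       # branch on a remaining literal
    for choice in (lit, -lit):
        sub = simplify(clauses, choice)
        if sub is not None:
            result = dpll(sub, {**assignment, abs(choice): choice > 0})
            if result is not None:
                return result
    return None

print(dpll([[1, 2], [-1, 2], [-2, 3]]))       # -> {1: True, 2: True, 3: True}

CDCL replaces this blind chronological backtracking with learned conflict clauses and backjumping, and the two-watched-literal scheme makes propagation cheap; those are precisely the parts the thesis refines down to verified imperative code.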
Export
BibTeX
@phdthesis{Fleuryphd2019,
  TITLE = {Formalization of Logical Calculi in Isabelle/{HOL}},
  AUTHOR = {Fleury, Mathias},
  LANGUAGE = {eng},
  DOI = {10.22028/D291-30179},
  SCHOOL = {Universit{\"a}t des Saarlandes},
  ADDRESS = {Saarbr{\"u}cken},
  YEAR = {2020},
  DATE = {2020},
  ABSTRACT = {I develop a formal framework for propositional satisfiability with the conflict-driven clause learning (CDCL) procedure using the Isabelle/HOL proof assistant. The framework offers a convenient way to prove metatheorems and experiment with variants, including the Davis-Putnam-Logemann-Loveland procedure. The most noteworthy aspects of my work are the inclusion of rules for forget and restart and the refinement approach. I use the formalization to develop three extensions: first, an incremental solving extension of CDCL; second, a verified optimizing CDCL (OCDCL) that, given a cost function on literals, derives an optimal model with minimum cost; and finally, model covering. Thanks to the reusable CDCL framework, these extensions are easier to develop. Through a chain of refinements, I connect the abstract CDCL calculus first to a more concrete calculus, then to a SAT solver expressed in a simple functional programming language, and finally to a SAT solver in an imperative language, with total correctness guarantees. The imperative version relies on the two-watched-literal data structure and other optimizations found in modern solvers. I used the Isabelle Refinement Framework to automate the most tedious refinement steps. I then extend this work with further optimizations, such as blocking literals and the use of machine words for as long as possible before switching to unbounded integers to preserve completeness.},
}
Endnote
%0 Thesis
%A Fleury, Mathias
%Y Weidenbach, Christoph
%A referee: Biere, Armin
%+ Automation of Logic, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Automation of Logic, MPI for Informatics, Max Planck Society
External Organizations
%T Formalization of Logical Calculi in Isabelle/HOL
%G eng
%U http://hdl.handle.net/21.11116/0000-0005-AE07-0
%R 10.22028/D291-30179
%I Universität des Saarlandes
%C Saarbrücken
%D 2020
%P 169 p.
%V phd
%9 phd
%X I develop a formal framework for propositional satisfiability with the conflict-driven clause learning (CDCL) procedure using the Isabelle/HOL proof assistant. The framework offers a convenient way to prove metatheorems and experiment with variants, including the Davis-Putnam-Logemann-Loveland procedure. The most noteworthy aspects of my work are the inclusion of rules for forget and restart and the refinement approach. I use the formalization to develop three extensions: first, an incremental solving extension of CDCL; second, a verified optimizing CDCL (OCDCL) that, given a cost function on literals, derives an optimal model with minimum cost; and finally, model covering. Thanks to the reusable CDCL framework, these extensions are easier to develop. Through a chain of refinements, I connect the abstract CDCL calculus first to a more concrete calculus, then to a SAT solver expressed in a simple functional programming language, and finally to a SAT solver in an imperative language, with total correctness guarantees. The imperative version relies on the two-watched-literal data structure and other optimizations found in modern solvers. I used the Isabelle Refinement Framework to automate the most tedious refinement steps. I then extend this work with further optimizations, such as blocking literals and the use of machine words for as long as possible before switching to unbounded integers to preserve completeness.
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28722
[3]
Y. He, “Improved Methods and Analysis for Semantic Image Segmentation,” Universität des Saarlandes, Saarbrücken, 2020.
Abstract
Modern deep learning has enabled amazing developments in computer vision in recent years (Hinton and Salakhutdinov, 2006; Krizhevsky et al., 2012). As a fundamental task, semantic segmentation aims to predict a class label for each pixel of an image, which empowers machine perception of the visual world. In spite of the recent successes of fully convolutional networks (Long et al., 2015), several challenges remain to be addressed. In this thesis, we focus on this topic, under different kinds of input formats and various types of scenes. Specifically, our study has two aspects: (1) data-driven neural modules for improved performance, and (2) leveraging datasets to train systems with higher performance and better data privacy guarantees. In the first part of this thesis, we improve semantic segmentation by designing new modules that are compatible with existing architectures. First, we develop a spatio-temporal data-driven pooling, which brings additional information about the data (i.e., superpixels) into neural networks, benefiting both the training of neural networks and the inference on novel data. We investigate our approach on RGB-D videos for segmenting indoor scenes, where depth provides complementary cues to color and our model performs particularly well. Second, we design learnable dilated convolutions, which extend standard dilated convolutions, whose dilation factors (Yu and Koltun, 2016) need to be carefully determined by hand to obtain decent performance. We present a method to learn dilation factors together with the filter weights of convolutions, avoiding a complicated search over dilation factors. We conduct extensive studies on challenging street scenes, across various baselines of differing complexity as well as several datasets at varying image resolutions. In the second part, we investigate how to utilize expensive training data. First, starting from generative modelling, we study network architectures and the learning pipeline for generating multiple examples, aiming to improve the diversity of generated examples while preserving comparable quality. Second, we develop a generative model for synthesizing features of a network. With a mixture of real images and synthetic features, we are able to train a segmentation model with better generalization capability. Our approach is evaluated on different scene parsing tasks to demonstrate the effectiveness of the proposed method. Finally, we study membership inference on the semantic segmentation task. We propose the first membership inference attack system against black-box semantic segmentation models, which tries to infer whether a data pair was used as training data. From our observations, information about the training data is indeed leaking. To mitigate the leakage, we leverage our synthetic features to perform prediction obfuscation, reducing the gap between the posterior distributions of the training and testing sets. Consequently, our study provides not only an approach for detecting illegal use of data, but also foundations for the safer use of semantic segmentation models.
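For readers who have not met dilation factors before, the sketch below shows what a fixed integer dilation factor does in a 1-D convolution. It is a NumPy illustration of the standard building block only; the thesis's contribution, learning the dilation factors jointly with the filter weights, is not implemented here.

# Illustrative sketch only: 'valid' dilated 1-D convolution with a fixed
# integer dilation factor. Computes cross-correlation (no kernel flip),
# as convolutional networks conventionally do.
import numpy as np

def dilated_conv1d(signal, kernel, dilation=1):
    """Slide the kernel over the signal with taps `dilation` samples apart."""
    span = (len(kernel) - 1) * dilation + 1   # receptive field of the kernel
    out = np.empty(len(signal) - span + 1)
    for i in range(len(out)):
        taps = signal[i : i + span : dilation]
        out[i] = np.dot(taps, kernel)
    return out

x = np.arange(8, dtype=float)
print(dilated_conv1d(x, np.array([-1.0, 1.0]), dilation=1))  # [1. 1. 1. 1. 1. 1. 1.]
print(dilated_conv1d(x, np.array([-1.0, 1.0]), dilation=3))  # [3. 3. 3. 3. 3.]

Increasing the dilation widens the receptive field without adding parameters, which is why the factor matters; the cost of tuning it by hand is what motivates making it learnable.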
Export
BibTeX
@phdthesis{HEphd2019,
  TITLE = {Improved Methods and Analysis for Semantic Image Segmentation},
  AUTHOR = {He, Yang},
  LANGUAGE = {eng},
  DOI = {10.22028/D291-30218},
  SCHOOL = {Universit{\"a}t des Saarlandes},
  ADDRESS = {Saarbr{\"u}cken},
  YEAR = {2020},
  DATE = {2020},
  ABSTRACT = {Modern deep learning has enabled amazing developments in computer vision in recent years (Hinton and Salakhutdinov, 2006; Krizhevsky et al., 2012). As a fundamental task, semantic segmentation aims to predict a class label for each pixel of an image, which empowers machine perception of the visual world. In spite of the recent successes of fully convolutional networks (Long et al., 2015), several challenges remain to be addressed. In this thesis, we focus on this topic, under different kinds of input formats and various types of scenes. Specifically, our study has two aspects: (1) data-driven neural modules for improved performance, and (2) leveraging datasets to train systems with higher performance and better data privacy guarantees. In the first part of this thesis, we improve semantic segmentation by designing new modules that are compatible with existing architectures. First, we develop a spatio-temporal data-driven pooling, which brings additional information about the data (i.e., superpixels) into neural networks, benefiting both the training of neural networks and the inference on novel data. We investigate our approach on RGB-D videos for segmenting indoor scenes, where depth provides complementary cues to color and our model performs particularly well. Second, we design learnable dilated convolutions, which extend standard dilated convolutions, whose dilation factors (Yu and Koltun, 2016) need to be carefully determined by hand to obtain decent performance. We present a method to learn dilation factors together with the filter weights of convolutions, avoiding a complicated search over dilation factors. We conduct extensive studies on challenging street scenes, across various baselines of differing complexity as well as several datasets at varying image resolutions. In the second part, we investigate how to utilize expensive training data. First, starting from generative modelling, we study network architectures and the learning pipeline for generating multiple examples, aiming to improve the diversity of generated examples while preserving comparable quality. Second, we develop a generative model for synthesizing features of a network. With a mixture of real images and synthetic features, we are able to train a segmentation model with better generalization capability. Our approach is evaluated on different scene parsing tasks to demonstrate the effectiveness of the proposed method. Finally, we study membership inference on the semantic segmentation task. We propose the first membership inference attack system against black-box semantic segmentation models, which tries to infer whether a data pair was used as training data. From our observations, information about the training data is indeed leaking. To mitigate the leakage, we leverage our synthetic features to perform prediction obfuscation, reducing the gap between the posterior distributions of the training and testing sets. Consequently, our study provides not only an approach for detecting illegal use of data, but also foundations for the safer use of semantic segmentation models.},
}
Endnote
%0 Thesis
%A He, Yang
%Y Fritz, Mario
%A referee: Schiele, Bernt
%A referee: Denzler, Joachim
%+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
%T Improved Methods and Analysis for Semantic Image Segmentation
%G eng
%U http://hdl.handle.net/21.11116/0000-0005-C0DD-9
%R 10.22028/D291-30218
%I Universität des Saarlandes
%C Saarbrücken
%D 2020
%P 162 p.
%V phd
%9 phd
%X Modern deep learning has enabled amazing developments in computer vision in recent years (Hinton and Salakhutdinov, 2006; Krizhevsky et al., 2012). As a fundamental task, semantic segmentation aims to predict a class label for each pixel of an image, which empowers machine perception of the visual world. In spite of the recent successes of fully convolutional networks (Long et al., 2015), several challenges remain to be addressed. In this thesis, we focus on this topic, under different kinds of input formats and various types of scenes. Specifically, our study has two aspects: (1) data-driven neural modules for improved performance, and (2) leveraging datasets to train systems with higher performance and better data privacy guarantees. In the first part of this thesis, we improve semantic segmentation by designing new modules that are compatible with existing architectures. First, we develop a spatio-temporal data-driven pooling, which brings additional information about the data (i.e., superpixels) into neural networks, benefiting both the training of neural networks and the inference on novel data. We investigate our approach on RGB-D videos for segmenting indoor scenes, where depth provides complementary cues to color and our model performs particularly well. Second, we design learnable dilated convolutions, which extend standard dilated convolutions, whose dilation factors (Yu and Koltun, 2016) need to be carefully determined by hand to obtain decent performance. We present a method to learn dilation factors together with the filter weights of convolutions, avoiding a complicated search over dilation factors. We conduct extensive studies on challenging street scenes, across various baselines of differing complexity as well as several datasets at varying image resolutions. In the second part, we investigate how to utilize expensive training data. First, starting from generative modelling, we study network architectures and the learning pipeline for generating multiple examples, aiming to improve the diversity of generated examples while preserving comparable quality. Second, we develop a generative model for synthesizing features of a network. With a mixture of real images and synthetic features, we are able to train a segmentation model with better generalization capability. Our approach is evaluated on different scene parsing tasks to demonstrate the effectiveness of the proposed method. Finally, we study membership inference on the semantic segmentation task. We propose the first membership inference attack system against black-box semantic segmentation models, which tries to infer whether a data pair was used as training data. From our observations, information about the training data is indeed leaking. To mitigate the leakage, we leverage our synthetic features to perform prediction obfuscation, reducing the gap between the posterior distributions of the training and testing sets. Consequently, our study provides not only an approach for detecting illegal use of data, but also foundations for the safer use of semantic segmentation models.
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28784
[4]
M. John, “Of keyboards and beyond: optimization in human-computer interaction,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{John_2019,
  TITLE = {Of keyboards and beyond: optimization in human-computer interaction},
  AUTHOR = {John, Maximilian},
  DOI = {10.22028/D291-30635},
  SCHOOL = {Universit{\"a}t des Saarlandes},
  ADDRESS = {Saarbr{\"u}cken},
  YEAR = {2020},
  DATE = {2020},
}
Endnote
%0 Thesis
%A John, Maximilian
%Y Karrenbauer, Andreas
%A referee: Mehlhorn, Kurt
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Of keyboards and beyond: optimization in human-computer interaction
%U http://hdl.handle.net/21.11116/0000-0007-7152-D
%R 10.22028/D291-30635
%I Universität des Saarlandes
%C Saarbrücken
%D 2020
%P 91 p.
%V phd
%9 phd
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28954
[5]
A. Meka, “Live inverse rendering,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{Meka_2019,
  TITLE = {Live inverse rendering},
  AUTHOR = {Meka, Abhimitra},
  LANGUAGE = {eng},
  DOI = {10.22028/D291-30206},
  SCHOOL = {Universit{\"a}t des Saarlandes},
  ADDRESS = {Saarbr{\"u}cken},
  YEAR = {2020},
  DATE = {2020},
}
Endnote
%0 Thesis
%A Meka, Abhimitra
%Y Theobalt, Christian
%A referee: Drettakis, George
%+ Computer Graphics, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Live inverse rendering
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-715A-5
%R 10.22028/D291-30206
%I Universität des Saarlandes
%C Saarbrücken
%D 2020
%P 189 p.
%V phd
%9 phd
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28721