Last Year

PhD
[1]
A. Bhattacharyya, “Long-term future prediction under uncertainty and multi-modality,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-35652.
[2]
B. R. Chaudhury, “Finding fair and efficient allocations,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34537.
[3]
S. N. Chowdhury, “Text-image synergy for multimodal retrieval and annotation,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34509.
Abstract
Text and images are the two most common data modalities found on the Internet. Understanding the synergy between text and images, that is, seamlessly analyzing information from these modalities may be trivial for humans, but is challenging for software systems. In this dissertation we study problems where deciphering text-image synergy is crucial for finding solutions. We propose methods and ideas that establish semantic connections between text and images in multimodal contents, and empirically show their effectiveness in four interconnected problems: Image Retrieval, Image Tag Refinement, Image-Text Alignment, and Image Captioning. Our promising results and observations open up interesting scopes for future research involving text-image data understanding.
[4]
D. A. Durai, “Novel graph based algorithms for transcriptome sequence analysis,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34158.
[5]
J. Forster, “Real-time human performance capture and synthesis,” Universität des Saarlandes, Saarbrücken, 2021.
[6]
M. H. Gad-Elrab, “Explainable methods for knowledge graph refinement and exploration via symbolic reasoning,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34423.
Abstract
Knowledge Graphs (KGs) have applications in many domains such as Finance, Manufacturing, and Healthcare. While recent efforts have created large KGs, their content is far from complete and sometimes includes invalid statements. Therefore, it is crucial to refine the constructed KGs to enhance their coverage and accuracy via KG completion and KG validation. It is also vital to provide human-comprehensible explanations for such refinements, so that humans have trust in the KG quality. Enabling KG exploration, by search and browsing, is also essential for users to understand the KG value and limitations towards downstream applications. However, the large size of KGs makes KG exploration very challenging. While the type taxonomy of KGs is a useful asset along these lines, it remains insufficient for deep exploration. In this dissertation we tackle the aforementioned challenges of KG refinement and KG exploration by combining logical reasoning over the KG with other techniques such as KG embedding models and text mining. Through such a combination, we introduce methods that provide human-understandable output. Concretely, we introduce methods to tackle KG incompleteness by learning exception-aware rules over the existing KG. Learned rules are then used in inferring missing links in the KG accurately. Furthermore, we propose a framework for constructing human-comprehensible explanations for candidate facts from both KG and text. Extracted explanations are used to ensure the validity of KG facts. Finally, to facilitate KG exploration, we introduce a method that combines KG embeddings with rule mining to compute informative entity clusters with explanations.
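To make the exception-aware rule idea in the abstract slightly more concrete, here is a minimal editorial sketch (the rule, predicates, and toy triples are invented for illustration; this is not code from the thesis): one hand-written rule with a negated exception is applied to a small triple set to propose missing links.

```python
# Toy KG as a set of (subject, predicate, object) triples -- all names invented.
kg = {
    ("alice", "works_at", "acme"),
    ("bob", "works_at", "acme"),
    ("acme", "located_in", "berlin"),
    ("bob", "is_remote_worker", "true"),
}

def infer_lives_in(kg):
    """Apply the rule
         lives_in(X, Y) <- works_at(X, Z), located_in(Z, Y), NOT is_remote_worker(X)
       and return the predicted missing links."""
    predictions = set()
    for (x, p1, z) in kg:
        if p1 != "works_at":
            continue
        if (x, "is_remote_worker", "true") in kg:   # the exception blocks the rule
            continue
        for (z2, p2, y) in kg:
            if z2 == z and p2 == "located_in" and (x, "lives_in", y) not in kg:
                predictions.add((x, "lives_in", y))
    return predictions

print(infer_lives_in(kg))  # {('alice', 'lives_in', 'berlin')} -- bob is blocked by the exception
```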
[7]
A. Ghazimatin, “Enhancing explainability and scrutability of recommender systems,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-35516.
Abstract
Our increasing reliance on complex algorithms for recommendations calls for models and methods for explainable, scrutable, and trustworthy AI. While explainability is required for understanding the relationships between model inputs and outputs, a scrutable system allows us to modify its behavior as desired. These properties help bridge the gap between our expectations and the algorithm’s behavior and accordingly boost our trust in AI. Aiming to cope with information overload, recommender systems play a crucial role in filtering content (such as products, news, songs, and movies) and shaping a personalized experience for their users. Consequently, there has been a growing demand from the information consumers to receive proper explanations for their personalized recommendations. These explanations aim at helping users understand why certain items are recommended to them and how their previous inputs to the system relate to the generation of such recommendations. Besides, in the event of receiving undesirable content, explanations could possibly contain valuable information as to how the system’s behavior can be modified accordingly. In this thesis, we present our contributions towards explainability and scrutability of recommender systems:
• We introduce a user-centric framework, FAIRY, for discovering and ranking post-hoc explanations for the social feeds generated by black-box platforms. These explanations reveal relationships between users’ profiles and their feed items and are extracted from the local interaction graphs of users. FAIRY employs a learning-to-rank (LTR) method to score candidate explanations based on their relevance and surprisal.
• We propose a method, PRINCE, to facilitate provider-side explainability in graph-based recommender systems that use personalized PageRank at their core. PRINCE explanations are comprehensible for users, because they present subsets of the user’s prior actions responsible for the received recommendations. PRINCE operates in a counterfactual setup and builds on a polynomial-time algorithm for finding the smallest counterfactual explanations.
• We propose a human-in-the-loop framework, ELIXIR, for enhancing scrutability and subsequently the recommendation models by leveraging user feedback on explanations. ELIXIR enables recommender systems to collect user feedback on pairs of recommendations and explanations. The feedback is incorporated into the model by imposing a soft constraint for learning user-specific item representations.
We evaluate all proposed models and methods with real user studies and demonstrate their benefits at achieving explainability and scrutability in recommender systems.
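As a rough editorial illustration of the counterfactual setup described for PRINCE-style explanations, the sketch below runs a plain power-iteration personalized PageRank on an invented toy graph and then brute-forces the smallest subset of the user's own actions whose removal changes the top recommendation. It is a demonstration under assumed toy data, not the thesis's polynomial-time algorithm.

```python
from itertools import combinations

def personalized_pagerank(edges, source, alpha=0.15, iters=50):
    """Plain power-iteration personalized PageRank on a directed edge list."""
    nodes = {u for u, v in edges} | {v for u, v in edges} | {source}
    out = {n: [v for u, v in edges if u == n] for n in nodes}
    score = {n: (1.0 if n == source else 0.0) for n in nodes}
    for _ in range(iters):
        nxt = {n: (alpha if n == source else 0.0) for n in nodes}
        for n in nodes:
            if out[n]:
                share = (1 - alpha) * score[n] / len(out[n])
                for v in out[n]:
                    nxt[v] += share
            else:
                nxt[source] += (1 - alpha) * score[n]  # dangling mass restarts at the user
        score = nxt
    return score

def top_item(edges, user, items):
    scores = personalized_pagerank(edges, user)
    return max(items, key=lambda i: scores.get(i, 0.0))

# Toy interaction graph: the user's own actions plus a few item-item links.
user_actions = [("u", "song_a"), ("u", "song_b")]
other_edges = [("song_a", "song_c"), ("song_b", "song_d"), ("song_c", "song_a")]
items = ["song_c", "song_d"]

recommended = top_item(user_actions + other_edges, "u", items)

# Smallest subset of the user's actions whose removal flips the top recommendation.
explanation = None
for k in range(1, len(user_actions) + 1):
    for subset in combinations(user_actions, k):
        remaining = [e for e in user_actions if e not in subset] + other_edges
        if top_item(remaining, "u", items) != recommended:
            explanation = subset
            break
    if explanation:
        break

print("recommended:", recommended, "| counterfactual explanation:", explanation)
```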
[8]
P. Mandros, “Discovering robust dependencies from data,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34291.
[9]
A. Marx, “Information-Theoretic Causal Discovery,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34290.
[10]
S. Metzler, “Structural Building Blocks in Graph Data: Characterised by Hyperbolic Communities and Uncovered by Boolean Tensor Clustering,” Universität des Saarlandes, Saarbrücken, 2021.
Abstract
Graph data nowadays easily become so large that it is infeasible to study the underlying structures manually. Thus, computational methods are needed to uncover large-scale structural information. In this thesis, we present methods to understand and summarise large networks. We propose the hyperbolic community model to describe groups of more densely connected nodes within networks using very intuitive parameters. The model accounts for a frequent connectivity pattern in real data: a few community members are highly interconnected; most members mainly have ties to this core. Our model fits real data much better than previously-proposed models. Our corresponding random graph generator, HyGen, creates graphs with realistic intra-community structure. Using the hyperbolic model, we conduct a large-scale study of the temporal evolution of communities on online question–answer sites. We observe that the user activity within a community is constant with respect to its size throughout its lifetime, and a small group of users is responsible for the majority of the social interactions. We propose an approach for Boolean tensor clustering. This special tensor factorisation is restricted to binary data and assumes that one of the tensor directions has only non-overlapping factors. These assumptions – valid for many real-world data, in particular time-evolving networks – enable the use of bitwise operators and lift much of the computational complexity from the task.
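The abstract notes that restricting the factorisation to binary data enables bitwise operators. As a toy editorial illustration of that trick only (invented matrices, not the thesis's Boolean tensor clustering method), the sketch below computes a Boolean matrix product with integer bitmasks, so combining a whole row is a single OR.

```python
# Editorial sketch: Boolean matrix product via bitwise operators on row bitmasks.

def rows_to_bits(matrix):
    """Encode each 0/1 row as an integer bitmask (column j -> bit j)."""
    return [sum(bit << j for j, bit in enumerate(row)) for row in matrix]

def boolean_product(B, C_bits, n_cols):
    """Boolean product (B o C)[i][j] = OR_k (B[i][k] AND C[k][j])."""
    result = []
    for row in B:
        acc = 0
        for k, bit in enumerate(row):
            if bit:               # AND with a 0/1 scalar selects row k of C ...
                acc |= C_bits[k]  # ... and a single bitwise OR accumulates it.
        result.append([(acc >> j) & 1 for j in range(n_cols)])
    return result

B = [[1, 0], [1, 1], [0, 1]]        # 3 x 2 membership of nodes in 2 blocks
C = [[1, 1, 0, 0], [0, 0, 1, 1]]    # 2 x 4 block-to-feature pattern
print(boolean_product(B, rows_to_bits(C), n_cols=4))
# [[1, 1, 0, 0], [1, 1, 1, 1], [0, 0, 1, 1]]
```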
[11]
A. Pandey, “Variety Membership Testing in Algebraic Complexity Theory,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34244.
[12]
M. Scherer, “Computational solutions for addressing heterogeneity in DNA methylation data,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-33808.
[13]
X. Shen, “Deep Latent-Variable Models for Neural Text Generation,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-35055.
[14]
R. Shetty, “Adversarial Content Manipulation for Analyzing and Improving Model Robustness,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34651.
[15]
A. Tewari, “Self-supervised reconstruction and synthesis of faces,” Universität des Saarlandes, Saarbrücken, 2021, doi: 10.22028/D291-34598.