2017
[1]
A. Abujabal, M. Yahya, M. Riedewald, and G. Weikum, “Automated Template Generation for Question Answering over Knowledge Graphs,” in WWW’17, 26th International Conference on World Wide Web, Perth, Australia, 2017, pp. 1191–1200. doi: 10.1145/3038912.3052583.
[2]
P. Agarwal and J. Strötgen, “Tiwiki: Searching Wikipedia with Temporal Constraints,” in 7th Temporal Web Analytics Workshop (TempWeb 2017), Perth, Australia. (Accepted/in press)
[3]
R. Andrade and J. Strötgen, “All Dates Lead to Rome: Extracting and Explaining Temporal References in Street Names,” in WWW’17 Companion, Perth, Australia, 2017, pp. 757–758. doi: 10.1145/3041021.3054249.
[4]
R. Bertens, J. Vreeken, and A. Siebes, “Efficiently Discovering Unexpected Pattern Co-Occurrences,” in Proceedings of the Seventeenth SIAM International Conference on Data Mining (SDM 2017), Houston, TX, USA. (Accepted/in press)
[5]
A. Bhattacharyya and J. Vreeken, “Efficiently Summarising Event Sequences with Rich Interleaving Patterns,” 2017. [Online]. Available: http://arxiv.org/abs/1701.08096. (arXiv: 1701.08096)
Abstract
Discovering the key structure of a database is one of the main goals of data mining. In pattern set mining we do so by discovering a small set of patterns that together describe the data well. The richer the class of patterns we consider, and the more powerful our description language, the better we will be able to summarise the data. In this paper we propose Squish, a novel greedy MDL-based method for summarising sequential data using rich patterns that are allowed to interleave. Experiments show Squish is orders of magnitude faster than the state of the art, results in better models, and discovers meaningful semantics in the form of patterns that identify multiple choices of values.
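As background for the MDL-based approach sketched above: two-part MDL methods of this kind score a candidate model M (here, a set of sequential patterns) against data D by the total encoded length, and a greedy search keeps a pattern only if it lowers that total. In the generic formulation (not necessarily the paper’s exact encoding):

L(D, M) = L(M) + L(D | M)

where L(M) is the number of bits needed to describe the pattern set itself and L(D | M) the number of bits needed to describe the event sequences given those patterns; the best summary is the model minimising L(D, M).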
[6]
A. Bhattacharyya and J. Vreeken, “Efficiently Summarising Event Sequences with Rich Interleaving Patterns,” in Proceedings of the Seventeenth SIAM International Conference on Data Mining (SDM 2017), Houston, TX, USA. (Accepted/in press)
[7]
N. Boldyrev, M. Spaniol, J. Strötgen, and G. Weikum, “SESAME: European Statistics Explored via Semantic Alignment onto Wikipedia,” in WWW’17 Companion, Perth, Australia, 2017, pp. 177–181. doi: 10.1145/3041021.3054732.
[8]
M. Boley, B. R. Goldsmith, L. M. Ghiringhelli, and J. Vreeken, “Identifying Consistent Statements about Numerical Data with Dispersion-Corrected Subgroup Discovery,” 2017. [Online]. Available: http://arxiv.org/abs/1701.07696. (arXiv: 1701.07696)
Abstract
Existing algorithms for subgroup discovery with numerical targets do not optimize the error or target variable dispersion of the groups they find. This often leads to unreliable or inconsistent statements about the data, rendering practical applications, especially in scientific domains, futile. Therefore, we here extend the optimistic estimator framework for optimal subgroup discovery to a new class of objective functions: we show how tight estimators can be computed efficiently for all functions that are determined by subgroup size (non-decreasing dependence), the subgroup median value, and a dispersion measure around the median (non-increasing dependence). In the important special case when dispersion is measured using the average absolute deviation from the median, this novel approach yields a linear time algorithm. Empirical evaluation on a wide range of datasets shows that, when used within branch-and-bound search, this approach is highly efficient and indeed discovers subgroups with much smaller errors.
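To make the objective class concrete, here is a minimal Python sketch of a dispersion-corrected quality function of the kind described above: non-decreasing in subgroup size, based on the subgroup median, and non-increasing in the average absolute deviation from the median. The function name, the size exponent, and the exact combination of terms are illustrative assumptions, not the paper’s definition, and the tight optimistic estimators that make branch-and-bound search efficient are omitted entirely.

import numpy as np

def dispersion_corrected_quality(subgroup_values, global_median, size_exponent=0.5):
    # Illustrative quality: reward large subgroups whose median exceeds the
    # global median, and penalise dispersion around the subgroup median.
    values = np.asarray(subgroup_values, dtype=float)
    n = len(values)
    if n == 0:
        return 0.0
    med = np.median(values)
    aad = np.abs(values - med).mean()  # average absolute deviation from the median
    return (n ** size_exponent) * (med - global_median - aad)

# Toy usage: a tight, high-valued subgroup beats a dispersed one.
data = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 8.0, 9.0, 10.0])
gm = float(np.median(data))
print(dispersion_corrected_quality([8.0, 9.0, 10.0], gm))  # high score
print(dispersion_corrected_quality([1.0, 2.0, 10.0], gm))  # low score (dispersed)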
[9]
M. Boley, B. R. Goldsmith, L. M. Ghiringhelli, and J. Vreeken, “Identifying Consistent Statements about Numerical Data with Dispersion-Corrected Subgroup Discovery,” Data Mining and Knowledge Discovery, vol. 31, no. 5, pp. 1391–1418, 2017. doi: 10.1007/s10618-017-0520-3.
[10]
K. Budhathoki and J. Vreeken, “Causal Inference by Stochastic Complexity,” 2017. [Online]. Available: http://arxiv.org/abs/1702.06776. (arXiv: 1702.06776)
Abstract
The algorithmic Markov condition states that the most likely causal direction between two random variables X and Y can be identified as that direction with the lowest Kolmogorov complexity. Due to the halting problem, however, this notion is not computable. We hence propose to do causal inference by stochastic complexity. That is, we propose to approximate Kolmogorov complexity via the Minimum Description Length (MDL) principle, using a score that is mini-max optimal with regard to the model class under consideration. This means that even in an adversarial setting, such as when the true distribution is not in this class, we still obtain the optimal encoding for the data relative to the class. We instantiate this framework, which we call CISC, for pairs of univariate discrete variables, using the class of multinomial distributions. Experiments show that CISC is highly accurate on synthetic, benchmark, as well as real-world data, outperforming the state of the art by a margin, and scales extremely well with regard to sample and domain sizes.
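The direction test itself is simple to state. Below is a rough Python sketch, assuming the decision rule compares the total stochastic complexity of encoding X and then Y given X against the reverse direction, with multinomial stochastic complexity computed as maximum-likelihood log-loss plus the parametric complexity from the Kontkanen-Myllymäki recurrence. The function names are mine, and the paper’s exact conditional scoring and normalisation may differ.

from collections import Counter
from math import comb, log

def multinomial_complexity(k, n):
    # Parametric complexity C(k, n) of k-ary multinomials, via the
    # linear-time recurrence of Kontkanen & Myllymaki (2007).
    if n == 0 or k <= 1:
        return 1.0
    c_prev = 1.0  # C(1, n)
    c_curr = sum(comb(n, h) * (h / n) ** h * ((n - h) / n) ** (n - h)
                 for h in range(n + 1))  # C(2, n); 0**0 == 1 in Python
    for j in range(3, k + 1):
        c_prev, c_curr = c_curr, c_curr + n * c_prev / (j - 2)
    return c_curr

def stochastic_complexity(xs):
    # Maximum-likelihood code length plus model cost, in bits.
    n, counts = len(xs), Counter(xs)
    loglik = sum(c * log(c / n, 2) for c in counts.values())
    return -loglik + log(multinomial_complexity(len(counts), n), 2)

def conditional_sc(ys, xs):
    # Encode Y separately within each group induced by a value of X.
    groups = {}
    for x, y in zip(xs, ys):
        groups.setdefault(x, []).append(y)
    return sum(stochastic_complexity(g) for g in groups.values())

def cisc_direction(xs, ys):
    x_to_y = stochastic_complexity(xs) + conditional_sc(ys, xs)
    y_to_x = stochastic_complexity(ys) + conditional_sc(xs, ys)
    if x_to_y < y_to_x:
        return "X causes Y"
    if y_to_x < x_to_y:
        return "Y causes X"
    return "undecided"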
[11]
K. Budhathoki and J. Vreeken, “Causal Inference by Compression,” in 16th IEEE International Conference on Data Mining (ICDM 2016), Barcelona, Spain, 2017, pp. 41–50. doi: 10.1109/ICDM.2016.0015.
[12]
K. Budhathoki and J. Vreeken, “Correlation by Compression,” in Proceedings of the Seventeenth SIAM International Conference on Data Mining (SDM 2017), Houston, TX, USA. (Accepted/in press)
[13]
C. X. Chu, N. Tandon, and G. Weikum, “Distilling Task Knowledge from How-To Communities,” in WWW’17, 26th International Conference on World Wide Web, Perth, Australia, 2017, pp. 805–814. doi: 10.1145/3038912.3052715.
[14]
C. Costa, G. Chatzimilioudis, D. Zeinalipour-Yazti, and M. F. Mokbel, “Towards Real-Time Road Traffic Analytics using Telco Big Data,” in Eleventh International Workshop on Real-Time Business Intelligence and Analytics (BIRTE 2017), Munich, Germany. (Accepted/in press)
[15]
C. Costa, G. Chatzimilioudis, D. Zeinalipour-Yazti, and M. F. Mokbel, “SPATE: Compacting and Exploring Telco Big Data,” in ICDE 2017, 33rd IEEE International Conference on Data Engineering, San Diego, CA, USA, 2017, pp. 1419–1420. doi: 10.1109/ICDE.2017.203.
[16]
C. Costa, G. Chatzimilioudis, D. Zeinalipour-Yazti, and M. F. Mokbel, “Efficient Exploration of Telco Big Data with Compression and Decaying,” in ICDE 2017, 33rd IEEE International Conference on Data Engineering, San Diego, CA, USA, 2017, pp. 1332–1343. doi: 10.1109/ICDE.2017.175.
[17]
S. Das, K. Berberich, D. Klakow, A. Mishra, and V. Setty, “Estimating Event Focus Time with Distributed Representation of Words,” Master’s thesis, Universität des Saarlandes, Saarbrücken, 2017.
Abstract
Time is an important dimension as it aids in disambiguating and understanding newsworthy events that happened in the past. It helps in the chronological ordering of events to understand their causality, evolution, and ramifications. In Information Retrieval, time alongside text is known to improve the quality of search results. Making use of the temporal dimension in text-based analysis is therefore an interesting idea to explore. Given the importance of time, methods to automatically resolve the temporal foci of events are essential. In this thesis, we address this research question by training our models on two different kinds of corpora and then evaluating them on a set of historical event-queries.
[18]
S. Degaetano-Ortlieb and J. Strötgen, “Diachronic Variation of Temporal Expressions in Scientific Writing through the Lens of Relative Entropy,” in Proceedings of the Conference of the German Society for Computational Linguistics and Language Technology (GSCL 2017), Berlin, Germany. (Accepted/in press)
[19]
S. Dutta, “Efficient Knowledge Management for Named Entities from Text,” PhD dissertation, Universität des Saarlandes, Saarbrücken, 2017.
Abstract
The evolution of search from keywords to entities has necessitated the efficient harvesting and management of entity-centric information for constructing knowledge bases catering to various applications such as semantic search, question answering, and information retrieval. The vast amounts of natural language texts available across diverse domains on the Web provide rich sources for discovering facts about named entities such as people, places, and organizations. A key challenge, in this regard, entails the need for precise identification and disambiguation of entities across documents for extraction of attributes/relations and their proper representation in knowledge bases. Additionally, the applicability of such repositories not only involves the quality and accuracy of the stored information, but also storage management and query processing efficiency. This dissertation aims to tackle the above problems by presenting efficient approaches for entity-centric knowledge acquisition from texts and its representation in knowledge repositories. This dissertation presents a robust approach for identifying text phrases pertaining to the same named entity across huge corpora, and their disambiguation to canonical entities present in a knowledge base, by using enriched semantic contexts and link validation encapsulated in a hierarchical clustering framework. This work further presents language and consistency features for classification models to compute the credibility of obtained textual facts, ensuring quality of the extracted information. Finally, an encoding algorithm, using frequent term detection and improved data locality, to represent entities for enhanced knowledge base storage and query performance is presented.
[20]
S. Eslami, “Utility-preserving Profile Removal in Online Forums,” Master’s thesis, Universität des Saarlandes, Saarbrücken, 2017.
[21]
E. Galbrun and P. Miettinen, “Analysing Political Opinions Using Redescription Mining,” in 16th IEEE International Conference on Data Mining Workshops (ICDMW 2016), Barcelona, Spain, 2017, pp. 422–427. doi: 10.1109/ICDMW.2016.121.
[22]
X. Ge, A. Daphalapurkar, M. Shmipi, K. Darpun, K. Pelechrinis, P. K. Chrysanthis, and D. Zeinalipour-Yazti, “Data-driven Serendipity Navigation in Urban Places,” in IEEE 37th International Conference on Distributed Computing Systems (ICDCS 2017), Atlanta, GA, USA, 2017, pp. 2501–2504. doi: 10.1109/ICDCS.2017.286.
[23]
B. Goldsmith, M. Boley, J. Vreeken, M. Scheffler, and L. Ghiringhelli, “Uncovering Structure-property Relationships of Materials by Subgroup Discovery,” New Journal of Physics, vol. 19, no. 1, art. no. 013031, 2017. doi: 10.1088/1367-2630/aa57c2.
[24]
A. Grycner, “Constructing Lexicons of Relational Phrases,” PhD dissertation, Universität des Saarlandes, Saarbrücken, 2017.
Abstract
Knowledge Bases are one of the key components of Natural Language Understanding systems. For example, DBpedia, YAGO, and Wikidata capture and organize knowledge about named entities and relations between them, which is often crucial for tasks like Question Answering and Named Entity Disambiguation. While Knowledge Bases have good coverage of prominent entities, they are often limited with respect to relations. The goal of this thesis is to bridge this gap and automatically create lexicons of textual representations of relations, namely relational phrases. The lexicons should contain information about paraphrases, hierarchy, as well as semantic types of arguments of relational phrases. The thesis makes three main contributions. The first contribution addresses disambiguating relational phrases by aligning them with the WordNet dictionary. Moreover, the alignment allows imposing the WordNet hierarchy on the relational phrases. The second contribution proposes a method for graph construction of relations using Probabilistic Graphical Models. In addition, we apply this model to relation paraphrasing. The third contribution presents a method for constructing a lexicon of relational paraphrases with fine-grained semantic typing of arguments. This method is based on information from a multilingual parallel corpus.
[25]
S. Gurajada, “Distributed Querying of Large Labeled Graphs,” PhD dissertation, Universität des Saarlandes, Saarbrücken, 2017.
Abstract
Graphs are a vital abstract data type with profound significance in many applications. Because of their versatility, graphs have been adapted into several different forms; one such adaptation with many practical applications is the “labeled graph”, where vertices and edges carry labels. An enormous research effort has been invested into managing and querying graphs, yet many challenges remain unsolved. In this thesis, we advance the state of the art for the following query models and propose distributed solutions that process them efficiently and scalably.
• Set Reachability. We formalize and investigate a generalization of the basic notion of reachability, called set reachability, which deals with finding all reachable pairs for a given source set and target set. We present a non-iterative distributed solution that takes only a single round of communication for any set reachability query, achieved by precomputation, replication, and indexing of partial reachabilities among the boundary vertices.
• Basic Graph Patterns (BGP). Supported by the majority of query languages, BGP queries are a common mode of querying knowledge graphs, biological datasets, etc. We present a novel distributed architecture that relies on asynchronous execution, join-ahead pruning, and a multi-threaded query processing framework to process BGP queries efficiently and scalably.
• Generalized Graph Patterns (GGP). These queries combine the semantics of pattern matching and navigational queries, and are popular in scenarios where the schema of the underlying graph is unknown or only partially known. We present a distributed solution with a bimodal indexing layout that supports efficient processing of both BGP and navigational queries, together with a unified query optimizer and processor that handle GGP queries efficiently and scalably.
To this end, we propose a prototype distributed engine, coined “TriAD” (Triple Asynchronous and Distributed), that supports all the aforementioned query models, and we provide a detailed empirical evaluation of TriAD against several state-of-the-art systems on multiple real-world and synthetic datasets.
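As a reading aid for the first contribution: set reachability asks, for a directed graph, a source set S, and a target set T, for all pairs (s, t) in S x T such that t is reachable from s. A minimal single-machine formulation is sketched below; the thesis solves the much harder distributed variant via precomputed boundary reachabilities, none of which is reflected in this sketch, and the function name is mine.

from collections import deque

def set_reachability(adj, sources, targets):
    # All pairs (s, t) with s in sources, t in targets, and t reachable
    # from s. adj maps a vertex to an iterable of its out-neighbours.
    target_set = set(targets)
    pairs = []
    for s in sources:
        seen, queue = {s}, deque([s])
        while queue:
            u = queue.popleft()
            if u in target_set:
                pairs.append((s, u))
            for v in adj.get(u, ()):
                if v not in seen:
                    seen.add(v)
                    queue.append(v)
    return pairs

# Toy usage on a four-vertex graph.
g = {"a": ["b"], "b": ["c"], "d": ["c"]}
print(set_reachability(g, ["a", "d"], ["c"]))  # [('a', 'c'), ('d', 'c')]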
[26]
K. Hui and K. Berberich, “Low-Cost Preference Judgment via Ties,” in Advances in Information Retrieval (ECIR 2017), Aberdeen, UK, 2017.
Export
BibTeX
@inproceedings{hui2017short, TITLE = {Low-Cost Preference Judgment via Ties}, AUTHOR = {Hui, Kai and Berberich, Klaus}, LANGUAGE = {eng}, ISBN = {978-3-319-56607-8}, DOI = {10.1007/978-3-319-56608-5_58}, PUBLISHER = {Springer}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {Advances in Information Retrieval (ECIR 2017)}, EDITOR = {Jose, Joemon M. and Hauff, Claudia and Altingovde, Ismail Sengor and Song, Dawei and Albakour, Dyaa and Watt, Stuart and Tait, John}, PAGES = {626--632}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {10193}, ADDRESS = {Aberdeen, UK}, }
Endnote
%0 Conference Proceedings %A Hui, Kai %A Berberich, Klaus %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Low-Cost Preference Judgment via Ties : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-1F7B-A %R 10.1007/978-3-319-56608-5_58 %D 2017 %B 39th European Conference on Information Retrieval %Z date of event: 2017-04-09 - 2017-04-13 %C Aberdeen, UK %B Advances in Information Retrieval %E Jose, Joemon M.; Hauff, Claudia; Altingovde, Ismail Sengor; Song, Dawei; Albakour, Dyaa; Watt, Stuart; Tait, John %P 626 - 632 %I Springer %@ 978-3-319-56607-8 %B Lecture Notes in Computer Science %N 10193
[27]
K. Hui and K. Berberich, “Transitivity, Time Consumption, and Quality of Preference Judgments in Crowdsourcing,” in Advances in Information Retrieval (ECIR 2017), Aberdeen, UK, 2017.
Export
BibTeX
@inproceedings{hui2017full, TITLE = {Transitivity, Time Consumption, and Quality of Preference Judgments in Crowdsourcing}, AUTHOR = {Hui, Kai and Berberich, Klaus}, LANGUAGE = {eng}, ISBN = {978-3-319-56607-8}, DOI = {10.1007/978-3-319-56608-5_19}, PUBLISHER = {Springer}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {Advances in Information Retrieval (ECIR 2017)}, EDITOR = {Jose, Joemon M. and Hauff, Claudia and Altingovde, Ismail Sengor and Song, Dawei and Albakour, Dyaa and Watt, Stuart and Tait, John}, PAGES = {239--251}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {10193}, ADDRESS = {Aberdeen, UK}, }
Endnote
%0 Conference Proceedings %A Hui, Kai %A Berberich, Klaus %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Transitivity, Time Consumption, and Quality of Preference Judgments in Crowdsourcing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-1F75-5 %R 10.1007/978-3-319-56608-5_19 %D 2017 %B 39th European Conference on Information Retrieval %Z date of event: 2017-04-09 - 2017-04-13 %C Aberdeen, UK %B Advances in Information Retrieval %E Jose, Joemon M.; Hauff, Claudia; Altingovde, Ismail Sengor; Song, Dawei; Albakour, Dyaa; Watt, Stuart; Tait, John %P 239 - 251 %I Springer %@ 978-3-319-56607-8 %B Lecture Notes in Computer Science %N 10193
[28]
K. Hui, A. Yates, K. Berberich, and G. de Melo, “Position-Aware Representations for Relevance Matching in Neural Information Retrieval,” in WWW’17 Companion, Perth, Australia, 2017.
Export
BibTeX
@inproceedings{HuiWWW2017, TITLE = {Position-Aware Representations for Relevance Matching in Neural Information Retrieval}, AUTHOR = {Hui, Kai and Yates, Andrew and Berberich, Klaus and de Melo, Gerard}, LANGUAGE = {eng}, ISBN = {978-1-4503-4914-7}, DOI = {10.1145/3041021.3054258}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {WWW'17 Companion}, PAGES = {799--800}, ADDRESS = {Perth, Australia}, }
Endnote
%0 Conference Proceedings %A Hui, Kai %A Yates, Andrew %A Berberich, Klaus %A de Melo, Gerard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T Position-Aware Representations for Relevance Matching in Neural Information Retrieval : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-90A4-B %R 10.1145/3041021.3054258 %D 2017 %B 26th International Conference on World Wide Web %Z date of event: 2017-04-03 - 2017-04-07 %C Perth, Australia %B WWW'17 Companion %P 799 - 800 %I ACM %@ 978-1-4503-4914-7
[29]
K. Hui, A. Yates, K. Berberich, and G. de Melo, “A Position-Aware Deep Model for Relevance Matching in Information Retrieval,” 2017. [Online]. Available: http://arxiv.org/abs/1704.03940. (arXiv: 1704.03940)
Abstract
In order to adopt deep learning for information retrieval, models are needed that can capture all relevant information required to assess the relevance of a document to a given user query. While previous works have successfully captured unigram term matches, how to fully employ position-dependent information such as proximity and term dependencies has been insufficiently explored. In this work, we propose a novel neural IR model named PACRR (Position-Aware Convolutional-Recurrent Relevance), aiming at better modeling position-dependent interactions between a query and a document via convolutional layers as well as recurrent layers. Extensive experiments on six years' TREC Web Track data confirm that the proposed model yields better results under different benchmarks.
Export
BibTeX
@online{DBLP:journals/corr/HuiYBM17, TITLE = {A Position-Aware Deep Model for Relevance Matching in Information Retrieval}, AUTHOR = {Hui, Kai and Yates, Andrew and Berberich, Klaus and de Melo, Gerard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1704.03940}, EPRINT = {1704.03940}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {In order to adopt deep learning for information retrieval, models are needed that can capture all relevant information required to assess the relevance of a document to a given user query. While previous works have successfully captured unigram term matches, how to fully employ position-dependent information such as proximity and term dependencies has been insufficiently explored. In this work, we propose a novel neural IR model named PACRR (Position-Aware Convolutional-Recurrent Relevance), aiming at better modeling position-dependent interactions between a query and a document via convolutional layers as well as recurrent layers. Extensive experiments on six years' TREC Web Track data confirm that the proposed model yields better results under different benchmarks.}, }
Endnote
%0 Report %A Hui, Kai %A Yates, Andrew %A Berberich, Klaus %A de Melo, Gerard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T A Position-Aware Deep Model for Relevance Matching in Information Retrieval : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-90A8-3 %U http://arxiv.org/abs/1704.03940 %D 2017 %X In order to adopt deep learning for information retrieval, models are needed that can capture all relevant information required to assess the relevance of a document to a given user query. While previous works have successfully captured unigram term matches, how to fully employ position-dependent information such as proximity and term dependencies has been insufficiently explored. In this work, we propose a novel neural IR model named PACRR (Position-Aware Convolutional-Recurrent Relevance), aiming at better modeling position-dependent interactions between a query and a document via convolutional layers as well as recurrent layers. Extensive experiments on six years' TREC Web Track data confirm that the proposed model yields better results under different benchmarks. %K Computer Science, Information Retrieval, cs.IR,Computer Science, Computation and Language, cs.CL
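The PACRR abstract above centers on position-dependent query-document interactions fed into convolutional and recurrent layers. As a rough, dependency-light sketch of only the input side of such a model (not the published architecture), the following Python builds the query-document similarity matrix those layers consume and applies k-max pooling per query term; the toy embeddings and function names are invented here.

import numpy as np

rng = np.random.default_rng(0)
emb = {w: rng.normal(size=8) for w in "cheap flights to perth airline deals".split()}

def sim_matrix(query, doc):
    # Cell (i, j) holds the cosine similarity of query term i and the term at
    # document position j; this position-aware matrix is the raw signal a
    # convolutional relevance matcher operates on.
    Q = np.stack([emb[w] for w in query])
    D = np.stack([emb[w] for w in doc])
    Q /= np.linalg.norm(Q, axis=1, keepdims=True)
    D /= np.linalg.norm(D, axis=1, keepdims=True)
    return Q @ D.T

def k_max_pool(M, k=2):
    # Keep each query term's k strongest matches, one simple way to summarize
    # matching signals before sequence modeling.
    return np.sort(M, axis=1)[:, ::-1][:, :k]

M = sim_matrix(["cheap", "flights"], ["airline", "deals", "to", "perth"])
print(k_max_pool(M))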
[30]
R. Jäschke, J. Strötgen, E. Krotova, and F. Fischer, “„Der Helmut Kohl unter den Brotaufstrichen“ - Zur Extraktion vossianischer Antonomasien aus großen Zeitungskorpora,” in DHd 2017, 4. Tagung des Verbands Digital Humanities im deutschsprachigen Raum e.V., Bern, Switzerland, 2017.
Export
BibTeX
@inproceedings{JaeschkeEtAl2017_DHD, TITLE = {{{``Der Helmut Kohl unter den Brotaufstrichen'' -- Zur Extraktion vossianischer Antonomasien aus gro{\ss}en Zeitungskorpora}}}, AUTHOR = {J{\"a}schke, Robert and Str{\"o}tgen, Jannik and Krotova, Elena and Fischer, Frank}, LANGUAGE = {deu}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {DHd 2017, 4. Tagung des Verbands Digital Humanities im deutschsprachigen Raum e.V.}, PAGES = {120--124}, ADDRESS = {Bern, Switzerland}, }
Endnote
%0 Conference Proceedings %A Jäschke, Robert %A Strötgen, Jannik %A Krotova, Elena %A Fischer, Frank %+ External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations External Organizations %T „Der Helmut Kohl unter den Brotaufstrichen“ - Zur Extraktion vossianischer Antonomasien aus großen Zeitungskorpora : %G deu %U http://hdl.handle.net/11858/00-001M-0000-002C-4E05-A %D 2017 %B 4. Tagung des Verbands Digital Humanities im deutschsprachigen Raum e.V. %Z date of event: 2017-02-13 - 2017-02-18 %C Bern, Switzerland %B DHd 2017 %P 120 - 124
[31]
J. Kalofolias, E. Galbrun, and P. Miettinen, “From Sets of Good Redescriptions to Good Sets of Redescriptions,” in 16th IEEE International Conference on Data Mining (ICDM 2016), Barcelona, Spain, 2017.
Export
BibTeX
@inproceedings{kalofolias16from, TITLE = {From Sets of Good Redescriptions to Good Sets of Redescriptions}, AUTHOR = {Kalofolias, Janis and Galbrun, Esther and Miettinen, Pauli}, LANGUAGE = {eng}, ISBN = {978-1-5090-5473-2}, DOI = {10.1109/ICDM.2016.0032}, PUBLISHER = {IEEE}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {16th IEEE International Conference on Data Mining (ICDM 2016)}, PAGES = {211--220}, ADDRESS = {Barcelona, Spain}, }
Endnote
%0 Conference Proceedings %A Kalofolias, Janis %A Galbrun, Esther %A Miettinen, Pauli %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T From Sets of Good Redescriptions to Good Sets of Redescriptions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-224D-A %R 10.1109/ICDM.2016.0032 %D 2017 %8 02.02.2017 %B 16th International Conference on Data Mining %Z date of event: 2016-12-12 - 2016-12-15 %C Barcelona, Spain %B 16th IEEE International Conference on Data Mining %P 211 - 220 %I IEEE %@ 978-1-5090-5473-2
[32]
E. Kuzey, “Populating Knowledge Bases with Temporal Information,” Universität des Saarlandes, Saarbrücken, 2017.
Export
BibTeX
@phdthesis{KuzeyPhd2017, TITLE = {Populating Knowledge Bases with Temporal Information}, AUTHOR = {Kuzey, Erdal}, LANGUAGE = {eng}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, }
Endnote
%0 Thesis %A Kuzey, Erdal %Y Weikum, Gerhard %A referee: de Rijke, Maarten %A referee: Suchanek, Fabian %+ Databases and Information Systems, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Populating Knowledge Bases with Temporal Information : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-EAE5-7 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P XIV, 143 p. %V phd %9 phd %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6811/ %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de
[33]
L. Lange, “Time in Newspaper: A Large-Scale Analysis of Temporal Expressions in News Corpora,” Universität des Saarlandes, Saarbrücken, 2017.
Export
BibTeX
@mastersthesis{LangeBcS2017, TITLE = {Time in Newspaper: {A} Large-Scale Analysis of Temporal Expressions in News Corpora}, AUTHOR = {Lange, Lukas}, LANGUAGE = {eng}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, TYPE = {Bachelor's thesis}, }
Endnote
%0 Thesis %A Lange, Lukas %Y Strötgen, Jannik %A referee: Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Time in Newspaper: A Large-Scale Analysis of Temporal Expressions in News Corpora : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5D08-B %I Universität des Saarlandes %C Saarbrücken %D 2017 %P 77 p. %V bachelor %9 bachelor
[34]
P. Mandros, M. Boley, and J. Vreeken, “Discovering Reliable Approximate Functional Dependencies,” 2017. [Online]. Available: http://arxiv.org/abs/1705.09391. (arXiv: 1705.09391)
Abstract
Given a database and a target attribute of interest, how can we tell whether there exists a functional, or approximately functional dependence of the target on any set of other attributes in the data? How can we reliably, without bias to sample size or dimensionality, measure the strength of such a dependence? And, how can we efficiently discover the optimal or $\alpha$-approximate top-$k$ dependencies? These are exactly the questions we answer in this paper. As we want to be agnostic on the form of the dependence, we adopt an information-theoretic approach, and construct a reliable, bias correcting score that can be efficiently computed. Moreover, we give an effective optimistic estimator of this score, by which for the first time we can mine the approximate functional dependencies from data with guarantees of optimality. Empirical evaluation shows that the derived score achieves a good bias for variance trade-off, can be used within an efficient discovery algorithm, and indeed discovers meaningful dependencies. Most important, it remains reliable in the face of data sparsity.
Export
BibTeX
@online{DBLP:journals/corr/MandrosBV17, TITLE = {Discovering Reliable Approximate Functional Dependencies}, AUTHOR = {Mandros, Panagiotis and Boley, Mario and Vreeken, Jilles}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1705.09391}, EPRINT = {1705.09391}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Given a database and a target attribute of interest, how can we tell whether there exists a functional, or approximately functional dependence of the target on any set of other attributes in the data? How can we reliably, without bias to sample size or dimensionality, measure the strength of such a dependence? And, how can we efficiently discover the optimal or $\alpha$-approximate top-$k$ dependencies? These are exactly the questions we answer in this paper. As we want to be agnostic on the form of the dependence, we adopt an information-theoretic approach, and construct a reliable, bias correcting score that can be efficiently computed. Moreover, we give an effective optimistic estimator of this score, by which for the first time we can mine the approximate functional dependencies from data with guarantees of optimality. Empirical evaluation shows that the derived score achieves a good bias for variance trade-off, can be used within an efficient discovery algorithm, and indeed discovers meaningful dependencies. Most important, it remains reliable in the face of data sparsity.}, }
Endnote
%0 Report %A Mandros, Panagiotis %A Boley, Mario %A Vreeken, Jilles %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Discovering Reliable Approximate Functional Dependencies : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-90F8-D %U http://arxiv.org/abs/1705.09391 %D 2017 %X Given a database and a target attribute of interest, how can we tell whether there exists a functional, or approximately functional dependence of the target on any set of other attributes in the data? How can we reliably, without bias to sample size or dimensionality, measure the strength of such a dependence? And, how can we efficiently discover the optimal or $\alpha$-approximate top-$k$ dependencies? These are exactly the questions we answer in this paper. As we want to be agnostic on the form of the dependence, we adopt an information-theoretic approach, and construct a reliable, bias correcting score that can be efficiently computed. Moreover, we give an effective optimistic estimator of this score, by which for the first time we can mine the approximate functional dependencies from data with guarantees of optimality. Empirical evaluation shows that the derived score achieves a good bias for variance trade-off, can be used within an efficient discovery algorithm, and indeed discovers meaningful dependencies. Most important, it remains reliable in the face of data sparsity. %K Computer Science, Databases, cs.DB,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Information Theory, cs.IT,Mathematics, Information Theory, math.IT
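The score described in the abstract above corrects the bias of plug-in dependence estimates. As a point of reference, this sketch computes only the naive, uncorrected fraction of information F(X; Y) = (H(Y) - H(Y|X)) / H(Y) that such a correction starts from; the data and function names are illustrative, not the paper's corrected estimator.

from collections import Counter
from math import log2

def entropy(values):
    n = len(values)
    return -sum(c / n * log2(c / n) for c in Counter(values).values())

def fraction_of_information(x_cols, y):
    # F(X; Y) lies in [0, 1]; it is 1 iff Y is an exact function of X.
    # This plug-in estimate is biased upward on small or high-dimensional
    # samples, which is exactly what the paper's corrected score addresses.
    rows = list(zip(*x_cols))
    h_y = entropy(y)
    h_y_given_x = entropy(list(zip(rows, y))) - entropy(rows)
    return (h_y - h_y_given_x) / h_y

zip_code = [1, 1, 2, 2, 3]
city = ["A", "A", "B", "B", "C"]
print(fraction_of_information([zip_code], city))  # 1.0: city is determined by zip code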
[35]
A. Marx and J. Vreeken, “Causal Inference on Multivariate Mixed-Type Data by Minimum Description Length,” 2017. [Online]. Available: http://arxiv.org/abs/1702.06385. (arXiv: 1702.06385)
Abstract
Given data over the joint distribution of two univariate or multivariate random variables $X$ and $Y$ of mixed or single type data, we consider the problem of inferring the most likely causal direction between $X$ and $Y$. We take an information theoretic approach, from which it follows that first describing the data over cause and then that of effect given cause is shorter than the reverse direction. For practical inference, we propose a score for causal models for mixed type data based on the Minimum Description Length (MDL) principle. In particular, we model dependencies between $X$ and $Y$ using classification and regression trees. Inferring the optimal model is NP-hard, and hence we propose Crack, a fast greedy algorithm to infer the most likely causal direction directly from the data. Empirical evaluation on synthetic, benchmark, and real world data shows that Crack reliably and with high accuracy infers the correct causal direction on both univariate and multivariate cause--effect pairs over both single and mixed type data.
Export
BibTeX
@online{DBLP:journals/corr/MarxV17, TITLE = {Causal Inference on Multivariate Mixed-Type Data by Minimum Description Length}, AUTHOR = {Marx, Alexander and Vreeken, Jilles}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1702.06385}, EPRINT = {1702.06385}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Given data over the joint distribution of two univariate or multivariate random variables $X$ and $Y$ of mixed or single type data, we consider the problem of inferring the most likely causal direction between $X$ and $Y$. We take an information theoretic approach, from which it follows that first describing the data over cause and then that of effect given cause is shorter than the reverse direction. For practical inference, we propose a score for causal models for mixed type data based on the Minimum Description Length (MDL) principle. In particular, we model dependencies between $X$ and $Y$ using classification and regression trees. Inferring the optimal model is NP-hard, and hence we propose Crack, a fast greedy algorithm to infer the most likely causal direction directly from the data. Empirical evaluation on synthetic, benchmark, and real world data shows that Crack reliably and with high accuracy infers the correct causal direction on both univariate and multivariate cause--effect pairs over both single and mixed type data.}, }
Endnote
%0 Report %A Marx, Alexander %A Vreeken, Jilles %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Causal Inference on Multivariate Mixed-Type Data by Minimum Description Length : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-90EF-3 %U http://arxiv.org/abs/1702.06385 %D 2017 %X Given data over the joint distribution of two univariate or multivariate random variables $X$ and $Y$ of mixed or single type data, we consider the problem of inferring the most likely causal direction between $X$ and $Y$. We take an information theoretic approach, from which it follows that first describing the data over cause and then that of effect given cause is shorter than the reverse direction. For practical inference, we propose a score for causal models for mixed type data based on the Minimum Description Length (MDL) principle. In particular, we model dependencies between $X$ and $Y$ using classification and regression trees. Inferring the optimal model is NP-hard, and hence we propose Crack, a fast greedy algorithm to infer the most likely causal direction directly from the data. Empirical evaluation on synthetic, benchmark, and real world data shows that Crack reliably and with high accuracy infers the correct causal direction on both univariate and multivariate cause--effect pairs over both single and mixed type data. %K Statistics, Machine Learning, stat.ML,Computer Science, Learning, cs.LG
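Crack itself models conditionals with classification and regression trees; the sketch below is only a didactic stand-in for the MDL principle it applies: compare L(X) + L(Y|X) against L(Y) + L(X|Y) and prefer the shorter description, here with polynomial regressions and Gaussian code lengths on synthetic data. All names and parameters are invented for illustration.

import numpy as np

def gaussian_bits(residuals):
    # Idealized Shannon code length, in bits, of residuals under a fitted Gaussian.
    var = residuals.var() + 1e-12
    return 0.5 * len(residuals) * np.log2(2 * np.pi * np.e * var)

def total_bits(cause, effect, degree=3):
    # L(cause) + L(effect | cause): code the marginal, then the residuals of a
    # regression of effect on cause.
    coeffs = np.polyfit(cause, effect, degree)
    resid = effect - np.polyval(coeffs, cause)
    return gaussian_bits(cause - cause.mean()) + gaussian_bits(resid)

rng = np.random.default_rng(1)
x = rng.uniform(-1, 1, 2000)
y = x ** 3 + 0.05 * rng.normal(size=2000)  # ground truth: X causes Y

# The direction admitting the shorter two-part description is inferred as causal.
print("X -> Y" if total_bits(x, y) < total_bits(y, x) else "Y -> X")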
[36]
F. Meawad, M. H. Gad-Elrab, and E. Hemayed, “Designing Mobile Augmented Reality Experiences Using Friendly Markers,” in 4th International Conference on User Science and Engineering (i-USEr 2016), Melaka, Malaysia, 2017.
Export
BibTeX
@inproceedings{Meawad2017, TITLE = {Designing Mobile Augmented Reality Experiences Using Friendly Markers}, AUTHOR = {Meawad, Fatma and Gad-Elrab, Mohamed H. and Hemayed, Elsayed}, LANGUAGE = {eng}, ISBN = {978-1-5090-263-9}, DOI = {10.1109/IUSER.2016.7857937}, PUBLISHER = {IEEE}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {4th International Conference on User Science and Engineering (i-USEr 2016)}, PAGES = {75--80}, ADDRESS = {Melaka, Malaysia}, }
Endnote
%0 Conference Proceedings %A Meawad, Fatma %A Gad-Elrab, Mohamed H. %A Hemayed, Elsayed %+ External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T Designing Mobile Augmented Reality Experiences Using Friendly Markers : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-CF28-A %R 10.1109/IUSER.2016.7857937 %D 2017 %B 4th International Conference on User Science and Engineering %Z date of event: 2016-08-23 - 2016-08-25 %C Melaka, Malaysia %B 4th International Conference on User Science and Engineering %P 75 - 80 %I IEEE %@ 978-1-5090-263-9
[37]
S. Metzler, S. Günnemann, and P. Miettinen, “Hyperbolae Are No Hyperbole: Modelling Communities That Are Not Cliques,” in 16th IEEE International Conference on Data Mining (ICDM 2016), Barcelona, Spain, 2017.
Export
BibTeX
@inproceedings{metzler16hyperbolae, TITLE = {Hyperbolae Are No Hyperbole: {Modelling} Communities That Are Not Cliques}, AUTHOR = {Metzler, Saskia and G{\"u}nnemann, Stephan and Miettinen, Pauli}, LANGUAGE = {eng}, ISBN = {978-1-5090-5473-2}, DOI = {10.1109/ICDM.2016.0044}, PUBLISHER = {IEEE}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {16th IEEE International Conference on Data Mining (ICDM 2016)}, PAGES = {330--339}, ADDRESS = {Barcelona, Spain}, }
Endnote
%0 Conference Proceedings %A Metzler, Saskia %A Günnemann, Stephan %A Miettinen, Pauli %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Hyperbolae Are No Hyperbole: Modelling Communities That Are Not Cliques : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-225F-F %R 10.1109/ICDM.2016.0044 %D 2017 %8 02.02.2017 %B 16th International Conference on Data Mining %Z date of event: 2016-12-12 - 2016-12-15 %C Barcelona, Spain %B 16th IEEE International Conference on Data Mining %P 330 - 339 %I IEEE %@ 978-1-5090-5473-2
[38]
P. Mirza, S. Razniewski, F. Darari, and G. Weikum, “Cardinal Virtues: Extracting Relation Cardinalities from Text,” 2017. [Online]. Available: http://arxiv.org/abs/1704.04455. (arXiv: 1704.04455)
Abstract
Information extraction (IE) from text has largely focused on relations between individual entities, such as who has won which award. However, some facts are never fully mentioned, and no IE method has perfect recall. Thus, it is beneficial to also tap contents about the cardinalities of these relations, for example, how many awards someone has won. We introduce this novel problem of extracting cardinalities and discuss the specific challenges that set it apart from standard IE. We present a distant supervision method using conditional random fields. A preliminary evaluation results in precision between 3% and 55%, depending on the difficulty of relations.
Export
BibTeX
@online{Mirza2017, TITLE = {Cardinal Virtues: Extracting Relation Cardinalities from Text}, AUTHOR = {Mirza, Paramita and Razniewski, Simon and Darari, Fariz and Weikum, Gerhard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1704.04455}, EPRINT = {1704.04455}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Information extraction (IE) from text has largely focused on relations between individual entities, such as who has won which award. However, some facts are never fully mentioned, and no IE method has perfect recall. Thus, it is beneficial to also tap contents about the cardinalities of these relations, for example, how many awards someone has won. We introduce this novel problem of extracting cardinalities and discuss the specific challenges that set it apart from standard IE. We present a distant supervision method using conditional random fields. A preliminary evaluation results in precision between 3% and 55%, depending on the difficulty of relations.}, }
Endnote
%0 Report %A Mirza, Paramita %A Razniewski, Simon %A Darari, Fariz %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Cardinal Virtues: Extracting Relation Cardinalities from Text : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8128-9 %U http://arxiv.org/abs/1704.04455 %D 2017 %X Information extraction (IE) from text has largely focused on relations between individual entities, such as who has won which award. However, some facts are never fully mentioned, and no IE method has perfect recall. Thus, it is beneficial to also tap contents about the cardinalities of these relations, for example, how many awards someone has won. We introduce this novel problem of extracting cardinalities and discuss the specific challenges that set it apart from standard IE. We present a distant supervision method using conditional random fields. A preliminary evaluation results in precision between 3% and 55%, depending on the difficulty of relations. %K Computer Science, Computation and Language, cs.CL
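The paper approaches cardinality extraction with conditional random fields trained by distant supervision; as a hedged illustration of the task itself (not the CRF), this sketch spots numerals and number words preceding the object noun of a relation. The patterns, lexicon, and examples are invented for this sketch.

import re

NUMBER_WORDS = {"one": 1, "two": 2, "three": 3, "four": 4, "five": 5,
                "six": 6, "seven": 7, "eight": 8, "nine": 9, "ten": 10}

def extract_cardinality(sentence, object_noun):
    # Spot "<number> <object_noun>", e.g. "won three awards" -> 3; a learned
    # extractor must additionally handle hedges, paraphrases, and counts
    # spread across a whole document.
    pattern = re.compile(r"\b(\d+|%s)\s+%s\b" % ("|".join(NUMBER_WORDS), object_noun))
    m = pattern.search(sentence.lower())
    if m is None:
        return None
    token = m.group(1)
    return int(token) if token.isdigit() else NUMBER_WORDS[token]

print(extract_cardinality("She has won three awards so far.", "awards"))  # 3
print(extract_cardinality("The director made 12 films.", "films"))  # 12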
[39]
A. Mishra and K. Berberich, “How do Order and Proximity Impact the Readability of Event Summaries?,” in Advances in Information Retrieval (ECIR 2017), Aberdeen, UK, 2017.
Export
BibTeX
@inproceedings{DBLP:conf/ecir/MishraB17, TITLE = {How do Order and Proximity Impact the Readability of Event Summaries?}, AUTHOR = {Mishra, Arunav and Berberich, Klaus}, LANGUAGE = {eng}, ISBN = {978-3-319-56607-8}, DOI = {10.1007/978-3-319-56608-5_17}, PUBLISHER = {Springer}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {Advances in Information Retrieval (ECIR 2017)}, EDITOR = {Jose, Joemon M. and Hauff, Claudia and Altingovde, Ismail Sengor and Song, Dawei and Albakour, Dyaa and Watt, Stuart and Tait, John}, PAGES = {212--225}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {10193}, ADDRESS = {Aberdeen, UK}, }
Endnote
%0 Conference Proceedings %A Mishra, Arunav %A Berberich, Klaus %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T How do Order and Proximity Impact the Readability of Event Summaries? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-20D9-B %R 10.1007/978-3-319-56608-5_17 %D 2017 %B 39th European Conference on Information Retrieval %Z date of event: 2017-04-09 - 2017-04-13 %C Aberdeen, UK %B Advances in Information Retrieval %E Jose, Joemon M.; Hauff, Claudia; Altingovde, Ismail Sengor; Song, Dawei; Albakour, Dyaa; Watt, Stuart; Tait, John %P 212 - 225 %I Springer %@ 978-3-319-56607-8 %B Lecture Notes in Computer Science %N 10193
[40]
S. Mukherjee, “Probabilistic Graphical Models for Credibility Analysis in Evolving Online Communities,” Universität des Saarlandes, Saarbrücken, 2017.
Abstract
One of the major hurdles preventing the full exploitation of information from online communities is the widespread concern regarding the quality and credibility of user-contributed content. Prior works in this domain operate on a static snapshot of the community, making strong assumptions about the structure of the data (e.g., relational tables), or consider only shallow features for text classification. To address the above limitations, we propose probabilistic graphical models that can leverage the joint interplay between multiple factors in online communities --- like user interactions, community dynamics, and textual content --- to automatically assess the credibility of user-contributed online content, and the expertise of users and their evolution with user-interpretable explanation. To this end, we devise new models based on Conditional Random Fields for different settings like incorporating partial expert knowledge for semi-supervised learning, and handling discrete labels as well as numeric ratings for fine-grained analysis. This enables applications such as extracting reliable side-effects of drugs from user-contributed posts in health forums, and identifying credible content in news communities. Online communities are dynamic, as users join and leave, adapt to evolving trends, and mature over time. To capture these dynamics, we propose generative models based on Hidden Markov Model, Latent Dirichlet Allocation, and Brownian Motion to trace the continuous evolution of user expertise and their language model over time. This allows us to identify expert users and credible content jointly over time, improving state-of-the-art recommender systems by explicitly considering the maturity of users. This also enables applications such as identifying helpful product reviews, and detecting fake and anomalous reviews with limited information.
Export
BibTeX
@phdthesis{Mukherjeephd17, TITLE = {Probabilistic Graphical Models for Credibility Analysis in Evolving Online Communities}, AUTHOR = {Mukherjee, Subhabrata}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-69269}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, ABSTRACT = {One of the major hurdles preventing the full exploitation of information from online communities is the widespread concern regarding the quality and credibility of user-contributed content. Prior works in this domain operate on a static snapshot of the community, making strong assumptions about the structure of the data (e.g., relational tables), or consider only shallow features for text classification. To address the above limitations, we propose probabilistic graphical models that can leverage the joint interplay between multiple factors in online communities --- like user interactions, community dynamics, and textual content --- to automatically assess the credibility of user-contributed online content, and the expertise of users and their evolution with user-interpretable explanation. To this end, we devise new models based on Conditional Random Fields for different settings like incorporating partial expert knowledge for semi-supervised learning, and handling discrete labels as well as numeric ratings for fine-grained analysis. This enables applications such as extracting reliable side-effects of drugs from user-contributed posts in health forums, and identifying credible content in news communities. Online communities are dynamic, as users join and leave, adapt to evolving trends, and mature over time. To capture these dynamics, we propose generative models based on Hidden Markov Model, Latent Dirichlet Allocation, and Brownian Motion to trace the continuous evolution of user expertise and their language model over time. This allows us to identify expert users and credible content jointly over time, improving state-of-the-art recommender systems by explicitly considering the maturity of users. This also enables applications such as identifying helpful product reviews, and detecting fake and anomalous reviews with limited information.}, }
Endnote
%0 Thesis %A Mukherjee, Subhabrata %Y Weikum, Gerhard %A referee: Han, Jiawei %A referee: Günnemann, Stephan %+ Databases and Information Systems, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Probabilistic Graphical Models for Credibility Analysis in Evolving Online Communities : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-A648-0 %U urn:nbn:de:bsz:291-scidok-69269 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P 166 p. %V phd %9 phd %X One of the major hurdles preventing the full exploitation of information from online communities is the widespread concern regarding the quality and credibility of user-contributed content. Prior works in this domain operate on a static snapshot of the community, making strong assumptions about the structure of the data (e.g., relational tables), or consider only shallow features for text classification. To address the above limitations, we propose probabilistic graphical models that can leverage the joint interplay between multiple factors in online communities --- like user interactions, community dynamics, and textual content --- to automatically assess the credibility of user-contributed online content, and the expertise of users and their evolution with user-interpretable explanation. To this end, we devise new models based on Conditional Random Fields for different settings like incorporating partial expert knowledge for semi-supervised learning, and handling discrete labels as well as numeric ratings for fine-grained analysis. This enables applications such as extracting reliable side-effects of drugs from user-contributed posts in health forums, and identifying credible content in news communities. Online communities are dynamic, as users join and leave, adapt to evolving trends, and mature over time. To capture these dynamics, we propose generative models based on Hidden Markov Model, Latent Dirichlet Allocation, and Brownian Motion to trace the continuous evolution of user expertise and their language model over time. This allows us to identify expert users and credible content jointly over time, improving state-of-the-art recommender systems by explicitly considering the maturity of users. This also enables applications such as identifying helpful product reviews, and detecting fake and anomalous reviews with limited information. %U http://scidok.sulb.uni-saarland.de/doku/lic_ohne_pod.php?la=de %U http://scidok.sulb.uni-saarland.de/volltexte/2017/6926/
[41]
S. Mukherjee, H. Lamba, and G. Weikum, “Item Recommendation with Evolving User Preferences and Experience,” 2017. [Online]. Available: http://arxiv.org/abs/1705.02519. (arXiv: 1705.02519)
Abstract
Current recommender systems exploit user and item similarities by collaborative filtering. Some advanced methods also consider the temporal evolution of item ratings as a global background process. However, all prior methods disregard the individual evolution of a user's experience level and how this is expressed in the user's writing in a review community. In this paper, we model the joint evolution of user experience, interest in specific item facets, writing style, and rating behavior. This way we can generate individual recommendations that take into account the user's maturity level (e.g., recommending art movies rather than blockbusters for a cinematography expert). As only item ratings and review texts are observables, we capture the user's experience and interests in a latent model learned from her reviews, vocabulary and writing style. We develop a generative HMM-LDA model to trace user evolution, where the Hidden Markov Model (HMM) traces her latent experience progressing over time -- with solely user reviews and ratings as observables over time. The facets of a user's interest are drawn from a Latent Dirichlet Allocation (LDA) model derived from her reviews, as a function of her (again latent) experience level. In experiments with five real-world datasets, we show that our model improves the rating prediction over state-of-the-art baselines, by a substantial margin. We also show, in a use-case study, that our model performs well in the assessment of user experience levels.
Export
BibTeX
@online{Mukherjee2017d, TITLE = {Item Recommendation with Evolving User Preferences and Experience}, AUTHOR = {Mukherjee, Subhabrata and Lamba, Hemank and Weikum, Gerhard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1705.02519}, DOI = {10.1109/ICDM.2015.111}, EPRINT = {1705.02519}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Current recommender systems exploit user and item similarities by collaborative filtering. Some advanced methods also consider the temporal evolution of item ratings as a global background process. However, all prior methods disregard the individual evolution of a user's experience level and how this is expressed in the user's writing in a review community. In this paper, we model the joint evolution of user experience, interest in specific item facets, writing style, and rating behavior. This way we can generate individual recommendations that take into account the user's maturity level (e.g., recommending art movies rather than blockbusters for a cinematography expert). As only item ratings and review texts are observables, we capture the user's experience and interests in a latent model learned from her reviews, vocabulary and writing style. We develop a generative HMM-LDA model to trace user evolution, where the Hidden Markov Model (HMM) traces her latent experience progressing over time -- with solely user reviews and ratings as observables over time. The facets of a user's interest are drawn from a Latent Dirichlet Allocation (LDA) model derived from her reviews, as a function of her (again latent) experience level. In experiments with five real-world datasets, we show that our model improves the rating prediction over state-of-the-art baselines, by a substantial margin. We also show, in a use-case study, that our model performs well in the assessment of user experience levels.}, }
Endnote
%0 Report %A Mukherjee, Subhabrata %A Lamba, Hemank %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Item Recommendation with Evolving User Preferences and Experience : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8103-C %R 10.1109/ICDM.2015.111 %U http://arxiv.org/abs/1705.02519 %D 2017 %X Current recommender systems exploit user and item similarities by collaborative filtering. Some advanced methods also consider the temporal evolution of item ratings as a global background process. However, all prior methods disregard the individual evolution of a user's experience level and how this is expressed in the user's writing in a review community. In this paper, we model the joint evolution of user experience, interest in specific item facets, writing style, and rating behavior. This way we can generate individual recommendations that take into account the user's maturity level (e.g., recommending art movies rather than blockbusters for a cinematography expert). As only item ratings and review texts are observables, we capture the user's experience and interests in a latent model learned from her reviews, vocabulary and writing style. We develop a generative HMM-LDA model to trace user evolution, where the Hidden Markov Model (HMM) traces her latent experience progressing over time -- with solely user reviews and ratings as observables over time. The facets of a user's interest are drawn from a Latent Dirichlet Allocation (LDA) model derived from her reviews, as a function of her (again latent) experience level. In experiments with five real-world datasets, we show that our model improves the rating prediction over state-of-the-art baselines, by a substantial margin. We also show, in a use-case study, that our model performs well in the assessment of user experience levels. %K Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computation and Language, cs.CL,Computer Science, Information Retrieval, cs.IR,cs.SI,Statistics, Machine Learning, stat.ML
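The HMM ingredient of the HMM-LDA model above can be illustrated in isolation. Assuming invented transition and emission matrices, this numpy sketch runs the standard forward recursion over a left-to-right HMM whose latent experience levels can only stay the same or increase, and prints the filtered posterior after each review; it is a toy, not the paper's joint model.

import numpy as np

# Left-to-right HMM over three latent experience levels: a user can stay at a
# level or move up, mirroring the assumption that experience only progresses.
T = np.array([[0.7, 0.3, 0.0],
              [0.0, 0.8, 0.2],
              [0.0, 0.0, 1.0]])
# Emission: probability of a coarse review-language class, from novice-like (0)
# to expert-like (2), at each experience level.
E = np.array([[0.6, 0.3, 0.1],
              [0.2, 0.6, 0.2],
              [0.1, 0.3, 0.6]])
pi = np.array([0.8, 0.15, 0.05])

def forward(obs):
    # Standard HMM forward recursion; returns the filtered posterior over
    # experience levels after each observed review.
    alpha = pi * E[:, obs[0]]
    posteriors = [alpha / alpha.sum()]
    for o in obs[1:]:
        alpha = (alpha @ T) * E[:, o]
        posteriors.append(alpha / alpha.sum())
    return np.array(posteriors)

reviews = [0, 0, 1, 1, 2, 2, 2]  # one user's reviews, growing more expert-like
print(forward(reviews).round(2))  # posterior mass drifts from level 0 to level 2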
[42]
S. Mukherjee, G. Weikum, and C. Danescu-Niculescu-Mizil, “People on Drugs: Credibility of User Statements in Health Communities,” 2017. [Online]. Available: http://arxiv.org/abs/1705.02522. (arXiv: 1705.02522)
Abstract
Online health communities are a valuable source of information for patients and physicians. However, such user-generated resources are often plagued by inaccuracies and misinformation. In this work we propose a method for automatically establishing the credibility of user-generated medical statements and the trustworthiness of their authors by exploiting linguistic cues and distant supervision from expert sources. To this end we introduce a probabilistic graphical model that jointly learns user trustworthiness, statement credibility, and language objectivity. We apply this methodology to the task of extracting rare or unknown side-effects of medical drugs --- this being one of the problems where large scale non-expert data has the potential to complement expert medical knowledge. We show that our method can reliably extract side-effects and filter out false statements, while identifying trustworthy users that are likely to contribute valuable medical information.
Export
BibTeX
@online{escidoc:2457314, TITLE = {People on Drugs: Credibility of User Statements in Health Communities}, AUTHOR = {Mukherjee, Subhabrata and Weikum, Gerhard and Danescu-Niculescu-Mizil, Cristian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1705.02522}, EPRINT = {1705.02522}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Online health communities are a valuable source of information for patients and physicians. However, such user-generated resources are often plagued by inaccuracies and misinformation. In this work we propose a method for automatically establishing the credibility of user-generated medical statements and the trustworthiness of their authors by exploiting linguistic cues and distant supervision from expert sources. To this end we introduce a probabilistic graphical model that jointly learns user trustworthiness, statement credibility, and language objectivity. We apply this methodology to the task of extracting rare or unknown side-effects of medical drugs --- this being one of the problems where large scale non-expert data has the potential to complement expert medical knowledge. We show that our method can reliably extract side-effects and filter out false statements, while identifying trustworthy users that are likely to contribute valuable medical information.}, }
Endnote
%0 Report %A Mukherjee, Subhabrata %A Weikum, Gerhard %A Danescu-Niculescu-Mizil, Cristian %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T People on Drugs: Credibility of User Statements in Health Communities : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-80FE-2 %U http://arxiv.org/abs/1705.02522 %D 2017 %X Online health communities are a valuable source of information for patients and physicians. However, such user-generated resources are often plagued by inaccuracies and misinformation. In this work we propose a method for automatically establishing the credibility of user-generated medical statements and the trustworthiness of their authors by exploiting linguistic cues and distant supervision from expert sources. To this end we introduce a probabilistic graphical model that jointly learns user trustworthiness, statement credibility, and language objectivity. We apply this methodology to the task of extracting rare or unknown side-effects of medical drugs --- this being one of the problems where large scale non-expert data has the potential to complement expert medical knowledge. We show that our method can reliably extract side-effects and filter out false statements, while identifying trustworthy users that are likely to contribute valuable medical information. %K Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computation and Language, cs.CL,Computer Science, Information Retrieval, cs.IR,cs.SI,Statistics, Machine Learning, stat.ML
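As a hedged sketch of the distant-supervision ingredient described above (not the joint probabilistic graphical model), the following Python labels user statements by matching them against a small expert side-effect lexicon; the lexicon, drug names, and sentences are invented.

# Distant supervision in its simplest form: a user statement about a drug is
# labeled by whether the side-effect it reports appears in an expert lexicon.
EXPERT_SIDE_EFFECTS = {
    "drugA": {"headache", "nausea"},
    "drugB": {"dizziness"},
}

def distant_label(drug, statement):
    # "supported" statements agree with expert knowledge; unmatched ones remain
    # candidates for rare or unknown side-effects rather than being discarded.
    words = set(statement.lower().split())
    matched = words & EXPERT_SIDE_EFFECTS.get(drug, set())
    return ("supported", matched) if matched else ("unverified", set())

print(distant_label("drugA", "constant headache after taking it"))
print(distant_label("drugA", "it gave me insomnia"))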
[43]
S. Mukherjee and G. Weikum, “People on Media: Jointly Identifying Credible News and Trustworthy Citizen Journalists in Online Communities,” 2017. [Online]. Available: http://arxiv.org/abs/1705.02667. (arXiv: 1705.02667)
Abstract
Media seems to have become more partisan, often providing a biased coverage of news catering to the interest of specific groups. It is therefore essential to identify credible information content that provides an objective narrative of an event. News communities such as digg, reddit, or newstrust offer recommendations, reviews, quality ratings, and further insights on journalistic works. However, there is a complex interaction between different factors in such online communities: fairness and style of reporting, language clarity and objectivity, topical perspectives (like political viewpoint), expertise and bias of community members, and more. This paper presents a model to systematically analyze the different interactions in a news community between users, news, and sources. We develop a probabilistic graphical model that leverages this joint interaction to identify 1) highly credible news articles, 2) trustworthy news sources, and 3) expert users who perform the role of "citizen journalists" in the community. Our method extends CRF models to incorporate real-valued ratings, as some communities have very fine-grained scales that cannot be easily discretized without losing information. To the best of our knowledge, this paper is the first full-fledged analysis of credibility, trust, and expertise in news communities.
Export
BibTeX
@online{escidoc:2457309, TITLE = {People on Media: Jointly Identifying Credible News and Trustworthy Citizen Journalists in Online Communities}, AUTHOR = {Mukherjee, Subhabrata and Weikum, Gerhard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1705.02667}, DOI = {10.1145/2806416.2806537}, EPRINT = {1705.02667}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Media seems to have become more partisan, often providing a biased coverage of news catering to the interest of specific groups. It is therefore essential to identify credible information content that provides an objective narrative of an event. News communities such as digg, reddit, or newstrust offer recommendations, reviews, quality ratings, and further insights on journalistic works. However, there is a complex interaction between different factors in such online communities: fairness and style of reporting, language clarity and objectivity, topical perspectives (like political viewpoint), expertise and bias of community members, and more. This paper presents a model to systematically analyze the different interactions in a news community between users, news, and sources. We develop a probabilistic graphical model that leverages this joint interaction to identify 1) highly credible news articles, 2) trustworthy news sources, and 3) expert users who perform the role of "citizen journalists" in the community. Our method extends CRF models to incorporate real-valued ratings, as some communities have very fine-grained scales that cannot be easily discretized without losing information. To the best of our knowledge, this paper is the first full-fledged analysis of credibility, trust, and expertise in news communities.}, }
Endnote
%0 Report %A Mukherjee, Subhabrata %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T People on Media: Jointly Identifying Credible News and Trustworthy Citizen Journalists in Online Communities : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-80F7-0 %R 10.1145/2806416.2806537 %U http://arxiv.org/abs/1705.02667 %D 2017 %X Media seems to have become more partisan, often providing a biased coverage of news catering to the interest of specific groups. It is therefore essential to identify credible information content that provides an objective narrative of an event. News communities such as digg, reddit, or newstrust offer recommendations, reviews, quality ratings, and further insights on journalistic works. However, there is a complex interaction between different factors in such online communities: fairness and style of reporting, language clarity and objectivity, topical perspectives (like political viewpoint), expertise and bias of community members, and more. This paper presents a model to systematically analyze the different interactions in a news community between users, news, and sources. We develop a probabilistic graphical model that leverages this joint interaction to identify 1) highly credible news articles, 2) trustworthy news sources, and 3) expert users who perform the role of "citizen journalists" in the community. Our method extends CRF models to incorporate real-valued ratings, as some communities have very fine-grained scales that cannot be easily discretized without losing information. To the best of our knowledge, this paper is the first full-fledged analysis of credibility, trust, and expertise in news communities. %K Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computation and Language, cs.CL,Computer Science, Information Retrieval, cs.IR,cs.SI,Statistics, Machine Learning, stat.ML
[44]
S. Mukherjee, S. Guennemann, and G. Weikum, “Personalized Item Recommendation with Continuous Experience Evolution of Users using Brownian Motion,” 2017. [Online]. Available: http://arxiv.org/abs/1705.02669. (arXiv: 1705.02669)
Abstract
Online review communities are dynamic as users join and leave, adopt new vocabulary, and adapt to evolving trends. Recent work has shown that recommender systems benefit from explicit consideration of user experience. However, prior work assumes a fixed number of discrete experience levels, whereas in reality users gain experience and mature continuously over time. This paper presents a new model that captures the continuous evolution of user experience, and the resulting language model in reviews and other posts. Our model is unsupervised and combines principles of Geometric Brownian Motion, Brownian Motion, and Latent Dirichlet Allocation to trace a smooth temporal progression of user experience and language model respectively. We develop practical algorithms for estimating the model parameters from data and for inference with our model (e.g., to recommend items). Extensive experiments with five real-world datasets show that our model not only fits data better than discrete-model baselines, but also outperforms state-of-the-art methods for predicting item ratings.
Export
BibTeX
@online{Mukherjee2017, TITLE = {Personalized Item Recommendation with Continuous Experience Evolution of Users using Brownian Motion}, AUTHOR = {Mukherjee, Subhabrata and Guennemann, Stephan and Weikum, Gerhard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1705.02669}, DOI = {10.1145/2939672.2939780}, EPRINT = {1705.02669}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Online review communities are dynamic as users join and leave, adopt new vocabulary, and adapt to evolving trends. Recent work has shown that recommender systems benefit from explicit consideration of user experience. However, prior work assumes a fixed number of discrete experience levels, whereas in reality users gain experience and mature continuously over time. This paper presents a new model that captures the continuous evolution of user experience, and the resulting language model in reviews and other posts. Our model is unsupervised and combines principles of Geometric Brownian Motion, Brownian Motion, and Latent Dirichlet Allocation to trace a smooth temporal progression of user experience and language model respectively. We develop practical algorithms for estimating the model parameters from data and for inference with our model (e.g., to recommend items). Extensive experiments with five real-world datasets show that our model not only fits data better than discrete-model baselines, but also outperforms state-of-the-art methods for predicting item ratings.}, }
Endnote
%0 Report %A Mukherjee, Subhabrata %A Guennemann, Stephan %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Personalized Item Recommendation with Continuous Experience Evolution of Users using Brownian Motion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-80BE-3 %R 10.1145/2939672.2939780 %U http://arxiv.org/abs/1705.02669 %D 2017 %X Online review communities are dynamic as users join and leave, adopt new vocabulary, and adapt to evolving trends. Recent work has shown that recommender systems benefit from explicit consideration of user experience. However, prior work assumes a fixed number of discrete experience levels, whereas in reality users gain experience and mature continuously over time. This paper presents a new model that captures the continuous evolution of user experience, and the resulting language model in reviews and other posts. Our model is unsupervised and combines principles of Geometric Brownian Motion, Brownian Motion, and Latent Dirichlet Allocation to trace a smooth temporal progression of user experience and language model respectively. We develop practical algorithms for estimating the model parameters from data and for inference with our model (e.g., to recommend items). Extensive experiments with five real-world datasets show that our model not only fits data better than discrete-model baselines, but also outperforms state-of-the-art methods for predicting item ratings. %K Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computation and Language, cs.CL,Computer Science, Information Retrieval, cs.IR,cs.SI,Statistics, Machine Learning, stat.ML
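The continuous experience model above rests on Geometric Brownian Motion. A minimal simulation of such a trajectory, with parameters invented for this sketch, applies the exact GBM update E_{t+dt} = E_t * exp((mu - sigma^2/2) * dt + sigma * sqrt(dt) * Z):

import numpy as np

def gbm_path(e0, mu, sigma, dt, steps, rng):
    # Geometric Brownian Motion keeps experience strictly positive and lets it
    # drift upward smoothly, in contrast to a fixed set of discrete levels.
    z = rng.normal(size=steps)
    increments = (mu - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * z
    return e0 * np.exp(np.cumsum(increments))

rng = np.random.default_rng(42)
experience = gbm_path(e0=1.0, mu=0.05, sigma=0.1, dt=1.0, steps=10, rng=rng)
print(experience.round(3))  # one user's smoothly evolving experience trajectory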
[45]
S. Mukherjee, K. Popat, and G. Weikum, “Exploring Latent Semantic Factors to Find Useful Product Reviews,” 2017. [Online]. Available: http://arxiv.org/abs/1705.02518. (arXiv: 1705.02518)
Abstract
Online reviews provided by consumers are a valuable asset for e-Commerce platforms, influencing potential consumers in making purchasing decisions. However, these reviews are of varying quality, with the useful ones buried deep within a heap of non-informative reviews. In this work, we attempt to automatically identify review quality in terms of its helpfulness to the end consumers. In contrast to previous works in this domain exploiting a variety of syntactic and community-level features, we delve deep into the semantics of reviews as to what makes them useful, providing interpretable explanation for the same. We identify a set of consistency and semantic factors, all from the text, ratings, and timestamps of user-generated reviews, making our approach generalizable across all communities and domains. We explore review semantics in terms of several latent factors like the expertise of its author, his judgment about the fine-grained facets of the underlying product, and his writing style. These are cast into a Hidden Markov Model -- Latent Dirichlet Allocation (HMM-LDA) based model to jointly infer: (i) reviewer expertise, (ii) item facets, and (iii) review helpfulness. Large-scale experiments on five real-world datasets from Amazon show significant improvement over state-of-the-art baselines in predicting and ranking useful reviews.
Export
BibTeX
@online{Mukherjee2017e, TITLE = {Exploring Latent Semantic Factors to Find Useful Product Reviews}, AUTHOR = {Mukherjee, Subhabrata and Popat, Kashyap and Weikum, Gerhard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1705.02518}, EPRINT = {1705.02518}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Online reviews provided by consumers are a valuable asset for e-Commerce platforms, influencing potential consumers in making purchasing decisions. However, these reviews are of varying quality, with the useful ones buried deep within a heap of non-informative reviews. In this work, we attempt to automatically identify review quality in terms of its helpfulness to the end consumers. In contrast to previous works in this domain exploiting a variety of syntactic and community-level features, we delve deep into the semantics of reviews as to what makes them useful, providing interpretable explanation for the same. We identify a set of consistency and semantic factors, all from the text, ratings, and timestamps of user-generated reviews, making our approach generalizable across all communities and domains. We explore review semantics in terms of several latent factors like the expertise of its author, his judgment about the fine-grained facets of the underlying product, and his writing style. These are cast into a Hidden Markov Model -- Latent Dirichlet Allocation (HMM-LDA) based model to jointly infer: (i) reviewer expertise, (ii) item facets, and (iii) review helpfulness. Large-scale experiments on five real-world datasets from Amazon show significant improvement over state-of-the-art baselines in predicting and ranking useful reviews.}, }
Endnote
%0 Report %A Mukherjee, Subhabrata %A Popat, Kashyap %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Exploring Latent Semantic Factors to Find Useful Product Reviews : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-811C-5 %U http://arxiv.org/abs/1705.02518 %D 2017 %X Online reviews provided by consumers are a valuable asset for e-Commerce platforms, influencing potential consumers in making purchasing decisions. However, these reviews are of varying quality, with the useful ones buried deep within a heap of non-informative reviews. In this work, we attempt to automatically identify review quality in terms of its helpfulness to the end consumers. In contrast to previous works in this domain exploiting a variety of syntactic and community-level features, we delve deep into the semantics of reviews as to what makes them useful, providing interpretable explanation for the same. We identify a set of consistency and semantic factors, all from the text, ratings, and timestamps of user-generated reviews, making our approach generalizable across all communities and domains. We explore review semantics in terms of several latent factors like the expertise of its author, his judgment about the fine-grained facets of the underlying product, and his writing style. These are cast into a Hidden Markov Model -- Latent Dirichlet Allocation (HMM-LDA) based model to jointly infer: (i) reviewer expertise, (ii) item facets, and (iii) review helpfulness. Large-scale experiments on five real-world datasets from Amazon show significant improvement over state-of-the-art baselines in predicting and ranking useful reviews. %K Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computation and Language, cs.CL,Computer Science, Information Retrieval, cs.IR,cs.SI,Statistics, Machine Learning, stat.ML
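The abstract above casts expertise, facets, and helpfulness into a joint HMM-LDA model. The sketch below is a deliberately simplified stand-in, not the paper's method: it recovers latent facets with plain LDA (scikit-learn) and regresses a hypothetical helpfulness signal on the facet mixtures; the toy reviews and vote counts are assumptions for demonstration.

```python
# Simplified stand-in for the joint HMM-LDA model of entry [45]: plain LDA
# facets feeding a helpfulness regressor. Reviews and vote counts are toy
# assumptions.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.linear_model import LinearRegression

reviews = [
    "battery life is great and the screen is sharp",
    "screen cracked after a week, terrible build quality",
    "fast shipping but the battery drains quickly",
]
helpful_votes = [10, 3, 7]  # hypothetical helpfulness signal

counts = CountVectorizer().fit_transform(reviews)
lda = LatentDirichletAllocation(n_components=2, random_state=0)
facet_mix = lda.fit_transform(counts)             # per-review facet mixture
model = LinearRegression().fit(facet_mix, helpful_votes)
print(model.predict(facet_mix))                   # predicted helpfulness scores
```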
[46]
S. Mukherjee, S. Dutta, and G. Weikum, “Credible Review Detection with Limited Information using Consistency Analysis,” 2017. [Online]. Available: http://arxiv.org/abs/1705.02668. (arXiv: 1705.02668)
Abstract
Online reviews provide viewpoints on the strengths and shortcomings of products/services, influencing potential customers' purchasing decisions. However, the proliferation of non-credible reviews -- either fake (promoting/ demoting an item), incompetent (involving irrelevant aspects), or biased -- entails the problem of identifying credible reviews. Prior works involve classifiers harnessing rich information about items/users -- which might not be readily available in several domains -- that provide only limited interpretability as to why a review is deemed non-credible. This paper presents a novel approach to address the above issues. We utilize latent topic models leveraging review texts, item ratings, and timestamps to derive consistency features without relying on item/user histories, unavailable for "long-tail" items/users. We develop models, for computing review credibility scores to provide interpretable evidence for non-credible reviews, that are also transferable to other domains -- addressing the scarcity of labeled data. Experiments on real-world datasets demonstrate improvements over state-of-the-art baselines.
Export
BibTeX
@online{Mukherjee2017b, TITLE = {Credible Review Detection with Limited Information using Consistency Analysis}, AUTHOR = {Mukherjee, Subhabrata and Dutta, Sourav and Weikum, Gerhard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1705.02668}, EPRINT = {1705.02668}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Online reviews provide viewpoints on the strengths and shortcomings of products/services, influencing potential customers' purchasing decisions. However, the proliferation of non-credible reviews -- either fake (promoting/ demoting an item), incompetent (involving irrelevant aspects), or biased -- entails the problem of identifying credible reviews. Prior works involve classifiers harnessing rich information about items/users -- which might not be readily available in several domains -- that provide only limited interpretability as to why a review is deemed non-credible. This paper presents a novel approach to address the above issues. We utilize latent topic models leveraging review texts, item ratings, and timestamps to derive consistency features without relying on item/user histories, unavailable for "long-tail" items/users. We develop models, for computing review credibility scores to provide interpretable evidence for non-credible reviews, that are also transferable to other domains -- addressing the scarcity of labeled data. Experiments on real-world datasets demonstrate improvements over state-of-the-art baselines.}, }
Endnote
%0 Report %A Mukherjee, Subhabrata %A Dutta, Sourav %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Credible Review Detection with Limited Information using Consistency Analysis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-80C1-A %U http://arxiv.org/abs/1705.02668 %D 2017 %X Online reviews provide viewpoints on the strengths and shortcomings of products/services, influencing potential customers' purchasing decisions. However, the proliferation of non-credible reviews -- either fake (promoting/ demoting an item), incompetent (involving irrelevant aspects), or biased -- entails the problem of identifying credible reviews. Prior works involve classifiers harnessing rich information about items/users -- which might not be readily available in several domains -- that provide only limited interpretability as to why a review is deemed non-credible. This paper presents a novel approach to address the above issues. We utilize latent topic models leveraging review texts, item ratings, and timestamps to derive consistency features without relying on item/user histories, unavailable for "long-tail" items/users. We develop models, for computing review credibility scores to provide interpretable evidence for non-credible reviews, that are also transferable to other domains -- addressing the scarcity of labeled data. Experiments on real-world datasets demonstrate improvements over state-of-the-art baselines. %K Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computation and Language, cs.CL,Computer Science, Information Retrieval, cs.IR,cs.SI,Statistics, Machine Learning, stat.ML
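One family of signals in the abstract above is consistency between a review's text and its rating. A toy version of such a feature (an illustrative assumption, not one of the paper's actual features; the word lists are made up) compares lexicon-based text polarity with the normalized star rating:

```python
# Toy text-rating consistency feature in the spirit of entry [46]; the
# sentiment lexicons and the [0, 1] scaling are illustrative assumptions.
POS = {"great", "excellent", "love", "sharp", "fast"}
NEG = {"terrible", "awful", "cracked", "slow", "drains"}

def text_rating_consistency(text, rating, max_rating=5):
    words = [w.strip(".,!?") for w in text.lower().split()]
    pos = sum(w in POS for w in words)
    neg = sum(w in NEG for w in words)
    polarity = 0.0 if pos + neg == 0 else (pos - neg) / (pos + neg)  # in [-1, 1]
    rating_pol = 2.0 * (rating - 1) / (max_rating - 1) - 1.0         # in [-1, 1]
    return 1.0 - abs(polarity - rating_pol) / 2.0                    # in [0, 1]

print(text_rating_consistency("great screen, love it", rating=5))  # consistent
print(text_rating_consistency("terrible, it cracked", rating=5))   # suspicious
```

A five-star review with strongly negative text scores low, flagging a candidate non-credible review without any user or item history.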
[47]
S. Mukherjee, K. Popat, and G. Weikum, “Exploring Latent Semantic Factors to Find Useful Product Reviews,” in Proceedings of the Seventeenth SIAM International Conference on Data Mining (SDM 2017), Houston, TX, USA. (Accepted/in press)
Export
BibTeX
@inproceedings{MukherjeeSDM2017, TITLE = {Exploring Latent Semantic Factors to Find Useful Product Reviews}, AUTHOR = {Mukherjee, Subhabrata and Popat, Kashyap and Weikum, Gerhard}, LANGUAGE = {eng}, PUBLISHER = {SIAM}, YEAR = {2017}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Proceedings of the Seventeenth SIAM International Conference on Data Mining (SDM 2017)}, ADDRESS = {Houston, TX, USA}, }
Endnote
%0 Conference Proceedings %A Mukherjee, Subhabrata %A Popat, Kashyap %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Exploring Latent Semantic Factors to Find Useful Product Reviews : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4CD4-6 %D 2017 %B 17th SIAM International Conference on Data Mining %Z date of event: 2017-04-27 - 2017-04-29 %C Houston, TX, USA %B Proceedings of the Seventeenth SIAM International Conference on Data Mining %I SIAM
[48]
S. Neumann, R. Gemulla, and P. Miettinen, “What You Will Gain By Rounding: Theory and Algorithms for Rounding Rank,” in 16th IEEE International Conference on Data Mining (ICDM 2016), Barcelona, Spain, 2017.
Export
BibTeX
@inproceedings{neumann16what, TITLE = {What You Will Gain By Rounding: {Theory} and Algorithms for Rounding Rank}, AUTHOR = {Neumann, Stefan and Gemulla, Rainer and Miettinen, Pauli}, LANGUAGE = {eng}, DOI = {10.1109/ICDM.2016.147}, PUBLISHER = {IEEE}, YEAR = {2016}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {16th IEEE International Conference on Data Mining (ICDM 2016)}, EDITOR = {Bonchi, Francesco and Domingo-Ferrer, Josep and Baeza-Yates, Ricardo and Zhou, Zhi-Hua and Wu, Xindong}, PAGES = {380--389}, ADDRESS = {Barcelona, Spain}, }
Endnote
%0 Conference Proceedings %A Neumann, Stefan %A Gemulla, Rainer %A Miettinen, Pauli %+ External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T What You Will Gain By Rounding: Theory and Algorithms for Rounding Rank : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2265-0 %R 10.1109/ICDM.2016.147 %D 2017 %8 02.02.2017 %B 16th International Conference on Data Mining %Z date of event: 2016-12-12 - 2016-12-15 %C Barcelona, Spain %B 16th IEEE International Conference on Data Mining %E Bonchi, Francesco; Domingo-Ferrer, Josep; Baeza-Yates, Ricardo; Zhou, Zhi-Hua; Wu, Xindong %P 380 - 389 %I IEEE
[49]
A. Nikitin, C. Laoudias, G. Chatzimilioudis, P. Karras, and D. Zeinalipour-Yazti, “Indoor Localization Accuracy Estimation from Fingerprint Data,” in 18th IEEE International Conference on Mobile Data Management (MDM 2017), Daejeon, South Korea, 2017.
Export
BibTeX
@inproceedings{mdm17-spate, TITLE = {Indoor Localization Accuracy Estimation from Fingerprint Data}, AUTHOR = {Nikitin, Artyom and Laoudias, Christos and Chatzimilioudis, Georgios and Karras, Panagiotis and Zeinalipour-Yazti, Demetrios}, LANGUAGE = {eng}, ISBN = {978-1-5386-3932-0}, DOI = {10.1109/MDM.2017.34}, PUBLISHER = {IEEE}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {18th IEEE International Conference on Mobile Data Management (MDM 2017)}, PAGES = {196--205}, ADDRESS = {Daejeon, South Korea}, }
Endnote
%0 Conference Proceedings %A Nikitin, Artyom %A Laoudias, Christos %A Chatzimilioudis, Georgios %A Karras, Panagiotis %A Zeinalipour-Yazti, Demetrios %+ External Organizations External Organizations External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Indoor Localization Accuracy Estimation from Fingerprint Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-0832-6 %R 10.1109/MDM.2017.34 %D 2017 %B 18th IEEE International Conference on Mobile Data Management %Z date of event: 2017-05-29 - 2017-06-01 %C Daejeon, South Korea %B 18th IEEE International Conference on Mobile Data Management %P 196 - 205 %I IEEE %@ 978-1-5386-3932-0
[50]
A. Nikitin, C. Laoudias, G. Chatzimilioudis, P. Karras, and D. Zeinalipour-Yazti, “ACCES: Offline Accuracy Estimation for Fingerprint-based Localization,” in 18th IEEE International Conference on Mobile Data Management (MDM 2017), Daejeon, South Korea, 2017.
Export
BibTeX
@inproceedings{mdm17-spate-demo, TITLE = {{ACCES}: Offline Accuracy Estimation for Fingerprint-based Localization}, AUTHOR = {Nikitin, Artyom and Laoudias, Christos and Chatzimilioudis, Georgios and Karras, Panagiotis and Zeinalipour-Yazti, Demetrios}, LANGUAGE = {eng}, ISBN = {978-1-5386-3932-0}, DOI = {10.1109/MDM.2017.61}, PUBLISHER = {IEEE Computer Society}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {18th IEEE International Conference on Mobile Data Management (MDM 2017)}, PAGES = {358--359}, ADDRESS = {Daejeon, South Korea}, }
Endnote
%0 Conference Proceedings %A Nikitin, Artyom %A Laoudias, Christos %A Chatzimilioudis, Georgios %A Karras, Panagiotis %A Zeinalipour-Yazti, Demetrios %+ External Organizations External Organizations External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T ACCES: Offline Accuracy Estimation for Fingerprint-based Localization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-082D-3 %R 10.1109/MDM.2017.61 %D 2017 %B 18th IEEE International Conference on Mobile Data Management %Z date of event: 2017-05-29 - 2017-06-01 %C Daejeon, South Korea %B 18th IEEE International Conference on Mobile Data Management %P 358 - 359 %I IEEE Computer Society %@ 978-1-5386-3932-0
[51]
S. Paramonov, D. Stepanova, and P. Miettinen, “Hybrid ASP-based Approach to Pattern Mining,” in Rules and Reasoning (RuleML+RR 2017), Lecture Notes in Computer Science, vol. 10364, London, UK, 2017.
Export
BibTeX
@inproceedings{StepanovaRR2017, TITLE = {Hybrid {ASP}-based Approach to Pattern Mining}, AUTHOR = {Paramonov, Sergey and Stepanova, Daria and Miettinen, Pauli}, LANGUAGE = {eng}, ISBN = {978-3-319-61251-5}, PUBLISHER = {Springer}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {Rules and Reasoning (RuleML+RR 2017)}, PAGES = {199--214}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {10364}, ADDRESS = {London, UK}, }
Endnote
%0 Conference Proceedings %A Paramonov, Sergey %A Stepanova, Daria %A Miettinen, Pauli %+ External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Hybrid ASP-based Approach to Pattern Mining : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8450-8 %D 2017 %B International Joint Conference on Rules and Reasoning %Z date of event: 2017-07-12 - 2017-07-15 %C London, UK %B Rules and Reasoning %P 199 - 214 %I Springer %@ 978-3-319-61251-5 %B Lecture Notes in Computer Science %V 10364
[52]
R. Pienta, M. Kahng, Z. Lin, J. Vreeken, P. Talukdar, J. Abello, G. Parameswaran, and D. H. Chau, “Adaptive Local Exploration of Large Graphs,” in Proceedings of the Seventeenth SIAM International Conference on Data Mining (SDM 2017), Houston, TX, USA. (Accepted/in press)
Export
BibTeX
@inproceedings{pienta:17:facets, TITLE = {Adaptive Local Exploration of Large Graphs}, AUTHOR = {Pienta, Robert and Kahng, Minsuk and Lin, Zhiyuan and Vreeken, Jilles and Talukdar, Partha and Abello, James and Parameswaran, Ganesh and Chau, Duen Horng}, LANGUAGE = {eng}, PUBLISHER = {SIAM}, YEAR = {2017}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Proceedings of the Seventeenth SIAM International Conference on Data Mining (SDM 2017)}, ADDRESS = {Houston, TX, USA}, }
Endnote
%0 Conference Proceedings %A Pienta, Robert %A Kahng, Minsuk %A Lin, Zhiyuan %A Vreeken, Jilles %A Talukdar, Partha %A Abello, James %A Parameswaran, Ganesh %A Chau, Duen Horng %+ External Organizations External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations %T Adaptive Local Exploration of Large Graphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4BEA-D %D 2017 %B 17th SIAM International Conference on Data Mining %Z date of event: 2017-04-27 - 2017-04-29 %C Houston, TX, USA %B Proceedings of the Seventeenth SIAM International Conference on Data Mining %I SIAM
[53]
E. Pitoura, P. Tsaparas, G. Flouris, I. Fundulaki, P. Papadakos, S. Abiteboul, and G. Weikum, “On Measuring Bias in Online Information,” 2017. [Online]. Available: http://arxiv.org/abs/1704.05730. (arXiv: 1704.05730)
Abstract
Bias in online information has recently become a pressing issue, with search engines, social networks and recommendation services being accused of exhibiting some form of bias. In this vision paper, we make the case for a systematic approach towards measuring bias. To this end, we discuss formal measures for quantifying the various types of bias, we outline the system components necessary for realizing them, and we highlight the related research challenges and open problems.
Export
BibTeX
@online{Pitoura2017, TITLE = {On Measuring Bias in Online Information}, AUTHOR = {Pitoura, Evaggelia and Tsaparas, Panayiotis and Flouris, Giorgos and Fundulaki, Irini and Papadakos, Panagiotis and Abiteboul, Serge and Weikum, Gerhard}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1704.05730}, EPRINT = {1704.05730}, EPRINTTYPE = {arXiv}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Bias in online information has recently become a pressing issue, with search engines, social networks and recommendation services being accused of exhibiting some form of bias. In this vision paper, we make the case for a systematic approach towards measuring bias. To this end, we discuss formal measures for quantifying the various types of bias, we outline the system components necessary for realizing them, and we highlight the related research challenges and open problems.}, }
Endnote
%0 Report %A Pitoura, Evaggelia %A Tsaparas, Panayiotis %A Flouris, Giorgos %A Fundulaki, Irini %A Papadakos, Panagiotis %A Abiteboul, Serge %A Weikum, Gerhard %+ External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T On Measuring Bias in Online Information : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8123-4 %U http://arxiv.org/abs/1704.05730 %D 2017 %X Bias in online information has recently become a pressing issue, with search engines, social networks and recommendation services being accused of exhibiting some form of bias. In this vision paper, we make the case for a systematic approach towards measuring bias. To this end, we discuss formal measures for quantifying the various types of bias, we outline the system components necessary for realizing them, and we highlight the related research challenges and open problems. %K Computer Science, Databases, cs.DB,Computer Science, Computers and Society, cs.CY
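The vision paper above calls for formal, quantifiable bias measures. As one toy instance (an illustrative assumption; the paper surveys the design space rather than prescribing this measure), a ranked result list can be scored by its rank-discounted exposure imbalance between two viewpoints:

```python
# Toy bias measure: rank-discounted exposure imbalance between viewpoints
# 'A' and 'B' in a result list. Illustrative assumption, not a measure
# defined in entry [53].
import math

def exposure_bias(labels):
    """labels: viewpoint per result ('A' or 'B'), in rank order; 0 = balanced."""
    exp_a = sum(1 / math.log2(r + 2) for r, l in enumerate(labels) if l == "A")
    exp_b = sum(1 / math.log2(r + 2) for r, l in enumerate(labels) if l == "B")
    total = exp_a + exp_b
    return 0.0 if total == 0 else abs(exp_a - exp_b) / total

print(exposure_bias(["A", "A", "B", "A", "B"]))  # mild imbalance toward A
print(exposure_bias(["A"] * 5))                  # maximal imbalance: 1.0
```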
[54]
K. Popat, “Assessing the Credibility of Claims on the Web,” in WWW’17 Companion, Perth, Australia, 2017.
Export
BibTeX
@inproceedings{PopatWWW2017, TITLE = {Assessing the Credibility of Claims on the {Web}}, AUTHOR = {Popat, Kashyap}, LANGUAGE = {eng}, ISBN = {978-1-4503-4914-7}, DOI = {10.1145/3041021.3053379}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {WWW'17 Companion}, PAGES = {735--739}, ADDRESS = {Perth, Australia}, }
Endnote
%0 Conference Proceedings %A Popat, Kashyap %+ Databases and Information Systems, MPI for Informatics, Max Planck Society %T Assessing the Credibility of Claims on the Web : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-90CC-2 %R 10.1145/3041021.3053379 %D 2017 %B 26th International Conference on World Wide Web %Z date of event: 2017-04-03 - 2017-04-07 %C Perth, Australia %B WWW'17 Companion %P 735 - 739 %I ACM %@ 978-1-4503-4914-7
[55]
K. Popat, S. Mukherjee, J. Strötgen, and G. Weikum, “Where the Truth Lies: Explaining the Credibility of Emerging Claims on the Web and Social Media,” in WWW’17 Companion, Perth, Australia, 2017.
Export
BibTeX
@inproceedings{PopatWWW2017a, TITLE = {Where the Truth Lies: {E}xplaining the Credibility of Emerging Claims on the {W}eb and Social Media}, AUTHOR = {Popat, Kashyap and Mukherjee, Subhabrata and Str{\"o}tgen, Jannik and Weikum, Gerhard}, LANGUAGE = {eng}, ISBN = {978-1-4503-4914-7}, DOI = {10.1145/3041021.3055133}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {WWW'17 Companion}, PAGES = {1003--1012}, ADDRESS = {Perth, Australia}, }
Endnote
%0 Conference Proceedings %A Popat, Kashyap %A Mukherjee, Subhabrata %A Strötgen, Jannik %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Where the Truth Lies: Explaining the Credibility of Emerging Claims on the Web and Social Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4CD8-D %R 10.1145/3041021.3055133 %D 2017 %B 26th International Conference on World Wide Web %Z date of event: 2017-04-03 - 2017-04-07 %C Perth, Australia %B WWW'17 Companion %P 1003 - 1012 %I ACM %@ 978-1-4503-4914-7
[56]
N. Reiter, E. Gius, J. Strötgen, and M. Willand, “A Shared Task for a Shared Goal: Systematic Annotation of Literary Texts,” in Digital Humanities 2017 (DH 2017), Montréal, Canada. (Accepted/in press)
Export
BibTeX
@inproceedings{StroetgenDH2017, TITLE = {A Shared Task for a Shared Goal: {S}ystematic Annotation of Literary Texts}, AUTHOR = {Reiter, Nils and Gius, Evelyn and Str{\"o}tgen, Jannik and Willand, Marcus}, LANGUAGE = {eng}, YEAR = {2017}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Digital Humanities 2017 (DH 2017)}, ADDRESS = {Montr{\'e}al, Canada}, }
Endnote
%0 Conference Proceedings %A Reiter, Nils %A Gius, Evelyn %A Strötgen, Jannik %A Willand, Marcus %+ External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T A Shared Task for a Shared Goal: Systematic Annotation of Literary Texts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-7BDC-3 %D 2017 %B Digital Humanities %Z date of event: 2017-08-08 - 2017-08-11 %C Montréal, Canada %B Digital Humanities 2017
[57]
A. Rohrbach, A. Torabi, M. Rohrbach, N. Tandon, C. Pal, H. Larochelle, A. Courville, and B. Schiele, “Movie Description,” International Journal of Computer Vision, vol. 123, no. 1, 2017.
Abstract
Audio Description (AD) provides linguistic descriptions of movies and allows visually impaired people to follow a movie along with their peers. Such descriptions are by design mainly visual and thus naturally form an interesting data source for computer vision and computational linguistics. In this work we propose a novel dataset which contains transcribed ADs, which are temporally aligned to full length movies. In addition we also collected and aligned movie scripts used in prior work and compare the two sources of descriptions. In total the Large Scale Movie Description Challenge (LSMDC) contains a parallel corpus of 118,114 sentences and video clips from 202 movies. First we characterize the dataset by benchmarking different approaches for generating video descriptions. Comparing ADs to scripts, we find that ADs are indeed more visual and describe precisely what is shown rather than what should happen according to the scripts created prior to movie production. Furthermore, we present and compare the results of several teams who participated in a challenge organized in the context of the workshop "Describing and Understanding Video & The Large Scale Movie Description Challenge (LSMDC)", at ICCV 2015.
Export
BibTeX
@article{RohrbachMovie, TITLE = {Movie Description}, AUTHOR = {Rohrbach, Anna and Torabi, Atousa and Rohrbach, Marcus and Tandon, Niket and Pal, Christopher and Larochelle, Hugo and Courville, Aaron and Schiele, Bernt}, LANGUAGE = {eng}, DOI = {10.1007/s11263-016-0987-1}, PUBLISHER = {Springer}, ADDRESS = {London}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, ABSTRACT = {Audio Description (AD) provides linguistic descriptions of movies and allows visually impaired people to follow a movie along with their peers. Such descriptions are by design mainly visual and thus naturally form an interesting data source for computer vision and computational linguistics. In this work we propose a novel dataset which contains transcribed ADs, which are temporally aligned to full length movies. In addition we also collected and aligned movie scripts used in prior work and compare the two sources of descriptions. In total the Large Scale Movie Description Challenge (LSMDC) contains a parallel corpus of 118,114 sentences and video clips from 202 movies. First we characterize the dataset by benchmarking different approaches for generating video descriptions. Comparing ADs to scripts, we find that ADs are indeed more visual and describe precisely what is shown rather than what should happen according to the scripts created prior to movie production. Furthermore, we present and compare the results of several teams who participated in a challenge organized in the context of the workshop "Describing and Understanding Video & The Large Scale Movie Description Challenge (LSMDC)", at ICCV 2015.}, JOURNAL = {International Journal of Computer Vision}, VOLUME = {123}, NUMBER = {1}, PAGES = {94--120}, }
Endnote
%0 Journal Article %A Rohrbach, Anna %A Torabi, Atousa %A Rohrbach, Marcus %A Tandon, Niket %A Pal, Christopher %A Larochelle, Hugo %A Courville, Aaron %A Schiele, Bernt %+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society %T Movie Description : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-FD03-C %R 10.1007/s11263-016-0987-1 %7 2017-01-25 %D 2017 %X Audio Description (AD) provides linguistic descriptions of movies and allows visually impaired people to follow a movie along with their peers. Such descriptions are by design mainly visual and thus naturally form an interesting data source for computer vision and computational linguistics. In this work we propose a novel dataset which contains transcribed ADs, which are temporally aligned to full length movies. In addition we also collected and aligned movie scripts used in prior work and compare the two sources of descriptions. In total the Large Scale Movie Description Challenge (LSMDC) contains a parallel corpus of 118,114 sentences and video clips from 202 movies. First we characterize the dataset by benchmarking different approaches for generating video descriptions. Comparing ADs to scripts, we find that ADs are indeed more visual and describe precisely what is shown rather than what should happen according to the scripts created prior to movie production. Furthermore, we present and compare the results of several teams who participated in a challenge organized in the context of the workshop "Describing and Understanding Video & The Large Scale Movie Description Challenge (LSMDC)", at ICCV 2015. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Computation and Language, cs.CL %J International Journal of Computer Vision %O IJCV %V 123 %N 1 %& 94 %P 94 - 120 %I Springer %C London
[58]
V. Setty, A. Anand, A. Mishra, and A. Anand, “Modeling Event Importance for Ranking Daily News Events,” in WSDM’17, 10th ACM International Conference on Web Search and Data Mining, Cambridge, UK, 2017.
Export
BibTeX
@inproceedings{Setty2017, TITLE = {Modeling Event Importance for Ranking Daily News Events}, AUTHOR = {Setty, Vinay and Anand, Abhijit and Mishra, Arunav and Anand, Avishek}, LANGUAGE = {eng}, ISBN = {978-1-4503-4675-7}, DOI = {10.1145/3018661.3018728}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {WSDM'17, 10th ACM International Conference on Web Search and Data Mining}, PAGES = {231--240}, ADDRESS = {Cambridge, UK}, }
Endnote
%0 Conference Proceedings %A Setty, Vinay %A Anand, Abhijit %A Mishra, Arunav %A Anand, Avishek %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Modeling Event Importance for Ranking Daily News Events : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-26D5-9 %R 10.1145/3018661.3018728 %D 2017 %B 10th ACM International Conference on Web Search and Data Mining %Z date of event: 2017-02-06 - 2017-02-10 %C Cambridge, UK %B WSDM'17 %P 231 - 240 %I ACM %@ 978-1-4503-4675-7
[59]
J. Stoyanovich, B. Howe, S. Abiteboul, G. Miklau, A. Sahuguet, and G. Weikum, “Fides: Towards a Platform for Responsible Data Science,” in 29th International Conference on Scientific and Statistical Database Management (SSDBM 2017), Chicago, IL, USA, 2017.
Export
BibTeX
@inproceedings{StoyanovichSSDBM2017, TITLE = {Fides: {T}owards a Platform for Responsible Data Science}, AUTHOR = {Stoyanovich, Julia and Howe, Bill and Abiteboul, Serge and Miklau, Gerome and Sahuguet, Arnaud and Weikum, Gerhard}, LANGUAGE = {eng}, ISBN = {978-1-4503-5282-6}, DOI = {10.1145/3085504.3085530}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {29th International Conference on Scientific and Statistical Database Management (SSDBM 2017)}, EID = {26}, ADDRESS = {Chicago, IL, USA}, }
Endnote
%0 Conference Proceedings %A Stoyanovich, Julia %A Howe, Bill %A Abiteboul, Serge %A Miklau, Gerome %A Sahuguet, Arnaud %A Weikum, Gerhard %+ External Organizations External Organizations External Organizations External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Fides: Towards a Platform for Responsible Data Science : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-80BA-B %R 10.1145/3085504.3085530 %D 2017 %B 29th International Conference on Scientific and Statistical Database Management %Z date of event: 2017-06-27 - 2017-06-29 %C Chicago, IL, USA %B 29th International Conference on Scientific and Statistical Database Management %Z sequence number: 26 %I ACM %@ 978-1-4503-5282-6
[60]
C. Teflioudi and R. Gemulla, “Exact and Approximate Maximum Inner Product Search with LEMP,” ACM Transactions on Database Systems, vol. 42, no. 1, 2017.
Export
BibTeX
@article{Teflioudi:2016:EAM:3015779.2996452, TITLE = {Exact and Approximate Maximum Inner Product Search with {LEMP}}, AUTHOR = {Teflioudi, Christina and Gemulla, Rainer}, LANGUAGE = {eng}, ISSN = {0362-5915}, DOI = {10.1145/2996452}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, JOURNAL = {ACM Transactions on Database Systems}, VOLUME = {42}, NUMBER = {1}, EID = {5}, }
Endnote
%0 Journal Article %A Teflioudi, Christina %A Gemulla, Rainer %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T Exact and Approximate Maximum Inner Product Search with LEMP : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-349C-B %R 10.1145/2996452 %7 2016 %D 2017 %J ACM Transactions on Database Systems %O TODS %V 42 %N 1 %Z sequence number: 5 %I ACM %C New York, NY %@ 0362-5915
[61]
H. D. Tran, “An Approach to Nonmonotonic Relational Learning from Knowledge Graphs,” Universität des Saarlandes, Saarbrücken, 2017.
Export
BibTeX
@mastersthesis{TranMSc2017, TITLE = {An Approach to Nonmonotonic Relational Learning from Knowledge Graphs}, AUTHOR = {Tran, Hai Dang}, LANGUAGE = {eng}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, }
Endnote
%0 Thesis %A Tran, Hai Dang %Y Stepanova, Daria %A referee: Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T An Approach to Nonmonotonic Relational Learning from Knowledge Graphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-845A-3 %I Universität des Saarlandes %C Saarbrücken %D 2017 %P XV, 48 p. %V master %9 master
[62]
G. Weikum, “What Computers Should Know, Shouldn’t Know, and Shouldn’t Believe,” in WWW’17 Companion, Perth, Australia, 2017.
Export
BibTeX
@inproceedings{WeikumWWW2017, TITLE = {What Computers Should Know, Shouldn{\textquoteright}t Know, and Shouldn{\textquoteright}t Believe}, AUTHOR = {Weikum, Gerhard}, LANGUAGE = {eng}, ISBN = {978-1-4503-4914-7}, DOI = {10.1145/3041021.3051120}, PUBLISHER = {ACM}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, BOOKTITLE = {WWW'17 Companion}, PAGES = {1559--1560}, ADDRESS = {Perth, Australia}, }
Endnote
%0 Conference Proceedings %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society %T What Computers Should Know, Shouldn’t Know, and Shouldn’t Believe : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7DA0-5 %R 10.1145/3041021.3051120 %D 2017 %B 26th International Conference on World Wide Web %Z date of event: 2017-04-03 - 2017-04-07 %C Perth, Australia %B WWW'17 Companion %P 1559 - 1560 %I ACM %@ 978-1-4503-4914-7
[63]
D. Ziegler, “Answer Type Prediction for Question Answering over Knowledge Bases,” Universität des Saarlandes, Saarbrücken, 2017.
Export
BibTeX
@mastersthesis{ZieglerMSc2017, TITLE = {Answer Type Prediction for Question Answering over Knowledge Bases}, AUTHOR = {Ziegler, David}, LANGUAGE = {eng}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2017}, MARGINALMARK = {$\bullet$}, DATE = {2017}, }
Endnote
%0 Thesis %A Ziegler, David %Y Abujabal, Abdalghani %A referee: Roy, Rishiraj Saha %A referee: Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Answer Type Prediction for Question Answering over Knowledge Bases : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-8F38-A %I Universität des Saarlandes %C Saarbrücken %D 2017 %P X, 48 p. %V master %9 master