2018
[1]
S. Degaetano-Ortlieb and J. Strötgen, “Diachronic Variation of Temporal Expressions in Scientific Writing through the Lens of Relative Entropy,” in Language Technologies for the Challenges of the Digital Age (GSCL 2017), Berlin, Germany, 2018.
Export
BibTeX
@inproceedings{DegaetanoortliebStroetgen2017, TITLE = {Diachronic Variation of Temporal Expressions in Scientific Writing through the Lens of Relative Entropy}, AUTHOR = {Degaetano-Ortlieb, Stefania and Str{\"o}tgen, Jannik}, LANGUAGE = {eng}, ISBN = {978-3-319-73705-8}, DOI = {10.1007/978-3-319-73706-5_22}, PUBLISHER = {Springer}, YEAR = {2017}, DATE = {2018}, BOOKTITLE = {Language Technologies for the Challenges of the Digital Age (GSCL 2017)}, EDITOR = {Rehm, Georg and Declerck, Thierry}, PAGES = {259--275}, SERIES = {Lecture Notes in Artificial Intelligence}, VOLUME = {10713}, ADDRESS = {Berlin, Germany}, }
Endnote
%0 Conference Proceedings %A Degaetano-Ortlieb, Stefania %A Strötgen, Jannik %+ External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Diachronic Variation of Temporal Expressions in Scientific Writing through the Lens of Relative Entropy : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-A8E8-5 %R 10.1007/978-3-319-73706-5_22 %D 2018 %B Conference of the German Society for Computational Linguistics and Language Technology %Z date of event: 2017-09-13 - 2017-09-14 %C Berlin, Germany %B Language Technologies for the Challenges of the Digital Age %E Rehm, Georg; Declerck, Thierry %P 259 - 275 %I Springer %@ 978-3-319-73705-8 %B Lecture Notes in Artificial Intelligence %N 10713
[2]
P. Ernst, “Biomedical Knowledge Base Construction from Text and its Applications in Knowledge-based Systems,” PhD dissertation, Universität des Saarlandes, Saarbrücken, 2018.
Abstract
While general-purpose Knowledge Bases (KBs) have gone a long way in compiling comprehensive knowledge about people, events, places, etc., domain-specific KBs, such as on health, are equally important, but are less explored. Consequently, a comprehensive and expressive health KB that spans all aspects of biomedical knowledge is still missing. The main goal of this thesis is to develop principled methods for building such a KB and enabling knowledge-centric applications. We address several challenges and make the following contributions:
- To construct a health KB, we devise a largely automated and scalable pattern-based knowledge extraction method covering a spectrum of different text genres and distilling a wide variety of facts from different biomedical areas.
- To consider higher-arity relations, crucial for proper knowledge representation in advanced domains such as health, we generalize the fact-pattern duality paradigm of previous methods. A key novelty is the integration of facts with missing arguments by extending our framework to partial patterns and facts and reasoning over the composability of partial facts.
- To demonstrate the benefits of a health KB, we devise systems for entity-aware search and analytics and for entity-relationship-oriented exploration.
Extensive experiments and use-case studies demonstrate the viability of the proposed approaches.
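To make the fact-pattern duality concrete: known facts reveal textual patterns, and those patterns in turn yield new candidate facts. Below is a minimal Python sketch of one such bootstrapping round, assuming invented toy sentences, a single hypothetical seed fact, and naive regex slot-filling; it illustrates the general paradigm only, not the thesis's actual pipeline.

    import re

    # Toy corpus and a single seed fact; all data here is hypothetical.
    sentences = [
        "aspirin is used to treat headache",
        "ibuprofen is used to treat fever",
        "metformin lowers blood sugar",
    ]
    facts = {("aspirin", "headache")}

    # Facts -> patterns: a sentence containing both arguments of a
    # known fact becomes a pattern with open argument slots.
    patterns = set()
    for drug, disease in facts:
        for s in sentences:
            if drug in s and disease in s:
                patterns.add(s.replace(drug, r"(\w+)").replace(disease, r"(\w+)"))

    # Patterns -> facts: matching the patterns elsewhere in the corpus
    # yields new candidate facts for the next bootstrapping round.
    for p in patterns:
        for s in sentences:
            m = re.fullmatch(p, s)
            if m:
                facts.add(m.groups())

    print(facts)  # now also contains ('ibuprofen', 'fever')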
Export
BibTeX
@phdthesis{Ernstphd2017, TITLE = {Biomedical Knowledge Base Construction from Text and its Applications in Knowledge-based Systems}, AUTHOR = {Ernst, Patrick}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-ds-271051}, DOI = {10.22028/D291-27105}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2018}, ABSTRACT = {While general-purpose Knowledge Bases (KBs) have gone a long way in compiling comprehensive knowledge about people, events, places, etc., domain-specific KBs, such as on health, are equally important, but are less explored. Consequently, a comprehensive and expressive health KB that spans all aspects of biomedical knowledge is still missing. The main goal of this thesis is to develop principled methods for building such a KB and enabling knowledge-centric applications. We address several challenges and make the following contributions: -- To construct a health KB, we devise a largely automated and scalable pattern-based knowledge extraction method covering a spectrum of different text genres and distilling a wide variety of facts from different biomedical areas. -- To consider higher-arity relations, crucial for proper knowledge representation in advanced domains such as health, we generalize the fact-pattern duality paradigm of previous methods. A key novelty is the integration of facts with missing arguments by extending our framework to partial patterns and facts and reasoning over the composability of partial facts. -- To demonstrate the benefits of a health KB, we devise systems for entity-aware search and analytics and for entity-relationship-oriented exploration. Extensive experiments and use-case studies demonstrate the viability of the proposed approaches.}, }
Endnote
%0 Thesis %A Ernst, Patrick %Y Weikum, Gerhard %A referee: Verspoor, Karin %A referee: Berberich, Klaus %+ Databases and Information Systems, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Biomedical Knowledge Base Construction from Text and its Applications in Knowledge-based Systems : %G eng %U http://hdl.handle.net/21.11116/0000-0001-1864-4 %U urn:nbn:de:bsz:291-scidok-ds-271051 %R 10.22028/D291-27105 %I Universität des Saarlandes %C Saarbrücken %D 2018 %8 20.02.2018 %P 147 p. %V phd %9 phd %X While general-purpose Knowledge Bases (KBs) have gone a long way in compiling comprehensive knowledge about people, events, places, etc., domain-specific KBs, such as on health, are equally important, but are less explored. Consequently, a comprehensive and expressive health KB that spans all aspects of biomedical knowledge is still missing. The main goal of this thesis is to develop principled methods for building such a KB and enabling knowledge-centric applications. We address several challenges and make the following contributions: - To construct a health KB, we devise a largely automated and scalable pattern-based knowledge extraction method covering a spectrum of different text genres and distilling a wide variety of facts from different biomedical areas. - To consider higher-arity relations, crucial for proper knowledge representation in advanced domains such as health, we generalize the fact-pattern duality paradigm of previous methods. A key novelty is the integration of facts with missing arguments by extending our framework to partial patterns and facts and reasoning over the composability of partial facts. - To demonstrate the benefits of a health KB, we devise systems for entity-aware search and analytics and for entity-relationship-oriented exploration. Extensive experiments and use-case studies demonstrate the viability of the proposed approaches. %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/26987
[3]
E. Galbrun and P. Miettinen, “Mining Redescriptions with Siren,” ACM Transactions on Knowledge Discovery from Data, vol. 12, no. 1, 2018.
Export
BibTeX
@article{galbrun17mining, TITLE = {Mining Redescriptions with {Siren}}, AUTHOR = {Galbrun, Esther and Miettinen, Pauli}, LANGUAGE = {eng}, DOI = {10.1145/3007212}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, JOURNAL = {ACM Transactions on Knowledge Discovery from Data}, VOLUME = {12}, NUMBER = {1}, EID = {6}, }
Endnote
%0 Journal Article %A Galbrun, Esther %A Miettinen, Pauli %+ External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Mining Redescriptions with Siren : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-227B-F %R 10.1145/3007212 %7 2018 %D 2018 %J ACM Transactions on Knowledge Discovery from Data %V 12 %N 1 %Z sequence number: 6 %I ACM %C New York, NY
[4]
E. Gius, N. Reiter, J. Strötgen, and M. Willand, “SANTA: Systematische Analyse Narrativer Texte durch Annotation,” in Kritik der digitalen Vernunft (DHd 2018), Köln, Germany. (Accepted/in press)
Export
BibTeX
@inproceedings{GiusDHd2018, TITLE = {{{SANTA}: {Systematische Analyse Narrativer Texte durch Annotation}}}, AUTHOR = {Gius, Evelyn and Reiter, Nils and Str{\"o}tgen, Jannik and Willand, Marcus}, LANGUAGE = {deu}, URL = {http://dhd2018.uni-koeln.de/}, YEAR = {2018}, PUBLREMARK = {Accepted}, BOOKTITLE = {Kritik der digitalen Vernunft (DHd 2018)}, ADDRESS = {K{\"o}ln, Germany}, }
Endnote
%0 Conference Proceedings %A Gius, Evelyn %A Reiter, Nils %A Strötgen, Jannik %A Willand, Marcus %+ External Organizations External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T SANTA: Systematische Analyse Narrativer Texte durch Annotation : %G deu %U http://hdl.handle.net/11858/00-001M-0000-002E-73EC-4 %D 2018 %B 5. Tagung des Verbands Digital Humanities im deutschsprachigen Raum e.V. %Z date of event: 2018-02-26 - 2018-03-02 %C Köln, Germany %B Kritik der digitalen Vernunft
[5]
K. Hui, A. Yates, K. Berberich, and G. de Melo, “Co-PACRR: A Context-Aware Neural IR Model for Ad-hoc Retrieval,” in WSDM’18, 11th ACM International Conference on Web Search and Data Mining, Marina Del Rey, CA, USA, 2018.
Export
BibTeX
@inproceedings{Hui_WSDM2018, TITLE = {Co-{PACRR}: {A} Context-Aware Neural {IR} Model for Ad-hoc Retrieval}, AUTHOR = {Hui, Kai and Yates, Andrew and Berberich, Klaus and de Melo, Gerard}, LANGUAGE = {eng}, ISBN = {978-1-4503-5581-0}, DOI = {10.1145/3159652.3159689}, PUBLISHER = {ACM}, YEAR = {2018}, DATE = {2018}, BOOKTITLE = {WSDM'18, 11th ACM International Conference on Web Search and Data Mining}, PAGES = {279--287}, ADDRESS = {Marina Del Rey, CA, USA}, }
Endnote
%0 Conference Proceedings %A Hui, Kai %A Yates, Andrew %A Berberich, Klaus %A de Melo, Gerard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T Co-PACRR: A Context-Aware Neural IR Model for Ad-hoc Retrieval : %G eng %U http://hdl.handle.net/21.11116/0000-0000-6367-D %R 10.1145/3159652.3159689 %D 2018 %B 11th ACM International Conference on Web Search and Data Mining %Z date of event: 2018-02-05 - 2018-02-09 %C Marina Del Rey, CA, USA %B WSDM'18 %P 279 - 287 %I ACM %@ 978-1-4503-5581-0
[6]
J. Kalofolias, E. Galbrun, and P. Miettinen, “From Sets of Good Redescriptions to Good Sets of Redescriptions,” Knowledge and Information Systems, 2018.
Export
BibTeX
@article{kalofolias18from, TITLE = {From Sets of Good Redescriptions to Good Sets of Redescriptions}, AUTHOR = {Kalofolias, Janis and Galbrun, Esther and Miettinen, Pauli}, LANGUAGE = {eng}, ISSN = {0219-1377}, DOI = {10.1007/s10115-017-1149-7}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2018}, JOURNAL = {Knowledge and Information Systems}, }
Endnote
%0 Journal Article %A Kalofolias, Janis %A Galbrun, Esther %A Miettinen, Pauli %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T From Sets of Good Redescriptions to Good Sets of Redescriptions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-90D1-5 %R 10.1007/s10115-017-1149-7 %7 2018-01-19 %D 2018 %8 19.01.2018 %J Knowledge and Information Systems %I Springer %C New York, NY %@ 0219-1377
[7]
S. Karaev, J. Hook, and P. Miettinen, “Latitude: A Model for Mixed Linear-Tropical Matrix Factorization,” 2018. [Online]. Available: http://arxiv.org/abs/1801.06136. (arXiv: 1801.06136)
Abstract
Nonnegative matrix factorization (NMF) is one of the most frequently used matrix factorization models in data analysis. A significant reason for the popularity of NMF is its interpretability and the 'parts of whole' interpretation of its components. Recently, max-times, or subtropical, matrix factorization (SMF) has been introduced as an alternative model with an equally interpretable 'winner takes it all' interpretation. In this paper we propose a new mixed linear-tropical model, and a new algorithm, called Latitude, that combines NMF and SMF and is able to smoothly alternate between the two. In our model, the data is modeled using the latent factors and latent parameters that control whether the factors are interpreted as NMF or SMF features, or their mixtures. We present an algorithm for our novel matrix factorization. Our experiments show that our algorithm improves over both baselines, and can yield interpretable results that reveal more of the latent structure than either NMF or SMF alone.
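To make the contrast concrete: NMF sums rank-1 terms, while the max-times (subtropical) product keeps their elementwise maximum. Below is a minimal NumPy sketch of both reconstructions and a blend between them, assuming a single global mixing weight alpha for brevity; the paper's model instead ties such mixing parameters to the latent factors themselves.

    import numpy as np

    def reconstruct(B, C, alpha):
        # Stack the k rank-1 terms: terms[l] = outer(B[:, l], C[l, :]).
        terms = np.einsum('il,lj->lij', B, C)
        linear = terms.sum(axis=0)    # NMF view: ordinary product B @ C
        tropical = terms.max(axis=0)  # SMF view: max-times product
        # alpha = 1 recovers NMF, alpha = 0 recovers SMF (hypothetical
        # global blend; the paper mixes per factor).
        return alpha * linear + (1 - alpha) * tropical

    rng = np.random.default_rng(0)
    B, C = rng.random((6, 2)), rng.random((2, 4))
    print(np.allclose(reconstruct(B, C, 1.0), B @ C))  # True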
Export
BibTeX
@online{Karaev2018, TITLE = {Latitude: A Model for Mixed Linear-Tropical Matrix Factorization}, AUTHOR = {Karaev, Sanjar and Hook, James and Miettinen, Pauli}, URL = {http://arxiv.org/abs/1801.06136}, EPRINT = {1801.06136}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Nonnegative matrix factorization (NMF) is one of the most frequently used matrix factorization models in data analysis. A significant reason for the popularity of NMF is its interpretability and the `parts of whole' interpretation of its components. Recently, max-times, or subtropical, matrix factorization (SMF) has been introduced as an alternative model with an equally interpretable `winner takes it all' interpretation. In this paper we propose a new mixed linear--tropical model, and a new algorithm, called Latitude, that combines NMF and SMF and is able to smoothly alternate between the two. In our model, the data is modeled using the latent factors and latent parameters that control whether the factors are interpreted as NMF or SMF features, or their mixtures. We present an algorithm for our novel matrix factorization. Our experiments show that our algorithm improves over both baselines, and can yield interpretable results that reveal more of the latent structure than either NMF or SMF alone.}, }
Endnote
%0 Report %A Karaev, Sanjar %A Hook, James %A Miettinen, Pauli %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society %T Latitude: A Model for Mixed Linear-Tropical Matrix Factorization : %U http://hdl.handle.net/21.11116/0000-0000-636B-9 %U http://arxiv.org/abs/1801.06136 %D 2018 %X Nonnegative matrix factorization (NMF) is one of the most frequently used matrix factorization models in data analysis. A significant reason for the popularity of NMF is its interpretability and the 'parts of whole' interpretation of its components. Recently, max-times, or subtropical, matrix factorization (SMF) has been introduced as an alternative model with an equally interpretable 'winner takes it all' interpretation. In this paper we propose a new mixed linear-tropical model, and a new algorithm, called Latitude, that combines NMF and SMF and is able to smoothly alternate between the two. In our model, the data is modeled using the latent factors and latent parameters that control whether the factors are interpreted as NMF or SMF features, or their mixtures. We present an algorithm for our novel matrix factorization. Our experiments show that our algorithm improves over both baselines, and can yield interpretable results that reveal more of the latent structure than either NMF or SMF alone. %K Computer Science, Learning, cs.LG
[8]
A. Mishra, “Leveraging Semantic Annotations for Event-focused Search & Summarization,” PhD dissertation, Universität des Saarlandes, Saarbrücken, 2018.
Abstract
Today, in this Big Data era, overwhelming amounts of textual information across different sources with a high degree of redundancy have made it hard for a consumer to retrospect on past events. A plausible solution is to link semantically similar information contained across the different sources to enforce a structure, thereby providing multiple access paths to relevant information. Keeping this larger goal in view, this work uses Wikipedia and online news articles as two prominent yet disparate information sources to address the following three problems:
• We address a linking problem to connect Wikipedia excerpts to news articles by casting it into an IR task. Our novel approach integrates time, geolocations, and entities with text to identify relevant documents that can be linked to a given excerpt.
• We address an unsupervised extractive multi-document summarization task to generate a fixed-length event digest that facilitates efficient consumption of information contained within a large set of documents. Our novel approach proposes an ILP for global inference across text, time, geolocations, and entities associated with the event.
• To estimate the temporal focus of short event descriptions, we present a semi-supervised approach that leverages redundancy within a longitudinal news collection to estimate accurate probabilistic time models.
Extensive experimental evaluations demonstrate the effectiveness and viability of our proposed approaches towards achieving the larger goal.
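To make the second contribution's optimization style concrete, below is a minimal Python sketch of budgeted sentence selection, with invented scores, lengths, and budget, solved by exhaustive enumeration rather than an ILP solver since the toy instance is tiny; the thesis's actual ILP jointly reasons over text, time, geolocations, and entities.

    from itertools import combinations

    # Hypothetical candidates: (sentence id, importance score, length in words).
    cands = [("s1", 0.9, 20), ("s2", 0.7, 15), ("s3", 0.6, 10), ("s4", 0.3, 5)]
    BUDGET = 30  # fixed digest length, in words

    # In the ILP's spirit: maximize total score subject to total length
    # <= BUDGET, here by enumerating all feasible subsets.
    feasible = (c for r in range(len(cands) + 1)
                  for c in combinations(cands, r)
                  if sum(s[2] for s in c) <= BUDGET)
    best = max(feasible, key=lambda c: sum(s[1] for s in c))
    print([s[0] for s in best])  # ['s2', 's3', 's4']: score 1.6 at length 30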
Export
BibTeX
@phdthesis{Mishraphd2018, TITLE = {Leveraging Semantic Annotations for Event-focused Search \& Summarization}, AUTHOR = {Mishra, Arunav}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-ds-271081}, DOI = {10.22028/D291-27108}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2018}, ABSTRACT = {Today, in this Big Data era, overwhelming amounts of textual information across different sources with a high degree of redundancy have made it hard for a consumer to retrospect on past events. A plausible solution is to link semantically similar information contained across the different sources to enforce a structure, thereby providing multiple access paths to relevant information. Keeping this larger goal in view, this work uses Wikipedia and online news articles as two prominent yet disparate information sources to address the following three problems: \mbox{$\bullet$} We address a linking problem to connect Wikipedia excerpts to news articles by casting it into an IR task. Our novel approach integrates time, geolocations, and entities with text to identify relevant documents that can be linked to a given excerpt. \mbox{$\bullet$} We address an unsupervised extractive multi-document summarization task to generate a fixed-length event digest that facilitates efficient consumption of information contained within a large set of documents. Our novel approach proposes an ILP for global inference across text, time, geolocations, and entities associated with the event. \mbox{$\bullet$} To estimate the temporal focus of short event descriptions, we present a semi-supervised approach that leverages redundancy within a longitudinal news collection to estimate accurate probabilistic time models. Extensive experimental evaluations demonstrate the effectiveness and viability of our proposed approaches towards achieving the larger goal.}, }
Endnote
%0 Thesis %A Mishra, Arunav %Y Berberich, Klaus %A referee: Weikum, Gerhard %A referee: Hauff, Claudia %+ Databases and Information Systems, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T Leveraging Semantic Annotations for Event-focused Search & Summarization : %G eng %U http://hdl.handle.net/21.11116/0000-0001-1844-8 %U urn:nbn:de:bsz:291-scidok-ds-271081 %R 10.22028/D291-27108 %I Universität des Saarlandes %C Saarbrücken %D 2018 %8 08.02.2018 %P 252 p. %V phd %9 phd %X Today, in this Big Data era, overwhelming amounts of textual information across different sources with a high degree of redundancy have made it hard for a consumer to retrospect on past events. A plausible solution is to link semantically similar information contained across the different sources to enforce a structure, thereby providing multiple access paths to relevant information. Keeping this larger goal in view, this work uses Wikipedia and online news articles as two prominent yet disparate information sources to address the following three problems: • We address a linking problem to connect Wikipedia excerpts to news articles by casting it into an IR task. Our novel approach integrates time, geolocations, and entities with text to identify relevant documents that can be linked to a given excerpt. • We address an unsupervised extractive multi-document summarization task to generate a fixed-length event digest that facilitates efficient consumption of information contained within a large set of documents. Our novel approach proposes an ILP for global inference across text, time, geolocations, and entities associated with the event. • To estimate the temporal focus of short event descriptions, we present a semi-supervised approach that leverages redundancy within a longitudinal news collection to estimate accurate probabilistic time models. Extensive experimental evaluations demonstrate the effectiveness and viability of our proposed approaches towards achieving the larger goal. %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/26995
[9]
K. Popat, S. Mukherjee, J. Strötgen, and G. Weikum, “CredEye: A Credibility Lens for Analyzing and Explaining Misinformation,” in WWW’18 Companion, Lyon, France. (Accepted/in press)
Export
BibTeX
@inproceedings{PopatWWW2017, TITLE = {{CredEye}: {A} Credibility Lens for Analyzing and Explaining Misinformation}, AUTHOR = {Popat, Kashyap and Mukherjee, Subhabrata and Str{\"o}tgen, Jannik and Weikum, Gerhard}, LANGUAGE = {eng}, DOI = {10.1145/3184558.3186967}, PUBLISHER = {ACM}, YEAR = {2018}, PUBLREMARK = {Accepted}, BOOKTITLE = {WWW'18 Companion}, ADDRESS = {Lyon, France}, }
Endnote
%0 Conference Proceedings %A Popat, Kashyap %A Mukherjee, Subhabrata %A Strötgen, Jannik %A Weikum, Gerhard %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T CredEye: A Credibility Lens for Analyzing and Explaining Misinformation : %G eng %U http://hdl.handle.net/21.11116/0000-0000-B546-5 %R 10.1145/3184558.3186967 %D 2018 %B 27th International Conference on World Wide Web %Z date of event: 2018-04-23 - 2018-04-27 %C Lyon, France %B WWW'18 Companion %I ACM
[10]
A. Spitz, J. Strötgen, and M. Gertz, “Predicting Document Creation Times in News Citation Networks,” in WWW’18 Companion, Lyon, France. (Accepted/in press)
Export
BibTeX
@inproceedings{SpitzWWW2017, TITLE = {Predicting Document Creation Times in News Citation Networks}, AUTHOR = {Spitz, Andreas and Str{\"o}tgen, Jannik and Gertz, Michael}, LANGUAGE = {eng}, DOI = {10.1145/3184558.3191633}, PUBLISHER = {ACM}, YEAR = {2018}, PUBLREMARK = {Accepted}, BOOKTITLE = {WWW'18 Companion}, ADDRESS = {Lyon, France}, }
Endnote
%0 Conference Proceedings %A Spitz, Andreas %A Strötgen, Jannik %A Gertz, Michael %+ External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations %T Predicting Document Creation Times in News Citation Networks : %G eng %U http://hdl.handle.net/21.11116/0000-0000-B544-7 %R 10.1145/3184558.3191633 %D 2018 %B 27th International Conference on World Wide Web %Z date of event: 2018-04-23 - 2018-04-27 %C Lyon, France %B WWW'18 Companion %I ACM
[11]
J. Strötgen, R. Andrade, and D. Gupta, “Putting Dates on the Map: Harvesting and Analyzing Street Names with Date Mentions and their Explanations,” in Proceedings of the Joint Conference on Digital Libraries (JCDL 2018), Fort Worth, TX, USA. (Accepted/in press)
Export
BibTeX
@inproceedings{StroetgenJCDL2018, TITLE = {Putting Dates on the Map: {H}arvesting and Analyzing Street Names with Date Mentions and their Explanations}, AUTHOR = {Str{\"o}tgen, Jannik and Andrade, Rosita and Gupta, Dhruv}, LANGUAGE = {eng}, PUBLISHER = {ACM}, YEAR = {2018}, PUBLREMARK = {Accepted}, BOOKTITLE = {Proceedings of the Joint Conference on Digital Libraries (JCDL 2018)}, ADDRESS = {Fort Worth, TX, USA}, }
Endnote
%0 Conference Proceedings %A Strötgen, Jannik %A Andrade, Rosita %A Gupta, Dhruv %+ Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society Databases and Information Systems, MPI for Informatics, Max Planck Society %T Putting Dates on the Map: Harvesting and Analyzing Street Names with Date Mentions and their Explanations : %G eng %U http://hdl.handle.net/21.11116/0000-0000-B548-3 %D 2018 %B Joint Conference on Digital Libraries %Z date of event: 2018-06-03 - 2018-06-06 %C Fort Worth, TX, USA %B Proceedings of the Joint Conference on Digital Libraries %I ACM
[12]
J. Strötgen, A.-L. Minard, L. Lange, M. Speranza, and B. Magnini, “KRAUTS: A German Temporally Annotated News Corpus,” in Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. (Accepted/in press)
Export
BibTeX
@inproceedings{StroetgenELREC2018, TITLE = {{KRAUTS}: {A German} Temporally Annotated News Corpus}, AUTHOR = {Str{\"o}tgen, Jannik and Minard, Anne-Lyse and Lange, Lukas and Speranza, Manuela and Magnini, Bernardo}, LANGUAGE = {eng}, URL = {http://lrec2018.lrec-conf.org/en/}, YEAR = {2018}, PUBLREMARK = {Accepted}, BOOKTITLE = {Eleventh International Conference on Language Resources and Evaluation (LREC 2018)}, ADDRESS = {Miyazaki, Japan}, }
Endnote
%0 Conference Proceedings %A Strötgen, Jannik %A Minard, Anne-Lyse %A Lange, Lukas %A Speranza, Manuela %A Magnini, Bernardo %+ Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations Databases and Information Systems, MPI for Informatics, Max Planck Society External Organizations External Organizations %T KRAUTS: A German Temporally Annotated News Corpus : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-8B8C-E %U http://lrec2018.lrec-conf.org/en/ %D 2018 %B 11th Language Resources and Evaluation Conference %Z date of event: 2018-05-07 - 2018-05-12 %C Miyazaki, Japan %B Eleventh International Conference on Language Resources and Evaluation