Current Year

[1]
A. Abboud, A. Backurs, K. Bringmann, and M. Künnemann, “Fine-Grained Complexity of Analyzing Compressed Data: Quantifying Improvements over Decompress-And-Solve,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00796. (arXiv: 1803.00796)
Abstract
Can we analyze data without decompressing it? As our data keeps growing, understanding the time complexity of problems on compressed inputs, rather than in convenient uncompressed forms, becomes more and more relevant. Suppose we are given a compression of size $n$ of data that originally has size $N$, and we want to solve a problem with time complexity $T(\cdot)$. The naive strategy of "decompress-and-solve" gives time $T(N)$, whereas "the gold standard" is time $T(n)$: to analyze the compression as efficiently as if the original data was small. We restrict our attention to data in the form of a string (text, files, genomes, etc.) and study the most ubiquitous tasks. While the challenge might seem to depend heavily on the specific compression scheme, most methods of practical relevance (Lempel-Ziv-family, dictionary methods, and others) can be unified under the elegant notion of Grammar Compressions. A vast literature, across many disciplines, established this as an influential notion for Algorithm design. We introduce a framework for proving (conditional) lower bounds in this field, allowing us to assess whether decompress-and-solve can be improved, and by how much. Our main results are:
- The $O(nN\sqrt{\log{N/n}})$ bound for LCS and the $O(\min\{N \log N, nM\})$ bound for Pattern Matching with Wildcards are optimal up to $N^{o(1)}$ factors, under the Strong Exponential Time Hypothesis. (Here, $M$ denotes the uncompressed length of the compressed pattern.)
- Decompress-and-solve is essentially optimal for Context-Free Grammar Parsing and RNA Folding, under the $k$-Clique conjecture.
- We give an algorithm showing that decompress-and-solve is not optimal for Disjointness.
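To make the central notion concrete, here is a minimal illustrative sketch (ours, not from the paper) of a grammar compression, i.e. a straight-line program: each rule derives either a single character or the concatenation of two earlier rules, so a grammar of size n can derive a string of length N that is exponentially larger.

# Minimal sketch of a grammar compression (straight-line program).
# Each rule is either one character or the concatenation of two earlier
# rules; the derived string can be exponentially longer than the
# grammar, which is exactly the gap between n and N above.
def expand(rules, symbol):
    rhs = rules[symbol]
    if isinstance(rhs, str):        # terminal rule, e.g. 'a'
        return rhs
    left, right = rhs               # concatenation rule
    return expand(rules, left) + expand(rules, right)

# A grammar with 6 rules deriving "ab" repeated 8 times (N = 16).
rules = {
    "A": "a", "B": "b",
    "X1": ("A", "B"),
    "X2": ("X1", "X1"),
    "X3": ("X2", "X2"),
    "X4": ("X3", "X3"),
}
assert expand(rules, "X4") == "ab" * 8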
[2]
A. Abboud and K. Bringmann, “Tighter Connections Between Formula-SAT and Shaving Logs,” 2018. [Online]. Available: http://arxiv.org/abs/1804.08978. (arXiv: 1804.08978)
Abstract
A noticeable fraction of Algorithms papers in the last few decades improve the running time of well-known algorithms for fundamental problems by logarithmic factors. For example, the $O(n^2)$ dynamic programming solution to the Longest Common Subsequence problem (LCS) was improved to $O(n^2/\log^2 n)$ in several ways and using a variety of ingenious tricks. This line of research, also known as "the art of shaving log factors", lacks a tool for proving negative results. Specifically, how can we show that it is unlikely that LCS can be solved in time $O(n^2/\log^3 n)$? Perhaps the only approach for such results was suggested in a recent paper of Abboud, Hansen, Vassilevska W. and Williams (STOC'16). The authors blame the hardness of shaving logs on the hardness of solving satisfiability on Boolean formulas (Formula-SAT) faster than exhaustive search. They show that an $O(n^2/\log^{1000} n)$ algorithm for LCS would imply a major advance in circuit lower bounds. Whether this approach can lead to tighter barriers was unclear. In this paper, we push this approach to its limit and, in particular, prove that a well-known barrier from complexity theory stands in the way for shaving five additional log factors for fundamental combinatorial problems. For LCS, regular expression pattern matching, as well as the Fréchet distance problem from Computational Geometry, we show that an $O(n^2/\log^{7+\varepsilon} n)$ runtime would imply new Formula-SAT algorithms. Our main result is a reduction from SAT on formulas of size $s$ over $n$ variables to LCS on sequences of length $N=2^{n/2} \cdot s^{1+o(1)}$. Our reduction is essentially as efficient as possible, and it greatly improves the previously known reduction for LCS with $N=2^{n/2} \cdot s^c$, for some $c \geq 100$.
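For reference, the baseline the abstract starts from is the textbook quadratic dynamic program for LCS; a self-contained sketch of that standard algorithm (not the paper's contribution) follows.

# Textbook O(n^2) dynamic program for Longest Common Subsequence, the
# baseline whose log-factor improvements the paper studies.
def lcs_length(x: str, y: str) -> int:
    dp = [0] * (len(y) + 1)         # rolling row: dp[j] = LCS(x[:i], y[:j])
    for i in range(1, len(x) + 1):
        prev_diag = 0               # value of dp[i-1][j-1]
        for j in range(1, len(y) + 1):
            prev_row = dp[j]        # value of dp[i-1][j]
            if x[i - 1] == y[j - 1]:
                dp[j] = prev_diag + 1
            else:
                dp[j] = max(dp[j], dp[j - 1])
            prev_diag = prev_row
    return dp[len(y)]

assert lcs_length("ABCBDAB", "BDCABA") == 4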
[3]
M. Abrahamsen, A. Adamaszek, K. Bringmann, V. Cohen-Addad, M. Mehr, E. Rotenberg, A. Roytman, and M. Thorup, “Fast Fencing,” 2018. [Online]. Available: http://arxiv.org/abs/1804.00101. (arXiv: 1804.00101)
Abstract
We consider very natural "fence enclosure" problems studied by Capoyleas, Rote, and Woeginger and Arkin, Khuller, and Mitchell in the early 90s. Given a set $S$ of $n$ points in the plane, we aim at finding a set of closed curves such that (1) each point is enclosed by a curve and (2) the total length of the curves is minimized. We consider two main variants. In the first variant, we pay a unit cost per curve in addition to the total length of the curves. An equivalent formulation of this version is that we have to enclose $n$ unit disks, paying only the total length of the enclosing curves. In the other variant, we are allowed to use at most $k$ closed curves and pay no cost per curve. For the variant with at most $k$ closed curves, we present an algorithm that is polynomial in both $n$ and $k$. For the variant with unit cost per curve, or unit disks, we present a near-linear time algorithm. Capoyleas, Rote, and Woeginger solved the problem with at most $k$ curves in $n^{O(k)}$ time. Arkin, Khuller, and Mitchell used this to solve the unit cost per curve version in exponential time. At the time, they conjectured that the problem with $k$ curves is NP-hard for general $k$. Our polynomial time algorithm refutes this unless P equals NP.
[4]
A. Adamaszek, P. Chalermsook, A. Ene, and A. Wiese, “Submodular Unsplittable Flow on Trees,” Mathematical Programming / B, 2018, doi: 10.1007/s10107-017-1218-4.
[5]
J. Baldus and K. Bringmann, “A Fast Implementation of Near Neighbors Queries for Fréchet Distance (GIS Cup),” 2018. [Online]. Available: http://arxiv.org/abs/1803.00806. (arXiv: 1803.00806)
Abstract
This paper describes an implementation of fast near-neighbours queries (also known as range searching) with respect to the Fréchet distance. The algorithm is designed to be efficient on practical data such as GPS trajectories. Our approach is to use a quadtree data structure to enumerate all curves in the database that have similar start and endpoints as the query curve. On these curves we run positive and negative filters to narrow the set of potential results. Only for those trajectories where these heuristics fail, we compute the Fréchet distance exactly, by running a novel recursive variant of the classic free-space diagram algorithm. Our implementation won the ACM SIGSPATIAL GIS Cup 2017.
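As a rough illustration of coupling two curve traversals, here is the classic dynamic program for the discrete Fréchet distance, a simpler relative of the continuous free-space-diagram algorithm the paper implements (the function and example data are ours):

import math
from functools import lru_cache

# Discrete Frechet distance between polygonal curves p and q, used here
# only as a simplified stand-in for the continuous free-space variant
# in the paper's implementation.
def discrete_frechet(p, q):
    @lru_cache(maxsize=None)
    def c(i, j):
        d = math.dist(p[i], q[j])
        if i == 0 and j == 0:
            return d
        if i == 0:
            return max(c(0, j - 1), d)
        if j == 0:
            return max(c(i - 1, 0), d)
        return max(min(c(i - 1, j), c(i - 1, j - 1), c(i, j - 1)), d)
    return c(len(p) - 1, len(q) - 1)

# Two parallel segments at distance 1.
print(discrete_frechet([(0, 0), (1, 0), (2, 0)], [(0, 1), (1, 1), (2, 1)]))  # 1.0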
[6]
G. Ballard, C. Ikenmeyer, J. M. Landsberg, and N. Ryder, “The Geometry of Rank Decompositions of Matrix Multiplication II: 3 × 3 Matrices,” 2018. [Online]. Available: http://arxiv.org/abs/1801.00843. (arXiv: 1801.00843)
Abstract
This is the second in a series of papers on rank decompositions of the matrix multiplication tensor. We present new rank $23$ decompositions for the $3\times 3$ matrix multiplication tensor $M_{\langle 3\rangle}$. All our decompositions have symmetry groups that include the standard cyclic permutation of factors but otherwise exhibit a range of behavior. One of them has 11 cubes as summands and admits an unexpected symmetry group of order 12. We establish basic information regarding symmetry groups of decompositions and outline two approaches for finding new rank decompositions of $M_{\langle n\rangle}$ for larger $n$.
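For context, the classical analogue for 2 × 2 matrices is Strassen's rank-7 decomposition of $M_{\langle 2\rangle}$; the snippet below (standard material, not from the paper) verifies it numerically.

import numpy as np

# Strassen's rank-7 decomposition of the 2x2 matrix multiplication
# tensor, the classical counterpart of the rank-23 decompositions of
# the 3x3 tensor presented in the paper.
def strassen_2x2(A, B):
    m1 = (A[0, 0] + A[1, 1]) * (B[0, 0] + B[1, 1])
    m2 = (A[1, 0] + A[1, 1]) * B[0, 0]
    m3 = A[0, 0] * (B[0, 1] - B[1, 1])
    m4 = A[1, 1] * (B[1, 0] - B[0, 0])
    m5 = (A[0, 0] + A[0, 1]) * B[1, 1]
    m6 = (A[1, 0] - A[0, 0]) * (B[0, 0] + B[0, 1])
    m7 = (A[0, 1] - A[1, 1]) * (B[1, 0] + B[1, 1])
    return np.array([[m1 + m4 - m5 + m7, m3 + m5],
                     [m2 + m4, m1 - m2 + m3 + m6]])

A, B = np.random.rand(2, 2), np.random.rand(2, 2)
assert np.allclose(strassen_2x2(A, B), A @ B)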
[7]
R. Becker, M. Sagraloff, V. Sharma, and C. Yap, “A Simple Near-Optimal Subdivision Algorithm for Complex Root Isolation based on the Pellet Test and Newton Iteration,” Journal of Symbolic Computation, vol. 86, 2018, pp. 51–96, doi: 10.1016/j.jsc.2017.03.009.
[8]
A. Bhattacharya, D. Issac, R. Jaiswal, and A. Kumar, “Sampling in Space Restricted Settings,” Algorithmica, vol. 80, no. 5, 2018, pp. 1439–1458, doi: 10.1007/s00453-017-0335-z.
[9]
M. Bläser, C. Ikenmeyer, G. Jindal, and V. Lysikov, “Generalized Matrix Completion and Algebraic Natural Proofs,” Electronic Colloquium on Computational Complexity (ECCC): Report Series, vol. 18-064, 2018, pp. 1–27. [Online]. Available: https://eccc.weizmann.ac.il/report/2018/064/
[10]
L. Boczkowski, E. Natale, O. Feinerman, and A. Korman, “Limits on Reliable Information Flows through Stochastic Populations,” PLoS Computational Biology, vol. 14, no. 6, 2018, Art. no. e1006195, doi: 10.1371/journal.pcbi.1006195.
[11]
K. Bringmann, P. Gawrychowski, S. Mozes, and O. Weimann, “Tree Edit Distance Cannot be Computed in Strongly Subcubic Time (unless APSP can),” in SODA’18, Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, New Orleans, LA, USA, 2018, pp. 1190–1206, doi: 10.1137/1.9781611975031.77.
[12]
K. Bringmann, S. Cabello, and M. T. M. Emmerich, “Maximum Volume Subset Selection for Anchored Boxes,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00849. (arXiv: 1803.00849)
Abstract
Let $B$ be a set of $n$ axis-parallel boxes in $\mathbb{R}^d$ such that each box has a corner at the origin and the other corner in the positive quadrant of $\mathbb{R}^d$, and let $k$ be a positive integer. We study the problem of selecting $k$ boxes in $B$ that maximize the volume of the union of the selected boxes. This research is motivated by applications in skyline queries for databases and in multicriteria optimization, where the problem is known as the hypervolume subset selection problem. It is known that the problem can be solved in polynomial time in the plane, while the best known running time in any dimension $d \ge 3$ is $\Omega\big(\binom{n}{k}\big)$. We show that:
- The problem is NP-hard already in 3 dimensions.
- In 3 dimensions, we break the bound $\Omega\big(\binom{n}{k}\big)$, by providing an $n^{O(\sqrt{k})}$ algorithm.
- For any constant dimension $d$, we present an efficient polynomial-time approximation scheme.
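For intuition about the objective, the planar case of the quantity being maximized (the area of a union of origin-anchored boxes, i.e. the hypervolume indicator) reduces to a staircase sweep; a small illustrative sketch of ours:

# Area of the union of origin-anchored boxes [0,x] x [0,y] in 2D, the
# planar case of the hypervolume maximized in the paper. Sweep corners
# from largest x to smallest; a box contributes a new horizontal strip
# only if its y exceeds every y seen so far.
def union_area_anchored(corners):
    area, y_seen = 0.0, 0.0
    for x, y in sorted(corners, reverse=True):
        if y > y_seen:
            area += x * (y - y_seen)
            y_seen = y
    return area

assert union_area_anchored([(3, 1), (2, 2), (1, 3)]) == 6.0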
[13]
K. Bringmann and P. Wellnitz, “Clique-Based Lower Bounds for Parsing Tree-Adjoining Grammars,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00804. (arXiv: 1803.00804)
Abstract
Tree-adjoining grammars are a generalization of context-free grammars that are well suited to model human languages and are thus popular in computational linguistics. In the tree-adjoining grammar recognition problem, given a grammar $\Gamma$ and a string $s$ of length $n$, the task is to decide whether $s$ can be obtained from $\Gamma$. Rajasekaran and Yooseph's parser (JCSS'98) solves this problem in time $O(n^{2\omega})$, where $\omega < 2.373$ is the matrix multiplication exponent. The best algorithms avoiding fast matrix multiplication take time $O(n^6)$. The first evidence for hardness was given by Satta (J. Comp. Linguist.'94): For a more general parsing problem, any algorithm that avoids fast matrix multiplication and is significantly faster than $O(|\Gamma| n^6)$ in the case of $|\Gamma| = \Theta(n^{12})$ would imply a breakthrough for Boolean matrix multiplication. Following an approach by Abboud et al. (FOCS'15) for context-free grammar recognition, in this paper we resolve many of the disadvantages of the previous lower bound. We show that, even on constant-size grammars, any improvement on Rajasekaran and Yooseph's parser would imply a breakthrough for the $k$-Clique problem. This establishes tree-adjoining grammar parsing as a practically relevant problem with the unusual running time of $n^{2\omega}$, up to lower order factors.
[14]
K. Bringmann and S. Krinninger, “A Note on Hardness of Diameter Approximation,” Information Processing Letters, vol. 133, 2018, pp. 10–15, doi: 10.1016/j.ipl.2017.12.010.
[15]
K. Bringmann and M. Künnemann, “Multivariate Fine-Grained Complexity of Longest Common Subsequence,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00938. (arXiv: 1803.00938)
Abstract
We revisit the classic combinatorial pattern matching problem of finding a longest common subsequence (LCS). For strings $x$ and $y$ of length $n$, a textbook algorithm solves LCS in time $O(n^2)$, but although much effort has been spent, no $O(n^{2-\varepsilon})$-time algorithm is known. Recent work indeed shows that such an algorithm would refute the Strong Exponential Time Hypothesis (SETH) [Abboud, Backurs, Vassilevska Williams + Bringmann, Künnemann FOCS'15]. Despite the quadratic-time barrier, for over 40 years an enduring scientific interest continued to produce fast algorithms for LCS and its variations. Particular attention was put into identifying and exploiting input parameters that yield strongly subquadratic time algorithms for special cases of interest, e.g., differential file comparison. This line of research was successfully pursued until 1990, at which time significant improvements came to a halt. In this paper, using the lens of fine-grained complexity, our goal is to (1) justify the lack of further improvements and (2) determine whether some special cases of LCS admit faster algorithms than currently known. To this end, we provide a systematic study of the multivariate complexity of LCS, taking into account all parameters previously discussed in the literature: the input size $n:=\max\{|x|,|y|\}$, the length of the shorter string $m:=\min\{|x|,|y|\}$, the length $L$ of an LCS of $x$ and $y$, the numbers of deletions $\delta := m-L$ and $\Delta := n-L$, the alphabet size, as well as the numbers of matching pairs $M$ and dominant pairs $d$. For any class of instances defined by fixing each parameter individually to a polynomial in terms of the input size, we prove a SETH-based lower bound matching one of three known algorithms. Specifically, we determine the optimal running time for LCS under SETH as $(n+\min\{d, \delta \Delta, \delta m\})^{1\pm o(1)}$. [...]
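To fix notation, a small helper of ours (not from the paper) computes the abstract's parameters for a concrete pair of strings; the number of dominant pairs $d$ is omitted for brevity.

# Compute the LCS parameters n, m, L, delta, Delta, M and the alphabet
# size for strings x and y, using the textbook DP for L.
def lcs_parameters(x: str, y: str) -> dict:
    dp = [[0] * (len(y) + 1) for _ in range(len(x) + 1)]
    for i, a in enumerate(x, 1):
        for j, b in enumerate(y, 1):
            dp[i][j] = dp[i - 1][j - 1] + 1 if a == b else max(dp[i - 1][j], dp[i][j - 1])
    L = dp[len(x)][len(y)]
    n, m = max(len(x), len(y)), min(len(x), len(y))
    M = sum(x.count(c) * y.count(c) for c in set(x))   # matching pairs
    return {"n": n, "m": m, "L": L, "delta": m - L, "Delta": n - L,
            "M": M, "alphabet": len(set(x + y))}

print(lcs_parameters("dynamic", "programming"))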
[16]
K. Bringmann and M. Künnemann, “Multivariate Fine-Grained Complexity of Longest Common Subsequence,” in SODA’18, Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, New Orleans, LA, USA, 2018, pp. 1216–1235, doi: 10.1137/1.9781611975031.79.
[17]
J. Bund, C. Lenzen, and M. Medina, “Optimal Metastability-containing Sorting Networks,” in Proceedings of the 2018 Design, Automation & Test in Europe (DATE 2018), Dresden, Germany, 2018, pp. 521–526, doi: 10.23919/DATE.2018.8342063.
[18]
L. Chiantini, J. D. Hauenstein, C. Ikenmeyer, J. M. Landsberg, and G. Ottaviani, “Polynomials and the Exponent of Matrix Multiplication,” Bulletin of the London Mathematical Society, vol. 50, no. 3, 2018, pp. 369–389, doi: 10.1112/blms.12147.
[19]
C. Croitoru and K. Mehlhorn, “On Testing Substitutability,” Information Processing Letters, vol. 138, 2018, pp. 19–21, doi: 10.1016/j.ipl.2018.05.006.
[20]
M. Cygan, S. Kratsch, and J. Nederlof, “Fast Hamiltonicity Checking Via Bases of Perfect Matchings,” Journal of the ACM, vol. 65, no. 3, 2018, Art. no. 12, doi: 10.1145/3148227.
[21]
S. Friedrichs, M. Függer, and C. Lenzen, “Metastability-Containing Circuits,” IEEE Transactions on Computers, vol. 67, no. 8, 2018, pp. 1167–1183, doi: 10.1109/TC.2018.2808185.
Abstract
Communication across unsynchronized clock domains is inherently vulnerable to metastable upsets; no digital circuit can deterministically avoid, resolve, or detect metastability (Marino, 1981). Traditionally, a possibly metastable input is stored in synchronizers, decreasing the odds of maintained metastability over time. This approach costs time, and does not guarantee success. We propose a fundamentally different approach: It is possible to contain metastability by logical masking, so that it cannot infect the entire circuit. This technique guarantees a limited degree of metastability in, and uncertainty about, the output. We present a synchronizer-free, fault-tolerant clock synchronization algorithm as application, synchronizing clock domains and thus enabling metastability-free communication. At the heart of our approach lies a model for metastability in synchronous clocked digital circuits. Metastability is propagated in a worst-case fashion, allowing to derive deterministic guarantees, without and unlike synchronizers. The proposed model permits positive results while at the same time reproducing established impossibility results regarding avoidance, resolution, and detection of metastability. Furthermore, we fully classify which functions can be computed by synchronous circuits with standard registers, and show that masking registers are computationally strictly more powerful.
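A toy rendition of the worst-case propagation model (our sketch, inspired by the abstract, not the paper's formalism): three-valued gates where 'M' marks a possibly-metastable signal, and a multiplexer whose redundant consensus term masks a metastable select whenever both data inputs agree.

# Kleene-style three-valued gates: 'M' is a possibly-metastable signal,
# propagated in worst-case fashion unless a controlling input masks it.
def AND(a, b):
    if a == 0 or b == 0:
        return 0                     # a controlling 0 masks metastability
    return 1 if (a == 1 and b == 1) else "M"

def OR(a, b):
    if a == 1 or b == 1:
        return 1                     # a controlling 1 masks metastability
    return 0 if (a == 0 and b == 0) else "M"

def NOT(a):
    return "M" if a == "M" else 1 - a

def mux(sel, d0, d1):
    # The redundant consensus term AND(d0, d1) performs the logical
    # masking: equal data inputs hide a metastable select bit.
    return OR(OR(AND(NOT(sel), d0), AND(sel, d1)), AND(d0, d1))

assert mux("M", 1, 1) == 1           # metastability contained
assert mux("M", 0, 1) == "M"         # may still propagate otherwise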
[22]
J. Garg, M. Hoefer, and K. Mehlhorn, “Approximating the Nash Social Welfare with Budget-Additive Valuations,” in SODA’18, Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms, New Orleans, LA, USA, 2018.
Export
BibTeX
@inproceedings{GargHoeferMehlhornSODA18, TITLE = {Approximating the {Nash} Social Welfare with Budget-Additive Valuations}, AUTHOR = {Garg, Jugal and Hoefer, Martin and Mehlhorn, Kurt}, LANGUAGE = {eng}, ISBN = {978-1-61197-503-1}, DOI = {10.1137/1.9781611975031.150}, PUBLISHER = {SIAM}, YEAR = {2018}, DATE = {2018}, BOOKTITLE = {SODA'18, Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms}, EDITOR = {Czumaj, Artur}, PAGES = {2326--2340}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A Garg, Jugal %A Hoefer, Martin %A Mehlhorn, Kurt %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Approximating the Nash Social Welfare with Budget-Additive Valuations : %G eng %U http://hdl.handle.net/21.11116/0000-0000-37F9-A %R 10.1137/1.9781611975031.150 %D 2018 %B Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %Z date of event: 2018-01-07 - 2018-01-10 %C New Orleans, LA, USA %B SODA'18 %E Czumaj, Artur %P 2326 - 2340 %I SIAM %@ 978-1-61197-503-1
[23]
T. A. G. Hageman, P. A. Loethman, M. Dirnberger, M. C. Elwenspoek, A. Manz, and L. Abelmann, “Macroscopic Equivalence for Microscopic Motion in a Turbulence Driven Three-dimensional Self-assembly Reactor,” Journal of Applied Physics, vol. 123, no. 2, 2018.
Export
BibTeX
@article{Hageman2018, TITLE = {Macroscopic Equivalence for Microscopic Motion in a Turbulence Driven Three-dimensional Self-assembly Reactor}, AUTHOR = {Hageman, T. A. G. and Loethman, P. A. and Dirnberger, Michael and Elwenspoek, M. C. and Manz, A. and Abelmann, L.}, LANGUAGE = {eng}, ISSN = {0021-8979}, DOI = {10.1063/1.5007029}, PUBLISHER = {AIP Publishing}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {Journal of Applied Physics}, VOLUME = {123}, NUMBER = {2}, PAGES = {1--10}, EID = {024901}, }
Endnote
%0 Journal Article %A Hageman, T. A. G. %A Loethman, P. A. %A Dirnberger, Michael %A Elwenspoek, M. C. %A Manz, A. %A Abelmann, L. %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Macroscopic Equivalence for Microscopic Motion in a Turbulence Driven Three-dimensional Self-assembly Reactor : %G eng %U http://hdl.handle.net/21.11116/0000-0000-431A-8 %R 10.1063/1.5007029 %7 2018 %D 2018 %J Journal of Applied Physics %O J. Appl. Phys. %V 123 %N 2 %& 1 %P 1 - 10 %Z sequence number: 024901 %I AIP Publishing %C New York, NY %@ false
[24]
S. Heydrich, “A Tale of Two Packing Problems: Improved Algorithms and Tighter Bounds for Online Bin Packing and the Geometric Knapsack Problem,” Universität des Saarlandes, Saarbrücken, 2018.
Abstract
In this thesis, we deal with two packing problems: the online bin packing and the geometric knapsack problem. In online bin packing, the aim is to pack a given number of items of different size into a minimal number of containers. The items need to be packed one by one without knowing future items. For online bin packing in one dimension, we present a new family of algorithms that constitutes the first improvement over the previously best algorithm in almost 15 years. While the algorithmic ideas are intuitive, an elaborate analysis is required to prove its competitive ratio. We also give a lower bound for the competitive ratio of this family of algorithms. For online bin packing in higher dimensions, we discuss lower bounds for the competitive ratio and show that the ideas from the one-dimensional case cannot be easily transferred to obtain better two-dimensional algorithms. In the geometric knapsack problem, one aims to pack a maximum weight subset of given rectangles into one square container. For this problem, we consider offline approximation algorithms. For geometric knapsack with square items, we improve the running time of the best known PTAS and obtain an EPTAS. This shows that large running times caused by some standard techniques for geometric packing problems are not always necessary and can be improved. Finally, we show how to use resource augmentation to compute optimal solutions in EPTAS-time, thereby improving upon the known PTAS for this case.
Export
BibTeX
@phdthesis{Heydrphd18, TITLE = {A Tale of Two Packing Problems: Improved Algorithms and Tighter Bounds for Online Bin Packing and the Geometric Knapsack Problem}, AUTHOR = {Heydrich, Sandy}, LANGUAGE = {eng}, DOI = {10.22028/D291-27240}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2018}, DATE = {2018}, ABSTRACT = {In this thesis, we deal with two packing problems: the online bin packing and the geometric knapsack problem. In online bin packing, the aim is to pack a given number of items of different size into a minimal number of containers. The items need to be packed one by one without knowing future items. For online bin packing in one dimension, we present a new family of algorithms that constitutes the first improvement over the previously best algorithm in almost 15 years. While the algorithmic ideas are intuitive, an elaborate analysis is required to prove its competitive ratio. We also give a lower bound for the competitive ratio of this family of algorithms. For online bin packing in higher dimensions, we discuss lower bounds for the competitive ratio and show that the ideas from the one-dimensional case cannot be easily transferred to obtain better two-dimensional algorithms. In the geometric knapsack problem, one aims to pack a maximum weight subset of given rectangles into one square container. For this problem, we consider offline approximation algorithms. For geometric knapsack with square items, we improve the running time of the best known PTAS and obtain an EPTAS. This shows that large running times caused by some standard techniques for geometric packing problems are not always necessary and can be improved. Finally, we show how to use resource augmentation to compute optimal solutions in EPTAS-time, thereby improving upon the known PTAS for this case.}, }
Endnote
%0 Thesis %A Heydrich, Sandy %Y van Stee, Rob %A referee: Mehlhorn, Kurt %A referee: Grandoni, Fabrizio %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Discrete Optimization, MPI for Informatics, Max Planck Society %T A Tale of Two Packing Problems: Improved Algorithms and Tighter Bounds for Online Bin Packing and the Geometric Knapsack Problem : %G eng %U http://hdl.handle.net/21.11116/0000-0001-E3DC-7 %R 10.22028/D291-27240 %I Universität des Saarlandes %C Saarbrücken %D 2018 %P viii, 161 p. %V phd %9 phd %X In this thesis, we deal with two packing problems: the online bin packing and the geometric knapsack problem. In online bin packing, the aim is to pack a given number of items of different size into a minimal number of containers. The items need to be packed one by one without knowing future items. For online bin packing in one dimension, we present a new family of algorithms that constitutes the first improvement over the previously best algorithm in almost 15 years. While the algorithmic ideas are intuitive, an elaborate analysis is required to prove its competitive ratio. We also give a lower bound for the competitive ratio of this family of algorithms. For online bin packing in higher dimensions, we discuss lower bounds for the competitive ratio and show that the ideas from the one-dimensional case cannot be easily transferred to obtain better two-dimensional algorithms. In the geometric knapsack problem, one aims to pack a maximum weight subset of given rectangles into one square container. For this problem, we consider offline approximation algorithms. For geometric knapsack with square items, we improve the running time of the best known PTAS and obtain an EPTAS. This shows that large running times caused by some standard techniques for geometric packing problems are not always necessary and can be improved. Finally, we show how to use resource augmentation to compute optimal solutions in EPTAS-time, thereby improving upon the known PTAS for this case. %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/27141
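The online setting described in this abstract can be made concrete with a short sketch. Below is the classic First Fit heuristic, shown only to illustrate the model in which each arriving item must be placed irrevocably without knowledge of future items; it is not the new algorithm family developed in the thesis.

    # Classic First Fit for online bin packing: items (sizes in (0, 1])
    # arrive one by one and must be packed immediately and irrevocably.
    def first_fit(items, capacity=1.0):
        bins = []                             # remaining free space per open bin
        for size in items:
            for i, free in enumerate(bins):
                if size <= free:              # place item in the first bin it fits
                    bins[i] = free - size
                    break
            else:
                bins.append(capacity - size)  # no bin fits: open a new one
        return len(bins)

    print(first_fit([0.5, 0.7, 0.5, 0.3]))    # -> 2 bins

The competitive ratio of such an online algorithm compares the number of bins it opens against the offline optimum that sees the whole sequence in advance; the thesis's contribution is a family of algorithms with a better ratio than First Fit and its successors.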
[25]
C. Ikenmeyer and S. Mengel, “On the Relative Power of Reduction Notions in Arithmetic Circuit Complexity,” Information Processing Letters, vol. 130, 2018.
Export
BibTeX
@article{Ikenmeyer2018, TITLE = {On the Relative Power of Reduction Notions in Arithmetic Circuit Complexity}, AUTHOR = {Ikenmeyer, Christian and Mengel, Stefan}, LANGUAGE = {eng}, ISSN = {0020-0190}, DOI = {10.1016/j.ipl.2017.09.009}, PUBLISHER = {Elsevier}, YEAR = {2018}, DATE = {2018}, JOURNAL = {Information Processing Letters}, VOLUME = {130}, PAGES = {7--10}, }
Endnote
%0 Journal Article %A Ikenmeyer, Christian %A Mengel, Stefan %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T On the Relative Power of Reduction Notions in Arithmetic Circuit Complexity : %G eng %U http://hdl.handle.net/21.11116/0000-0000-0361-F %R 10.1016/j.ipl.2017.09.009 %7 2017 %D 2018 %J Information Processing Letters %V 130 %& 7 %P 7 - 10 %I Elsevier %@ false
[26]
P. Khanchandani and C. Lenzen, “Self-Stabilizing Byzantine Clock Synchronization with Optimal Precision,” Theory of Computing Systems, 2018.
Export
BibTeX
@article{_Khanchandani2018, TITLE = {Self-Stabilizing {B}yzantine Clock Synchronization with Optimal Precision}, AUTHOR = {Khanchandani, Pankaj and Lenzen, Christoph}, LANGUAGE = {eng}, ISSN = {1432-4350}, DOI = {10.1007/s00224-017-9840-3}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2018}, JOURNAL = {Theory of Computing Systems}, }
Endnote
%0 Journal Article %A Khanchandani, Pankaj %A Lenzen, Christoph %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Self-Stabilizing Byzantine Clock Synchronization with Optimal Precision : %G eng %U http://hdl.handle.net/21.11116/0000-0000-73AC-D %R 10.1007/s00224-017-9840-3 %7 2018-01-20 %D 2018 %8 20.01.2018 %J Theory of Computing Systems %I Springer %C New York, NY %@ false
[27]
P. Koprowski, K. Mehlhorn, and S. Ray, “Corrigendum to ‘Faster algorithms for computing Hong’s bound on absolute positiveness’ [J. Symb. Comput. 45 (2010) 677–683],” Journal of Symbolic Computation, vol. 87, 2018.
Export
BibTeX
@article{Koprowski2018, TITLE = {Corrigendum to {\textquotedblleft}Faster algorithms for computing Hong's bound on absolute positiveness{\textquotedblright} [J. Symb. Comput. 45 (2010) 677--683]}, AUTHOR = {Koprowski, Przemys{\l}aw and Mehlhorn, Kurt and Ray, Saurabh}, LANGUAGE = {eng}, ISSN = {0747-7171}, DOI = {10.1016/j.jsc.2017.05.008}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2018}, DATE = {2018}, JOURNAL = {Journal of Symbolic Computation}, VOLUME = {87}, PAGES = {238--241}, }
Endnote
%0 Journal Article %A Koprowski, Przemysław %A Mehlhorn, Kurt %A Ray, Saurabh %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Corrigendum to “Faster algorithms for computing Hong's bound on absolute positiveness” [J. Symb. Comput. 45 (2010) 677–683] : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3C55-D %R 10.1016/j.jsc.2017.05.008 %7 2017 %D 2018 %J Journal of Symbolic Computation %V 87 %& 238 %P 238 - 241 %I Elsevier %C Amsterdam %@ false
[28]
J.-H. Lange, A. Karrenbauer, and B. Andres, “Partial Optimality and Fast Lower Bounds for Weighted Correlation Clustering,” in Proceedings of the 35th International Conference on Machine Learning (ICML 2018), Stockholm, Sweden, 2018.
Export
BibTeX
@inproceedings{pmlr-v80-lange18a, TITLE = {Partial Optimality and Fast Lower Bounds for Weighted Correlation Clustering}, AUTHOR = {Lange, Jan-Hendrik and Karrenbauer, Andreas and Andres, Bjoern}, LANGUAGE = {eng}, ISSN = {1938-7228}, URL = {http://proceedings.mlr.press/v80/lange18a.html}, YEAR = {2018}, BOOKTITLE = {Proceedings of the 35th International Conference on Machine Learning (ICML 2018)}, EDITOR = {Dy, Jennifer and Krause, Andreas}, PAGES = {2898--2907}, SERIES = {Proceedings of Machine Learning Research}, VOLUME = {80}, ADDRESS = {Stockholm, Sweden}, }
Endnote
%0 Conference Proceedings %A Lange, Jan-Hendrik %A Karrenbauer, Andreas %A Andres, Bjoern %+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society %T Partial Optimality and Fast Lower Bounds for Weighted Correlation Clustering : %G eng %U http://hdl.handle.net/21.11116/0000-0001-A71C-4 %U http://proceedings.mlr.press/v80/lange18a.html %D 2018 %B 35th International Conference on Machine Learning %Z date of event: 2018-07-10 - 2018-07-15 %C Stockholm, Sweden %B Proceedings of the 35th International Conference on Machine Learning %E Dy, Jennifer; Krause, Andreas %P 2898 - 2907 %B Proceedings of Machine Learning Research %N 80 %@ false %U http://proceedings.mlr.press/v80/lange18a/lange18a.pdf
[29]
A. Schmid and J. M. Schmidt, “Computing 2-Walks in Polynomial Time,” ACM Transactions on Algorithms, vol. 14, no. 2, 2018.
Export
BibTeX
@article{Schmid2018, TITLE = {Computing 2-Walks in Polynomial Time}, AUTHOR = {Schmid, Andreas and Schmidt, Jens M.}, LANGUAGE = {eng}, ISSN = {1549-6325}, DOI = {10.1145/3183368}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {ACM Transactions on Algorithms}, VOLUME = {14}, NUMBER = {2}, EID = {22}, }
Endnote
%0 Journal Article %A Schmid, Andreas %A Schmidt, Jens M. %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Computing 2-Walks in Polynomial Time : %G eng %U http://hdl.handle.net/21.11116/0000-0001-949E-6 %R 10.1145/3183368 %7 2018 %D 2018 %J ACM Transactions on Algorithms %V 14 %N 2 %Z sequence number: 22 %I ACM %C New York, NY %@ false