Last Year

[1]
A. Abboud, A. Backurs, K. Bringmann, and M. Künnemann, “Fine-Grained Complexity of Analyzing Compressed Data: Quantifying Improvements over Decompress-And-Solve,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00796. (arXiv: 1803.00796)
Abstract
Can we analyze data without decompressing it? As our data keeps growing, understanding the time complexity of problems on compressed inputs, rather than in convenient uncompressed forms, becomes more and more relevant. Suppose we are given a compression of size $n$ of data that originally has size $N$, and we want to solve a problem with time complexity $T(\cdot)$. The naive strategy of "decompress-and-solve" gives time $T(N)$, whereas "the gold standard" is time $T(n)$: to analyze the compression as efficiently as if the original data was small. We restrict our attention to data in the form of a string (text, files, genomes, etc.) and study the most ubiquitous tasks. While the challenge might seem to depend heavily on the specific compression scheme, most methods of practical relevance (Lempel-Ziv-family, dictionary methods, and others) can be unified under the elegant notion of Grammar Compressions. A vast literature, across many disciplines, established this as an influential notion for Algorithm design. We introduce a framework for proving (conditional) lower bounds in this field, allowing us to assess whether decompress-and-solve can be improved, and by how much. Our main results are: - The $O(nN\sqrt{\log{N/n}})$ bound for LCS and the $O(\min\{N \log N, nM\})$ bound for Pattern Matching with Wildcards are optimal up to $N^{o(1)}$ factors, under the Strong Exponential Time Hypothesis. (Here, $M$ denotes the uncompressed length of the compressed pattern.) - Decompress-and-solve is essentially optimal for Context-Free Grammar Parsing and RNA Folding, under the $k$-Clique conjecture. - We give an algorithm showing that decompress-and-solve is not optimal for Disjointness.
[2]
A. Abboud, K. Bringmann, H. Dell, and J. Nederlof, “More Consequences of Falsifying SETH and the Orthogonal Vectors Conjecture,” in STOC’18, 50th Annual ACM SIGACT Symposium on Theory of Computing, Los Angeles, CA, USA, 2018, pp. 253–266, doi: 10.1145/3188745.3188938.
[3]
A. Abboud and K. Bringmann, “Tighter Connections Between Formula-SAT and Shaving Logs,” in 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), Prague, Czech Republic, 2018, Art. no. 8, pp. 1–18, doi: 10.4230/LIPIcs.ICALP.2018.8.
[4]
A. Abboud and K. Bringmann, “Tighter Connections Between Formula-SAT and Shaving Logs,” 2018. [Online]. Available: http://arxiv.org/abs/1804.08978. (arXiv: 1804.08978)
Abstract
A noticeable fraction of Algorithms papers in the last few decades improve the running time of well-known algorithms for fundamental problems by logarithmic factors. For example, the $O(n^2)$ dynamic programming solution to the Longest Common Subsequence problem (LCS) was improved to $O(n^2/\log^2 n)$ in several ways and using a variety of ingenious tricks. This line of research, also known as "the art of shaving log factors", lacks a tool for proving negative results. Specifically, how can we show that it is unlikely that LCS can be solved in time $O(n^2/\log^3 n)$? Perhaps the only approach for such results was suggested in a recent paper of Abboud, Hansen, Vassilevska W. and Williams (STOC'16). The authors blame the hardness of shaving logs on the hardness of solving satisfiability on Boolean formulas (Formula-SAT) faster than exhaustive search. They show that an $O(n^2/\log^{1000} n)$ algorithm for LCS would imply a major advance in circuit lower bounds. Whether this approach can lead to tighter barriers was unclear. In this paper, we push this approach to its limit and, in particular, prove that a well-known barrier from complexity theory stands in the way for shaving five additional log factors for fundamental combinatorial problems. For LCS, regular expression pattern matching, as well as the Fréchet distance problem from Computational Geometry, we show that an $O(n^2/\log^{7+\varepsilon} n)$ runtime would imply new Formula-SAT algorithms. Our main result is a reduction from SAT on formulas of size $s$ over $n$ variables to LCS on sequences of length $N=2^{n/2} \cdot s^{1+o(1)}$. Our reduction is essentially as efficient as possible, and it greatly improves the previously known reduction for LCS with $N=2^{n/2} \cdot s^c$, for some $c \geq 100$.
[5]
A. Abboud, K. Bringmann, D. Hermelin, and D. Shabtay, “SETH-Based Lower Bounds for Subset Sum and Bicriteria Path,” 2018. [Online]. Available: http://arxiv.org/abs/1704.04546. (arXiv: 1704.04546)
Abstract
Subset-Sum and k-SAT are two of the most extensively studied problems in computer science, and conjectures about their hardness are among the cornerstones of fine-grained complexity. One of the most intriguing open problems in this area is to base the hardness of one of these problems on the other. Our main result is a tight reduction from k-SAT to Subset-Sum on dense instances, proving that Bellman's 1962 pseudo-polynomial $O^{*}(T)$-time algorithm for Subset-Sum on $n$ numbers and target $T$ cannot be improved to time $T^{1-\varepsilon}\cdot 2^{o(n)}$ for any $\varepsilon>0$, unless the Strong Exponential Time Hypothesis (SETH) fails. This is one of the strongest known connections between any two of the core problems of fine-grained complexity. As a corollary, we prove a "Direct-OR" theorem for Subset-Sum under SETH, offering a new tool for proving conditional lower bounds: It is now possible to assume that deciding whether one out of $N$ given instances of Subset-Sum is a YES instance requires time $(N T)^{1-o(1)}$. As an application of this corollary, we prove a tight SETH-based lower bound for the classical Bicriteria s,t-Path problem, which is extensively studied in Operations Research. We separate its complexity from that of Subset-Sum: On graphs with $m$ edges and edge lengths bounded by $L$, we show that the $O(Lm)$ pseudo-polynomial time algorithm by Joksch from 1966 cannot be improved to $\tilde{O}(L+m)$, in contrast to a recent improvement for Subset Sum (Bringmann, SODA 2017).
[6]
M. Abrahamsen, A. Adamaszek, K. Bringmann, V. Cohen-Addad, M. Mehr, E. Rotenberg, A. Roytman, and M. Thorup, “Fast Fencing,” 2018. [Online]. Available: http://arxiv.org/abs/1804.00101. (arXiv: 1804.00101)
Abstract
We consider very natural "fence enclosure" problems studied by Capoyleas, Rote, and Woeginger and Arkin, Khuller, and Mitchell in the early 90s. Given a set $S$ of $n$ points in the plane, we aim at finding a set of closed curves such that (1) each point is enclosed by a curve and (2) the total length of the curves is minimized. We consider two main variants. In the first variant, we pay a unit cost per curve in addition to the total length of the curves. An equivalent formulation of this version is that we have to enclose $n$ unit disks, paying only the total length of the enclosing curves. In the other variant, we are allowed to use at most $k$ closed curves and pay no cost per curve. For the variant with at most $k$ closed curves, we present an algorithm that is polynomial in both $n$ and $k$. For the variant with unit cost per curve, or unit disks, we present a near-linear time algorithm. Capoyleas, Rote, and Woeginger solved the problem with at most $k$ curves in $n^{O(k)}$ time. Arkin, Khuller, and Mitchell used this to solve the unit cost per curve version in exponential time. At the time, they conjectured that the problem with $k$ curves is NP-hard for general $k$. Our polynomial time algorithm refutes this unless P equals NP.
[7]
M. Abrahamsen, A. Adamaszek, K. Bringmann, V. Cohen-Addad, M. Mehr, E. Rotenberg, A. Roytman, and M. Thorup, “Fast Fencing,” in STOC’18, 50th Annual ACM SIGACT Symposium on Theory of Computing, Los Angeles, CA, USA, 2018, pp. 564–573, doi: 10.1145/3188745.3188878.
[8]
A. Adamaszek, P. Chalermsook, A. Ene, and A. Wiese, “Submodular Unsplittable Flow on Trees,” Mathematical Programming / B, vol. 172, no. 1–2, pp. 565–589, 2018, doi: 10.1007/s10107-017-1218-4.
[9]
A. Adamaszek, A. Antoniadis, A. Kumar, and T. Mömke, “Approximating Airports and Railways,” in 35th Symposium on Theoretical Aspects of Computer Science (STACS 2018), Caen, France, 2018, Art. no. 5, pp. 1–13, doi: 10.4230/LIPIcs.STACS.2018.5.
[10]
S. A. Amiri, K.-T. Foerster, and S. Schmid, “Walking Through Waypoints,” in LATIN 2018: Theoretical Informatics, Buenos Aires, Argentina, 2018, pp. 37–51, doi: 10.1007/978-3-319-77404-6_4.
[11]
S. A. Amiri, S. Dudycz, S. Schmid, and S. Wiederrecht, “Congestion-Free Rerouting of Flows on DAGs,” in 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), Prague, Czech Republic, 2018, Art. no. 143, pp. 1–13, doi: 10.4230/LIPIcs.ICALP.2018.143.
[12]
S. A. Amiri, K.-T. Foerster, R. Jacob, and S. Schmid, “Charting the Algorithmic Complexity of Waypoint Routing,” ACM SIGCOMM Computer Communication Review, vol. 48, no. 1, pp. 42–48, 2018, doi: 10.1145/3211852.3211859.
[13]
S. A. Amiri, P. Ossona de Mendez, R. Rabinovich, and S. Siebertz, “Distributed Domination on Graph Classes of Bounded Expansion,” in SPAA’18, 30th ACM Symposium on Parallelism in Algorithms and Architectures, Vienna, Austria, 2018, pp. 143–151, doi: 10.1145/3210377.3210383.
[14]
A. Antoniadis, C. Fischer, and A. Tonnis, “A Collection of Lower Bounds for Online Matching on the Line,” in LATIN 2018: Theoretical Informatics, Buenos Aires, Argentina, 2018, pp. 52–65, doi: 10.1007/978-3-319-77404-6_5.
[15]
A. Antoniadis, K. Fleszar, R. Hoeksma, and K. Schewior, “A PTAS for Euclidean TSP with Hyperplane Neighborhoods,” 2018. [Online]. Available: http://arxiv.org/abs/1804.03953. (arXiv: 1804.03953)
Abstract
In the Traveling Salesperson Problem with Neighborhoods (TSPN), we are given a collection of geometric regions in some space. The goal is to output a tour of minimum length that visits at least one point in each region. Even in the Euclidean plane, TSPN is known to be APX-hard, which gives rise to studying more tractable special cases of the problem. In this paper, we focus on the fundamental special case of regions that are hyperplanes in the $d$-dimensional Euclidean space. This case contrasts the much-better understood case of so-called fat regions. While for $d=2$ an exact algorithm with running time $O(n^5)$ is known, settling the exact approximability of the problem for $d=3$ has been repeatedly posed as an open question. To date, only an approximation algorithm with guarantee exponential in $d$ is known, and NP-hardness remains open. For arbitrary fixed $d$, we develop a Polynomial Time Approximation Scheme (PTAS) that works for both the tour and path version of the problem. Our algorithm is based on approximating the convex hull of the optimal tour by a convex polytope of bounded complexity. Such polytopes are represented as solutions of a sophisticated LP formulation, which we combine with the enumeration of crucial properties of the tour. As the approximation guarantee approaches $1$, our scheme adjusts the complexity of the considered polytopes accordingly. In the analysis of our approximation scheme, we show that our search space includes a sufficiently good approximation of the optimum. To do so, we develop a novel and general sparsification technique to transform an arbitrary convex polytope into one with a constant number of vertices and, in turn, into one of bounded complexity in the above sense. Hereby, we maintain important properties of the polytope.
[16]
A. Antoniadis and K. Schewior, “A Tight Lower Bound for Online Convex Optimization with Switching Costs,” in Approximation and Online Algorithms (WAOA 2017), Vienna, Austria, 2018, pp. 164–165, doi: 10.1007/978-3-319-89441-6_13.
[17]
A. Antoniadis and A. Cristi, “Near Optimal Mechanism for Energy Aware Scheduling,” in Algorithmic Game Theory (SAGT 2018), Beijing, China, 2018, pp. 31–42, doi: 10.1007/978-3-319-99660-8_4.
[18]
S. Arunachalam, S. Chakraborty, M. Koucký, N. Saurabh, and R. de Wolf, “Improved Bounds on Fourier Entropy and Min-entropy,” 2018. [Online]. Available: http://arxiv.org/abs/1809.09819. (arXiv: 1809.09819)
Abstract
Given a Boolean function $f:\{-1,1\}^n\to \{-1,1\}$, the Fourier distribution assigns probability $\widehat{f}(S)^2$ to $S\subseteq [n]$. The Fourier Entropy-Influence (FEI) conjecture of Friedgut and Kalai asks if there exists a universal constant C>0 such that $H(\hat{f}^2)\leq C Inf(f)$, where $H(\hat{f}^2)$ is the Shannon entropy of the Fourier distribution of $f$ and $Inf(f)$ is the total influence of $f$. 1) We consider the weaker Fourier Min-entropy-Influence (FMEI) conjecture. This asks if $H_{\infty}(\hat{f}^2)\leq C Inf(f)$, where $H_{\infty}(\hat{f}^2)$ is the min-entropy of the Fourier distribution. We show $H_{\infty}(\hat{f}^2)\leq 2C_{\min}^\oplus(f)$, where $C_{\min}^\oplus(f)$ is the minimum parity certificate complexity of $f$. We also show that for every $\epsilon\geq 0$, we have $H_{\infty}(\hat{f}^2)\leq 2\log (\|\hat{f}\|_{1,\epsilon}/(1-\epsilon))$, where $\|\hat{f}\|_{1,\epsilon}$ is the approximate spectral norm of $f$. As a corollary, we verify the FMEI conjecture for the class of read-$k$ $DNF$s (for constant $k$). 2) We show that $H(\hat{f}^2)\leq 2 aUC^\oplus(f)$, where $aUC^\oplus(f)$ is the average unambiguous parity certificate complexity of $f$. This improves upon Chakraborty et al. An important consequence of the FEI conjecture is the long-standing Mansour's conjecture. We show that a weaker version of FEI already implies Mansour's conjecture: is $H(\hat{f}^2)\leq C \min\{C^0(f),C^1(f)\}$?, where $C^0(f), C^1(f)$ are the 0- and 1-certificate complexities of $f$, respectively. 3) We study what FEI implies about the structure of polynomials that 1/3-approximate a Boolean function. We pose a conjecture (which is implied by FEI): no "flat" degree-$d$ polynomial of sparsity $2^{\omega(d)}$ can 1/3-approximate a Boolean function. We prove this conjecture unconditionally for a particular class of polynomials.
[19]
J. Baldus and K. Bringmann, “A Fast Implementation of Near Neighbors Queries for Fréchet Distance (GIS Cup),” 2018. [Online]. Available: http://arxiv.org/abs/1803.00806. (arXiv: 1803.00806)
Abstract
This paper describes an implementation of fast near-neighbours queries (also known as range searching) with respect to the Fréchet distance. The algorithm is designed to be efficient on practical data such as GPS trajectories. Our approach is to use a quadtree data structure to enumerate all curves in the database that have similar start and endpoints as the query curve. On these curves we run positive and negative filters to narrow the set of potential results. Only for those trajectories where these heuristics fail, we compute the Fréchet distance exactly, by running a novel recursive variant of the classic free-space diagram algorithm. Our implementation won the ACM SIGSPATIAL GIS Cup 2017.
[20]
G. Ballard, C. Ikenmeyer, J. M. Landsberg, and N. Ryder, “The Geometry of Rank Decompositions of Matrix Multiplication II: 3 x 3 Matrices,” 2018. [Online]. Available: http://arxiv.org/abs/1801.00843. (arXiv: 1801.00843)
Abstract
This is the second in a series of papers on rank decompositions of the matrix multiplication tensor. We present new rank $23$ decompositions for the $3\times 3$ matrix multiplication tensor $M_{\langle 3\rangle}$. All our decompositions have symmetry groups that include the standard cyclic permutation of factors but otherwise exhibit a range of behavior. One of them has 11 cubes as summands and admits an unexpected symmetry group of order 12. We establish basic information regarding symmetry groups of decompositions and outline two approaches for finding new rank decompositions of $M_{\langle n\rangle}$ for larger $n$.
Export
BibTeX
@online{Ballard_arXiv1801.00843, TITLE = {{The Geometry of Rank Decompositions of Matrix Multiplication II: $3\times 3$ Matrices}}, AUTHOR = {Ballard, Grey and Ikenmeyer, Christian and Landsberg, J. M. and Ryder, Nick}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1801.00843}, EPRINT = {1801.00843}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {This is the second in a series of papers on rank decompositions of the matrix multiplication tensor. We present new rank $23$ decompositions for the $3\times 3$ matrix multiplication tensor $M_{\langle 3\rangle}$. All our decompositions have symmetry groups that include the standard cyclic permutation of factors but otherwise exhibit a range of behavior. One of them has 11 cubes as summands and admits an unexpected symmetry group of order 12. We establish basic information regarding symmetry groups of decompositions and outline two approaches for finding new rank decompositions of $M_{\langle n\rangle}$ for larger $n$.}, }
Endnote
%0 Report %A Ballard, Grey %A Ikenmeyer, Christian %A Landsberg, J. M. %A Ryder, Nick %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T The Geometry of Rank Decompositions of Matrix Multiplication II: 3 x 3 Matrices : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3F64-9 %U http://arxiv.org/abs/1801.00843 %D 2018 %X This is the second in a series of papers on rank decompositions of the matrix multiplication tensor. We present new rank $23$ decompositions for the $3\times 3$ matrix multiplication tensor $M_{\langle 3\rangle}$. All our decompositions have symmetry groups that include the standard cyclic permutation of factors but otherwise exhibit a range of behavior. One of them has 11 cubes as summands and admits an unexpected symmetry group of order 12. We establish basic information regarding symmetry groups of decompositions and outline two approaches for finding new rank decompositions of $M_{\langle n\rangle}$ for larger $n$. %K Computer Science, Computational Complexity, cs.CC,
[21]
F. Ban, V. Bhattiprolu, K. Bringmann, P. Kolev, E. Lee, and D. P. Woodruff, “A PTAS for ℓp-Low Rank Approximation,” 2018. [Online]. Available: http://arxiv.org/abs/1807.06101. (arXiv: 1807.06101)
Abstract
A number of recent works have studied algorithms for entrywise $\ell_p$-low rank approximation, namely, algorithms which given an $n \times d$ matrix $A$ (with $n \geq d$), output a rank-$k$ matrix $B$ minimizing $\|A-B\|_p^p=\sum_{i,j}|A_{i,j}-B_{i,j}|^p$ when $p > 0$; and $\|A-B\|_0=\sum_{i,j}[A_{i,j}\neq B_{i,j}]$ for $p=0$. On the algorithmic side, for $p \in (0,2)$, we give the first $(1+\epsilon)$-approximation algorithm running in time $n^{\text{poly}(k/\epsilon)}$. Further, for $p = 0$, we give the first almost-linear time approximation scheme for what we call the Generalized Binary $\ell_0$-Rank-$k$ problem. Our algorithm computes $(1+\epsilon)$-approximation in time $(1/\epsilon)^{2^{O(k)}/\epsilon^{2}} \cdot nd^{1+o(1)}$. On the hardness of approximation side, for $p \in (1,2)$, assuming the Small Set Expansion Hypothesis and the Exponential Time Hypothesis (ETH), we show that there exists $\delta := \delta(\alpha) > 0$ such that the entrywise $\ell_p$-Rank-$k$ problem has no $\alpha$-approximation algorithm running in time $2^{k^{\delta}}$.
Export
BibTeX
@online{Ban_arXiv1807.06101, TITLE = {A {PTAS} for $\ell_p$-Low Rank Approximation}, AUTHOR = {Ban, Frank and Bhattiprolu, Vijay and Bringmann, Karl and Kolev, Pavel and Lee, Euiwoong and Woodruff, David P.}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1807.06101}, EPRINT = {1807.06101}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {A number of recent works have studied algorithms for entrywise $\ell_p$-low rank approximation, namely, algorithms which given an $n \times d$ matrix $A$ (with $n \geq d$), output a rank-$k$ matrix $B$ minimizing $\|A-B\|_p^p=\sum_{i,j}|A_{i,j}-B_{i,j}|^p$ when $p > 0$; and $\|A-B\|_0=\sum_{i,j}[A_{i,j}\neq B_{i,j}]$ for $p=0$. On the algorithmic side, for $p \in (0,2)$, we give the first $(1+\epsilon)$-approximation algorithm running in time $n^{\text{poly}(k/\epsilon)}$. Further, for $p = 0$, we give the first almost-linear time approximation scheme for what we call the Generalized Binary $\ell_0$-Rank-$k$ problem. Our algorithm computes $(1+\epsilon)$-approximation in time $(1/\epsilon)^{2^{O(k)}/\epsilon^{2}} \cdot nd^{1+o(1)}$. On the hardness of approximation side, for $p \in (1,2)$, assuming the Small Set Expansion Hypothesis and the Exponential Time Hypothesis (ETH), we show that there exists $\delta := \delta(\alpha) > 0$ such that the entrywise $\ell_p$-Rank-$k$ problem has no $\alpha$-approximation algorithm running in time $2^{k^{\delta}}$.}, }
Endnote
%0 Report %A Ban, Frank %A Bhattiprolu, Vijay %A Bringmann, Karl %A Kolev, Pavel %A Lee, Euiwoong %A Woodruff, David P. %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T A PTAS for l p-Low Rank Approximation : %G eng %U http://hdl.handle.net/21.11116/0000-0002-9D17-4 %U http://arxiv.org/abs/1807.06101 %D 2018 %X A number of recent works have studied algorithms for entrywise $\ell_p$-low rank approximation, namely, algorithms which given an $n \times d$ matrix $A$ (with $n \geq d$), output a rank-$k$ matrix $B$ minimizing $\|A-B\|_p^p=\sum_{i,j}|A_{i,j}-B_{i,j}|^p$ when $p > 0$; and $\|A-B\|_0=\sum_{i,j}[A_{i,j}\neq B_{i,j}]$ for $p=0$. On the algorithmic side, for $p \in (0,2)$, we give the first $(1+\epsilon)$-approximation algorithm running in time $n^{\text{poly}(k/\epsilon)}$. Further, for $p = 0$, we give the first almost-linear time approximation scheme for what we call the Generalized Binary $\ell_0$-Rank-$k$ problem. Our algorithm computes $(1+\epsilon)$-approximation in time $(1/\epsilon)^{2^{O(k)}/\epsilon^{2}} \cdot nd^{1+o(1)}$. On the hardness of approximation side, for $p \in (1,2)$, assuming the Small Set Expansion Hypothesis and the Exponential Time Hypothesis (ETH), we show that there exists $\delta := \delta(\alpha) > 0$ such that the entrywise $\ell_p$-Rank-$k$ problem has no $\alpha$-approximation algorithm running in time $2^{k^{\delta}}$. %K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Computational Complexity, cs.CC,Computer Science, Learning, cs.LG
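For orientation, the objective that these algorithms approximate is an entrywise norm of the residual. The snippet below is our own illustration (assuming NumPy), not the PTAS from the paper; it only evaluates the objective and compares against the classical rank-1 SVD truncation, which is optimal for p = 2 but generally not for other p.

import numpy as np

def entrywise_lp_error(A, B, p):
    # ||A - B||_p^p = sum_{i,j} |A_ij - B_ij|^p for p > 0; number of disagreeing entries for p = 0.
    if p == 0:
        return int(np.sum(A != B))
    return float(np.sum(np.abs(A - B) ** p))

# Example: compare a matrix with the rank-1 truncation of its SVD (the l_2 baseline).
rng = np.random.default_rng(0)
A = rng.integers(0, 5, size=(6, 4)).astype(float)
U, s, Vt = np.linalg.svd(A, full_matrices=False)
B = s[0] * np.outer(U[:, 0], Vt[0, :])  # rank-1 approximation of A
print(entrywise_lp_error(A, B, 1), entrywise_lp_error(A, B, 2))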
[22]
L. Becchetti, A. Clementi, P. Manurangsi, E. Natale, F. Pasquale, P. Raghavendra, and L. Trevisan, “Average Whenever You Meet: Opportunistic Protocols for Community Detection,” in 26th Annual European Symposium on Algorithms (ESA 2018), Helsinki, Finland, 2018.
Export
BibTeX
@inproceedings{Becchetti_ESA2018, TITLE = {Average Whenever You Meet: {O}pportunistic Protocols for Community Detection}, AUTHOR = {Becchetti, Luca and Clementi, Andrea and Manurangsi, Pasin and Natale, Emanuele and Pasquale, Francesco and Raghavendra, Prasad and Trevisan, Luca}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-081-1}, URL = {urn:nbn:de:0030-drops-94705}, DOI = {10.4230/LIPIcs.ESA.2018.7}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {26th Annual European Symposium on Algorithms (ESA 2018)}, EDITOR = {Azar, Yossi and Bast, Hannah and Herman, Grzegorz}, PAGES = {1--13}, EID = {7}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {112}, ADDRESS = {Helsinki, Finland}, }
Endnote
%0 Conference Proceedings %A Becchetti, Luca %A Clementi, Andrea %A Manurangsi, Pasin %A Natale, Emanuele %A Pasquale, Francesco %A Raghavendra, Prasad %A Trevisan, Luca %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Average Whenever You Meet: Opportunistic Protocols for Community Detection : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A957-E %R 10.4230/LIPIcs.ESA.2018.7 %U urn:nbn:de:0030-drops-94705 %D 2018 %B 26th Annual European Symposium on Algorithms %Z date of event: 2018-08-20 - 2018-08-22 %C Helsinki, Finland %B 26th Annual European Symposium on Algorithms %E Azar, Yossi; Bast, Hannah; Herman, Grzegorz %P 1 - 13 %Z sequence number: 7 %I Schloss Dagstuhl %@ 978-3-95977-081-1 %B Leibniz International Proceedings in Informatics %N 112 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9470/
[23]
L. Becchetti, V. Bonifaci, and E. Natale, “Pooling or Sampling: Collective Dynamics for Electrical Flow Estimation,” in AAMAS’18, 17th International Conference on Autonomous Agents and MultiAgent Systems, Stockholm, Sweden, 2018.
Export
BibTeX
@inproceedings{Becchetti_AAMAS2018, TITLE = {Pooling or Sampling: {C}ollective Dynamics for Electrical Flow Estimation}, AUTHOR = {Becchetti, Luca and Bonifaci, Vincenzo and Natale, Emanuele}, LANGUAGE = {eng}, ISBN = {978-1-4503-5649-7}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {AAMAS'18, 17th International Conference on Autonomous Agents and MultiAgent Systems}, PAGES = {1576--1584}, ADDRESS = {Stockholm, Sweden}, }
Endnote
%0 Conference Proceedings %A Becchetti, Luca %A Bonifaci, Vincenzo %A Natale, Emanuele %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Pooling or Sampling: Collective Dynamics for Electrical Flow Estimation : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A953-2 %D 2018 %B 17th International Conference on Autonomous Agents and MultiAgent Systems %Z date of event: 2018-07-10 - 2018-07-15 %C Stockholm, Sweden %B AAMAS'18 %P 1576 - 1584 %I ACM %@ 978-1-4503-5649-7
[24]
L. Becchetti, A. Clementi, E. Natale, F. Pasquale, and L. Trevisan, “Finding a Bounded-Degree Expander Inside a Dense One,” 2018. [Online]. Available: http://arxiv.org/abs/1811.10316. (arXiv: 1811.10316)
Abstract
It follows from the Marcus-Spielman-Srivastava proof of the Kadison-Singer conjecture that if $G=(V,E)$ is a $\Delta$-regular dense expander then there is an edge-induced subgraph $H=(V,E_H)$ of $G$ of constant maximum degree which is also an expander. As with other consequences of the MSS theorem, it is not clear how one would explicitly construct such a subgraph. We show that such a subgraph (although with quantitatively weaker expansion and near-regularity properties than those predicted by MSS) can be constructed with high probability in linear time, via a simple algorithm. Our algorithm allows a distributed implementation that runs in $\mathcal O(\log n)$ rounds and does $\bigO(n)$ total work with high probability. The analysis of the algorithm is complicated by the complex dependencies that arise between edges and between choices made in different rounds. We sidestep these difficulties by following the combinatorial approach of counting the number of possible random choices of the algorithm which lead to failure. We do so by a compression argument showing that such random choices can be encoded with a non-trivial compression. Our algorithm bears some similarity to the way agents construct a communication graph in a peer-to-peer network, and, in the bipartite case, to the way agents select servers in blockchain protocols.
Export
BibTeX
@online{Becchetti_arXiv1811.10316, TITLE = {Finding a Bounded-Degree Expander Inside a Dense One}, AUTHOR = {Becchetti, Luca and Clementi, Andrea and Natale, Emanuele and Pasquale, Francesco and Trevisan, Luca}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1811.10316}, EPRINT = {1811.10316}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {It follows from the Marcus-Spielman-Srivastava proof of the Kadison-Singer conjecture that if $G=(V,E)$ is a $\Delta$-regular dense expander then there is an edge-induced subgraph $H=(V,E_H)$ of $G$ of constant maximum degree which is also an expander. As with other consequences of the MSS theorem, it is not clear how one would explicitly construct such a subgraph. We show that such a subgraph (although with quantitatively weaker expansion and near-regularity properties than those predicted by MSS) can be constructed with high probability in linear time, via a simple algorithm. Our algorithm allows a distributed implementation that runs in $\mathcal O(\log n)$ rounds and does $\bigO(n)$ total work with high probability. The analysis of the algorithm is complicated by the complex dependencies that arise between edges and between choices made in different rounds. We sidestep these difficulties by following the combinatorial approach of counting the number of possible random choices of the algorithm which lead to failure. We do so by a compression argument showing that such random choices can be encoded with a non-trivial compression. Our algorithm bears some similarity to the way agents construct a communication graph in a peer-to-peer network, and, in the bipartite case, to the way agents select servers in blockchain protocols.}, }
Endnote
%0 Report %A Becchetti, Luca %A Clementi, Andrea %A Natale, Emanuele %A Pasquale, Francesco %A Trevisan, Luca %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Finding a Bounded-Degree Expander Inside a Dense One : %G eng %U http://hdl.handle.net/21.11116/0000-0002-B967-A %U http://arxiv.org/abs/1811.10316 %D 2018 %X It follows from the Marcus-Spielman-Srivastava proof of the Kadison-Singer conjecture that if $G=(V,E)$ is a $\Delta$-regular dense expander then there is an edge-induced subgraph $H=(V,E_H)$ of $G$ of constant maximum degree which is also an expander. As with other consequences of the MSS theorem, it is not clear how one would explicitly construct such a subgraph. We show that such a subgraph (although with quantitatively weaker expansion and near-regularity properties than those predicted by MSS) can be constructed with high probability in linear time, via a simple algorithm. Our algorithm allows a distributed implementation that runs in $\mathcal O(\log n)$ rounds and does $\bigO(n)$ total work with high probability. The analysis of the algorithm is complicated by the complex dependencies that arise between edges and between choices made in different rounds. We sidestep these difficulties by following the combinatorial approach of counting the number of possible random choices of the algorithm which lead to failure. We do so by a compression argument showing that such random choices can be encoded with a non-trivial compression. Our algorithm bears some similarity to the way agents construct a communication graph in a peer-to-peer network, and, in the bipartite case, to the way agents select servers in blockchain protocols. %K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[25]
R. Becker, M. Sagraloff, V. Sharma, and C. Yap, “A Simple Near-Optimal Subdivision Algorithm for Complex Root Isolation based on the Pellet Test and Newton Iteration,” Journal of Symbolic Computation, vol. 86, 2018.
Export
BibTeX
@article{Becker2017JSC, TITLE = {A Simple Near-Optimal Subdivision Algorithm for Complex Root Isolation based on the {Pellet} Test and {Newton} Iteration}, AUTHOR = {Becker, Ruben and Sagraloff, Michael and Sharma, Vikram and Yap, Chee}, LANGUAGE = {eng}, ISSN = {0747-7171}, DOI = {10.1016/j.jsc.2017.03.009}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Journal of Symbolic Computation}, VOLUME = {86}, PAGES = {51--96}, }
Endnote
%0 Journal Article %A Becker, Ruben %A Sagraloff, Michael %A Sharma, Vikram %A Yap, Chee %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T A Simple Near-Optimal Subdivision Algorithm for Complex Root Isolation based on the Pellet Test and Newton Iteration : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5717-8 %R 10.1016/j.jsc.2017.03.009 %7 2017-03-29 %D 2018 %J Journal of Symbolic Computation %V 86 %& 51 %P 51 - 96 %I Elsevier %C Amsterdam %@ false
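The Pellet test in the title is a coefficient-size criterion: for p(z) = Σ_i a_i z^i, if |a_k| r^k exceeds the sum of all other |a_i| r^i, then p has exactly k roots (counted with multiplicity) in the disk of radius r around the center (after shifting the polynomial to that center). Below is a minimal sketch of this predicate for a disk centered at 0; it is our own illustration, not the paper's full subdivision-plus-Newton algorithm.

def pellet_test(coeffs, k, r):
    # Pellet-style test for p(z) = sum_i coeffs[i] * z**i (coefficients may be complex).
    # Returns True if |a_k| r^k strictly dominates the sum of all other |a_i| r^i,
    # certifying that exactly k roots lie in the disk of radius r around 0.
    terms = [abs(a) * r ** i for i, a in enumerate(coeffs)]
    return terms[k] > sum(t for i, t in enumerate(terms) if i != k)

# Example: p(z) = (z - 0.1)(z - 0.2) = 0.02 - 0.3 z + z^2 has both roots in |z| < 1.
print(pellet_test([0.02, -0.3, 1.0], k=2, r=1.0))  # True: 1 > 0.3 + 0.02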
[26]
A. Bhattacharya, D. Issac, R. Jaiswal, and A. Kumar, “Sampling in Space Restricted Settings,” Algorithmica, vol. 80, no. 5, 2018.
Export
BibTeX
@article{Bhattacharya2018, TITLE = {Sampling in Space Restricted Settings}, AUTHOR = {Bhattacharya, Anup and Issac, Davis and Jaiswal, Ragesh and Kumar, Amit}, LANGUAGE = {eng}, ISSN = {0178-4617}, DOI = {10.1007/s00453-017-0335-z}, PUBLISHER = {Springer-Verlag}, ADDRESS = {New York}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Algorithmica}, VOLUME = {80}, NUMBER = {5}, PAGES = {1439--1458}, }
Endnote
%0 Journal Article %A Bhattacharya, Anup %A Issac, Davis %A Jaiswal, Ragesh %A Kumar, Amit %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Sampling in Space Restricted Settings : %G eng %U http://hdl.handle.net/21.11116/0000-0001-2C37-1 %R 10.1007/s00453-017-0335-z %7 2017 %D 2018 %J Algorithmica %V 80 %N 5 %& 1439 %P 1439 - 1458 %I Springer-Verlag %C New York %@ false
[27]
M. Bläser, C. Ikenmeyer, G. Jindal, and V. Lysikov, “Generalized Matrix Completion and Algebraic Natural Proofs,” in STOC’18, 50th Annual ACM SIGACT Symposium on Theory of Computing, Los Angeles, CA, USA, 2018.
Export
BibTeX
@inproceedings{Blaeser_STOC2018, TITLE = {Generalized Matrix Completion and Algebraic Natural Proofs}, AUTHOR = {Bl{\"a}ser, Markus and Ikenmeyer, Christian and Jindal, Gorav and Lysikov, Vladimir}, LANGUAGE = {eng}, ISBN = {978-1-4503-5559-9}, DOI = {10.1145/3188745.3188832}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {STOC'18, 50th Annual ACM SIGACT Symposium on Theory of Computing}, PAGES = {1193--1206}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Bläser, Markus %A Ikenmeyer, Christian %A Jindal, Gorav %A Lysikov, Vladimir %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Generalized Matrix Completion and Algebraic Natural Proofs : %G eng %U http://hdl.handle.net/21.11116/0000-0002-17DF-A %R 10.1145/3188745.3188832 %D 2018 %B 50th Annual ACM SIGACT Symposium on Theory of Computing %Z date of event: 2018-06-25 - 2018-06-29 %C Los Angeles, CA, USA %B STOC'18 %P 1193 - 1206 %I ACM %@ 978-1-4503-5559-9
[28]
M. Bläser, C. Ikenmeyer, G. Jindal, and V. Lysikov, “Generalized Matrix Completion and Algebraic Natural Proofs,” Electronic Colloquium on Computational Complexity (ECCC): Report Series, vol. 18–064, 2018.
Export
BibTeX
@article{BlaeserCCC18_064, TITLE = {Generalized Matrix Completion and Algebraic Natural Proofs}, AUTHOR = {Bl{\"a}ser, Markus and Ikenmeyer, Christian and Jindal, Gorav and Lysikov, Vladimir}, LANGUAGE = {eng}, ISSN = {1433-8092}, PUBLISHER = {Hasso-Plattner-Institut f{\"u}r Softwaretechnik GmbH}, ADDRESS = {Potsdam}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, JOURNAL = {Electronic Colloquium on Computational Complexity (ECCC): Report Series}, VOLUME = {18-064}, PAGES = {1--27}, }
Endnote
%0 Journal Article %A Bläser, Markus %A Ikenmeyer, Christian %A Jindal, Gorav %A Lysikov, Vladimir %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Generalized Matrix Completion and Algebraic Natural Proofs : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3F5F-0 %7 2018 %D 2018 %J Electronic Colloquium on Computational Complexity (ECCC): Report Series %V 18-064 %& 1 %P 1 - 27 %I Hasso-Plattner-Institut für Softwaretechnik GmbH %C Potsdam %@ false %U https://eccc.weizmann.ac.il/report/2018/064/
[29]
M. Bläser, G. Jindal, and A. Pandey, “A Deterministic PTAS for Commutative Rank of Matrix Spaces,” Theory of Computing, vol. 14, 2018.
Export
BibTeX
@article{Blaeser_ToC18, TITLE = {A Deterministic {PTAS} for Commutative Rank of Matrix Spaces}, AUTHOR = {Bl{\"a}ser, Markus and Jindal, Gorav and Pandey, Anurag}, LANGUAGE = {eng}, ISSN = {1557-2862}, DOI = {10.4086/toc.2018.v014a003}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, JOURNAL = {Theory of Computing}, VOLUME = {14}, PAGES = {1--21}, EID = {3}, }
Endnote
%0 Journal Article %A Bläser, Markus %A Jindal, Gorav %A Pandey, Anurag %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T A Deterministic PTAS for Commutative Rank of Matrix Spaces : %G eng %U http://hdl.handle.net/21.11116/0000-0002-B48E-3 %R 10.4086/toc.2018.v014a003 %7 2018 %D 2018 %J Theory of Computing %V 14 %& 1 %P 1 - 21 %Z sequence number: 3 %@ false
[30]
L. Boczkowski, E. Natale, O. Feinerman, and A. Korman, “Limits on Reliable Information Flows through Stochastic Populations,” PLoS Computational Biology, vol. 14, no. 6, 2018.
Export
BibTeX
@article{Boczkowski2018, TITLE = {Limits on Reliable Information Flows through Stochastic Populations}, AUTHOR = {Boczkowski, Lucas and Natale, Emanuele and Feinerman, Ofer and Korman, Amos}, LANGUAGE = {eng}, ISSN = {1553-734X}, DOI = {10.1371/journal.pcbi.1006195}, PUBLISHER = {Public Library of Science}, ADDRESS = {San Francisco, CA}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, JOURNAL = {PLoS Computational Biology}, VOLUME = {14}, NUMBER = {6}, EID = {e1006195}, }
Endnote
%0 Journal Article %A Boczkowski, Lucas %A Natale, Emanuele %A Feinerman, Ofer %A Korman, Amos %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Limits on Reliable Information Flows through Stochastic Populations : %G eng %U http://hdl.handle.net/21.11116/0000-0001-999D-2 %R 10.1371/journal.pcbi.1006195 %7 2018 %D 2018 %J PLoS Computational Biology %V 14 %N 6 %Z sequence number: e1006195 %I Public Library of Science %C San Francisco, CA %@ false
[31]
L. Boczkowski, O. Feinerman, A. Korman, and E. Natale, “Limits for Rumor Spreading in Stochastic Populations,” in 9th Innovations in Theoretical Computer Science (ITCS 2018), Cambridge, MA, USA, 2018.
Export
BibTeX
@inproceedings{Boczkowski_ITCS2018, TITLE = {Limits for Rumor Spreading in Stochastic Populations}, AUTHOR = {Boczkowski, Lucas and Feinerman, Ofer and Korman, Amos and Natale, Emanuele}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-060-6}, URL = {urn:nbn:de:0030-drops-83207}, DOI = {10.4230/LIPIcs.ITCS.2018.49}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {9th Innovations in Theoretical Computer Science (ITCS 2018)}, EDITOR = {Karlin, Anna R.}, PAGES = {1--21}, EID = {49}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {94}, ADDRESS = {Cambridge, MA, USA}, }
Endnote
%0 Conference Proceedings %A Boczkowski, Lucas %A Feinerman, Ofer %A Korman, Amos %A Natale, Emanuele %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Limits for Rumor Spreading in Stochastic Populations : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A962-1 %R 10.4230/LIPIcs.ITCS.2018.49 %U urn:nbn:de:0030-drops-83207 %D 2018 %B 9th Innovations in Theoretical Computer Science %Z date of event: 2018-01-11 - 2018-01-14 %C Cambridge, MA, USA %B 9th Innovations in Theoretical Computer Science %E Karlin, Anna R. %P 1 - 21 %Z sequence number: 49 %I Schloss Dagstuhl %@ 978-3-95977-060-6 %B Leibniz International Proceedings in Informatics %N 94 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/8320/
[32]
J.-D. Boissonnat, R. Dyer, and A. Ghosh, “Delaunay Triangulation of Manifolds,” Foundations of Computational Mathematics, vol. 18, no. 2, 2018.
Export
BibTeX
@article{Boissonnat2017, TITLE = {Delaunay Triangulation of Manifolds}, AUTHOR = {Boissonnat, Jean-Daniel and Dyer, Ramsay and Ghosh, Arijit}, LANGUAGE = {eng}, ISSN = {1615-3375}, DOI = {10.1007/s10208-017-9344-1}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Foundations of Computational Mathematics}, VOLUME = {18}, NUMBER = {2}, PAGES = {399--431}, }
Endnote
%0 Journal Article %A Boissonnat, Jean-Daniel %A Dyer, Ramsay %A Ghosh, Arijit %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Delaunay Triangulation of Manifolds : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-7945-0 %R 10.1007/s10208-017-9344-1 %7 2017-02-01 %D 2018 %J Foundations of Computational Mathematics %V 18 %N 2 %& 399 %P 399 - 431 %I Springer %C New York, NY %@ false
[33]
K. Bringmann and P. Wellnitz, “Clique-Based Lower Bounds for Parsing Tree-Adjoining Grammars,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00804. (arXiv: 1803.00804)
Abstract
Tree-adjoining grammars are a generalization of context-free grammars that are well suited to model human languages and are thus popular in computational linguistics. In the tree-adjoining grammar recognition problem, given a grammar $\Gamma$ and a string $s$ of length $n$, the task is to decide whether $s$ can be obtained from $\Gamma$. Rajasekaran and Yooseph's parser (JCSS'98) solves this problem in time $O(n^{2\omega})$, where $\omega < 2.373$ is the matrix multiplication exponent. The best algorithms avoiding fast matrix multiplication take time $O(n^6)$. The first evidence for hardness was given by Satta (J. Comp. Linguist.'94): For a more general parsing problem, any algorithm that avoids fast matrix multiplication and is significantly faster than $O(|\Gamma| n^6)$ in the case of $|\Gamma| = \Theta(n^{12})$ would imply a breakthrough for Boolean matrix multiplication. Following an approach by Abboud et al. (FOCS'15) for context-free grammar recognition, in this paper we resolve many of the disadvantages of the previous lower bound. We show that, even on constant-size grammars, any improvement on Rajasekaran and Yooseph's parser would imply a breakthrough for the $k$-Clique problem. This establishes tree-adjoining grammar parsing as a practically relevant problem with the unusual running time of $n^{2\omega}$, up to lower order factors.
Export
BibTeX
@online{Bringmann_arXiv1803.00804, TITLE = {Clique-Based Lower Bounds for Parsing Tree-Adjoining Grammars}, AUTHOR = {Bringmann, Karl and Wellnitz, Philip}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1803.00804}, EPRINT = {1803.00804}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Tree-adjoining grammars are a generalization of context-free grammars that are well suited to model human languages and are thus popular in computational linguistics. In the tree-adjoining grammar recognition problem, given a grammar $\Gamma$ and a string $s$ of length $n$, the task is to decide whether $s$ can be obtained from $\Gamma$. Rajasekaran and Yooseph's parser (JCSS'98) solves this problem in time $O(n^{2\omega})$, where $\omega < 2.373$ is the matrix multiplication exponent. The best algorithms avoiding fast matrix multiplication take time $O(n^6)$. The first evidence for hardness was given by Satta (J. Comp. Linguist.'94): For a more general parsing problem, any algorithm that avoids fast matrix multiplication and is significantly faster than $O(|\Gamma| n^6)$ in the case of $|\Gamma| = \Theta(n^{12})$ would imply a breakthrough for Boolean matrix multiplication. Following an approach by Abboud et al. (FOCS'15) for context-free grammar recognition, in this paper we resolve many of the disadvantages of the previous lower bound. We show that, even on constant-size grammars, any improvement on Rajasekaran and Yooseph's parser would imply a breakthrough for the $k$-Clique problem. This establishes tree-adjoining grammar parsing as a practically relevant problem with the unusual running time of $n^{2\omega}$, up to lower order factors.}, }
Endnote
%0 Report %A Bringmann, Karl %A Wellnitz, Philip %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Clique-Based Lower Bounds for Parsing Tree-Adjoining Grammars : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3E2A-C %U http://arxiv.org/abs/1803.00804 %D 2018 %X Tree-adjoining grammars are a generalization of context-free grammars that are well suited to model human languages and are thus popular in computational linguistics. In the tree-adjoining grammar recognition problem, given a grammar $\Gamma$ and a string $s$ of length $n$, the task is to decide whether $s$ can be obtained from $\Gamma$. Rajasekaran and Yooseph's parser (JCSS'98) solves this problem in time $O(n^{2\omega})$, where $\omega < 2.373$ is the matrix multiplication exponent. The best algorithms avoiding fast matrix multiplication take time $O(n^6)$. The first evidence for hardness was given by Satta (J. Comp. Linguist.'94): For a more general parsing problem, any algorithm that avoids fast matrix multiplication and is significantly faster than $O(|\Gamma| n^6)$ in the case of $|\Gamma| = \Theta(n^{12})$ would imply a breakthrough for Boolean matrix multiplication. Following an approach by Abboud et al. (FOCS'15) for context-free grammar recognition, in this paper we resolve many of the disadvantages of the previous lower bound. We show that, even on constant-size grammars, any improvement on Rajasekaran and Yooseph's parser would imply a breakthrough for the $k$-Clique problem. This establishes tree-adjoining grammar parsing as a practically relevant problem with the unusual running time of $n^{2\omega}$, up to lower order factors. %K Computer Science, Computational Complexity, cs.CC,Computer Science, Data Structures and Algorithms, cs.DS
[34]
K. Bringmann, S. Cabello, and M. T. M. Emmerich, “Maximum Volume Subset Selection for Anchored Boxes,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00849. (arXiv: 1803.00849)
Abstract
Let $B$ be a set of $n$ axis-parallel boxes in $\mathbb{R}^d$ such that each box has a corner at the origin and the other corner in the positive quadrant of $\mathbb{R}^d$, and let $k$ be a positive integer. We study the problem of selecting $k$ boxes in $B$ that maximize the volume of the union of the selected boxes. This research is motivated by applications in skyline queries for databases and in multicriteria optimization, where the problem is known as the hypervolume subset selection problem. It is known that the problem can be solved in polynomial time in the plane, while the best known running time in any dimension $d \ge 3$ is $\Omega\big(\binom{n}{k}\big)$. We show that: - The problem is NP-hard already in 3 dimensions. - In 3 dimensions, we break the bound $\Omega\big(\binom{n}{k}\big)$, by providing an $n^{O(\sqrt{k})}$ algorithm. - For any constant dimension $d$, we present an efficient polynomial-time approximation scheme.
Export
BibTeX
@online{Bringmann_arXiv1803.00849, TITLE = {Maximum Volume Subset Selection for Anchored Boxes}, AUTHOR = {Bringmann, Karl and Cabello, Sergio and Emmerich, Michael T. M.}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1803.00849}, EPRINT = {1803.00849}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Let $B$ be a set of $n$ axis-parallel boxes in $\mathbb{R}^d$ such that each box has a corner at the origin and the other corner in the positive quadrant of $\mathbb{R}^d$, and let $k$ be a positive integer. We study the problem of selecting $k$ boxes in $B$ that maximize the volume of the union of the selected boxes. This research is motivated by applications in skyline queries for databases and in multicriteria optimization, where the problem is known as the hypervolume subset selection problem. It is known that the problem can be solved in polynomial time in the plane, while the best known running time in any dimension $d \ge 3$ is $\Omega\big(\binom{n}{k}\big)$. We show that: -- The problem is NP-hard already in 3 dimensions. -- In 3 dimensions, we break the bound $\Omega\big(\binom{n}{k}\big)$, by providing an $n^{O(\sqrt{k})}$ algorithm. -- For any constant dimension $d$, we present an efficient polynomial-time approximation scheme.}, }
Endnote
%0 Report %A Bringmann, Karl %A Cabello, Sergio %A Emmerich, Michael T. M. %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Maximum Volume Subset Selection for Anchored Boxes : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3E08-2 %U http://arxiv.org/abs/1803.00849 %D 2018 %X Let $B$ be a set of $n$ axis-parallel boxes in $\mathbb{R}^d$ such that each box has a corner at the origin and the other corner in the positive quadrant of $\mathbb{R}^d$, and let $k$ be a positive integer. We study the problem of selecting $k$ boxes in $B$ that maximize the volume of the union of the selected boxes. This research is motivated by applications in skyline queries for databases and in multicriteria optimization, where the problem is known as the hypervolume subset selection problem. It is known that the problem can be solved in polynomial time in the plane, while the best known running time in any dimension $d \ge 3$ is $\Omega\big(\binom{n}{k}\big)$. We show that: - The problem is NP-hard already in 3 dimensions. - In 3 dimensions, we break the bound $\Omega\big(\binom{n}{k}\big)$, by providing an $n^{O(\sqrt{k})}$ algorithm. - For any constant dimension $d$, we present an efficient polynomial-time approximation scheme. %K Computer Science, Computational Geometry, cs.CG,Computer Science, Data Structures and Algorithms, cs.DS
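In the plane, the union volume of origin-anchored boxes is a simple staircase area, so the hypervolume subset selection problem from the abstract can at least be stated and brute-forced in a few lines. The sketch below is our own illustration (exhaustive search over k-subsets, so exponential in k), not one of the algorithms from the paper.

from itertools import combinations

def union_area_anchored(boxes):
    # Area of the union of 2D boxes [0, x] x [0, y], each anchored at the origin.
    area, ymax = 0.0, 0.0
    for x, y in sorted(boxes, key=lambda b: -b[0]):  # sweep by decreasing x
        if y > ymax:
            area += x * (y - ymax)
            ymax = y
    return area

def best_k_subset(boxes, k):
    # Brute-force hypervolume subset selection in 2D: the k boxes maximizing union area.
    return max(combinations(boxes, k), key=union_area_anchored)

boxes = [(4, 1), (3, 2), (2, 3), (1, 4)]
best = best_k_subset(boxes, 2)
print(best, union_area_anchored(best))  # an optimal pair with union area 8.0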
[35]
K. Bringmann, T. Husfeldt, and M. Magnusson, “Multivariate Analysis of Orthogonal Range Searching and Graph Distances Parameterized by Treewidth,” 2018. [Online]. Available: http://arxiv.org/abs/1805.07135. (arXiv: 1805.07135)
Abstract
We show that the eccentricities, diameter, radius, and Wiener index of an undirected $n$-vertex graph with nonnegative edge lengths can be computed in time $O(n\cdot \binom{k+\lceil\log n\rceil}{k} \cdot 2^k k^2 \log n)$, where $k$ is the treewidth of the graph. For every $\epsilon>0$, this bound is $n^{1+\epsilon}\exp O(k)$, which matches a hardness result of Abboud, Vassilevska Williams, and Wang (SODA 2015) and closes an open problem in the multivariate analysis of polynomial-time computation. To this end, we show that the analysis of an algorithm of Cabello and Knauer (Comp. Geom., 2009) in the regime of non-constant treewidth can be improved by revisiting the analysis of orthogonal range searching, improving bounds of the form $\log^d n$ to $\binom{d+\lceil\log n\rceil}{d}$, as originally observed by Monier (J. Alg. 1980). We also investigate the parameterization by vertex cover number.
Export
BibTeX
@online{Bringmann_arXiv1805.07135, TITLE = {Multivariate Analysis of Orthogonal Range Searching and Graph Distances Parameterized by Treewidth}, AUTHOR = {Bringmann, Karl and Husfeldt, Thore and Magnusson, M{\aa}ns}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1805.07135}, EPRINT = {1805.07135}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We show that the eccentricities, diameter, radius, and Wiener index of an undirected $n$-vertex graph with nonnegative edge lengths can be computed in time $O(n\cdot \binom{k+\lceil\log n\rceil}{k} \cdot 2^k k^2 \log n)$, where $k$ is the treewidth of the graph. For every $\epsilon>0$, this bound is $n^{1+\epsilon}\exp O(k)$, which matches a hardness result of Abboud, Vassilevska Williams, and Wang (SODA 2015) and closes an open problem in the multivariate analysis of polynomial-time computation. To this end, we show that the analysis of an algorithm of Cabello and Knauer (Comp. Geom., 2009) in the regime of non-constant treewidth can be improved by revisiting the analysis of orthogonal range searching, improving bounds of the form $\log^d n$ to $\binom{d+\lceil\log n\rceil}{d}$, as originally observed by Monier (J. Alg. 1980). We also investigate the parameterization by vertex cover number.}, }
Endnote
%0 Report %A Bringmann, Karl %A Husfeldt, Thore %A Magnusson, M&#229;ns %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Multivariate Analysis of Orthogonal Range Searching and Graph Distances Parameterized by Treewidth : %G eng %U http://hdl.handle.net/21.11116/0000-0002-173B-3 %U http://arxiv.org/abs/1805.07135 %D 2018 %X We show that the eccentricities, diameter, radius, and Wiener index of an undirected $n$-vertex graph with nonnegative edge lengths can be computed in time $O(n\cdot \binom{k+\lceil\log n\rceil}{k} \cdot 2^k k^2 \log n)$, where $k$ is the treewidth of the graph. For every $\epsilon>0$, this bound is $n^{1+\epsilon}\exp O(k)$, which matches a hardness result of Abboud, Vassilevska Williams, and Wang (SODA 2015) and closes an open problem in the multivariate analysis of polynomial-time computation. To this end, we show that the analysis of an algorithm of Cabello and Knauer (Comp. Geom., 2009) in the regime of non-constant treewidth can be improved by revisiting the analysis of orthogonal range searching, improving bounds of the form $\log^d n$ to $\binom{d+\lceil\log n\rceil}{d}$, as originally observed by Monier (J. Alg. 1980). We also investigate the parameterization by vertex cover number. %K Computer Science, Data Structures and Algorithms, cs.DS
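The improvement described in the abstract replaces a log^d n factor by the binomial coefficient binom(d + ceil(log n), d), which is substantially smaller once d grows. A quick numeric comparison, our own illustration with logarithms taken base 2:

from math import comb, ceil, log2

def range_tree_bounds(n, d):
    # Compare the classical log^d n factor with the binomial bound binom(d + ceil(log n), d).
    L = ceil(log2(n))
    return log2(n) ** d, comb(d + L, d)

for d in (2, 5, 10):
    classical, binomial = range_tree_bounds(2 ** 20, d)
    print(d, f"{classical:.3e}", binomial)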
[36]
K. Bringmann and M. Künnemann, “Multivariate Fine-Grained Complexity of Longest Common Subsequence,” 2018. [Online]. Available: http://arxiv.org/abs/1803.00938. (arXiv: 1803.00938)
Abstract
We revisit the classic combinatorial pattern matching problem of finding a longest common subsequence (LCS). For strings $x$ and $y$ of length $n$, a textbook algorithm solves LCS in time $O(n^2)$, but although much effort has been spent, no $O(n^{2-\varepsilon})$-time algorithm is known. Recent work indeed shows that such an algorithm would refute the Strong Exponential Time Hypothesis (SETH) [Abboud, Backurs, Vassilevska Williams + Bringmann, K\"unnemann FOCS'15]. Despite the quadratic-time barrier, for over 40 years an enduring scientific interest continued to produce fast algorithms for LCS and its variations. Particular attention was put into identifying and exploiting input parameters that yield strongly subquadratic time algorithms for special cases of interest, e.g., differential file comparison. This line of research was successfully pursued until 1990, at which time significant improvements came to a halt. In this paper, using the lens of fine-grained complexity, our goal is to (1) justify the lack of further improvements and (2) determine whether some special cases of LCS admit faster algorithms than currently known. To this end, we provide a systematic study of the multivariate complexity of LCS, taking into account all parameters previously discussed in the literature: the input size $n:=\max\{|x|,|y|\}$, the length of the shorter string $m:=\min\{|x|,|y|\}$, the length $L$ of an LCS of $x$ and $y$, the numbers of deletions $\delta := m-L$ and $\Delta := n-L$, the alphabet size, as well as the numbers of matching pairs $M$ and dominant pairs $d$. For any class of instances defined by fixing each parameter individually to a polynomial in terms of the input size, we prove a SETH-based lower bound matching one of three known algorithms. Specifically, we determine the optimal running time for LCS under SETH as $(n+\min\{d, \delta \Delta, \delta m\})^{1\pm o(1)}$. [...]
Export
BibTeX
@online{Bringmann_arXiv1803.00938, TITLE = {Multivariate Fine-Grained Complexity of Longest Common Subsequence}, AUTHOR = {Bringmann, Karl and K{\"u}nnemann, Marvin}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1803.00938}, EPRINT = {1803.00938}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We revisit the classic combinatorial pattern matching problem of finding a longest common subsequence (LCS). For strings $x$ and $y$ of length $n$, a textbook algorithm solves LCS in time $O(n^2)$, but although much effort has been spent, no $O(n^{2-\varepsilon})$-time algorithm is known. Recent work indeed shows that such an algorithm would refute the Strong Exponential Time Hypothesis (SETH) [Abboud, Backurs, Vassilevska Williams + Bringmann, K\"unnemann FOCS'15]. Despite the quadratic-time barrier, for over 40 years an enduring scientific interest continued to produce fast algorithms for LCS and its variations. Particular attention was put into identifying and exploiting input parameters that yield strongly subquadratic time algorithms for special cases of interest, e.g., differential file comparison. This line of research was successfully pursued until 1990, at which time significant improvements came to a halt. In this paper, using the lens of fine-grained complexity, our goal is to (1) justify the lack of further improvements and (2) determine whether some special cases of LCS admit faster algorithms than currently known. To this end, we provide a systematic study of the multivariate complexity of LCS, taking into account all parameters previously discussed in the literature: the input size $n:=\max\{|x|,|y|\}$, the length of the shorter string $m:=\min\{|x|,|y|\}$, the length $L$ of an LCS of $x$ and $y$, the numbers of deletions $\delta := m-L$ and $\Delta := n-L$, the alphabet size, as well as the numbers of matching pairs $M$ and dominant pairs $d$. For any class of instances defined by fixing each parameter individually to a polynomial in terms of the input size, we prove a SETH-based lower bound matching one of three known algorithms. Specifically, we determine the optimal running time for LCS under SETH as $(n+\min\{d, \delta \Delta, \delta m\})^{1\pm o(1)}$. [...]}, }
Endnote
%0 Report %A Bringmann, Karl %A K&#252;nnemann, Marvin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Multivariate Fine-Grained Complexity of Longest Common Subsequence : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3E02-8 %U http://arxiv.org/abs/1803.00938 %D 2018 %X We revisit the classic combinatorial pattern matching problem of finding a longest common subsequence (LCS). For strings $x$ and $y$ of length $n$, a textbook algorithm solves LCS in time $O(n^2)$, but although much effort has been spent, no $O(n^{2-\varepsilon})$-time algorithm is known. Recent work indeed shows that such an algorithm would refute the Strong Exponential Time Hypothesis (SETH) [Abboud, Backurs, Vassilevska Williams + Bringmann, K\"unnemann FOCS'15]. Despite the quadratic-time barrier, for over 40 years an enduring scientific interest continued to produce fast algorithms for LCS and its variations. Particular attention was put into identifying and exploiting input parameters that yield strongly subquadratic time algorithms for special cases of interest, e.g., differential file comparison. This line of research was successfully pursued until 1990, at which time significant improvements came to a halt. In this paper, using the lens of fine-grained complexity, our goal is to (1) justify the lack of further improvements and (2) determine whether some special cases of LCS admit faster algorithms than currently known. To this end, we provide a systematic study of the multivariate complexity of LCS, taking into account all parameters previously discussed in the literature: the input size $n:=\max\{|x|,|y|\}$, the length of the shorter string $m:=\min\{|x|,|y|\}$, the length $L$ of an LCS of $x$ and $y$, the numbers of deletions $\delta := m-L$ and $\Delta := n-L$, the alphabet size, as well as the numbers of matching pairs $M$ and dominant pairs $d$. For any class of instances defined by fixing each parameter individually to a polynomial in terms of the input size, we prove a SETH-based lower bound matching one of three known algorithms. Specifically, we determine the optimal running time for LCS under SETH as $(n+\min\{d, \delta \Delta, \delta m\})^{1\pm o(1)}$. [...] %K Computer Science, Computational Complexity, cs.CC,Computer Science, Data Structures and Algorithms, cs.DS
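Several of the parameters studied in the abstract (n, m, L, delta, Delta, and the number of matching pairs M) are easy to compute directly with the textbook quadratic dynamic program. The sketch below is our own illustration of these definitions, not an algorithm from the paper; dominant pairs are omitted.

def lcs_parameters(x, y):
    # Textbook O(n*m) dynamic program for the length L of a longest common subsequence,
    # plus the parameters n, m, delta = m - L, Delta = n - L,
    # and the number of matching pairs M = #{(i, j) : x[i] == y[j]}.
    if len(x) < len(y):
        x, y = y, x
    n, m = len(x), len(y)
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    L = dp[n][m]
    M = sum(a == b for a in x for b in y)
    return {"n": n, "m": m, "L": L, "delta": m - L, "Delta": n - L, "M": M}

print(lcs_parameters("dynamic", "programming"))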
[37]
K. Bringmann and M. Künnemann, “Multivariate Fine-Grained Complexity of Longest Common Subsequence,” in Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018), New Orleans, LA, USA, 2018.
Export
BibTeX
@inproceedings{Bringmann_SODA18, TITLE = {Multivariate Fine-Grained Complexity of Longest Common Subsequence}, AUTHOR = {Bringmann, Karl and K{\"u}nnemann, Marvin}, LANGUAGE = {eng}, ISBN = {978-1-61197-503-1}, DOI = {10.1137/1.9781611975031.79}, PUBLISHER = {SIAM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018)}, EDITOR = {Czumaj, Artur}, PAGES = {1216--1235}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A Bringmann, Karl %A K&#252;nnemann, Marvin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Multivariate Fine-Grained Complexity of Longest Common Subsequence : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3F0E-C %R 10.1137/1.9781611975031.79 %D 2018 %B Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %Z date of event: 2018-01-07 - 2018-01-10 %C New Orleans, LA, USA %B Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %E Czumaj, Artur %P 1216 - 1235 %I SIAM %@ 978-1-61197-503-1
[38]
K. Bringmann, T. Friedrich, and A. Krohmer, “De-anonymization of Heterogeneous Random Graphs in Quasilinear Time,” Algorithmica, vol. 80, no. 11, 2018.
Export
BibTeX
@article{bringmann_deanonymization_2018, TITLE = {De-anonymization of Heterogeneous Random Graphs in Quasilinear Time}, AUTHOR = {Bringmann, Karl and Friedrich, Tobias and Krohmer, Anton}, LANGUAGE = {eng}, ISSN = {0178-4617}, DOI = {10.1007/s00453-017-0395-0}, PUBLISHER = {Springer-Verlag}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Algorithmica}, VOLUME = {80}, NUMBER = {11}, PAGES = {3397--3427}, }
Endnote
%0 Journal Article %A Bringmann, Karl %A Friedrich, Tobias %A Krohmer, Anton %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T De-anonymization of Heterogeneous Random Graphs in Quasilinear Time : %G eng %U http://hdl.handle.net/21.11116/0000-0001-F6A3-1 %R 10.1007/s00453-017-0395-0 %7 2017-11-15 %D 2018 %J Algorithmica %V 80 %N 11 %& 3397 %P 3397 - 3427 %I Springer-Verlag %C New York, NY %@ false
[39]
K. Bringmann, P. Gawrychowski, S. Mozes, and O. Weimann, “Tree Edit Distance Cannot be Computed in Strongly Subcubic Time (unless APSP can),” in Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018), New Orleans, LA, USA, 2018.
Export
BibTeX
@inproceedings{Bringmann_SODA18b, TITLE = {Tree Edit Distance Cannot be Computed in Strongly Subcubic Time (unless {APSP} can)}, AUTHOR = {Bringmann, Karl and Gawrychowski, Pawe{\l} and Mozes, Shay and Weimann, Oren}, LANGUAGE = {eng}, ISBN = {978-1-61197-503-1}, DOI = {10.1137/1.9781611975031.77}, PUBLISHER = {SIAM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018)}, EDITOR = {Czumaj, Artur}, PAGES = {1190--1206}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A Bringmann, Karl %A Gawrychowski, Pawe&#322; %A Mozes, Shay %A Weimann, Oren %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Tree Edit Distance Cannot be Computed in Strongly Subcubic Time (unless APSP can) : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3F13-5 %R 10.1137/1.9781611975031.77 %D 2018 %B Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %Z date of event: 2018-01-07 - 2018-01-10 %C New Orleans, LA, USA %B Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %E Czumaj, Artur %P 1190 - 1206 %I SIAM %@ 978-1-61197-503-1
[40]
K. Bringmann and S. Krinninger, “A Note on Hardness of Diameter Approximation,” Information Processing Letters, vol. 133, 2018.
Export
BibTeX
@article{Bringmann2018, TITLE = {A Note on Hardness of Diameter Approximation}, AUTHOR = {Bringmann, Karl and Krinninger, Sebastian}, LANGUAGE = {eng}, ISSN = {0020-0190}, DOI = {10.1016/j.ipl.2017.12.010}, PUBLISHER = {Elsevier}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Information Processing Letters}, VOLUME = {133}, PAGES = {10--15}, }
Endnote
%0 Journal Article %A Bringmann, Karl %A Krinninger, Sebastian %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T A Note on Hardness of Diameter Approximation : %G eng %U http://hdl.handle.net/21.11116/0000-0001-2C44-2 %R 10.1016/j.ipl.2017.12.010 %7 2018 %D 2018 %J Information Processing Letters %V 133 %& 10 %P 10 - 15 %I Elsevier %@ false
[41]
K. Bringmann, C. Ikenmeyer, and J. Zuiddam, “On Algebraic Branching Programs of Small Width,” Journal of the ACM, vol. 65, no. 5, 2018.
Export
BibTeX
@article{Bringmann_JACM2018, TITLE = {On Algebraic Branching Programs of Small Width}, AUTHOR = {Bringmann, Karl and Ikenmeyer, Christian and Zuiddam, Jeroen}, LANGUAGE = {eng}, ISSN = {0004-5411}, DOI = {10.1145/3209663}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Journal of the ACM}, VOLUME = {65}, NUMBER = {5}, PAGES = {1--29}, EID = {32}, }
Endnote
%0 Journal Article %A Bringmann, Karl %A Ikenmeyer, Christian %A Zuiddam, Jeroen %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T On Algebraic Branching Programs of Small Width : %G eng %U http://hdl.handle.net/21.11116/0000-0002-1B53-3 %R 10.1145/3209663 %7 2018 %D 2018 %J Journal of the ACM %V 65 %N 5 %& 1 %P 1 - 29 %Z sequence number: 32 %I ACM %C New York, NY %@ false
[42]
K. Bringmann and B. Ray Chaudhury, “Sketching, Streaming, and Fine-Grained Complexity of (Weighted) LCS,” 2018. [Online]. Available: http://arxiv.org/abs/1810.01238. (arXiv: 1810.01238)
Abstract
We study sketching and streaming algorithms for the Longest Common Subsequence problem (LCS) on strings of small alphabet size $|\Sigma|$. For the problem of deciding whether the LCS of strings $x,y$ has length at least $L$, we obtain a sketch size and streaming space usage of $\mathcal{O}(L^{|\Sigma| - 1} \log L)$. We also prove matching unconditional lower bounds. As an application, we study a variant of LCS where each alphabet symbol is equipped with a weight that is given as input, and the task is to compute a common subsequence of maximum total weight. Using our sketching algorithm, we obtain an $\mathcal{O}(\textrm{min}\{nm, n + m^{{\lvert \Sigma \rvert}}\})$-time algorithm for this problem, on strings $x,y$ of length $n,m$, with $n \ge m$. We prove optimality of this running time up to lower order factors, assuming the Strong Exponential Time Hypothesis.
Export
BibTeX
@online{Bringmann_arXiv1810.01238, TITLE = {Sketching, Streaming, and Fine-Grained Complexity of (Weighted) {LCS}}, AUTHOR = {Bringmann, Karl and Ray Chaudhury, Bhaskar}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1810.01238}, EPRINT = {1810.01238}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We study sketching and streaming algorithms for the Longest Common Subsequence problem (LCS) on strings of small alphabet size $|\Sigma|$. For the problem of deciding whether the LCS of strings $x,y$ has length at least $L$, we obtain a sketch size and streaming space usage of $\mathcal{O}(L^{|\Sigma| - 1} \log L)$. We also prove matching unconditional lower bounds. As an application, we study a variant of LCS where each alphabet symbol is equipped with a weight that is given as input, and the task is to compute a common subsequence of maximum total weight. Using our sketching algorithm, we obtain an $\mathcal{O}(\textrm{min}\{nm, n + m^{{\lvert \Sigma \rvert}}\})$-time algorithm for this problem, on strings $x,y$ of length $n,m$, with $n \ge m$. We prove optimality of this running time up to lower order factors, assuming the Strong Exponential Time Hypothesis.}, }
Endnote
%0 Report %A Bringmann, Karl %A Ray Chaudhury, Bhaskar %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Sketching, Streaming, and Fine-Grained Complexity of (Weighted) LCS : %G eng %U http://hdl.handle.net/21.11116/0000-0002-57B9-C %U http://arxiv.org/abs/1810.01238 %D 2018 %X We study sketching and streaming algorithms for the Longest Common Subsequence problem (LCS) on strings of small alphabet size $|\Sigma|$. For the problem of deciding whether the LCS of strings $x,y$ has length at least $L$, we obtain a sketch size and streaming space usage of $\mathcal{O}(L^{|\Sigma| - 1} \log L)$. We also prove matching unconditional lower bounds. As an application, we study a variant of LCS where each alphabet symbol is equipped with a weight that is given as input, and the task is to compute a common subsequence of maximum total weight. Using our sketching algorithm, we obtain an $\mathcal{O}(\textrm{min}\{nm, n + m^{{\lvert \Sigma \rvert}}\})$-time algorithm for this problem, on strings $x,y$ of length $n,m$, with $n \ge m$. We prove optimality of this running time up to lower order factors, assuming the Strong Exponential Time Hypothesis. %K Computer Science, Data Structures and Algorithms, cs.DS,
[43]
K. Bringmann and B. Ray Chaudhury, “Sketching, Streaming, and Fine-Grained Complexity of (Weighted) LCS,” in 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2018), Ahmedabad, India, 2018.
Export
BibTeX
@inproceedings{Bringmann_FSTTCS2018, TITLE = {Sketching, Streaming, and Fine-Grained Complexity of (Weighted) {LCS}}, AUTHOR = {Bringmann, Karl and Ray Chaudhury, Bhaskar}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-093-4}, URL = {urn:nbn:de:0030-drops-99390}, DOI = {10.4230/LIPIcs.FSTTCS.2018.40}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2018)}, EDITOR = {Ganguly, Sumit and Pandya, Paritosh}, PAGES = {1--16}, EID = {40}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {122}, ADDRESS = {Ahmedabad, India}, }
Endnote
%0 Conference Proceedings %A Bringmann, Karl %A Ray Chaudhury, Bhaskar %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Sketching, Streaming, and Fine-Grained Complexity of (Weighted) LCS : %G eng %U http://hdl.handle.net/21.11116/0000-0002-9D0B-2 %R 10.4230/LIPIcs.FSTTCS.2018.40 %U urn:nbn:de:0030-drops-99390 %D 2018 %B 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science %Z date of event: 2018-12-11 - 2018-12-13 %C Ahmedabad, India %B 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science %E Ganguly, Sumit; Pandya, Paritosh %P 1 - 16 %Z sequence number: 40 %I Schloss Dagstuhl %@ 978-3-95977-093-4 %B Leibniz International Proceedings in Informatics %N 122 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9939/
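The weighted LCS variant from the abstract admits the same quadratic dynamic program as plain LCS, with a match contributing the symbol's weight instead of 1; the point of the paper is to beat this baseline for small alphabets. A minimal sketch of that baseline, our own illustration with hypothetical example weights:

def max_weight_common_subsequence(x, y, weight):
    # O(n*m) dynamic program for the weighted LCS variant: every symbol carries a
    # nonnegative weight, and we maximize the total weight of a common subsequence
    # of x and y (plain LCS is the all-weights-one special case).
    n, m = len(x), len(y)
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
            if x[i - 1] == y[j - 1]:
                dp[i][j] = max(dp[i][j], dp[i - 1][j - 1] + weight[x[i - 1]])
    return dp[n][m]

w = {"a": 5, "b": 1, "c": 2}
print(max_weight_common_subsequence("abcab", "bacba", w))  # 12, e.g. the common subsequence "aca"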
[44]
K. Bringmann, M. Künnemann, and A. Nusser, “Fréchet Distance Under Translation: Conditional Hardness and an Algorithm via Offline Dynamic Grid Reachability,” 2018. [Online]. Available: http://arxiv.org/abs/1810.10982. (arXiv: 1810.10982)
Abstract
The discrete Fr\'echet distance is a popular measure for comparing polygonal curves. An important variant is the discrete Fr\'echet distance under translation, which enables detection of similar movement patterns in different spatial domains. For polygonal curves of length $n$ in the plane, the fastest known algorithm runs in time $\tilde{\cal O}(n^{5})$ [Ben Avraham, Kaplan, Sharir '15]. This is achieved by constructing an arrangement of disks of size ${\cal O}(n^{4})$, and then traversing its faces while updating reachability in a directed grid graph of size $N := {\cal O}(n^2)$, which can be done in time $\tilde{\cal O}(\sqrt{N})$ per update [Diks, Sankowski '07]. The contribution of this paper is two-fold. First, although it is an open problem to solve dynamic reachability in directed grid graphs faster than $\tilde{\cal O}(\sqrt{N})$, we improve this part of the algorithm: We observe that an offline variant of dynamic $s$-$t$-reachability in directed grid graphs suffices, and we solve this variant in amortized time $\tilde{\cal O}(N^{1/3})$ per update, resulting in an improved running time of $\tilde{\cal O}(n^{4.66...})$ for the discrete Fr\'echet distance under translation. Second, we provide evidence that constructing the arrangement of size ${\cal O}(n^{4})$ is necessary in the worst case, by proving a conditional lower bound of $n^{4 - o(1)}$ on the running time for the discrete Fr\'echet distance under translation, assuming the Strong Exponential Time Hypothesis.
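For orientation, the plain discrete Fréchet distance (without translation) underlying this abstract is computable by the standard O(n^2) dynamic program over pairs of curve vertices. The following minimal Python sketch implements that textbook recurrence only; it is not the paper's translation algorithm or its grid-reachability machinery:

from math import dist  # Euclidean distance; available in Python 3.8+

def discrete_frechet(P, Q):
    """Textbook O(len(P)*len(Q)) dynamic program for the discrete
    Frechet distance between two polygonal curves given as point lists."""
    n, m = len(P), len(Q)
    # dp[i][j] = discrete Frechet distance of the prefixes P[:i+1], Q[:j+1]
    dp = [[0.0] * m for _ in range(n)]
    for i in range(n):
        for j in range(m):
            d = dist(P[i], Q[j])
            if i == 0 and j == 0:
                best_prev = 0.0
            elif i == 0:
                best_prev = dp[0][j - 1]
            elif j == 0:
                best_prev = dp[i - 1][0]
            else:
                best_prev = min(dp[i - 1][j], dp[i - 1][j - 1], dp[i][j - 1])
            dp[i][j] = max(d, best_prev)
    return dp[n - 1][m - 1]

# Example: two short parallel curves in the plane.
print(discrete_frechet([(0, 0), (1, 0), (2, 0)], [(0, 1), (1, 1), (2, 1)]))  # -> 1.0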
Export
BibTeX
@online{Bringmann_arXiv1810.10982, TITLE = {Fr{\'e}chet Distance Under Translation: Conditional Hardness and an Algorithm via Offline Dynamic Grid Reachability}, AUTHOR = {Bringmann, Karl and K{\"u}nnemann, Marvin and Nusser, Andr{\'e}}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1810.10982}, EPRINT = {1810.10982}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {The discrete Fr\'echet distance is a popular measure for comparing polygonal curves. An important variant is the discrete Fr\'echet distance under translation, which enables detection of similar movement patterns in different spatial domains. For polygonal curves of length $n$ in the plane, the fastest known algorithm runs in time $\tilde{\cal O}(n^{5})$ [Ben Avraham, Kaplan, Sharir '15]. This is achieved by constructing an arrangement of disks of size ${\cal O}(n^{4})$, and then traversing its faces while updating reachability in a directed grid graph of size $N := {\cal O}(n^2)$, which can be done in time $\tilde{\cal O}(\sqrt{N})$ per update [Diks, Sankowski '07]. The contribution of this paper is two-fold. First, although it is an open problem to solve dynamic reachability in directed grid graphs faster than $\tilde{\cal O}(\sqrt{N})$, we improve this part of the algorithm: We observe that an offline variant of dynamic $s$-$t$-reachability in directed grid graphs suffices, and we solve this variant in amortized time $\tilde{\cal O}(N^{1/3})$ per update, resulting in an improved running time of $\tilde{\cal O}(n^{4.66...})$ for the discrete Fr\'echet distance under translation. Second, we provide evidence that constructing the arrangement of size ${\cal O}(n^{4})$ is necessary in the worst case, by proving a conditional lower bound of $n^{4 -- o(1)}$ on the running time for the discrete Fr\'echet distance under translation, assuming the Strong Exponential Time Hypothesis.}, }
Endnote
%0 Report %A Bringmann, Karl %A K&#252;nnemann, Marvin %A Nusser, Andr&#233; %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Fr&#233;chet Distance Under Translation: Conditional Hardness and an Algorithm via Offline Dynamic Grid Reachability : %G eng %U http://hdl.handle.net/21.11116/0000-0002-9E35-1 %U http://arxiv.org/abs/1810.10982 %D 2018 %X The discrete Fr\'echet distance is a popular measure for comparing polygonal curves. An important variant is the discrete Fr\'echet distance under translation, which enables detection of similar movement patterns in different spatial domains. For polygonal curves of length $n$ in the plane, the fastest known algorithm runs in time $\tilde{\cal O}(n^{5})$ [Ben Avraham, Kaplan, Sharir '15]. This is achieved by constructing an arrangement of disks of size ${\cal O}(n^{4})$, and then traversing its faces while updating reachability in a directed grid graph of size $N := {\cal O}(n^2)$, which can be done in time $\tilde{\cal O}(\sqrt{N})$ per update [Diks, Sankowski '07]. The contribution of this paper is two-fold. First, although it is an open problem to solve dynamic reachability in directed grid graphs faster than $\tilde{\cal O}(\sqrt{N})$, we improve this part of the algorithm: We observe that an offline variant of dynamic $s$-$t$-reachability in directed grid graphs suffices, and we solve this variant in amortized time $\tilde{\cal O}(N^{1/3})$ per update, resulting in an improved running time of $\tilde{\cal O}(n^{4.66...})$ for the discrete Fr\'echet distance under translation. Second, we provide evidence that constructing the arrangement of size ${\cal O}(n^{4})$ is necessary in the worst case, by proving a conditional lower bound of $n^{4 - o(1)}$ on the running time for the discrete Fr\'echet distance under translation, assuming the Strong Exponential Time Hypothesis. %K Computer Science, Data Structures and Algorithms, cs.DS
[45]
J. Bund, C. Lenzen, and M. Medina, “Optimal Metastability-containing Sorting Networks,” in Proceedings of the 2018 Design, Automation & Test in Europe (DATE 2018), Dresden, Germany, 2018.
Export
BibTeX
@inproceedings{Bund_DATE2018, TITLE = {Optimal Metastability-containing Sorting Networks}, AUTHOR = {Bund, Johannes and Lenzen, Christoph and Medina, Moti}, LANGUAGE = {eng}, ISBN = {978-3-9819263-1-6}, DOI = {10.23919/DATE.2018.8342063}, PUBLISHER = {IEEE}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {Proceedings of the 2018 Design, Automation \& Test in Europe (DATE 2018)}, PAGES = {521--526}, ADDRESS = {Dresden, Germany}, }
Endnote
%0 Conference Proceedings %A Bund, Johannes %A Lenzen, Christoph %A Medina, Moti %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Optimal Metastability-containing Sorting Networks : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3F69-4 %R 10.23919/DATE.2018.8342063 %D 2018 %B Design, Automation & Test in Europe Conference & Exhibition %Z date of event: 2018-03-19 - 2018-03-23 %C Dresden, Germany %B Proceedings of the 2018 Design, Automation & Test in Europe %P 521 - 526 %I IEEE %@ 978-3-9819263-1-6
[46]
J. Bund, C. Lenzen, and M. Medina, “Optimal Metastability-Containing Sorting Networks,” 2018. [Online]. Available: http://arxiv.org/abs/1801.07549. (arXiv: 1801.07549)
Abstract
When setup/hold times of bistable elements are violated, they may become metastable, i.e., enter a transient state that is neither digital 0 nor 1. In general, metastability cannot be avoided, a problem that manifests whenever taking discrete measurements of analog values. Metastability of the output then reflects uncertainty as to whether a measurement should be rounded up or down to the next possible measurement outcome. Surprisingly, Lenzen and Medina (ASYNC 2016) showed that metastability can be contained, i.e., measurement values can be correctly sorted without resolving metastability first. However, both their work and the state of the art by Bund et al. (DATE 2017) leave open whether such a solution can be as small and fast as standard sorting networks. We show that this is indeed possible, by providing a circuit that sorts Gray code inputs (possibly containing a metastable bit) and has asymptotically optimal depth and size. Concretely, for 10-channel sorting networks and 16-bit wide inputs, we improve by 48.46% in delay and by 71.58% in area over Bund et al. Our simulations indicate that straightforward transistor-level optimization is likely to result in performance on par with standard (non-containing) solutions.
Export
BibTeX
@online{Bund_arXiv1801.07549, TITLE = {Optimal Metastability-Containing Sorting Networks}, AUTHOR = {Bund, Johannes and Lenzen, Christoph and Medina, Moti}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1801.07549}, EPRINT = {1801.07549}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {When setup/hold times of bistable elements are violated, they may become metastable, i.e., enter a transient state that is neither digital 0 nor 1. In general, metastability cannot be avoided, a problem that manifests whenever taking discrete measurements of analog values. Metastability of the output then reflects uncertainty as to whether a measurement should be rounded up or down to the next possible measurement outcome. Surprisingly, Lenzen and Medina (ASYNC 2016) showed that metastability can be contained, i.e., measurement values can be correctly sorted without resolving metastability first. However, both their work and the state of the art by Bund et al. (DATE 2017) leave open whether such a solution can be as small and fast as standard sorting networks. We show that this is indeed possible, by providing a circuit that sorts Gray code inputs (possibly containing a metastable bit) and has asymptotically optimal depth and size. Concretely, for 10-channel sorting networks and 16-bit wide inputs, we improve by 48.46% in delay and by 71.58% in area over Bund et al. Our simulations indicate that straightforward transistor-level optimization is likely to result in performance on par with standard (non-containing) solutions.}, }
Endnote
%0 Report %A Bund, Johannes %A Lenzen, Christoph %A Medina, Moti %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Optimal Metastability-Containing Sorting Networks : %G eng %U http://hdl.handle.net/21.11116/0000-0002-1801-2 %U http://arxiv.org/abs/1801.07549 %D 2018 %X When setup/hold times of bistable elements are violated, they may become metastable, i.e., enter a transient state that is neither digital 0 nor 1. In general, metastability cannot be avoided, a problem that manifests whenever taking discrete measurements of analog values. Metastability of the output then reflects uncertainty as to whether a measurement should be rounded up or down to the next possible measurement outcome. Surprisingly, Lenzen and Medina (ASYNC 2016) showed that metastability can be contained, i.e., measurement values can be correctly sorted without resolving metastability first. However, both their work and the state of the art by Bund et al. (DATE 2017) leave open whether such a solution can be as small and fast as standard sorting networks. We show that this is indeed possible, by providing a circuit that sorts Gray code inputs (possibly containing a metastable bit) and has asymptotically optimal depth and size. Concretely, for 10-channel sorting networks and 16-bit wide inputs, we improve by 48.46% in delay and by 71.58% in area over Bund et al. Our simulations indicate that straightforward transistor-level optimization is likely to result in performance on par with standard (non-containing) solutions. %K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[47]
J. Bund, C. Lenzen, and M. Medina, “Small Hazard-free Transducers,” 2018. [Online]. Available: http://arxiv.org/abs/1811.12369. (arXiv: 1811.12369)
Abstract
Recently, an unconditional exponential separation between the hazard-free complexity and (standard) circuit complexity of explicit functions has been shown. This raises the question: which classes of functions permit efficient hazard-free circuits? Our main result is as follows. A \emph{transducer} is a finite state machine that transcribes, symbol by symbol, an input string of length $n$ into an output string of length $n$. We prove that any function arising from a transducer with $s$ states, that is input symbols which are encoded by $\ell$ bits, has a hazard-free circuit of size $2^{\BO(s+\ell)}\cdot n$ and depth $\BO(\ell+ s\cdot \log n)$; in particular, if $s, \ell\in \BO(1)$, size and depth are asymptotically optimal. We utilize our main result to derive efficient circuits for \emph{$k$-recoverable addition}. Informally speaking, a code is \emph{$k$-recoverable} if it does not increase uncertainty regarding the encoded value, so long as it is guaranteed that it is from $\{x,x+1,\ldots,x+k\}$ for some $x\in \NN_0$. We provide an asymptotically optimal $k$-recoverable code. We also realize a transducer with $\BO(k)$ states that adds two codewords from this $k$-recoverable code. Combined with our main result, we obtain a hazard-free adder circuit of size $2^{\BO(k)}n$ and depth $\BO(k\log n)$ with respect to this code, i.e., a $k$-recoverable adder circuit that adds two codewords of $n$ bits each. In other words, $k$-recoverable addition is fixed-parameter tractable with respect to $k$.
Export
BibTeX
@online{Bund_arXiv1811.12369, TITLE = {Small Hazard-free Transducers}, AUTHOR = {Bund, Johannes and Lenzen, Christoph and Medina, Moti}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1811.12369}, EPRINT = {1811.12369}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Recently, an unconditional exponential separation between the hazard-free complexity and (standard) circuit complexity of explicit functions has been shown. This raises the question: which classes of functions permit efficient hazard-free circuits? Our main result is as follows. A \emph{transducer} is a finite state machine that transcribes, symbol by symbol, an input string of length $n$ into an output string of length $n$. We prove that any function arising from a transducer with $s$ states, that is input symbols which are encoded by $\ell$ bits, has a hazard-free circuit of size $2^{\BO(s+\ell)}\cdot n$ and depth $\BO(\ell+ s\cdot \log n)$; in particular, if $s, \ell\in \BO(1)$, size and depth are asymptotically optimal. We utilize our main result to derive efficient circuits for \emph{$k$-recoverable addition}. Informally speaking, a code is \emph{$k$-recoverable} if it does not increase uncertainty regarding the encoded value, so long as it is guaranteed that it is from $\{x,x+1,\ldots,x+k\}$ for some $x\in \NN_0$. We provide an asymptotically optimal $k$-recoverable code. We also realize a transducer with $\BO(k)$ states that adds two codewords from this $k$-recoverable code. Combined with our main result, we obtain a hazard-free adder circuit of size $2^{\BO(k)}n$ and depth $\BO(k\log n)$ with respect to this code, i.e., a $k$-recoverable adder circuit that adds two codewords of $n$ bits each. In other words, $k$-recoverable addition is fixed-parameter tractable with respect to $k$.}, }
Endnote
%0 Report %A Bund, Johannes %A Lenzen, Christoph %A Medina, Moti %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Small Hazard-free Transducers : %G eng %U http://hdl.handle.net/21.11116/0000-0002-9FAD-9 %U http://arxiv.org/abs/1811.12369 %D 2018 %X Recently, an unconditional exponential separation between the hazard-free complexity and (standard) circuit complexity of explicit functions has been shown. This raises the question: which classes of functions permit efficient hazard-free circuits? Our main result is as follows. A \emph{transducer} is a finite state machine that transcribes, symbol by symbol, an input string of length $n$ into an output string of length $n$. We prove that any function arising from a transducer with $s$ states, that is input symbols which are encoded by $\ell$ bits, has a hazard-free circuit of size $2^{\BO(s+\ell)}\cdot n$ and depth $\BO(\ell+ s\cdot \log n)$; in particular, if $s, \ell\in \BO(1)$, size and depth are asymptotically optimal. We utilize our main result to derive efficient circuits for \emph{$k$-recoverable addition}. Informally speaking, a code is \emph{$k$-recoverable} if it does not increase uncertainty regarding the encoded value, so long as it is guaranteed that it is from $\{x,x+1,\ldots,x+k\}$ for some $x\in \NN_0$. We provide an asymptotically optimal $k$-recoverable code. We also realize a transducer with $\BO(k)$ states that adds two codewords from this $k$-recoverable code. Combined with our main result, we obtain a hazard-free adder circuit of size $2^{\BO(k)}n$ and depth $\BO(k\log n)$ with respect to this code, i.e., a $k$-recoverable adder circuit that adds two codewords of $n$ bits each. In other words, $k$-recoverable addition is fixed-parameter tractable with respect to $k$. %K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Computational Complexity, cs.CC
[48]
P. Chalermsook, A. Schmid, and S. Uniyal, “A Tight Extremal Bound on the Lovász Cactus Number in Planar Graphs,” 2018. [Online]. Available: http://arxiv.org/abs/1804.03485. (arXiv: 1804.03485)
Abstract
A cactus graph is a graph in which any two cycles are edge-disjoint. We present a constructive proof of the fact that any plane graph $G$ contains a cactus subgraph $C$ where $C$ contains at least a $\frac{1}{6}$ fraction of the triangular faces of $G$. We also show that this ratio cannot be improved by showing a tight lower bound. Together with an algorithm for linear matroid parity, our bound implies two approximation algorithms for computing "dense planar structures" inside any graph: (i) A $\frac{1}{6}$ approximation algorithm for, given any graph $G$, finding a planar subgraph with a maximum number of triangular faces; this improves upon the previous $\frac{1}{11}$-approximation; (ii) An alternate (and arguably more illustrative) proof of the $\frac{4}{9}$ approximation algorithm for finding a planar subgraph with a maximum number of edges. Our bound is obtained by analyzing a natural local search strategy and heavily exploiting the exchange arguments. Therefore, this suggests the power of local search in handling problems of this kind.
Export
BibTeX
@online{Chalermsook_arXiv1804.03485, TITLE = {A Tight Extremal Bound on the {Lov\'{a}sz} Cactus Number in Planar Graphs}, AUTHOR = {Chalermsook, Parinya and Schmid, Andreas and Uniyal, Sumedha}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1804.03485}, EPRINT = {1804.03485}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {A cactus graph is a graph in which any two cycles are edge-disjoint. We present a constructive proof of the fact that any plane graph $G$ contains a cactus subgraph $C$ where $C$ contains at least a $\frac{1}{6}$ fraction of the triangular faces of $G$. We also show that this ratio cannot be improved by showing a tight lower bound. Together with an algorithm for linear matroid parity, our bound implies two approximation algorithms for computing "dense planar structures" inside any graph: (i) A $\frac{1}{6}$ approximation algorithm for, given any graph $G$, finding a planar subgraph with a maximum number of triangular faces; this improves upon the previous $\frac{1}{11}$-approximation; (ii) An alternate (and arguably more illustrative) proof of the $\frac{4}{9}$ approximation algorithm for finding a planar subgraph with a maximum number of edges. Our bound is obtained by analyzing a natural local search strategy and heavily exploiting the exchange arguments. Therefore, this suggests the power of local search in handling problems of this kind.}, }
Endnote
%0 Report %A Chalermsook, Parinya %A Schmid, Andreas %A Uniyal, Sumedha %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T A Tight Extremal Bound on the Lov&#225;sz Cactus Number in Planar Graphs : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5D0-0 %U http://arxiv.org/abs/1804.03485 %D 2018 %X A cactus graph is a graph in which any two cycles are edge-disjoint. We present a constructive proof of the fact that any plane graph $G$ contains a cactus subgraph $C$ where $C$ contains at least a $\frac{1}{6}$ fraction of the triangular faces of $G$. We also show that this ratio cannot be improved by showing a tight lower bound. Together with an algorithm for linear matroid parity, our bound implies two approximation algorithms for computing "dense planar structures" inside any graph: (i) A $\frac{1}{6}$ approximation algorithm for, given any graph $G$, finding a planar subgraph with a maximum number of triangular faces; this improves upon the previous $\frac{1}{11}$-approximation; (ii) An alternate (and arguably more illustrative) proof of the $\frac{4}{9}$ approximation algorithm for finding a planar subgraph with a maximum number of edges. Our bound is obtained by analyzing a natural local search strategy and heavily exploiting the exchange arguments. Therefore, this suggests the power of local search in handling problems of this kind. %K Computer Science, Discrete Mathematics, cs.DM,Computer Science, Data Structures and Algorithms, cs.DS,Mathematics, Combinatorics, math.CO
[49]
P. Chalermsook, M. Goswami, L. Kozma, K. Mehlhorn, and T. Saranurak, “Multi-Finger Binary Search Trees,” in 29th International Symposium on Algorithms and Computation (ISAAC 2018), Jiaoxi, Yilan, Taiwan, 2018.
Export
BibTeX
@inproceedings{Chalermsook_ISAAC2018b, TITLE = {Multi-Finger Binary Search Trees}, AUTHOR = {Chalermsook, Parinya and Goswami, Mayank and Kozma, L{\'a}szl{\'o} and Mehlhorn, Kurt and Saranurak, Thatchaphol}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-094-1}, URL = {urn:nbn:de:0030-drops-100032}, DOI = {10.4230/LIPIcs.ISAAC.2018.55}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {29th International Symposium on Algorithms and Computation (ISAAC 2018)}, EDITOR = {Hsu, Wen-Lian and Lee, Der-Tsai and Liao, Chung-Shou}, PAGES = {1--26}, EID = {55}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {123}, ADDRESS = {Jiaoxi, Yilan, Taiwan}, }
Endnote
%0 Conference Proceedings %A Chalermsook, Parinya %A Goswami, Mayank %A Kozma, L&#225;szl&#243; %A Mehlhorn, Kurt %A Saranurak, Thatchaphol %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Multi-Finger Binary Search Trees : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AADE-5 %R 10.4230/LIPIcs.ISAAC.2018.55 %U urn:nbn:de:0030-drops-100032 %D 2018 %B 29th International Symposium on Algorithms and Computation %Z date of event: 2018-12-16 - 2018-12-19 %C Jiaoxi, Yilan, Taiwan %B 29th International Symposium on Algorithms and Computation %E Hsu, Wen-Lian; Lee, Der-Tsai; Liao, Chung-Shou %P 1 - 26 %Z sequence number: 55 %I Schloss Dagstuhl %@ 978-3-95977-094-1 %B Leibniz International Proceedings in Informatics %N 123 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/10003/http://drops.dagstuhl.de/doku/urheberrecht1.html
[50]
P. Chalermsook, S. Das, G. Even, B. Laekhanukit, and D. Vaz, “Survivable Network Design for Group Connectivity in Low-Treewidth Graphs,” 2018. [Online]. Available: http://arxiv.org/abs/1802.10403. (arXiv: 1802.10403)
Abstract
In the Group Steiner Tree problem (GST), we are given a (vertex or edge)-weighted graph $G=(V,E)$ on $n$ vertices, a root vertex $r$ and a collection of groups $\{S_i\}_{i\in[h]}: S_i\subseteq V(G)$. The goal is to find a min-cost subgraph $H$ that connects the root to every group. We consider a fault-tolerant variant of GST, which we call Restricted (Rooted) Group SNDP. In this setting, each group $S_i$ has a demand $k_i\in[k],k\in\mathbb N$, and we wish to find a min-cost $H\subseteq G$ such that, for each group $S_i$, there is a vertex in $S_i$ connected to the root via $k_i$ (vertex or edge) disjoint paths. While GST admits $O(\log^2 n\log h)$ approximation, its high connectivity variants are Label-Cover hard, and for the vertex-weighted version, the hardness holds even when $k=2$. Previously, positive results were known only for the edge-weighted version when $k=2$ [Gupta et al., SODA 2010; Khandekar et al., Theor. Comput. Sci., 2012] and for a relaxed variant where the disjoint paths may end at different vertices in a group [Chalermsook et al., SODA 2015]. Our main result is an $O(\log n\log h)$ approximation for Restricted Group SNDP that runs in time $n^{f(k, w)}$, where $w$ is the treewidth of $G$. This nearly matches the lower bound when $k$ and $w$ are constant. The key to achieving this result is a non-trivial extension of the framework in [Chalermsook et al., SODA 2017], which embeds all feasible solutions to the problem into a dynamic program (DP) table. However, finding the optimal solution in the DP table remains intractable. We formulate a linear program relaxation for the DP and obtain an approximate solution via randomized rounding. This framework also allows us to systematically construct DP tables for high-connectivity problems. As a result, we present new exact algorithms for several variants of survivable network design problems in low-treewidth graphs.
Export
BibTeX
@online{Chalermsook_arXiv1802.10403, TITLE = {Survivable Network Design for Group Connectivity in Low-Treewidth Graphs}, AUTHOR = {Chalermsook, Parinya and Das, Syamantak and Even, Guy and Laekhanukit, Bundit and Vaz, Daniel}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1802.10403}, EPRINT = {1802.10403}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {In the Group Steiner Tree problem (GST), we are given a (vertex or edge)-weighted graph $G=(V,E)$ on $n$ vertices, a root vertex $r$ and a collection of groups $\{S_i\}_{i\in[h]}: S_i\subseteq V(G)$. The goal is to find a min-cost subgraph $H$ that connects the root to every group. We consider a fault-tolerant variant of GST, which we call Restricted (Rooted) Group SNDP. In this setting, each group $S_i$ has a demand $k_i\in[k],k\in\mathbb N$, and we wish to find a min-cost $H\subseteq G$ such that, for each group $S_i$, there is a vertex in $S_i$ connected to the root via $k_i$ (vertex or edge) disjoint paths. While GST admits $O(\log^2 n\log h)$ approximation, its high connectivity variants are Label-Cover hard, and for the vertex-weighted version, the hardness holds even when $k=2$. Previously, positive results were known only for the edge-weighted version when $k=2$ [Gupta et al., SODA 2010; Khandekar et al., Theor. Comput. Sci., 2012] and for a relaxed variant where the disjoint paths may end at different vertices in a group [Chalermsook et al., SODA 2015]. Our main result is an $O(\log n\log h)$ approximation for Restricted Group SNDP that runs in time $n^{f(k, w)}$, where $w$ is the treewidth of $G$. This nearly matches the lower bound when $k$ and $w$ are constant. The key to achieving this result is a non-trivial extension of the framework in [Chalermsook et al., SODA 2017], which embeds all feasible solutions to the problem into a dynamic program (DP) table. However, finding the optimal solution in the DP table remains intractable. We formulate a linear program relaxation for the DP and obtain an approximate solution via randomized rounding. This framework also allows us to systematically construct DP tables for high-connectivity problems. As a result, we present new exact algorithms for several variants of survivable network design problems in low-treewidth graphs.}, }
Endnote
%0 Report %A Chalermsook, Parinya %A Das, Syamantak %A Even, Guy %A Laekhanukit, Bundit %A Vaz, Daniel %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Survivable Network Design for Group Connectivity in Low-Treewidth Graphs : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A84E-A %U http://arxiv.org/abs/1802.10403 %D 2018 %X In the Group Steiner Tree problem (GST), we are given a (vertex or edge)-weighted graph $G=(V,E)$ on $n$ vertices, a root vertex $r$ and a collection of groups $\{S_i\}_{i\in[h]}: S_i\subseteq V(G)$. The goal is to find a min-cost subgraph $H$ that connects the root to every group. We consider a fault-tolerant variant of GST, which we call Restricted (Rooted) Group SNDP. In this setting, each group $S_i$ has a demand $k_i\in[k],k\in\mathbb N$, and we wish to find a min-cost $H\subseteq G$ such that, for each group $S_i$, there is a vertex in $S_i$ connected to the root via $k_i$ (vertex or edge) disjoint paths. While GST admits $O(\log^2 n\log h)$ approximation, its high connectivity variants are Label-Cover hard, and for the vertex-weighted version, the hardness holds even when $k=2$. Previously, positive results were known only for the edge-weighted version when $k=2$ [Gupta et al., SODA 2010; Khandekar et al., Theor. Comput. Sci., 2012] and for a relaxed variant where the disjoint paths may end at different vertices in a group [Chalermsook et al., SODA 2015]. Our main result is an $O(\log n\log h)$ approximation for Restricted Group SNDP that runs in time $n^{f(k, w)}$, where $w$ is the treewidth of $G$. This nearly matches the lower bound when $k$ and $w$ are constant. The key to achieving this result is a non-trivial extension of the framework in [Chalermsook et al., SODA 2017], which embeds all feasible solutions to the problem into a dynamic program (DP) table. However, finding the optimal solution in the DP table remains intractable. We formulate a linear program relaxation for the DP and obtain an approximate solution via randomized rounding. This framework also allows us to systematically construct DP tables for high-connectivity problems. As a result, we present new exact algorithms for several variants of survivable network design problems in low-treewidth graphs. %K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Discrete Mathematics, cs.DM
[51]
P. Chalermsook, S. Das, G. Even, B. Laekhanukit, and D. Vaz, “Survivable Network Design for Group Connectivity in Low-Treewidth Graphs,” in Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques (APPROX/RANDOM 2018), Princeton, NJ, USA, 2018.
Export
BibTeX
@inproceedings{Chalermsook_APPROXRANDOM18, TITLE = {Survivable Network Design for Group Connectivity in Low-Treewidth Graphs}, AUTHOR = {Chalermsook, Parinya and Das, Syamantak and Even, Guy and Laekhanukit, Bundit and Vaz, Daniel}, LANGUAGE = {eng}, ISBN = {978-3-95977-085-9}, URL = {urn:nbn:de:0030-drops-94127}, DOI = {10.4230/LIPIcs.APPROX-RANDOM.2018.8}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques (APPROX/RANDOM 2018)}, EDITOR = {Blais, Eric and Jansen, Klaus and Rolim, Jos{\'e} D. P. and Steurer, David}, PAGES = {1--19}, EID = {8}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {116}, ADDRESS = {Princeton, NJ, USA}, }
Endnote
%0 Conference Proceedings %A Chalermsook, Parinya %A Das, Syamantak %A Even, Guy %A Laekhanukit, Bundit %A Vaz, Daniel %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Survivable Network Design for Group Connectivity in Low-Treewidth Graphs : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A832-8 %R 10.4230/LIPIcs.APPROX-RANDOM.2018.8 %U urn:nbn:de:0030-drops-94127 %D 2018 %B 21st International Workshop on Approximation Algorithms for Combinatorial Optimization Problems / 22nd International Workshop on Randomization and Computation %Z date of event: 2018-08-20 - 2018-08-22 %C Princeton, NJ, USA %B Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques %E Blais, Eric; Jansen, Klaus; Rolim, Jos&#233; D. P.; Steurer, David %P 1 - 19 %Z sequence number: 8 %I Schloss Dagstuhl %@ 978-3-95977-085-9 %B Leibniz International Proceedings in Informatics %N 116 %U http://drops.dagstuhl.de/opus/volltexte/2018/9412/http://drops.dagstuhl.de/doku/urheberrecht1.html
[52]
L. S. Chandran, A. Das, D. Issac, and E. J. van Leeuwen, “Algorithms and Bounds for Very Strong Rainbow Coloring,” in LATIN 2018: Theoretical Informatics, Buenos Aires, Argentina, 2018.
Export
BibTeX
@inproceedings{Chandran_LATIN2018, TITLE = {Algorithms and Bounds for Very Strong Rainbow Coloring}, AUTHOR = {Chandran, L. Sunil and Das, Anita and Issac, Davis and van Leeuwen, Erik Jan}, LANGUAGE = {eng}, ISBN = {978-3-319-77403-9}, DOI = {10.1007/978-3-319-77404-6_46}, PUBLISHER = {Springer}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {LATIN 2018: Theoretical Informatics}, EDITOR = {Bender, Michael A. and Farach-Colton, Mart{\'i}n and Mosteiro, Miguel A.}, PAGES = {625--639}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {10807}, ADDRESS = {Buenos Aires, Argentina}, }
Endnote
%0 Conference Proceedings %A Chandran, L. Sunil %A Das, Anita %A Issac, Davis %A van Leeuwen, Erik Jan %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Algorithms and Bounds for Very Strong Rainbow Coloring : %G eng %U http://hdl.handle.net/21.11116/0000-0002-576A-6 %R 10.1007/978-3-319-77404-6_46 %D 2018 %B 13th Latin American Theoretical Informatics Symposium %Z date of event: 2018-04-16 - 2018-04-19 %C Buenos Aires, Argentina %B LATIN 2018: Theoretical Informatics %E Bender, Michael A.; Farach-Colton, Mart&#237;n; Mosteiro, Miguel A. %P 625 - 639 %I Springer %@ 978-3-319-77403-9 %B Lecture Notes in Computer Science %N 10807
[53]
L. S. Chandran, Y. K. Cheung, and D. Issac, “Spanning Tree Congestion and Computation of Generalized Györi-Lovász Partition,” in 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), Prague, Czech Republic, 2018.
Export
BibTeX
@inproceedings{stc-gyo-lov-2018-chandran, TITLE = {Spanning Tree Congestion and Computation of Generalized {Gy{\"o}ri-Lov{\'a}sz} Partition}, AUTHOR = {Chandran, L. Sunil and Cheung, Yun Kuen and Issac, Davis}, LANGUAGE = {eng}, ISBN = {978-3-95977-076-7}, URL = {urn:nbn:de:0030-drops-90361}, DOI = {10.4230/LIPIcs.ICALP.2018.32}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {45th International Colloquium on Automata, Languages, and Programming (ICALP 2018)}, EDITOR = {Chatzigiannakis, Ioannis and Kaklamanis, Christos and Marx, D{\'a}niel and Sannella, Donald}, PAGES = {1--14}, EID = {32}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {107}, ADDRESS = {Prague, Czech Republic}, }
Endnote
%0 Conference Proceedings %A Chandran, L. Sunil %A Cheung, Yun Kuen %A Issac, Davis %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Spanning Tree Congestion and Computation of Generalized Gy&#246;ri-Lov&#225;sz Partition : %G eng %U http://hdl.handle.net/21.11116/0000-0002-9E67-9 %R 10.4230/LIPIcs.ICALP.2018.32 %U urn:nbn:de:0030-drops-90361 %D 2018 %B 45th International Colloquium on Automata, Languages, and Programming %Z date of event: 2018-07-09 - 2018-07-13 %C Prague, Czech Republic %B 45th International Colloquium on Automata, Languages, and Programming %E Chatzigiannakis, Ioannis; Kaklamanis, Christos; Marx, D&#225;niel; Sannella, Donald %P 1 - 14 %Z sequence number: 32 %I Schloss Dagstuhl %@ 978-3-95977-076-7 %B Leibniz International Proceedings in Informatics %N 107 %U http://drops.dagstuhl.de/opus/volltexte/2018/9036/http://drops.dagstuhl.de/doku/urheberrecht1.html
[54]
T. M. Chan, T. C. van Dijk, K. Fleszar, J. Spoerhase, and A. Wolff, “Stabbing Rectangles by Line Segments - How Decomposition Reduces the Shallow-Cell Complexity,” in 29th International Symposium on Algorithms and Computation (ISAAC 2018), Jiaoxi, Yilan, Taiwan, 2018.
Export
BibTeX
@inproceedings{Chan_ISAAC2018b, TITLE = {Stabbing Rectangles by Line Segments -- How Decomposition Reduces the Shallow-Cell Complexity}, AUTHOR = {Chan, Timothy M. and van Dijk, Thomas C. and Fleszar, Krzysztof and Spoerhase, Joachim and Wolff, Alexander}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-094-1}, URL = {urn:nbn:de:0030-drops-100094}, DOI = {10.4230/LIPIcs.ISAAC.2018.61}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {29th International Symposium on Algorithms and Computation (ISAAC 2018)}, EDITOR = {Hsu, Wen-Lian and Lee, Der-Tsai and Liao, Chung-Shou}, PAGES = {1--13}, EID = {61}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {123}, ADDRESS = {Jiaoxi, Yilan, Taiwan}, }
Endnote
%0 Conference Proceedings %A Chan, Timothy M. %A van Dijk, Thomas C. %A Fleszar, Krzysztof %A Spoerhase, Joachim %A Wolff, Alexander %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Stabbing Rectangles by Line Segments - How Decomposition Reduces the Shallow-Cell Complexity : %G eng %U http://hdl.handle.net/21.11116/0000-0002-ADEA-4 %R 10.4230/LIPIcs.ISAAC.2018.61 %U urn:nbn:de:0030-drops-100094 %D 2018 %B 29th International Symposium on Algorithms and Computation %Z date of event: 2018-12-16 - 2018-12-19 %C Jiaoxi, Yilan, Taiwan %B 29th International Symposium on Algorithms and Computation %E Hsu, Wen-Lian; Lee, Der-Tsai; Liao, Chung-Shou %P 1 - 13 %Z sequence number: 61 %I Schloss Dagstuhl %@ 978-3-95977-094-1 %B Leibniz International Proceedings in Informatics %N 123 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/10009/http://drops.dagstuhl.de/doku/urheberrecht1.html
[55]
N. Chen, M. Hoefer, M. Künnemann, C. Lin, and P. Miao, “Secretary Markets with Local Information,” Distributed Computing. (Accepted/in press)
Export
BibTeX
@article{Chen2018, TITLE = {Secretary Markets with Local Information}, AUTHOR = {Chen, Ning and Hoefer, Martin and K{\"u}nnemann, Marvin and Lin, Chengyu and Miao, Peihan}, LANGUAGE = {eng}, ISSN = {0178-2770}, DOI = {10.1007/s00446-018-0327-5}, PUBLISHER = {Springer International}, ADDRESS = {Berlin}, YEAR = {2018}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, JOURNAL = {Distributed Computing}, }
Endnote
%0 Journal Article %A Chen, Ning %A Hoefer, Martin %A K&#252;nnemann, Marvin %A Lin, Chengyu %A Miao, Peihan %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Secretary Markets with Local Information : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A90C-3 %R 10.1007/s00446-018-0327-5 %D 2018 %J Distributed Computing %I Springer International %C Berlin %@ false
[56]
Y. K. Cheung, M. Hoefer, and P. Nakhe, “Tracing Equilibrium in Dynamic Markets via Distributed Adaptation,” 2018. [Online]. Available: http://arxiv.org/abs/1804.08017. (arXiv: 1804.08017)
Abstract
Competitive equilibrium is a central concept in economics with numerous applications beyond markets, such as scheduling, fair allocation of goods, or bandwidth distribution in networks. Computation of competitive equilibria has received a significant amount of interest in algorithmic game theory, mainly for the prominent case of Fisher markets. Natural and decentralized processes like tatonnement and proportional response dynamics (PRD) converge quickly towards equilibrium in large classes of Fisher markets. Almost all of the literature assumes that the market is a static environment and that the parameters of agents and goods do not change over time. In contrast, many large real-world markets are subject to frequent and dynamic changes. In this paper, we provide the first provable performance guarantees of discrete-time tatonnement and PRD in markets that are subject to perturbation over time. We analyze the prominent class of Fisher markets with CES utilities and quantify the impact of changes in supplies of goods, budgets of agents, and utility functions of agents on the convergence of tatonnement to market equilibrium. Since the equilibrium becomes a dynamic object and will rarely be reached, our analysis provides bounds expressing the distance to equilibrium that will be maintained via tatonnement and PRD updates. Our results indicate that in many cases, tatonnement and PRD follow the equilibrium rather closely and quickly recover conditions of approximate market clearing. Our approach can be generalized to analyzing a general class of Lyapunov dynamical systems with changing system parameters, which might be of independent interest.
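To make the tatonnement rule referred to in this abstract concrete, the following minimal Python sketch runs discrete-time tatonnement in a static Fisher market with Cobb-Douglas utilities (a special CES case). The multiplicative price update in the direction of excess demand is the standard rule; the dynamic-market perturbations analyzed in the paper are not modeled, and all parameter names and values are illustrative:

import numpy as np

def tatonnement_cobb_douglas(budgets, prefs, steps=200, lam=0.2):
    """Illustrative discrete-time tatonnement in a Fisher market with
    Cobb-Douglas utilities.  prefs[i, g] are the utility weights of
    agent i (rows are normalised below), budgets[i] is her budget,
    and every good has unit supply.  Prices move multiplicatively in
    the direction of excess demand."""
    prefs = prefs / prefs.sum(axis=1, keepdims=True)  # normalise weights per agent
    prices = np.ones(prefs.shape[1])
    for _ in range(steps):
        # Cobb-Douglas demand: agent i spends budgets[i] * prefs[i, g] on good g.
        spending = budgets[:, None] * prefs           # money spent per (agent, good)
        demand = spending.sum(axis=0) / prices         # units demanded per good
        excess = demand - 1.0                          # unit supply per good
        prices = prices * (1.0 + lam * excess)         # tatonnement update
    return prices

budgets = np.array([2.0, 1.0])
prefs = np.array([[3.0, 1.0], [1.0, 1.0]])
print(tatonnement_cobb_douglas(budgets, prefs))  # converges to roughly [2.0, 1.0]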
Export
BibTeX
@online{Cheung_arXiv1804.08017, TITLE = {Tracing Equilibrium in Dynamic Markets via Distributed Adaptation}, AUTHOR = {Cheung, Yun Kuen and Hoefer, Martin and Nakhe, Paresh}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1804.08017}, EPRINT = {1804.08017}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Competitive equilibrium is a central concept in economics with numerous applications beyond markets, such as scheduling, fair allocation of goods, or bandwidth distribution in networks. Computation of competitive equilibria has received a significant amount of interest in algorithmic game theory, mainly for the prominent case of Fisher markets. Natural and decentralized processes like tatonnement and proportional response dynamics (PRD) converge quickly towards equilibrium in large classes of Fisher markets. Almost all of the literature assumes that the market is a static environment and that the parameters of agents and goods do not change over time. In contrast, many large real-world markets are subject to frequent and dynamic changes. In this paper, we provide the first provable performance guarantees of discrete-time tatonnement and PRD in markets that are subject to perturbation over time. We analyze the prominent class of Fisher markets with CES utilities and quantify the impact of changes in supplies of goods, budgets of agents, and utility functions of agents on the convergence of tatonnement to market equilibrium. Since the equilibrium becomes a dynamic object and will rarely be reached, our analysis provides bounds expressing the distance to equilibrium that will be maintained via tatonnement and PRD updates. Our results indicate that in many cases, tatonnement and PRD follow the equilibrium rather closely and quickly recover conditions of approximate market clearing. Our approach can be generalized to analyzing a general class of Lyapunov dynamical systems with changing system parameters, which might be of independent interest.}, }
Endnote
%0 Report %A Cheung, Yun Kuen %A Hoefer, Martin %A Nakhe, Paresh %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Tracing Equilibrium in Dynamic Markets via Distributed Adaptation : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AE08-2 %U http://arxiv.org/abs/1804.08017 %D 2018 %X Competitive equilibrium is a central concept in economics with numerous applications beyond markets, such as scheduling, fair allocation of goods, or bandwidth distribution in networks. Computation of competitive equilibria has received a significant amount of interest in algorithmic game theory, mainly for the prominent case of Fisher markets. Natural and decentralized processes like tatonnement and proportional response dynamics (PRD) converge quickly towards equilibrium in large classes of Fisher markets. Almost all of the literature assumes that the market is a static environment and that the parameters of agents and goods do not change over time. In contrast, many large real-world markets are subject to frequent and dynamic changes. In this paper, we provide the first provable performance guarantees of discrete-time tatonnement and PRD in markets that are subject to perturbation over time. We analyze the prominent class of Fisher markets with CES utilities and quantify the impact of changes in supplies of goods, budgets of agents, and utility functions of agents on the convergence of tatonnement to market equilibrium. Since the equilibrium becomes a dynamic object and will rarely be reached, our analysis provides bounds expressing the distance to equilibrium that will be maintained via tatonnement and PRD updates. Our results indicate that in many cases, tatonnement and PRD follow the equilibrium rather closely and quickly recover conditions of approximate market clearing. Our approach can be generalized to analyzing a general class of Lyapunov dynamical systems with changing system parameters, which might be of independent interest. %K Computer Science, Computer Science and Game Theory, cs.GT
[57]
Y. K. Cheung, R. Cole, and Y. Tao, “Parallel Stochastic Asynchronous Coordinate Descent: Tight Bounds on the Possible Parallelism,” 2018. [Online]. Available: http://arxiv.org/abs/1811.05087. (arXiv: 1811.05087)
Abstract
Several works have shown linear speedup is achieved by an asynchronous parallel implementation of stochastic coordinate descent so long as there is not too much parallelism. More specifically, it is known that if all updates are of similar duration, then linear speedup is possible with up to $\Theta(\sqrt n/L_{\mathsf{res}})$ processors, where $L_{\mathsf{res}}$ is a suitable Lipschitz parameter. This paper shows the bound is tight for essentially all possible values of $L_{\mathsf{res}}$.
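As background for this abstract, the sequential baseline against which asynchronous speedup is measured is plain stochastic coordinate descent. The sketch below is a minimal Python illustration for the smooth case f(x) = 1/2 * ||Ax - b||^2 with exact coordinate steps; it does not reproduce the paper's asynchronous analysis or its Lipschitz parameter L_res:

import numpy as np

def stochastic_coordinate_descent(A, b, iters=5000, seed=0):
    """Minimal sequential stochastic coordinate descent for the smooth
    convex objective f(x) = 0.5 * ||Ax - b||^2.  Each step picks a
    random coordinate and takes an exact minimisation step along it."""
    rng = np.random.default_rng(seed)
    n = A.shape[1]
    x = np.zeros(n)
    residual = A @ x - b                     # kept up to date incrementally
    col_norms = (A ** 2).sum(axis=0)         # coordinate-wise Lipschitz constants
    for _ in range(iters):
        k = rng.integers(n)
        grad_k = A[:, k] @ residual          # partial derivative w.r.t. x_k
        step = grad_k / col_norms[k]
        x[k] -= step
        residual -= step * A[:, k]           # residual changes only along column k
    return x

A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
b = np.array([1.0, 2.0, 3.0])
print(stochastic_coordinate_descent(A, b))   # close to the least-squares solution [0, 0.5]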
Export
BibTeX
@online{corr/abs-1811-05087, TITLE = {Parallel Stochastic Asynchronous Coordinate Descent: {T}ight Bounds on the Possible Parallelism}, AUTHOR = {Cheung, Yun Kuen and Cole, Richard and Tao, Yixin}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1811.05087}, EPRINT = {1811.05087}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Several works have shown linear speedup is achieved by an asynchronous parallel implementation of stochastic coordinate descent so long as there is not too much parallelism. More specifically, it is known that if all updates are of similar duration, then linear speedup is possible with up to $\Theta(\sqrt n/L_{\mathsf{res}})$ processors, where $L_{\mathsf{res}}$ is a suitable Lipschitz parameter. This paper shows the bound is tight for essentially all possible values of $L_{\mathsf{res}}$.}, }
Endnote
%0 Report %A Cheung, Yun Kuen %A Cole, Richard %A Tao, Yixin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Parallel Stochastic Asynchronous Coordinate Descent: Tight Bounds on the Possible Parallelism : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AAF2-D %U http://arxiv.org/abs/1811.05087 %D 2018 %X Several works have shown linear speedup is achieved by an asynchronous parallel implementation of stochastic coordinate descent so long as there is not too much parallelism. More specifically, it is known that if all updates are of similar duration, then linear speedup is possible with up to $\Theta(\sqrt n/L_{\mathsf{res}})$ processors, where $L_{\mathsf{res}}$ is a suitable Lipschitz parameter. This paper shows the bound is tight for essentially all possible values of $L_{\mathsf{res}}$. %K Mathematics, Optimization and Control, math.OC,Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[58]
Y. K. Cheung, “Multiplicative Weights Updates with Constant Step-Size in Graphical Constant-Sum Games,” in Advances in Neural Information Processing Systems 31 (NIPS 2018), Montréal, Canada, 2018.
Export
BibTeX
@inproceedings{NeurIPS/Cheung18, TITLE = {Multiplicative Weights Updates with Constant Step-Size in Graphical Constant-Sum Games}, AUTHOR = {Cheung, Yun Kuen}, LANGUAGE = {eng}, PUBLISHER = {Curran Associates}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Advances in Neural Information Processing Systems 31 (NIPS 2018)}, EDITOR = {Bengio, S. and Wallach, H. and Larochelle, H. and Grauman, K. and Cesa-Bianchi, N. and Garnett, R.}, PAGES = {3532--3542}, ADDRESS = {Montr{\'e}al, Canada}, }
Endnote
%0 Conference Proceedings %A Cheung, Yun Kuen %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Multiplicative Weights Updates with Constant Step-Size in Graphical Constant-Sum Games : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AB07-6 %D 2018 %B Thirty-second Conference on Neural Information Processing Systems %Z date of event: 2018-12-02 - 2018-12-08 %C Montr&#233;al, Canada %B Advances in Neural Information Processing Systems 31 %E Bengio, S.; Wallach, H.; Larochelle, H.; Grauman, K.; Cesa-Bianchi, N.; Garnett, R. %P 3532 - 3542 %I Curran Associates %U http://papers.nips.cc/paper/7612-multiplicative-weights-updates-with-constant-step-size-in-graphical-constant-sum-games.pdf
[59]
Y. K. Cheung, R. Cole, and Y. Tao, “Dynamics of Distributed Updating in Fisher Markets,” in ACM EC’18, Nineteenth ACM Conference on Economics and Computation, Ithaca, NY, USA, 2018.
Export
BibTeX
@inproceedings{EC/CCT18, TITLE = {Dynamics of Distributed Updating in {F}isher Markets}, AUTHOR = {Cheung, Yun Kuen and Cole, Richard and Tao, Yixin}, LANGUAGE = {eng}, ISBN = {978-1-4503-5829-3}, DOI = {10.1145/3219166.3219189}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {ACM EC'18, Nineteenth ACM Conference on Economics and Computation}, PAGES = {351--368}, ADDRESS = {Ithaca, NY, USA}, }
Endnote
%0 Conference Proceedings %A Cheung, Yun Kuen %A Cole, Richard %A Tao, Yixin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Dynamics of Distributed Updating in Fisher Markets : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AAE9-8 %R 10.1145/3219166.3219189 %D 2018 %B Nineteenth ACM Conference on Economics and Computation %Z date of event: 2018-06-18 - 2018-06-22 %C Ithaca, NY, USA %B ACM EC'18 %P 351 - 368 %I ACM %@ 978-1-4503-5829-3
[60]
Y. K. Cheung and R. Cole, “Amortized Analysis of Asynchronous Price Dynamics,” in 26th Annual European Symposium on Algorithms (ESA 2018), Helsinki, Finland, 2018.
Export
BibTeX
@inproceedings{Cheung_ESA2018, TITLE = {Amortized Analysis of Asynchronous Price Dynamics}, AUTHOR = {Cheung, Yun Kuen and Cole, Richard}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-081-1}, URL = {urn:nbn:de:0030-drops-94812}, DOI = {10.4230/LIPIcs.ESA.2018.18}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {26th Annual European Symposium on Algorithms (ESA 2018)}, EDITOR = {Azar, Yossi and Bast, Hannah and Herman, Grzegorz}, PAGES = {1--15}, EID = {18}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {112}, ADDRESS = {Helsinki, Finland}, }
Endnote
%0 Conference Proceedings %A Cheung, Yun Kuen %A Cole, Richard %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Amortized Analysis of Asynchronous Price Dynamics : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AAEE-3 %R 10.4230/LIPIcs.ESA.2018.18 %U urn:nbn:de:0030-drops-94812 %D 2018 %B 26th Annual European Symposium on Algorithms %Z date of event: 2018-08-20 - 2018-08-22 %C Helsinki, Finland %B 26th Annual European Symposium on Algorithms %E Azar, Yossi; Bast, Hannah; Herman, Grzegorz %P 1 - 15 %Z sequence number: 18 %I Schloss Dagstuhl %@ 978-3-95977-081-1 %B Leibniz International Proceedings in Informatics %N 112 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9481/http://drops.dagstuhl.de/doku/urheberrecht1.html
[61]
Y. K. Cheung, R. Cole, and Y. Tao, “(Near) Optimal Parallelism Bound for Fully Asynchronous Coordinate Descent with Linear Speedup,” 2018. [Online]. Available: http://arxiv.org/abs/1811.03254. (arXiv: 1811.03254)
Abstract
When solving massive optimization problems in areas such as machine learning, it is a common practice to seek speedup via massive parallelism. However, especially in an asynchronous environment, there are limits on the possible parallelism. Accordingly, we seek tight bounds on the viable parallelism in asynchronous implementations of coordinate descent. We focus on asynchronous coordinate descent (ACD) algorithms on convex functions $F:\mathbb{R}^n \rightarrow \mathbb{R}$ of the form $$F(x) = f(x) ~+~ \sum_{k=1}^n \Psi_k(x_k),$$ where $f:\mathbb{R}^n \rightarrow \mathbb{R}$ is a smooth convex function, and each $\Psi_k:\mathbb{R} \rightarrow \mathbb{R}$ is a univariate and possibly non-smooth convex function. Our approach is to quantify the shortfall in progress compared to the standard sequential stochastic gradient descent. This leads to a truly simple yet optimal analysis of the standard stochastic ACD in a partially asynchronous environment, which already generalizes and improves on the bounds in prior work. We also give a considerably more involved analysis for general asynchronous environments in which the only constraint is that each update can overlap with at most $q$ others, where $q$ is at most the number of processors times the ratio in the lengths of the longest and shortest updates. The main technical challenge is to demonstrate linear speedup in the latter environment. This stems from the subtle interplay of asynchrony and randomization. This improves Liu and Wright's (SIOPT'15) lower bound on the maximum degree of parallelism almost quadratically, and we show that our new bound is almost optimal.
Export
BibTeX
@online{corr/abs-1811-03254, TITLE = {(Near) Optimal Parallelism Bound for Fully Asynchronous Coordinate Descent with Linear Speedup}, AUTHOR = {Cheung, Yun Kuen and Cole, Richard and Tao, Yixin}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1811.03254}, EPRINT = {1811.03254}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {When solving massive optimization problems in areas such as machine learning, it is a common practice to seek speedup via massive parallelism. However, especially in an asynchronous environment, there are limits on the possible parallelism. Accordingly, we seek tight bounds on the viable parallelism in asynchronous implementations of coordinate descent. We focus on asynchronous coordinate descent (ACD) algorithms on convex functions $F:\mathbb{R}^n \rightarrow \mathbb{R}$ of the form $$F(x) = f(x) ~+~ \sum_{k=1}^n \Psi_k(x_k),$$ where $f:\mathbb{R}^n \rightarrow \mathbb{R}$ is a smooth convex function, and each $\Psi_k:\mathbb{R} \rightarrow \mathbb{R}$ is a univariate and possibly non-smooth convex function. Our approach is to quantify the shortfall in progress compared to the standard sequential stochastic gradient descent. This leads to a truly simple yet optimal analysis of the standard stochastic ACD in a partially asynchronous environment, which already generalizes and improves on the bounds in prior work. We also give a considerably more involved analysis for general asynchronous environments in which the only constraint is that each update can overlap with at most $q$ others, where $q$ is at most the number of processors times the ratio in the lengths of the longest and shortest updates. The main technical challenge is to demonstrate linear speedup in the latter environment. This stems from the subtle interplay of asynchrony and randomization. This improves Liu and Wright's (SIOPT'15) lower bound on the maximum degree of parallelism almost quadratically, and we show that our new bound is almost optimal.}, }
Endnote
%0 Report %A Cheung, Yun Kuen %A Cole, Richard %A Tao, Yixin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T (Near) Optimal Parallelism Bound for Fully Asynchronous Coordinate Descent with Linear Speedup : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AAF5-A %U http://arxiv.org/abs/1811.03254 %D 2018 %X When solving massive optimization problems in areas such as machine learning, it is a common practice to seek speedup via massive parallelism. However, especially in an asynchronous environment, there are limits on the possible parallelism. Accordingly, we seek tight bounds on the viable parallelism in asynchronous implementations of coordinate descent. We focus on asynchronous coordinate descent (ACD) algorithms on convex functions $F:\mathbb{R}^n \rightarrow \mathbb{R}$ of the form $$F(x) = f(x) ~+~ \sum_{k=1}^n \Psi_k(x_k),$$ where $f:\mathbb{R}^n \rightarrow \mathbb{R}$ is a smooth convex function, and each $\Psi_k:\mathbb{R} \rightarrow \mathbb{R}$ is a univariate and possibly non-smooth convex function. Our approach is to quantify the shortfall in progress compared to the standard sequential stochastic gradient descent. This leads to a truly simple yet optimal analysis of the standard stochastic ACD in a partially asynchronous environment, which already generalizes and improves on the bounds in prior work. We also give a considerably more involved analysis for general asynchronous environments in which the only constraint is that each update can overlap with at most $q$ others, where $q$ is at most the number of processors times the ratio in the lengths of the longest and shortest updates. The main technical challenge is to demonstrate linear speedup in the latter environment. This stems from the subtle interplay of asynchrony and randomization. This improves Liu and Wright's (SIOPT'15) lower bound on the maximum degree of parallelism almost quadratically, and we show that our new bound is almost optimal. %K Mathematics, Optimization and Control, math.OC,Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[62]
Y. K. Cheung, “Steiner Point Removal - Distant Terminals Don’t (Really) Bother,” in Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018), New Orleans, LA, USA, 2018.
Export
BibTeX
@inproceedings{Cheung_SODA18, TITLE = {{S}teiner Point Removal -- Distant Terminals Don't (Really) Bother}, AUTHOR = {Cheung, Yun Kuen}, LANGUAGE = {eng}, ISBN = {978-1-61197-503-1}, DOI = {10.1137/1.9781611975031.89}, PUBLISHER = {SIAM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018)}, EDITOR = {Czumaj, Artur}, PAGES = {1353--1360}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A Cheung, Yun Kuen %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Steiner Point Removal - Distant Terminals Don't (Really) Bother : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AA8C-1 %R 10.1137/1.9781611975031.89 %D 2018 %B Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %Z date of event: 2018-01-07 - 2018-01-10 %C New Orleans, LA, USA %B Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %E Czumaj, Artur %P 1353 - 1360 %I SIAM %@ 978-1-61197-503-1
[63]
L. Chiantini, J. D. Hauenstein, C. Ikenmeyer, J. M. Landsberg, and G. Ottaviani, “Polynomials and the Exponent of Matrix Multiplication,” Bulletin of the London Mathematical Society, vol. 50, no. 3, 2018.
Export
BibTeX
@article{Chaintini2018, TITLE = {Polynomials and the Exponent of Matrix Multiplication}, AUTHOR = {Chiantini, Luca and Hauenstein, Jonathan D. and Ikenmeyer, Christian and Landsberg, Joseph M. and Ottaviani, Giorgio}, LANGUAGE = {eng}, ISSN = {0024-6093}, DOI = {10.1112/blms.12147}, PUBLISHER = {London Mathematical Society}, ADDRESS = {London}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Bulletin of the London Mathematical Society}, VOLUME = {50}, NUMBER = {3}, PAGES = {369--389}, }
Endnote
%0 Journal Article %A Chiantini, Luca %A Hauenstein, Jonathan D. %A Ikenmeyer, Christian %A Landsberg, Joseph M. %A Ottaviani, Giorgio %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Polynomials and the Exponent of Matrix Multiplication : %G eng %U http://hdl.handle.net/21.11116/0000-0001-88D0-A %R 10.1112/blms.12147 %7 2018 %D 2018 %J Bulletin of the London Mathematical Society %V 50 %N 3 %& 369 %P 369 - 389 %I London Mathematical Society %C London %@ false
[64]
A. Choudhary, S. Kachanovich, and M. Wintraecken, “Coxeter Triangulations Have Good Quality,” in EuroCG 18 Extended Abstracts, Berlin, Germany, 2018.
Export
BibTeX
@inproceedings{Choudhary-coxeter, TITLE = {Coxeter Triangulations Have Good Quality}, AUTHOR = {Choudhary, Aruni and Kachanovich, Siargey and Wintraecken, Mathijs}, LANGUAGE = {eng}, URL = {https://conference.imp.fu-berlin.de/eurocg18/download/eurocg_proc.pdf}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {EuroCG 18 Extended Abstracts}, PAGES = {37--42}, ADDRESS = {Berlin, Germany}, }
Endnote
%0 Conference Proceedings %A Choudhary, Aruni %A Kachanovich, Siargey %A Wintraecken, Mathijs %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Coxeter Triangulations Have Good Quality : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5C4-E %D 2018 %B 34th European Workshop on Computational Geometry %Z date of event: 2018-03-21 - 2018-03-23 %C Berlin, Germany %B EuroCG 18 Extended Abstracts %P 37 - 42 %U https://conference.imp.fu-berlin.de/eurocg18/download/eurocg_proc.pdf
[65]
G. Christodoulou and A. Sgouritsa, “Designing Networks with Good Equilibria under Uncertainty,” SIAM Journal on Computing. (Accepted/in press)
Export
BibTeX
@article{Christodoulou2019SICOMP, TITLE = {Designing Networks with Good Equilibria under Uncertainty}, AUTHOR = {Christodoulou, George and Sgouritsa, Alkmini}, LANGUAGE = {eng}, ISSN = {0097-5397}, PUBLISHER = {Society for Industrial and Applied Mathematics.}, ADDRESS = {Philadelphia}, YEAR = {2018}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, JOURNAL = {SIAM Journal on Computing}, }
Endnote
%0 Journal Article %A Christodoulou, George %A Sgouritsa, Alkmini %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Designing Networks with Good Equilibria under Uncertainty : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AEC7-A %D 2018 %J SIAM Journal on Computing %I Society for Industrial and Applied Mathematics. %C Philadelphia %@ false
[66]
A. Clementi, M. Ghaffari, L. Gualà, E. Natale, F. Pasquale, and G. Scornavacca, “A Tight Analysis of the Parallel Undecided-State Dynamics with Two Colors,” in 43rd International Symposium on Mathematical Foundations of Computer Science (MFCS 2018), Liverpool, UK, 2018.
Export
BibTeX
@inproceedings{Clementi_MFCS2018, TITLE = {A Tight Analysis of the Parallel Undecided-State Dynamics with Two Colors}, AUTHOR = {Clementi, Andrea and Ghaffari, Mohsen and Gual{\`a}, Luciano and Natale, Emanuele and Pasquale, Francesco and Scornavacca, Giacomo}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-086-6}, URL = {urn:nbn:de:0030-drops-96107}, DOI = {10.4230/LIPIcs.MFCS.2018.28}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {43rd International Symposium on Mathematical Foundations of Computer Science (MFCS 2018)}, EDITOR = {Potapov, Igor and Spirakis, Paul and Worrell, James}, PAGES = {1--15}, EID = {28}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {117}, ADDRESS = {Liverpool, UK}, }
Endnote
%0 Conference Proceedings %A Clementi, Andrea %A Ghaffari, Mohsen %A Gual&#224;, Luciano %A Natale, Emanuele %A Pasquale, Francesco %A Scornavacca, Giacomo %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T A Tight Analysis of the Parallel Undecided-State Dynamics with Two Colors : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A96C-7 %R 10.4230/LIPIcs.MFCS.2018.28 %U urn:nbn:de:0030-drops-96107 %D 2018 %B 43rd International Symposium on Mathematical Foundations of Computer Science %Z date of event: 2018-08-27 - 2018-08-31 %C Liverpool, UK %B 43rd International Symposium on Mathematical Foundations of Computer Science %E Potapov, Igor; Spirakis, Paul; Worrell, James %P 1 - 15 %Z sequence number: 28 %I Schloss Dagstuhl %@ 978-3-95977-086-6 %B Leibniz International Proceedings in Informatics %N 117 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9610/http://drops.dagstuhl.de/doku/urheberrecht1.html
[67]
A. Clementi, L. Gualà, E. Natale, F. Pasquale, G. Scornavacca, and L. Trevisan, “Consensus Needs Broadcast in Noiseless Models but can be Exponentially Easier in the Presence of Noise,” 2018. [Online]. Available: http://arxiv.org/abs/1807.05626. (arXiv: 1807.05626)
Abstract
Consensus and Broadcast are two fundamental problems in distributed computing, whose solutions have several applications. Intuitively, Consensus should be no harder than Broadcast, and this can be rigorously established in several models. Can Consensus be easier than Broadcast? In models that allow noiseless communication, we prove a reduction of (a suitable variant of) Broadcast to binary Consensus, that preserves the communication model and all complexity parameters such as randomness, number of rounds, communication per round, etc., while there is a loss in the success probability of the protocol. Using this reduction, we get, among other applications, the first logarithmic lower bound on the number of rounds needed to achieve Consensus in the uniform GOSSIP model on the complete graph. The lower bound is tight and, in this model, Consensus and Broadcast are equivalent. We then turn to distributed models with noisy communication channels that have been studied in the context of some bio-inspired systems. In such models, only one noisy bit is exchanged when a communication channel is established between two nodes, and so one cannot easily simulate a noiseless protocol by using error-correcting codes. An $\Omega(\epsilon^{-2} n)$ lower bound on the number of rounds needed for Broadcast is proved by Boczkowski et al. [PLOS Comp. Bio. 2018] in one such model (noisy uniform PULL, where $\epsilon$ is a parameter that measures the amount of noise). In such model, we prove a new $\Theta(\epsilon^{-2} n \log n)$ bound for Broadcast and a $\Theta(\epsilon^{-2} \log n)$ bound for binary Consensus, thus establishing an exponential gap between the number of rounds necessary for Consensus versus Broadcast.
Export
BibTeX
@online{Clementi_arXiv1807.05626, TITLE = {Consensus Needs Broadcast in Noiseless Models but can be Exponentially Easier in the Presence of Noise}, AUTHOR = {Clementi, Andrea and Gual{\`a}, Luciano and Natale, Emanuele and Pasquale, Francesco and Scornavacca, Giacomo and Trevisan, Luca}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1807.05626}, EPRINT = {1807.05626}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Consensus and Broadcast are two fundamental problems in distributed computing, whose solutions have several applications. Intuitively, Consensus should be no harder than Broadcast, and this can be rigorously established in several models. Can Consensus be easier than Broadcast? In models that allow noiseless communication, we prove a reduction of (a suitable variant of) Broadcast to binary Consensus, that preserves the communication model and all complexity parameters such as randomness, number of rounds, communication per round, etc., while there is a loss in the success probability of the protocol. Using this reduction, we get, among other applications, the first logarithmic lower bound on the number of rounds needed to achieve Consensus in the uniform GOSSIP model on the complete graph. The lower bound is tight and, in this model, Consensus and Broadcast are equivalent. We then turn to distributed models with noisy communication channels that have been studied in the context of some bio-inspired systems. In such models, only one noisy bit is exchanged when a communication channel is established between two nodes, and so one cannot easily simulate a noiseless protocol by using error-correcting codes. An $\Omega(\epsilon^{-2} n)$ lower bound on the number of rounds needed for Broadcast is proved by Boczkowski et al. [PLOS Comp. Bio. 2018] in one such model (noisy uniform PULL, where $\epsilon$ is a parameter that measures the amount of noise). In such model, we prove a new $\Theta(\epsilon^{-2} n \log n)$ bound for Broadcast and a $\Theta(\epsilon^{-2} \log n)$ bound for binary Consensus, thus establishing an exponential gap between the number of rounds necessary for Consensus versus Broadcast.}, }
Endnote
%0 Report %A Clementi, Andrea %A Gual&#224;, Luciano %A Natale, Emanuele %A Pasquale, Francesco %A Scornavacca, Giacomo %A Trevisan, Luca %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Consensus Needs Broadcast in Noiseless Models but can be Exponentially Easier in the Presence of Noise : %G eng %U http://hdl.handle.net/21.11116/0000-0002-B985-7 %U http://arxiv.org/abs/1807.05626 %D 2018 %X Consensus and Broadcast are two fundamental problems in distributed computing, whose solutions have several applications. Intuitively, Consensus should be no harder than Broadcast, and this can be rigorously established in several models. Can Consensus be easier than Broadcast? In models that allow noiseless communication, we prove a reduction of (a suitable variant of) Broadcast to binary Consensus, that preserves the communication model and all complexity parameters such as randomness, number of rounds, communication per round, etc., while there is a loss in the success probability of the protocol. Using this reduction, we get, among other applications, the first logarithmic lower bound on the number of rounds needed to achieve Consensus in the uniform GOSSIP model on the complete graph. The lower bound is tight and, in this model, Consensus and Broadcast are equivalent. We then turn to distributed models with noisy communication channels that have been studied in the context of some bio-inspired systems. In such models, only one noisy bit is exchanged when a communication channel is established between two nodes, and so one cannot easily simulate a noiseless protocol by using error-correcting codes. An $\Omega(\epsilon^{-2} n)$ lower bound on the number of rounds needed for Broadcast is proved by Boczkowski et al. [PLOS Comp. Bio. 2018] in one such model (noisy uniform PULL, where $\epsilon$ is a parameter that measures the amount of noise). In such model, we prove a new $\Theta(\epsilon^{-2} n \log n)$ bound for Broadcast and a $\Theta(\epsilon^{-2} \log n)$ bound for binary Consensus, thus establishing an exponential gap between the number of rounds necessary for Consensus versus Broadcast. %K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[68]
C. Croitoru and K. Mehlhorn, “On Testing Substitutability,” Information Processing Letters, vol. 138, 2018.
Export
BibTeX
@article{Croitoru_2018, TITLE = {On Testing Substitutability}, AUTHOR = {Croitoru, Cosmina and Mehlhorn, Kurt}, LANGUAGE = {eng}, ISSN = {0020-0190}, DOI = {10.1016/j.ipl.2018.05.006}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Information Processing Letters}, VOLUME = {138}, PAGES = {19--21}, }
Endnote
%0 Journal Article %A Croitoru, Cosmina %A Mehlhorn, Kurt %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On Testing Substitutability : %G eng %U http://hdl.handle.net/21.11116/0000-0001-EE14-D %R 10.1016/j.ipl.2018.05.006 %7 2018 %D 2018 %J Information Processing Letters %V 138 %& 19 %P 19 - 21 %I Elsevier %C Amsterdam %@ false
[69]
C. Croitoru and K. Mehlhorn, “On Testing Substitutability,” 2018. [Online]. Available: http://arxiv.org/abs/1805.07642. (arXiv: 1805.07642)
Abstract
The papers~\cite{hatfimmokomi11} and~\cite{azizbrilharr13} propose algorithms for testing whether the choice function induced by a (strict) preference list of length $N$ over a universe $U$ is substitutable. The running time of these algorithms is $O(|U|^3\cdot N^3)$, respectively $O(|U|^2\cdot N^3)$. In this note we present an algorithm with running time $O(|U|^2\cdot N^2)$. Note that $N$ may be exponential in the size $|U|$ of the universe.
Export
BibTeX
@online{Croitoru_arXiv1805.07642, TITLE = {On Testing Substitutability}, AUTHOR = {Croitoru, Cosmina and Mehlhorn, Kurt}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1805.07642}, EPRINT = {1805.07642}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {The papers~\cite{hatfimmokomi11} and~\cite{azizbrilharr13} propose algorithms for testing whether the choice function induced by a (strict) preference list of length $N$ over a universe $U$ is substitutable. The running time of these algorithms is $O(|U|^3\cdot N^3)$, respectively $O(|U|^2\cdot N^3)$. In this note we present an algorithm with running time $O(|U|^2\cdot N^2)$. Note that $N$ may be exponential in the size $|U|$ of the universe.}, }
Endnote
%0 Report %A Croitoru, Cosmina %A Mehlhorn, Kurt %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On Testing Substitutability : %G eng %U http://hdl.handle.net/21.11116/0000-0002-05FA-F %U http://arxiv.org/abs/1805.07642 %D 2018 %X The papers~\cite{hatfimmokomi11} and~\cite{azizbrilharr13} propose algorithms for testing whether the choice function induced by a (strict) preference list of length $N$ over a universe $U$ is substitutable. The running time of these algorithms is $O(|U|^3\cdot N^3)$, respectively $O(|U|^2\cdot N^3)$. In this note we present an algorithm with running time $O(|U|^2\cdot N^2)$. Note that $N$ may be exponential in the size $|U|$ of the universe. %K Computer Science, Data Structures and Algorithms, cs.DS,econ.EM
[70]
E. Cruciani, E. Natale, A. Nusser, and G. Scornavacca, “On the Emergent Behavior of the 2-Choices Dynamics,” in Proceedings of the 19th Italian Conference on Theoretical Computer Science (ICTCS 2018), Urbino, Italy, 2018.
Export
BibTeX
@inproceedings{Cruciano_ICTCS2018, TITLE = {On the Emergent Behavior of the 2-Choices Dynamics}, AUTHOR = {Cruciani, Emilio and Natale, Emanuele and Nusser, Andr{\'e} and Scornavacca, Giacomo}, LANGUAGE = {eng}, URL = {urn:nbn:de:0074-2243-4}, PUBLISHER = {CEUR-WS}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Proceedings of the 19th Italian Conference on Theoretical Computer Science (ICTCS 2018)}, EDITOR = {Aldini, Alessandro and Bernardo, Marco}, SERIES = {CEUR Workshop Proceedings}, VOLUME = {2243}, ADDRESS = {Urbino, Italy}, }
Endnote
%0 Conference Proceedings %A Cruciani, Emilio %A Natale, Emanuele %A Nusser, Andr&#233; %A Scornavacca, Giacomo %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T On the Emergent Behavior of the 2-Choices Dynamics : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A44E-E %D 2018 %B 19th Italian Conference on Theoretical Computer Science %Z date of event: 2018-09-18 - 2018-09-20 %C Urbino, Italy %B Proceedings of the 19th Italian Conference on Theoretical Computer Science %E Aldini, Alessandro; Bernardo, Marco %I CEUR-WS %B CEUR Workshop Proceedings %N 2243 %U http://ceur-ws.org/Vol-2243/paper4.pdf
[71]
E. Cruciani, E. Natale, A. Nusser, and G. Scornavacca, “Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks,” 2018. [Online]. Available: http://arxiv.org/abs/1804.07223. (arXiv: 1804.07223)
Abstract
Consider the following process on a network: Each agent initially holds either opinion blue or red; then, in each round, each agent looks at two random neighbors and, if the two have the same opinion, the agent adopts it. This process is known as the 2-Choices dynamics and is arguably the most basic non-trivial opinion dynamics modeling voting behavior on social networks. Despite its apparent simplicity, 2-Choices has been analytically characterized only on networks with a strong expansion property -- under assumptions on the initial configuration that establish it as a fast majority consensus protocol. In this work, we aim at contributing to the understanding of the 2-Choices dynamics by considering its behavior on a class of networks with core-periphery structure, a well-known topological assumption in social networks. In a nutshell, assume that a densely-connected subset of agents, the core, holds a different opinion from the rest of the network, the periphery. Then, depending on the strength of the cut between the core and the periphery, a phase-transition phenomenon occurs: Either the core's opinion rapidly spreads among the rest of the network, or a metastability phase takes place, in which both opinions coexist in the network for superpolynomial time. The interest of our result is twofold. On the one hand, by looking at the 2-Choices dynamics as a simplistic model of competition among opinions in social networks, our theorem sheds light on the influence of the core on the rest of the network, as a function of the core's connectivity towards the latter. On the other hand, to the best of our knowledge, we provide the first analytical result which shows a heterogeneous behavior of a simple dynamics as a function of structural parameters of the network. Finally, we validate our theoretical predictions with extensive experiments on real networks.
Export
BibTeX
@online{Cruciano_arXiv1804.07223, TITLE = {Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks}, AUTHOR = {Cruciani, Emilio and Natale, Emanuele and Nusser, Andr{\'e} and Scornavacca, Giacomo}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1804.07223}, EPRINT = {1804.07223}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Consider the following process on a network: Each agent initially holds either opinion blue or red; then, in each round, each agent looks at two random neighbors and, if the two have the same opinion, the agent adopts it. This process is known as the 2-Choices dynamics and is arguably the most basic non-trivial opinion dynamics modeling voting behavior on social networks. Despite its apparent simplicity, 2-Choices has been analytically characterized only on networks with a strong expansion property -- under assumptions on the initial configuration that establish it as a fast majority consensus protocol. In this work, we aim at contributing to the understanding of the 2-Choices dynamics by considering its behavior on a class of networks with core-periphery structure, a well-known topological assumption in social networks. In a nutshell, assume that a densely-connected subset of agents, the core, holds a different opinion from the rest of the network, the periphery. Then, depending on the strength of the cut between the core and the periphery, a phase-transition phenomenon occurs: Either the core's opinion rapidly spreads among the rest of the network, or a metastability phase takes place, in which both opinions coexist in the network for superpolynomial time. The interest of our result is twofold. On the one hand, by looking at the 2-Choices dynamics as a simplistic model of competition among opinions in social networks, our theorem sheds light on the influence of the core on the rest of the network, as a function of the core's connectivity towards the latter. On the other hand, to the best of our knowledge, we provide the first analytical result which shows a heterogeneous behavior of a simple dynamics as a function of structural parameters of the network. Finally, we validate our theoretical predictions with extensive experiments on real networks.}, }
Endnote
%0 Report %A Cruciani, Emilio %A Natale, Emanuele %A Nusser, Andr&#233; %A Scornavacca, Giacomo %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A446-6 %U http://arxiv.org/abs/1804.07223 %D 2018 %X Consider the following process on a network: Each agent initially holds either opinion blue or red; then, in each round, each agent looks at two random neighbors and, if the two have the same opinion, the agent adopts it. This process is known as the 2-Choices dynamics and is arguably the most basic non-trivial opinion dynamics modeling voting behavior on social networks. Despite its apparent simplicity, 2-Choices has been analytically characterized only on networks with a strong expansion property -- under assumptions on the initial configuration that establish it as a fast majority consensus protocol. In this work, we aim at contributing to the understanding of the 2-Choices dynamics by considering its behavior on a class of networks with core-periphery structure, a well-known topological assumption in social networks. In a nutshell, assume that a densely-connected subset of agents, the core, holds a different opinion from the rest of the network, the periphery. Then, depending on the strength of the cut between the core and the periphery, a phase-transition phenomenon occurs: Either the core's opinion rapidly spreads among the rest of the network, or a metastability phase takes place, in which both opinions coexist in the network for superpolynomial time. The interest of our result is twofold. On the one hand, by looking at the 2-Choices dynamics as a simplistic model of competition among opinions in social networks, our theorem sheds light on the influence of the core on the rest of the network, as a function of the core's connectivity towards the latter. On the other hand, to the best of our knowledge, we provide the first analytical result which shows a heterogeneous behavior of a simple dynamics as a function of structural parameters of the network. Finally, we validate our theoretical predictions with extensive experiments on real networks. %K cs.SI, Physics, Physics and Society, physics.soc-ph
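For readers unfamiliar with the process named in the entry above, here is a minimal simulation sketch of the 2-Choices dynamics as described in the abstract: each agent holds opinion 0 (blue) or 1 (red), and in every synchronous round it samples two random neighbors (with replacement, for simplicity) and adopts their opinion only if the two samples agree. The graph construction and stopping rule are illustrative assumptions, not the authors' experimental setup.

```python
# Illustrative sketch of the 2-Choices dynamics (not the authors' code).
import random

def two_choices_round(adj, opinions):
    """Perform one synchronous round; adj is an adjacency list, opinions a list of 0/1."""
    new_opinions = list(opinions)
    for v, neighbors in enumerate(adj):
        if len(neighbors) < 2:
            continue                        # agents without enough neighbors keep their opinion
        u, w = random.choice(neighbors), random.choice(neighbors)
        if opinions[u] == opinions[w]:      # the two sampled neighbors agree
            new_opinions[v] = opinions[u]
    return new_opinions

def simulate(adj, opinions, rounds=100):
    for _ in range(rounds):
        opinions = two_choices_round(adj, opinions)
        if len(set(opinions)) == 1:         # consensus reached
            break
    return opinions

# Toy usage: a complete graph on 10 nodes with a red minority of size 3.
n = 10
adj = [[u for u in range(n) if u != v] for v in range(n)]
init = [1] * 3 + [0] * (n - 3)
print(simulate(adj, init))
```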
[72]
E. Cruciani, E. Natale, and G. Scornavacca, “On the Metastability of Quadratic Majority Dynamics on Clustered Graphs and its Biological Implications,” Bulletin of the EATCS, vol. 125, 2018.
Export
BibTeX
@article{Cruciani_EATCS2018b, TITLE = {On the Metastability of Quadratic Majority Dynamics on Clustered Graphs and its Biological Implications}, AUTHOR = {Cruciani, Emilio and Natale, Emanuele and Scornavacca, Giacomo}, LANGUAGE = {eng}, ISSN = {0252-9742}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Bulletin of the EATCS}, VOLUME = {125}, EID = {535}, }
Endnote
%0 Journal Article %A Cruciani, Emilio %A Natale, Emanuele %A Scornavacca, Giacomo %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T On the Metastability of Quadratic Majority Dynamics on Clustered Graphs and its Biological Implications : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A94B-C %7 2018 %D 2018 %J Bulletin of the EATCS %O EATCS %V 125 %Z sequence number: 535 %@ false
[73]
E. Cruciani, E. Natale, A. Nusser, and G. Scornavacca, “Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks,” in AAMAS’18, 17th International Conference on Autonomous Agents and MultiAgent Systems, Stockholm, Sweden, 2018.
Export
BibTeX
@inproceedings{Cruciani_AAMAS2018, TITLE = {Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks}, AUTHOR = {Cruciani, Emilio and Natale, Emanuele and Nusser, Andr{\'e} and Scornavacca, Giacomo}, LANGUAGE = {eng}, ISBN = {978-1-4503-5649-7}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {AAMAS'18, 17th International Conference on Autonomous Agents and MultiAgent Systems}, PAGES = {777--785}, ADDRESS = {Stockholm, Sweden}, }
Endnote
%0 Conference Proceedings %A Cruciani, Emilio %A Natale, Emanuele %A Nusser, Andr&#233; %A Scornavacca, Giacomo %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A47E-8 %D 2018 %B 17th International Conference on Autonomous Agents and MultiAgent Systems %Z date of event: 2018-07-10 - 2018-07-15 %C Stockholm, Sweden %B AAMAS'18 %P 777 - 785 %I ACM %@ 978-1-4503-5649-7
[74]
E. Cruciani, E. Natale, A. Nusser, and G. Scornavacca, “Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks,” Bulletin of the EATCS, vol. 125, 2018.
Export
BibTeX
@article{Cruciani_EATCS2018, TITLE = {Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks}, AUTHOR = {Cruciani, Emilio and Natale, Emanuele and Nusser, Andr{\'e} and Scornavacca, Giacomo}, LANGUAGE = {eng}, ISSN = {0252-9742}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Bulletin of the EATCS}, VOLUME = {125}, EID = {542}, }
Endnote
%0 Journal Article %A Cruciani, Emilio %A Natale, Emanuele %A Nusser, Andr&#233; %A Scornavacca, Giacomo %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Phase Transition of the 2-Choices Dynamics on Core-Periphery Networks : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A48F-4 %7 2018 %D 2018 %J Bulletin of the EATCS %O EATCS %V 125 %Z sequence number: 542 %@ false
[75]
M. Cygan, S. Kratsch, and J. Nederlof, “Fast Hamiltonicity Checking Via Bases of Perfect Matchings,” Journal of the ACM, vol. 65, no. 3, 2018.
Export
BibTeX
@article{Cygan2018, TITLE = {Fast {Hamiltonicity} Checking Via Bases of Perfect Matchings}, AUTHOR = {Cygan, Marek and Kratsch, Stefan and Nederlof, Jesper}, LANGUAGE = {eng}, ISSN = {0004-5411}, DOI = {10.1145/3148227}, PUBLISHER = {Association for Computing Machinery, Inc.}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Journal of the ACM}, VOLUME = {65}, NUMBER = {3}, EID = {12}, }
Endnote
%0 Journal Article %A Cygan, Marek %A Kratsch, Stefan %A Nederlof, Jesper %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Fast Hamiltonicity Checking Via Bases of Perfect Matchings : %G eng %U http://hdl.handle.net/21.11116/0000-0001-7AE5-4 %R 10.1145/3148227 %7 2018 %D 2018 %J Journal of the ACM %V 65 %N 3 %Z sequence number: 12 %I Association for Computing Machinery, Inc. %C New York, NY %@ false
[76]
M. Cygan, G. Kortsarz, and B. Laekhanukit, “On Subexponential Running Times for Approximating Directed Steiner Tree and Related Problems,” 2018. [Online]. Available: http://arxiv.org/abs/1811.00710. (arXiv: 1811.00710)
Abstract
This paper concerns proving almost tight (super-polynomial) running times, for achieving desired approximation ratios for various problems. To illustrate, the question we study, let us consider the Set-Cover problem with n elements and m sets. Now we specify our goal to approximate Set-Cover to a factor of (1-d)ln n, for a given parameter 0<d<1. What is the best possible running time for achieving such approximation? This question was answered implicitly in the work of Moshkovitz [Theory of Computing, 2015]: Assuming both the Projection Games Conjecture (PGC) and the Exponential-Time Hypothesis (ETH), any ((1-d) ln n)-approximation algorithm for Set-Cover must run in time >= 2^{n^{c d}}, for some constant 0<d<1. We study the questions along this line. First, we show that under ETH and PGC any ((1-d) \ln n)-approximation for Set-Cover requires 2^{n^{d}}-time. This (almost) matches the running time of 2^{O(n^d)} for approximating Set-Cover to a factor (1-d) ln n by Cygan et al. [IPL, 2009]. Our result is tight up to the constant multiplying the n^{d} terms in the exponent. This lower bound applies to all of its generalizations, e.g., Group Steiner Tree (GST), Directed Steiner (DST), Covering Steiner Tree (CST), Connected Polymatroid (CP). We also show that in almost exponential time, these problems reduce to Set-Cover: We show (1-d)ln n approximation algorithms for all these problems that run in time 2^{n^{d \log n } poly(m). We also study log^{2-d}n approximation for GST. Chekuri-Pal [FOCS, 2005] showed that GST admits (log^{2-d}n)-approximation in time exp(2^{log^{d+o(1)}n}), for any 0 < d < 1. We show the lower bound of GST: any (log^{2-d}n)-approximation for GST must run in time >= exp((1+o(1)){log^{d-c}n}), for any c>0, unless the ETH is false. Our result follows by analyzing the work of Halperin and Krauthgamer [STOC, 2003]. The same lower and upper bounds hold for CST.
Export
BibTeX
@online{Cygan_arXiv1811.00710, TITLE = {{On Subexponential Running Times for Approximating Directed Steiner Tree and Related Problems}}, AUTHOR = {Cygan, Marek and Kortsarz, Guy and Laekhanukit, Bundit}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1811.00710}, EPRINT = {1811.00710}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {This paper concerns proving almost tight (super-polynomial) running times, for achieving desired approximation ratios for various problems. To illustrate, the question we study, let us consider the Set-Cover problem with n elements and m sets. Now we specify our goal to approximate Set-Cover to a factor of (1-d)ln n, for a given parameter 0<d<1. What is the best possible running time for achieving such approximation? This question was answered implicitly in the work of Moshkovitz [Theory of Computing, 2015]: Assuming both the Projection Games Conjecture (PGC) and the Exponential-Time Hypothesis (ETH), any ((1-d) ln n)-approximation algorithm for Set-Cover must run in time >= 2^{n^{c d}}, for some constant 0<d<1. We study the questions along this line. First, we show that under ETH and PGC any ((1-d) \ln n)-approximation for Set-Cover requires 2^{n^{d}}-time. This (almost) matches the running time of 2^{O(n^d)} for approximating Set-Cover to a factor (1-d) ln n by Cygan et al. [IPL, 2009]. Our result is tight up to the constant multiplying the n^{d} terms in the exponent. This lower bound applies to all of its generalizations, e.g., Group Steiner Tree (GST), Directed Steiner (DST), Covering Steiner Tree (CST), Connected Polymatroid (CP). We also show that in almost exponential time, these problems reduce to Set-Cover: We show (1-d)ln n approximation algorithms for all these problems that run in time 2^{n^{d \log n } poly(m). We also study log^{2-d}n approximation for GST. Chekuri-Pal [FOCS, 2005] showed that GST admits (log^{2-d}n)-approximation in time exp(2^{log^{d+o(1)}n}), for any 0 < d < 1. We show the lower bound of GST: any (log^{2-d}n)-approximation for GST must run in time >= exp((1+o(1)){log^{d-c}n}), for any c>0, unless the ETH is false. Our result follows by analyzing the work of Halperin and Krauthgamer [STOC, 2003]. The same lower and upper bounds hold for CST.}, }
Endnote
%0 Report %A Cygan, Marek %A Kortsarz, Guy %A Laekhanukit, Bundit %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On Subexponential Running Times for Approximating Directed Steiner Tree and Related Problems : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A85B-B %U http://arxiv.org/abs/1811.00710 %D 2018 %X This paper concerns proving almost tight (super-polynomial) running times, for achieving desired approximation ratios for various problems. To illustrate, the question we study, let us consider the Set-Cover problem with n elements and m sets. Now we specify our goal to approximate Set-Cover to a factor of (1-d)ln n, for a given parameter 0<d<1. What is the best possible running time for achieving such approximation? This question was answered implicitly in the work of Moshkovitz [Theory of Computing, 2015]: Assuming both the Projection Games Conjecture (PGC) and the Exponential-Time Hypothesis (ETH), any ((1-d) ln n)-approximation algorithm for Set-Cover must run in time >= 2^{n^{c d}}, for some constant 0<d<1. We study the questions along this line. First, we show that under ETH and PGC any ((1-d) \ln n)-approximation for Set-Cover requires 2^{n^{d}}-time. This (almost) matches the running time of 2^{O(n^d)} for approximating Set-Cover to a factor (1-d) ln n by Cygan et al. [IPL, 2009]. Our result is tight up to the constant multiplying the n^{d} terms in the exponent. This lower bound applies to all of its generalizations, e.g., Group Steiner Tree (GST), Directed Steiner (DST), Covering Steiner Tree (CST), Connected Polymatroid (CP). We also show that in almost exponential time, these problems reduce to Set-Cover: We show (1-d)ln n approximation algorithms for all these problems that run in time 2^{n^{d \log n } poly(m). We also study log^{2-d}n approximation for GST. Chekuri-Pal [FOCS, 2005] showed that GST admits (log^{2-d}n)-approximation in time exp(2^{log^{d+o(1)}n}), for any 0 < d < 1. We show the lower bound of GST: any (log^{2-d}n)-approximation for GST must run in time >= exp((1+o(1)){log^{d-c}n}), for any c>0, unless the ETH is false. Our result follows by analyzing the work of Halperin and Krauthgamer [STOC, 2003]. The same lower and upper bounds hold for CST. %K Computer Science, Data Structures and Algorithms, cs.DS
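As background for the bounds discussed in the entry above, the following is a sketch of the classical greedy Set-Cover algorithm, the standard ln n-approximation that the (1-d) ln n running-time trade-offs are measured against. It is textbook material, not the subexponential-time algorithm of the paper.

```python
# Illustrative sketch: classical greedy Set-Cover (ln n approximation).
def greedy_set_cover(universe, sets):
    """Return indices of a greedily chosen sub-collection of `sets` covering `universe`."""
    uncovered = set(universe)
    chosen = []
    while uncovered:
        # Pick the set covering the largest number of still-uncovered elements.
        best = max(range(len(sets)), key=lambda i: len(sets[i] & uncovered))
        if not sets[best] & uncovered:
            raise ValueError("the given sets do not cover the universe")
        chosen.append(best)
        uncovered -= sets[best]
    return chosen

# Toy usage.
U = set(range(1, 8))
S = [{1, 2, 3}, {3, 4, 5}, {5, 6, 7}, {1, 4, 7}]
print(greedy_set_cover(U, S))
```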
[77]
R. David, C. S. Karthik, and B. Laekhanukit, “On the Complexity of Closest Pair via Polar-Pair of Point-Sets,” in 34th International Symposium on Computational Geometry (SoCG 2018), Budapest, Hungary, 2018.
Export
BibTeX
@inproceedings{David_SoCG2018, TITLE = {On the Complexity of Closest Pair via Polar-Pair of Point-Sets}, AUTHOR = {David, Roee and Karthik, C. S. and Laekhanukit, Bundit}, LANGUAGE = {eng}, ISBN = {978-3-95977-066-8}, URL = {urn:nbn:de:0030-drops-87412}, DOI = {10.4230/LIPIcs.SoCG.2018.28}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {34th International Symposium on Computational Geometry (SoCG 2018)}, EDITOR = {Speckmann, Bettina and T{\'o}th, Csaba D.}, PAGES = {1--15}, EID = {28}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {99}, ADDRESS = {Budapest, Hungary}, }
Endnote
%0 Conference Proceedings %A David, Roee %A Karthik, C. S. %A Laekhanukit, Bundit %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On the Complexity of Closest Pair via Polar-Pair of Point-Sets : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A827-5 %R 10.4230/LIPIcs.SoCG.2018.28 %U urn:nbn:de:0030-drops-87412 %D 2018 %B 34th International Symposium on Computational Geometry %Z date of event: 2018-06-11 - 2018-06-14 %C Budapest, Hungary %B 34th International Symposium on Computational Geometry %E Speckmann, Bettina; T&#243;th, Csaba D. %P 1 - 15 %Z sequence number: 28 %I Schloss Dagstuhl %@ 978-3-95977-066-8 %B Leibniz International Proceedings in Informatics %N 99 %U http://drops.dagstuhl.de/opus/volltexte/2018/8741/http://drops.dagstuhl.de/doku/urheberrecht1.html
[78]
L. Duraj, M. Künnemann, and A. Polak, “Tight Conditional Lower Bounds for Longest Common Increasing Subsequence,” Algorithmica, 2018.
Export
BibTeX
@article{Duraj2018, TITLE = {Tight Conditional Lower Bounds for Longest Common Increasing Subsequence}, AUTHOR = {Duraj, Lech and K{\"u}nnemann, Marvin and Polak, Adam}, LANGUAGE = {eng}, ISSN = {0178-4617}, DOI = {10.1007/s00453-018-0485-7}, PUBLISHER = {Springer-Verlag}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, JOURNAL = {Algorithmica}, }
Endnote
%0 Journal Article %A Duraj, Lech %A K&#252;nnemann, Marvin %A Polak, Adam %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Tight Conditional Lower Bounds for Longest Common Increasing Subsequence : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A906-9 %R 10.1007/s00453-018-0485-7 %7 2018 %D 2018 %J Algorithmica %I Springer-Verlag %C New York, NY %@ false
[79]
G. Even, M. Ghaffari, and M. Medina, “Distributed Set Cover Approximation: Primal-Dual with Optimal Locality,” in 32nd International Symposium on Distributed Computing (DISC 2018), New Orleans, LA, USA, 2018.
Export
BibTeX
@inproceedings{Even_DISC2018, TITLE = {Distributed Set Cover Approximation: {P}rimal-Dual with Optimal Locality}, AUTHOR = {Even, Guy and Ghaffari, Mohsen and Medina, Moti}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-092-7}, URL = {urn:nbn:de:0030-drops-98114}, DOI = {10.4230/LIPIcs.DISC.2018.22}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {32nd International Symposium on Distributed Computing (DISC 2018)}, EDITOR = {Schmid, Ulrich and Widder, Josef}, PAGES = {1--14}, EID = {22}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {121}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A Even, Guy %A Ghaffari, Mohsen %A Medina, Moti %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Distributed Set Cover Approximation: Primal-Dual with Optimal Locality : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AC02-A %R 10.4230/LIPIcs.DISC.2018.22 %U urn:nbn:de:0030-drops-98114 %D 2018 %B 32nd International Symposium on Distributed Computing %Z date of event: 2018-10-15 - 2018-10-19 %C New Orleans, LA, USA %B 32nd International Symposium on Distributed Computing %E Schmid, Ulrich; Widder, Josef %P 1 - 14 %Z sequence number: 22 %I Schloss Dagstuhl %@ 978-3-95977-092-7 %B Leibniz International Proceedings in Informatics %N 121 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9811/http://drops.dagstuhl.de/doku/urheberrecht1.html
[80]
G. Even, M. Medina, and D. Rawitz, “Online Generalized Caching with Varying Weights and Costs,” in SPAA’18, 30th ACM Symposium on Parallelism in Algorithms and Architectures, Vienna, Austria, 2018.
Export
BibTeX
@inproceedings{Even_SPAA2018, TITLE = {Online Generalized Caching with Varying Weights and Costs}, AUTHOR = {Even, Guy and Medina, Moti and Rawitz, Dror}, LANGUAGE = {eng}, ISBN = {978-1-4503-5799-9}, DOI = {10.1145/3210377.3210404}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {SPAA'18, 30th ACM Symposium on Parallelism in Algorithms and Architectures}, PAGES = {205--212}, ADDRESS = {Vienna, Austria}, }
Endnote
%0 Conference Proceedings %A Even, Guy %A Medina, Moti %A Rawitz, Dror %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Online Generalized Caching with Varying Weights and Costs : %G eng %U http://hdl.handle.net/21.11116/0000-0002-ABEA-6 %R 10.1145/3210377.3210404 %D 2018 %B 30th ACM Symposium on Parallelism in Algorithms and Architectures %Z date of event: 2018-07-16 - 2018-07-18 %C Vienna, Austria %B SPAA'18 %P 205 - 212 %I ACM %@ 978-1-4503-5799-9
[81]
H. Fichtenberger, R. Levi, Y. Vasudev, and M. Wötzel, “A Sublinear Tester for Outerplanarity (and Other Forbidden Minors) With One-Sided Error,” in 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), Prague, Czech Republic, 2018.
Export
BibTeX
@inproceedings{FLVW18, TITLE = {A Sublinear Tester for Outerplanarity (and Other Forbidden Minors) With One-Sided Error}, AUTHOR = {Fichtenberger, Hendrik and Levi, Reut and Vasudev, Yadu and W{\"o}tzel, Maximilian}, LANGUAGE = {eng}, ISBN = {978-3-95977-076-7}, URL = {urn:nbn:de:0030-drops-90563}, DOI = {10.4230/LIPIcs.ICALP.2018.52}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {45th International Colloquium on Automata, Languages, and Programming (ICALP 2018)}, EDITOR = {Chatzigiannakis, Ioannis and Kaklamanis, Christos and Marx, D{\'a}niel and Sannella, Donald}, PAGES = {1--14}, EID = {52}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {107}, ADDRESS = {Prague, Czech Republic}, }
Endnote
%0 Conference Proceedings %A Fichtenberger, Hendrik %A Levi, Reut %A Vasudev, Yadu %A W&#246;tzel, Maximilian %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T A Sublinear Tester for Outerplanarity (and Other Forbidden Minors) With One-Sided Error : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E86A-2 %R 10.4230/LIPIcs.ICALP.2018.52 %U urn:nbn:de:0030-drops-90563 %D 2018 %B 45th International Colloquium on Automata, Languages, and Programming %Z date of event: 2018-07-09 - 2018-07-13 %C Prague, Czech Republic %B 45th International Colloquium on Automata, Languages, and Programming %E Chatzigiannakis, Ioannis; Kaklamanis, Christos; Marx, D&#225;niel; Sannella, Donald %P 1 - 14 %Z sequence number: 52 %I Schloss Dagstuhl %@ 978-3-95977-076-7 %B Leibniz International Proceedings in Informatics %N 107 %U http://drops.dagstuhl.de/opus/volltexte/2018/9056/http://drops.dagstuhl.de/doku/urheberrecht1.html
[82]
K. Fleszar, M. Mnich, and J. Spoerhase, “New Algorithms for Maximum Disjoint Paths Based on Tree-likeness,” Mathematical Programming / A, vol. 171, no. 1–2, 2018.
Export
BibTeX
@article{edge-disjoint-paths-mapr-17, TITLE = {New Algorithms for Maximum Disjoint Paths Based on Tree-likeness}, AUTHOR = {Fleszar, Krzysztof and Mnich, Matthias and Spoerhase, Joachim}, LANGUAGE = {eng}, ISSN = {0025-5610}, DOI = {10.1007/s10107-017-1199-3}, PUBLISHER = {North-Holland}, ADDRESS = {Heidelberg}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Mathematical Programming / A}, VOLUME = {171}, NUMBER = {1-2}, PAGES = {433--461}, }
Endnote
%0 Journal Article %A Fleszar, Krzysztof %A Mnich, Matthias %A Spoerhase, Joachim %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T New Algorithms for Maximum Disjoint Paths Based on Tree-likeness : %G eng %U http://hdl.handle.net/21.11116/0000-0000-B54C-F %R 10.1007/s10107-017-1199-3 %7 2017 %D 2018 %J Mathematical Programming / A %V 171 %N 1-2 %& 433 %P 433 - 461 %I North-Holland %C Heidelberg %@ false
[83]
S. Friedrichs, M. Függer, and C. Lenzen, “Metastability-Containing Circuits,” IEEE Transactions on Computers, vol. 67, no. 8, 2018.
Abstract
Communication across unsynchronized clock domains is inherently vulnerable to metastable upsets; no digital circuit can deterministically avoid, resolve, or detect metastability (Marino, 1981). Traditionally, a possibly metastable input is stored in synchronizers, decreasing the odds of maintained metastability over time. This approach costs time, and does not guarantee success. We propose a fundamentally different approach: It is possible to \emph{contain} metastability by logical masking, so that it cannot infect the entire circuit. This technique guarantees a limited degree of metastability in---and uncertainty about---the output. We present a synchronizer-free, fault-tolerant clock synchronization algorithm as application, synchronizing clock domains and thus enabling metastability-free communication. At the heart of our approach lies a model for metastability in synchronous clocked digital circuits. Metastability is propagated in a worst-case fashion, allowing to derive deterministic guarantees, without and unlike synchronizers. The proposed model permits positive results while at the same time reproducing established impossibility results regarding avoidance, resolution, and detection of metastability. Furthermore, we fully classify which functions can be computed by synchronous circuits with standard registers, and show that masking registers are computationally strictly more powerful.
Export
BibTeX
@article{Friedrichs_Fuegger_Lenzen2018, TITLE = {Metastability-Containing Circuits}, AUTHOR = {Friedrichs, Stephan and F{\"u}gger, Matthias and Lenzen, Christoph}, LANGUAGE = {eng}, ISSN = {0018-9340}, DOI = {10.1109/TC.2018.2808185}, PUBLISHER = {IEEE}, ADDRESS = {Piscataway, NJ}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, ABSTRACT = {Communication across unsynchronized clock domains is inherently vulnerable to metastable upsets; no digital circuit can deterministically avoid, resolve, or detect metastability (Marino, 1981). Traditionally, a possibly metastable input is stored in synchronizers, decreasing the odds of maintained metastability over time. This approach costs time, and does not guarantee success. We propose a fundamentally different approach: It is possible to \emph{contain} metastability by logical masking, so that it cannot infect the entire circuit. This technique guarantees a limited degree of metastability in---and uncertainty about---the output. We present a synchronizer-free, fault-tolerant clock synchronization algorithm as application, synchronizing clock domains and thus enabling metastability-free communication. At the heart of our approach lies a model for metastability in synchronous clocked digital circuits. Metastability is propagated in a worst-case fashion, allowing to derive deterministic guarantees, without and unlike synchronizers. The proposed model permits positive results while at the same time reproducing established impossibility results regarding avoidance, resolution, and detection of metastability. Furthermore, we fully classify which functions can be computed by synchronous circuits with standard registers, and show that masking registers are computationally strictly more powerful.}, JOURNAL = {IEEE Transactions on Computers}, VOLUME = {67}, NUMBER = {8}, PAGES = {1167--1183}, }
Endnote
%0 Journal Article %A Friedrichs, Stephan %A F&#252;gger, Matthias %A Lenzen, Christoph %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Metastability-Containing Circuits : %G eng %U http://hdl.handle.net/21.11116/0000-0001-E5A0-7 %R 10.1109/TC.2018.2808185 %7 2018 %D 2018 %X Communication across unsynchronized clock domains is inherently vulnerable to metastable upsets; no digital circuit can deterministically avoid, resolve, or detect metastability (Marino, 1981). Traditionally, a possibly metastable input is stored in synchronizers, decreasing the odds of maintained metastability over time. This approach costs time, and does not guarantee success. We propose a fundamentally different approach: It is possible to \emph{contain} metastability by logical masking, so that it cannot infect the entire circuit. This technique guarantees a limited degree of metastability in---and uncertainty about---the output. We present a synchronizer-free, fault-tolerant clock synchronization algorithm as application, synchronizing clock domains and thus enabling metastability-free communication. At the heart of our approach lies a model for metastability in synchronous clocked digital circuits. Metastability is propagated in a worst-case fashion, allowing to derive deterministic guarantees, without and unlike synchronizers. The proposed model permits positive results while at the same time reproducing established impossibility results regarding avoidance, resolution, and detection of metastability. Furthermore, we fully classify which functions can be computed by synchronous circuits with standard registers, and show that masking registers are computationally strictly more powerful. %K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC %J IEEE Transactions on Computers %V 67 %N 8 %& 1167 %P 1167 - 1183 %I IEEE %C Piscataway, NJ %@ false
[84]
S. Friedrichs and C. Lenzen, “Parallel Metric Tree Embedding based on an Algebraic View on Moore-Bellman-Ford,” Journal of the ACM, vol. 65, no. 6, 2018.
Export
BibTeX
@article{FriedrichsJACM2018, TITLE = {Parallel Metric Tree Embedding based on an Algebraic View on {Moore}-{Bellman}-{Ford}}, AUTHOR = {Friedrichs, Stephan and Lenzen, Christoph}, LANGUAGE = {eng}, ISSN = {0004-5411}, DOI = {10.1145/3231591}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Journal of the ACM}, VOLUME = {65}, NUMBER = {6}, EID = {43}, }
Endnote
%0 Journal Article %A Friedrichs, Stephan %A Lenzen, Christoph %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Parallel Metric Tree Embedding based on an Algebraic View on Moore-Bellman-Ford : %G eng %U http://hdl.handle.net/21.11116/0000-0002-8892-F %R 10.1145/3231591 %7 2018 %D 2018 %J Journal of the ACM %V 65 %N 6 %Z sequence number: 43 %I ACM %C New York, NY %@ false
[85]
M. Függer, A. Kinali, C. Lenzen, and B. Wiederhake, “Fast All-Digital Clock Frequency Adaptation Circuit for Voltage Droop Tolerance,” in 24th IEEE International Symposium on Asynchronous Circuits and Systems, Vienna, Austria, 2018.
Export
BibTeX
@inproceedings{Fuegger_ASYNC2018, TITLE = {Fast All-Digital Clock Frequency Adaptation Circuit for Voltage Droop Tolerance}, AUTHOR = {F{\"u}gger, Matthias and Kinali, Attila and Lenzen, Christoph and Wiederhake, Ben}, LANGUAGE = {eng}, ISBN = {978-1-5386-5883-3}, DOI = {10.1109/ASYNC.2018.00025}, PUBLISHER = {IEEE}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {24th IEEE International Symposium on Asynchronous Circuits and Systems}, PAGES = {68--77}, ADDRESS = {Vienna, Austria}, }
Endnote
%0 Conference Proceedings %A F&#252;gger, Matthias %A Kinali, Attila %A Lenzen, Christoph %A Wiederhake, Ben %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Fast All-Digital Clock Frequency Adaptation Circuit for Voltage Droop Tolerance : %G eng %U http://hdl.handle.net/21.11116/0000-0002-9FA4-2 %R 10.1109/ASYNC.2018.00025 %D 2018 %B 24th IEEE International Symposium on Asynchronous Circuits and Systems %Z date of event: 2018-05-13 - 2018-05-16 %C Vienna, Austria %B 24th IEEE International Symposium on Asynchronous Circuits and Systems %P 68 - 77 %I IEEE %@ 978-1-5386-5883-3
[86]
J. Garg, M. Hoefer, and K. Mehlhorn, “Approximating the Nash Social Welfare with Budget-Additive Valuations,” in Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018), New Orleans, LA, USA, 2018.
Export
BibTeX
@inproceedings{GargHoeferMehlhornSODA18, TITLE = {Approximating the {Nash} Social Welfare with Budget-Additive Valuations}, AUTHOR = {Garg, Jugal and Hoefer, Martin and Mehlhorn, Kurt}, LANGUAGE = {eng}, ISBN = {978-1-61197-503-1}, DOI = {10.1137/1.9781611975031.150}, PUBLISHER = {SIAM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2018)}, EDITOR = {Czumaj, Artur}, PAGES = {2326--2340}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A Garg, Jugal %A Hoefer, Martin %A Mehlhorn, Kurt %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Approximating the Nash Social Welfare with Budget-Additive Valuations : %G eng %U http://hdl.handle.net/21.11116/0000-0000-37F9-A %R 10.1137/1.9781611975031.150 %D 2018 %B Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %Z date of event: 2018-01-07 - 2018-01-10 %C New Orleans, LA, USA %B Proceedings of the Twenty-Ninth Annual ACM-SIAM Symposium on Discrete Algorithms %E Czumaj, Artur %P 2326 - 2340 %I SIAM %@ 978-1-61197-503-1
[87]
M. Ghaffari, A. Karrenbauer, F. Kuhn, C. Lenzen, and B. Patt-Shamir, “Near-Optimal Distributed Maximum Flow,” SIAM Journal on Computing, vol. 47, no. 6, 2018.
Export
BibTeX
@article{GKKLP2018, TITLE = {Near-Optimal Distributed Maximum Flow}, AUTHOR = {Ghaffari, Mohsen and Karrenbauer, Andreas and Kuhn, Fabian and Lenzen, Christoph and Patt-Shamir, Boaz}, LANGUAGE = {eng}, ISSN = {0097-5397}, DOI = {10.1137/17M113277X}, PUBLISHER = {Society for Industrial and Applied Mathematics.}, ADDRESS = {Philadelphia, PA}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {SIAM Journal on Computing}, VOLUME = {47}, NUMBER = {6}, PAGES = {2078--2117}, }
Endnote
%0 Journal Article %A Ghaffari, Mohsen %A Karrenbauer, Andreas %A Kuhn, Fabian %A Lenzen, Christoph %A Patt-Shamir, Boaz %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Near-Optimal Distributed Maximum Flow : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A3A9-7 %R 10.1137/17M113277X %7 2018 %D 2018 %J SIAM Journal on Computing %V 47 %N 6 %& 2078 %P 2078 - 2117 %I Society for Industrial and Applied Mathematics. %C Philadelphia, PA %@ false
[88]
F. Grandoni, T. Mömke, A. Wiese, and H. Zhou, “A (5/3 + ε)-Approximation for Unsplittable Flow on a Path: Placing Small Tasks into Boxes,” in STOC’18, 50th Annual ACM SIGACT Symposium on Theory of Computing, Los Angeles, CA, USA, 2018.
Export
BibTeX
@inproceedings{DBLP:conf/stoc/0001MW018, TITLE = {A {(5/3} + {\(\epsilon\)})-approximation for unsplittable flow on a path: {P}lacing small tasks into boxes}, AUTHOR = {Grandoni, Fabrizio and M{\"o}mke, Tobias and Wiese, Andreas and Zhou, Hang}, LANGUAGE = {eng}, ISBN = {978-1-4503-5559-9}, DOI = {10.1145/3188745.3188894}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {STOC'18, 50th Annual ACM SIGACT Symposium on Theory of Computing}, PAGES = {607--619}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Grandoni, Fabrizio %A M&#246;mke, Tobias %A Wiese, Andreas %A Zhou, Hang %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T A (5/3+&#949;)-Approximation for Unsplittable Flow on a Path: Placing Small Tasks into Boxes : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5F3-9 %R 10.1145/3188745.3188894 %D 2018 %B 50th Annual ACM SIGACT Symposium on Theory of Computing %Z date of event: 2018-06-25 - 2018-06-29 %C Los Angeles, CA, USA %B STOC'18 %P 607 - 619 %I ACM %@ 978-1-4503-5559-9
[89]
F. Grandoni, B. Laekhanukit, and S. Li, “O(log² k / log log k)-Approximation Algorithm for Directed Steiner Tree: A Tight Quasi-Polynomial-Time Algorithm,” 2018. [Online]. Available: http://arxiv.org/abs/1811.03020. (arXiv: 1811.03020)
Abstract
In the Directed Steiner Tree (DST) problem we are given an $n$-vertex directed edge-weighted graph, a root $r$, and a collection of $k$ terminal nodes. Our goal is to find a minimum-cost arborescence that contains a directed path from $r$ to every terminal. We present an $O(\log^2 k/\log\log{k})$-approximation algorithm for DST that runs in quasi-polynomial-time. By adjusting the parameters in the hardness result of Halperin and Krauthgamer, we show the matching lower bound of $\Omega(\log^2{k}/\log\log{k})$ for the class of quasi-polynomial-time algorithms. This is the first improvement on the DST problem since the classical quasi-polynomial-time $O(\log^3 k)$ approximation algorithm by Charikar et al. (The paper erroneously claims an $O(\log^2k)$ approximation due to a mistake in prior work.) Our approach is based on two main ingredients. First, we derive an approximation preserving reduction to the Label-Consistent Subtree (LCST) problem. The LCST instance has quasi-polynomial size and logarithmic height. We remark that, in contrast, Zelikovsky's height-reduction theorem used in all prior work on DST achieves a reduction to a tree instance of the related Group Steiner Tree (GST) problem of similar height, however losing a logarithmic factor in the approximation ratio. Our second ingredient is an LP-rounding algorithm to approximately solve LCST instances, which is inspired by the framework developed by Rothvoß. We consider a Sherali-Adams lifting of a proper LP relaxation of LCST. Our rounding algorithm proceeds level by level from the root to the leaves, rounding and conditioning each time on a proper subset of label variables. A small enough (namely, polylogarithmic) number of Sherali-Adams lifting levels is sufficient to condition up to the leaves.
Export
BibTeX
@online{Grandoni_arXiv1811.03020, TITLE = {{$O(\log^2k/\log\log{k})$}-{A}pproximation {A}lgorithm for {D}irected {S}teiner {T}ree: {A} {Tight Quasi-Polynomial-Time Algorithm}}, AUTHOR = {Grandoni, Fabrizio and Laekhanukit, Bundit and Li, Shi}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1811.03020}, EPRINT = {1811.03020}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {In the Directed Steiner Tree (DST) problem we are given an $n$-vertex directed edge-weighted graph, a root $r$, and a collection of $k$ terminal nodes. Our goal is to find a minimum-cost arborescence that contains a directed path from $r$ to every terminal. We present an $O(\log^2 k/\log\log{k})$-approximation algorithm for DST that runs in quasi-polynomial-time. By adjusting the parameters in the hardness result of Halperin and Krauthgamer, we show the matching lower bound of $\Omega(\log^2{k}/\log\log{k})$ for the class of quasi-polynomial-time algorithms. This is the first improvement on the DST problem since the classical quasi-polynomial-time $O(\log^3 k)$ approximation algorithm by Charikar et al. (The paper erroneously claims an $O(\log^2k)$ approximation due to a mistake in prior work.) Our approach is based on two main ingredients. First, we derive an approximation preserving reduction to the Label-Consistent Subtree (LCST) problem. The LCST instance has quasi-polynomial size and logarithmic height. We remark that, in contrast, Zelikovsky's heigh-reduction theorem used in all prior work on DST achieves a reduction to a tree instance of the related Group Steiner Tree (GST) problem of similar height, however losing a logarithmic factor in the approximation ratio. Our second ingredient is an LP-rounding algorithm to approximately solve LCST instances, which is inspired by the framework developed by Rothvo{\ss}. We consider a Sherali-Adams lifting of a proper LP relaxation of LCST. Our rounding algorithm proceeds level by level from the root to the leaves, rounding and conditioning each time on a proper subset of label variables. A small enough (namely, polylogarithmic) number of Sherali-Adams lifting levels is sufficient to condition up to the leaves.}, }
Endnote
%0 Report %A Grandoni, Fabrizio %A Laekhanukit, Bundit %A Li, Shi %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T O(log 2 k/ log log k)-Approximation Algorithm for Directed Steiner Tree: A Tight Quasi-Polynomial-Time Algorithm : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A880-F %U http://arxiv.org/abs/1811.03020 %D 2018 %X In the Directed Steiner Tree (DST) problem we are given an $n$-vertex directed edge-weighted graph, a root $r$, and a collection of $k$ terminal nodes. Our goal is to find a minimum-cost arborescence that contains a directed path from $r$ to every terminal. We present an $O(\log^2 k/\log\log{k})$-approximation algorithm for DST that runs in quasi-polynomial-time. By adjusting the parameters in the hardness result of Halperin and Krauthgamer, we show the matching lower bound of $\Omega(\log^2{k}/\log\log{k})$ for the class of quasi-polynomial-time algorithms. This is the first improvement on the DST problem since the classical quasi-polynomial-time $O(\log^3 k)$ approximation algorithm by Charikar et al. (The paper erroneously claims an $O(\log^2k)$ approximation due to a mistake in prior work.) Our approach is based on two main ingredients. First, we derive an approximation preserving reduction to the Label-Consistent Subtree (LCST) problem. The LCST instance has quasi-polynomial size and logarithmic height. We remark that, in contrast, Zelikovsky's heigh-reduction theorem used in all prior work on DST achieves a reduction to a tree instance of the related Group Steiner Tree (GST) problem of similar height, however losing a logarithmic factor in the approximation ratio. Our second ingredient is an LP-rounding algorithm to approximately solve LCST instances, which is inspired by the framework developed by Rothvo{\ss}. We consider a Sherali-Adams lifting of a proper LP relaxation of LCST. Our rounding algorithm proceeds level by level from the root to the leaves, rounding and conditioning each time on a proper subset of label variables. A small enough (namely, polylogarithmic) number of Sherali-Adams lifting levels is sufficient to condition up to the leaves. %K Computer Science, Data Structures and Algorithms, cs.DS
[90]
T. A. G. Hageman, P. A. Loethman, M. Dirnberger, M. C. Elwenspoek, A. Manz, and L. Abelmann, “Macroscopic Equivalence for Microscopic Motion in a Turbulence Driven Three-dimensional Self-assembly Reactor,” Journal of Applied Physics, vol. 123, no. 2, 2018.
Export
BibTeX
@article{Hageman2018, TITLE = {Macroscopic Equivalence for Microscopic Motion in a Turbulence Driven Three-dimensional Self-assembly Reactor}, AUTHOR = {Hageman, T. A. G. and Loethman, P. A. and Dirnberger, Michael and Elwenspoek, M. C. and Manz, A. and Abelmann, L.}, LANGUAGE = {eng}, ISSN = {0021-8979}, DOI = {10.1063/1.5007029}, PUBLISHER = {AIP Publishing}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Journal of Applied Physics}, VOLUME = {123}, NUMBER = {2}, PAGES = {1--10}, EID = {024901}, }
Endnote
%0 Journal Article %A Hageman, T. A. G. %A Loethman, P. A. %A Dirnberger, Michael %A Elwenspoek, M. C. %A Manz, A. %A Abelmann, L. %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Macroscopic Equivalence for Microscopic Motion in a Turbulence Driven Three-dimensional Self-assembly Reactor : %G eng %U http://hdl.handle.net/21.11116/0000-0000-431A-8 %R 10.1063/1.5007029 %7 2018 %D 2018 %J Journal of Applied Physics %O J. Appl. Phys. %V 123 %N 2 %& 1 %P 1 - 10 %Z sequence number: 024901 %I AIP Publishing %C New York, NY %@ false
[91]
P. Heggernes, D. Issac, J. Lauri, P. T. Lima, and E. J. van Leeuwen, “Rainbow Vertex Coloring Bipartite Graphs and Chordal Graphs,” in 43rd International Symposium on Mathematical Foundations of Computer Science (MFCS 2018), Liverpool, UK, 2018.
Export
BibTeX
@inproceedings{heggernes_et_al-2018-rainbow-vertex, TITLE = {Rainbow Vertex Coloring Bipartite Graphs and Chordal Graphs}, AUTHOR = {Heggernes, Pinar and Issac, Davis and Lauri, Juho and Lima, Paloma T. and van Leeuwen, Erik Jan}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-086-6}, URL = {urn:nbn:de:0030-drops-96657}, DOI = {10.4230/LIPIcs.MFCS.2018.83}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {43rd International Symposium on Mathematical Foundations of Computer Science (MFCS 2018)}, EDITOR = {Potapov, Igor and Spirakis, Paul and Worrell, James}, PAGES = {1--13}, EID = {83}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {117}, ADDRESS = {Liverpool, UK}, }
Endnote
%0 Conference Proceedings %A Heggernes, Pinar %A Issac, Davis %A Lauri, Juho %A Lima, Paloma T. %A van Leeuwen, Erik Jan %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Rainbow Vertex Coloring Bipartite Graphs and Chordal Graphs : %G eng %U http://hdl.handle.net/21.11116/0000-0002-9E4D-7 %U urn:nbn:de:0030-drops-96657 %R 10.4230/LIPIcs.MFCS.2018.83 %D 2018 %B 43rd International Symposium on Mathematical Foundations of Computer Science %Z date of event: 2018-08-27 - 2018-08-31 %C Liverpool, UK %B 43rd International Symposium on Mathematical Foundations of Computer Science %E Potapov, Igor; Spirakis, Paul; Worrell, James %P 1 - 13 %Z sequence number: 83 %I Schloss Dagstuhl %@ 978-3-95977-086-6 %B Leibniz International Proceedings in Informatics %N 117 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9665/http://drops.dagstuhl.de/doku/urheberrecht1.html
[92]
S. Heydrich, “A Tale of Two Packing Problems: Improved Algorithms and Tighter Bounds for Online Bin Packing and the Geometric Knapsack Problem,” Universität des Saarlandes, Saarbrücken, 2018.
Abstract
In this thesis, we deal with two packing problems: the online bin packing and the geometric knapsack problem. In online bin packing, the aim is to pack a given number of items of different size into a minimal number of containers. The items need to be packed one by one without knowing future items. For online bin packing in one dimension, we present a new family of algorithms that constitutes the first improvement over the previously best algorithm in almost 15 years. While the algorithmic ideas are intuitive, an elaborate analysis is required to prove its competitive ratio. We also give a lower bound for the competitive ratio of this family of algorithms. For online bin packing in higher dimensions, we discuss lower bounds for the competitive ratio and show that the ideas from the one-dimensional case cannot be easily transferred to obtain better two-dimensional algorithms. In the geometric knapsack problem, one aims to pack a maximum weight subset of given rectangles into one square container. For this problem, we consider offline approximation algorithms. For geometric knapsack with square items, we improve the running time of the best known PTAS and obtain an EPTAS. This shows that large running times caused by some standard techniques for geometric packing problems are not always necessary and can be improved. Finally, we show how to use resource augmentation to compute optimal solutions in EPTAS-time, thereby improving upon the known PTAS for this case.
Export
BibTeX
@phdthesis{Heydrphd18, TITLE = {A Tale of Two Packing Problems: Improved Algorithms and Tighter Bounds for Online Bin Packing and the Geometric Knapsack Problem}, AUTHOR = {Heydrich, Sandy}, LANGUAGE = {eng}, DOI = {10.22028/D291-27240}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, ABSTRACT = {Abstract In this thesis, we deal with two packing problems: the online bin packing and the geometric knapsack problem. In online bin packing, the aim is to pack a given number of items of dierent size into a minimal number of containers. The items need to be packed one by one without knowing future items. For online bin packing in one dimension, we present a new family of algorithms that constitutes the rst improvement over the previously best algorithm in almost 15 years. While the algorithmic ideas are intuitive, an elaborate analysis is required to prove its competitive ratio. We also give a lower bound for the competitive ratio of this family of algorithms. For online bin packing in higher dimensions, we discuss lower bounds for the competitive ratio and show that the ideas from the one-dimensional case cannot be easily transferred to obtain better two-dimensional algorithms. In the geometric knapsack problem, one aims to pack a maximum weight subset of given rectangles into one square container. For this problem, we consider oine approximation algorithms. For geometric knapsack with square items, we improve the running time of the best known PTAS and obtain an EPTAS . This shows that large running times caused by some standard techniques for geometric packing problems are not always necessary and can be improved. Finally, we show how to use resource augmentation to compute optimal solutions in EPTAS -time, thereby improving upon the known PTAS for this case.}, }
Endnote
%0 Thesis %A Heydrich, Sandy %Y van Stee, Rob %A referee: Mehlhorn, Kurt %A referee: Grandoni, Fabrizio %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Discrete Optimization, MPI for Informatics, Max Planck Society %T A Tale of Two Packing Problems: Improved Algorithms and Tighter Bounds for Online Bin Packing and the Geometric Knapsack Problem : %G eng %U http://hdl.handle.net/21.11116/0000-0001-E3DC-7 %R 10.22028/D291-27240 %I Universit&#228;t des Saarlandes %C Saarbr&#252;cken %D 2018 %P viii, 161 p. %V phd %9 phd %X Abstract In this thesis, we deal with two packing problems: the online bin packing and the geometric knapsack problem. In online bin packing, the aim is to pack a given number of items of dierent size into a minimal number of containers. The items need to be packed one by one without knowing future items. For online bin packing in one dimension, we present a new family of algorithms that constitutes the rst improvement over the previously best algorithm in almost 15 years. While the algorithmic ideas are intuitive, an elaborate analysis is required to prove its competitive ratio. We also give a lower bound for the competitive ratio of this family of algorithms. For online bin packing in higher dimensions, we discuss lower bounds for the competitive ratio and show that the ideas from the one-dimensional case cannot be easily transferred to obtain better two-dimensional algorithms. In the geometric knapsack problem, one aims to pack a maximum weight subset of given rectangles into one square container. For this problem, we consider oine approximation algorithms. For geometric knapsack with square items, we improve the running time of the best known PTAS and obtain an EPTAS . This shows that large running times caused by some standard techniques for geometric packing problems are not always necessary and can be improved. Finally, we show how to use resource augmentation to compute optimal solutions in EPTAS -time, thereby improving upon the known PTAS for this case. %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/27141
[93]
R. Hodges and V. Lakshmibai, “A Classification of Spherical Schubert Varieties in the Grassmannian,” 2018. [Online]. Available: http://arxiv.org/abs/1809.08003. (arXiv: 1809.08003)
Abstract
Let $L$ be a Levi subgroup of $GL_N$ which acts by left multiplication on a Schubert variety $X(w)$ in the Grassmannian $G_{d,N}$. We say that $X(w)$ is a spherical Schubert variety if $X(w)$ is a spherical variety for the action of $L$. In earlier work we provide a combinatorial description of the decomposition of the homogeneous coordinate ring of $X(w)$ into irreducible $L$-modules for the induced action of $L$. In this work we classify those decompositions into irreducible $L$-modules that are multiplicity-free. This is then applied towards giving a complete classification of the spherical Schubert varieties in the Grassmannian.
Export
BibTeX
@online{2018arXiv180908003H, TITLE = {A Classification of Spherical Schubert Varieties in the Grassmannian}, AUTHOR = {Hodges, Reuven and Lakshmibai, Venkatramani}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1809.08003}, EPRINT = {1809.08003}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Let $L$ be a Levi subgroup of $GL_N$ which acts by left multiplication on a Schubert variety $X(w)$ in the Grassmannian $G_{d,N}$. We say that $X(w)$ is a spherical Schubert variety if $X(w)$ is a spherical variety for the action of $L$. In earlier work we provide a combinatorial description of the decomposition of the homogeneous coordinate ring of $X(w)$ into irreducible $L$-modules for the induced action of $L$. In this work we classify those decompositions into irreducible $L$-modules that are multiplicity-free. This is then applied towards giving a complete classification of the spherical Schubert varieties in the Grassmannian.}, }
Endnote
%0 Report %A Hodges, Reuven %A Lakshmibai, Venkatramani %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T A Classification of Spherical Schubert Varieties in the Grassmannian : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F4AA-B %U http://arxiv.org/abs/1809.08003 %D 2018 %X Let $L$ be a Levi subgroup of $GL_N$ which acts by left multiplication on a Schubert variety $X(w)$ in the Grassmannian $G_{d,N}$. We say that $X(w)$ is a spherical Schubert variety if $X(w)$ is a spherical variety for the action of $L$. In earlier work we provide a combinatorial description of the decomposition of the homogeneous coordinate ring of $X(w)$ into irreducible $L$-modules for the induced action of $L$. In this work we classify those decompositions into irreducible $L$-modules that are multiplicity-free. This is then applied towards giving a complete classification of the spherical Schubert varieties in the Grassmannian. %K Mathematics, Representation Theory, math.RT,Mathematics, Algebraic Geometry, math.AG,Mathematics, Combinatorics, math.CO,
[94]
M. Hoefer, D. Vaz, and L. Wagner, “Dynamics in Matching and Coalition Formation Games with Structural Constraints,” Artificial Intelligence, vol. 262, 2018.
Export
BibTeX
@article{Hoefer_2018, TITLE = {Dynamics in Matching and Coalition Formation Games with Structural Constraints}, AUTHOR = {Hoefer, Martin and Vaz, Daniel and Wagner, Lisa}, LANGUAGE = {eng}, ISSN = {0004-3702}, DOI = {10.1016/j.artint.2018.06.004}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Artificial Intelligence}, VOLUME = {262}, PAGES = {222--247}, }
Endnote
%0 Journal Article %A Hoefer, Martin %A Vaz, Daniel %A Wagner, Lisa %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Dynamics in Matching and Coalition Formation Games with Structural Constraints : %G eng %U http://hdl.handle.net/21.11116/0000-0002-02F6-6 %R 10.1016/j.artint.2018.06.004 %7 2018 %D 2018 %J Artificial Intelligence %V 262 %& 222 %P 222 - 247 %I Elsevier %C Amsterdam %@ false
[95]
W. Höhn, J. Mestre, and A. Wiese, “How Unsplittable-flow-covering Helps Scheduling with Job-dependent Cost Functions,” Algorithmica, vol. 80, no. 4, 2018.
Export
BibTeX
@article{Hoehn2017, TITLE = {How Unsplittable-flow-covering Helps Scheduling with Job-dependent Cost Functions}, AUTHOR = {H{\"o}hn, Wiebke and Mestre, Julian and Wiese, Andreas}, LANGUAGE = {eng}, ISSN = {0178-4617}, DOI = {10.1007/s00453-017-0300-x}, PUBLISHER = {Springer-Verlag}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Algorithmica}, VOLUME = {80}, NUMBER = {4}, PAGES = {1191--1213}, }
Endnote
%0 Journal Article %A Höhn, Wiebke %A Mestre, Julian %A Wiese, Andreas %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T How Unsplittable-flow-covering Helps Scheduling with Job-dependent Cost Functions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-2618-3 %R 10.1007/s00453-017-0300-x %7 2017 %D 2018 %J Algorithmica %V 80 %N 4 %& 1191 %P 1191 - 1213 %I Springer-Verlag %C New York, NY %@ false
[96]
C. Ikenmeyer, B. Komarath, C. Lenzen, V. Lysikov, A. Mokhov, and K. Sreenivasaiah, “On the Complexity of Hazard-free Circuits,” in STOC’18, 50th Annual ACM SIGACT Symposium on Theory of Computing, Los Angeles, CA, USA, 2018.
Export
BibTeX
@inproceedings{Ikenmeyer_STOC2018, TITLE = {On the Complexity of Hazard-free Circuits}, AUTHOR = {Ikenmeyer, Christian and Komarath, Balagopal and Lenzen, Christoph and Lysikov, Vladimir and Mokhov, Andrey and Sreenivasaiah, Karteek}, LANGUAGE = {eng}, ISBN = {978-1-4503-5559-9}, DOI = {10.1145/3188745.3188912}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {STOC'18, 50th Annual ACM SIGACT Symposium on Theory of Computing}, PAGES = {878--889}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Ikenmeyer, Christian %A Komarath, Balagopal %A Lenzen, Christoph %A Lysikov, Vladimir %A Mokhov, Andrey %A Sreenivasaiah, Karteek %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T On the Complexity of Hazard-free Circuits : %G eng %U http://hdl.handle.net/21.11116/0000-0002-17E1-6 %R 10.1145/3188745.3188912 %D 2018 %B 50th Annual ACM SIGACT Symposium on Theory of Computing %Z date of event: 2018-06-25 - 2018-06-29 %C Los Angeles, CA, USA %B STOC'18 %P 878 - 889 %I ACM %@ 978-1-4503-5559-9
[97]
C. Ikenmeyer and S. Mengel, “On the Relative Power of Reduction Notions in Arithmetic Circuit Complexity,” Information Processing Letters, vol. 130, 2018.
Export
BibTeX
@article{Ikenmeyer2018, TITLE = {On the Relative Power of Reduction Notions in Arithmetic Circuit Complexity}, AUTHOR = {Ikenmeyer, Christian and Mengel, Stefan}, LANGUAGE = {eng}, ISSN = {0020-0190}, DOI = {10.1016/j.ipl.2017.09.009}, PUBLISHER = {Elsevier}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Information Processing Letters}, VOLUME = {130}, PAGES = {7--10}, }
Endnote
%0 Journal Article %A Ikenmeyer, Christian %A Mengel, Stefan %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T On the Relative Power of Reduction Notions in Arithmetic Circuit Complexity : %G eng %U http://hdl.handle.net/21.11116/0000-0000-0361-F %R 10.1016/j.ipl.2017.09.009 %7 2017 %D 2018 %J Information Processing Letters %V 130 %& 7 %P 7 - 10 %I Elsevier %@ false
[98]
C. S. Karthik, B. Laekhanukit, and P. Manurangsi, “On the Parameterized Complexity of Approximating Dominating Set,” in STOC’18, 50th Annual ACM SIGACT Symposium on Theory of Computing, Los Angeles, CA, USA, 2018.
Export
BibTeX
@inproceedings{Karthik_STOC2018, TITLE = {On the Parameterized Complexity of Approximating Dominating Set}, AUTHOR = {Karthik, C. S. and Laekhanukit, Bundit and Manurangsi, Pasin}, LANGUAGE = {eng}, ISBN = {978-1-4503-5559-9}, DOI = {10.1145/3188745.3188896}, PUBLISHER = {ACM}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {STOC'18, 50th Annual ACM SIGACT Symposium on Theory of Computing}, PAGES = {1283--1296}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Karthik, C. S. %A Laekhanukit, Bundit %A Manurangsi, Pasin %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T On the Parameterized Complexity of Approximating Dominating Set : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A81D-1 %R 10.1145/3188745.3188896 %D 2018 %B 50th Annual ACM SIGACT Symposium on Theory of Computing %Z date of event: 2018-06-25 - 2018-06-29 %C Los Angeles, CA, USA %B STOC'18 %P 1283 - 1296 %I ACM %@ 978-1-4503-5559-9
[99]
T. Kesselheim and B. Kodric, “Price of Anarchy for Mechanisms with Risk-Averse Agents,” in 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), Prague, Czech Republic, 2018.
Export
BibTeX
@inproceedings{Kesselheim_ICALP2018, TITLE = {Price of Anarchy for Mechanisms with Risk-Averse Agents}, AUTHOR = {Kesselheim, Thomas and Kodric, Bojana}, LANGUAGE = {eng}, ISBN = {978-3-95977-076-7}, URL = {urn:nbn:de:0030-drops-91599}, DOI = {10.4230/LIPIcs.ICALP.2018.155}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {45th International Colloquium on Automata, Languages, and Programming (ICALP 2018)}, EDITOR = {Chatzigiannakis, Ioannis and Kaklamanis, Christos and Marx, D{\'a}niel and Sannella, Donald}, PAGES = {1--14}, EID = {155}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {107}, ADDRESS = {Prague, Czech Republic}, }
Endnote
%0 Conference Proceedings %A Kesselheim, Thomas %A Kodric, Bojana %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Price of Anarchy for Mechanisms with Risk-Averse Agents : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A32D-4 %R 10.4230/LIPIcs.ICALP.2018.155 %U urn:nbn:de:0030-drops-91599 %D 2018 %B 45th International Colloquium on Automata, Languages, and Programming %Z date of event: 2018-07-09 - 2018-07-13 %C Prague, Czech Republic %B 45th International Colloquium on Automata, Languages, and Programming %E Chatzigiannakis, Ioannis; Kaklamanis, Christos; Marx, D&#225;niel; Sannella, Donald %P 1 - 14 %Z sequence number: 155 %I Schloss Dagstuhl %@ 978-3-95977-076-7 %B Leibniz International Proceedings in Informatics %N 107 %U http://drops.dagstuhl.de/opus/volltexte/2018/9159/http://drops.dagstuhl.de/doku/urheberrecht1.html
[100]
P. Kolev, “Algorithmic Results for Clustering and Refined Physarum Analysis,” Universität des Saarlandes, Saarbrücken, 2018.
Export
BibTeX
@phdthesis{Kolev_PhD2018, TITLE = {Algorithmic Results for Clustering and Refined Physarum Analysis}, AUTHOR = {Kolev, Pavel}, LANGUAGE = {eng}, URL = {urn:nbn:de:bsz:291-scidok-ds-275519}, DOI = {10.22028/D291-27551}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, }
Endnote
%0 Thesis %A Kolev, Pavel %Y Mehlhorn, Kurt %A referee: Bringmann, Karl %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Algorithmic Results for Clustering and Refined Physarum Analysis : %G eng %U http://hdl.handle.net/21.11116/0000-0003-3937-0 %R 10.22028/D291-27551 %U urn:nbn:de:bsz:291-scidok-ds-275519 %F OTHER: hdl:20.500.11880/27234 %I Universität des Saarlandes %C Saarbrücken %D 2018 %P XIV, 123 p. %V phd %9 phd %U http://dx.doi.org/10.22028/D291-27551
[101]
P. Koprowski, K. Mehlhorn, and S. Ray, “Corrigendum to ‘Faster algorithms for computing Hong’s bound on absolute positiveness’ [J. Symb. Comput. 45 (2010) 677–683],” Journal of Symbolic Computation, vol. 87, 2018.
Export
BibTeX
@article{Koprowski2018, TITLE = {Corrigendum to ``{F}aster algorithms for computing {H}ong's bound on absolute positiveness'' [{J. Symb. Comput.} 45 (2010) 677--683]}, AUTHOR = {Koprowski, Przemys{\l}aw and Mehlhorn, Kurt and Ray, Saurabh}, LANGUAGE = {eng}, ISSN = {0747-7171}, DOI = {10.1016/j.jsc.2017.05.008}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Journal of Symbolic Computation}, VOLUME = {87}, PAGES = {238--241}, }
Endnote
%0 Journal Article %A Koprowski, Przemysław %A Mehlhorn, Kurt %A Ray, Saurabh %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Corrigendum to “Faster algorithms for computing Hong's bound on absolute positiveness” [J. Symb. Comput. 45 (2010) 677–683] : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3C55-D %R 10.1016/j.jsc.2017.05.008 %7 2017 %D 2018 %J Journal of Symbolic Computation %V 87 %& 238 %P 238 - 241 %I Elsevier %C Amsterdam %@ false
[102]
F. Kuhn, C. Lenzen, T. Locher, and R. Oshman, “Optimal Gradient Clock Synchronization in Dynamic Networks (Version 3),” 2018. [Online]. Available: http://arxiv.org/abs/1005.2894. (arXiv: 1005.2894)
Abstract
We study the problem of clock synchronization in highly dynamic networks, where communication links can appear or disappear at any time. The nodes in the network are equipped with hardware clocks, but the rate of the hardware clocks can vary arbitrarily within specific bounds, and the estimates that nodes can obtain about the clock values of other nodes are inherently inaccurate. Our goal in this setting is to output a logical clock at each node such that the logical clocks of any two nodes are not too far apart, and nodes that remain close to each other in the network for a long time are better synchronized than distant nodes. This property is called gradient clock synchronization. Gradient clock synchronization has been widely studied in the static setting, where the network topology does not change. We show that the asymptotically optimal bounds obtained for the static case also apply to our highly dynamic setting: if two nodes remain at distance $d$ from each other for sufficiently long, it is possible to upper bound the difference between their clock values by $O(d \log (D / d))$, where $D$ is the diameter of the network. This is known to be optimal even for static networks. Furthermore, we show that our algorithm has optimal stabilization time: when a path of length $d$ appears between two nodes, the time required until the clock skew between the two nodes is reduced to $O(d \log (D / d))$ is $O(D)$, which we prove to be optimal. Finally, the techniques employed for the more intricate analysis of the algorithm for dynamic graphs provide additional insights that are also of interest for the static setting. In particular, we establish self-stabilization of the gradient property within $O(D)$ time.
Export
BibTeX
@online{Kuhn2018-gradient, TITLE = {Optimal Gradient Clock Synchronization in Dynamic Networks (Version 3)}, AUTHOR = {Kuhn, Fabian and Lenzen, Christoph and Locher, Thomas and Oshman, Rotem}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1005.2894}, EPRINT = {1005.2894}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We study the problem of clock synchronization in highly dynamic networks, where communication links can appear or disappear at any time. The nodes in the network are equipped with hardware clocks, but the rate of the hardware clocks can vary arbitrarily within specific bounds, and the estimates that nodes can obtain about the clock values of other nodes are inherently inaccurate. Our goal in this setting is to output a logical clock at each node such that the logical clocks of any two nodes are not too far apart, and nodes that remain close to each other in the network for a long time are better synchronized than distant nodes. This property is called gradient clock synchronization. Gradient clock synchronization has been widely studied in the static setting, where the network topology does not change. We show that the asymptotically optimal bounds obtained for the static case also apply to our highly dynamic setting: if two nodes remain at distance $d$ from each other for sufficiently long, it is possible to upper bound the difference between their clock values by $O(d \log (D / d))$, where $D$ is the diameter of the network. This is known to be optimal even for static networks. Furthermore, we show that our algorithm has optimal stabilization time: when a path of length $d$ appears between two nodes, the time required until the clock skew between the two nodes is reduced to $O(d \log (D / d))$ is $O(D)$, which we prove to be optimal. Finally, the techniques employed for the more intricate analysis of the algorithm for dynamic graphs provide additional insights that are also of interest for the static setting. In particular, we establish self-stabilization of the gradient property within $O(D)$ time.}, }
Endnote
%0 Report %A Kuhn, Fabian %A Lenzen, Christoph %A Locher, Thomas %A Oshman, Rotem %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Optimal Gradient Clock Synchronization in Dynamic Networks (Version 3) : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F1E8-8 %U http://arxiv.org/abs/1005.2894 %D 2018 %X We study the problem of clock synchronization in highly dynamic networks, where communication links can appear or disappear at any time. The nodes in the network are equipped with hardware clocks, but the rate of the hardware clocks can vary arbitrarily within specific bounds, and the estimates that nodes can obtain about the clock values of other nodes are inherently inaccurate. Our goal in this setting is to output a logical clock at each node such that the logical clocks of any two nodes are not too far apart, and nodes that remain close to each other in the network for a long time are better synchronized than distant nodes. This property is called gradient clock synchronization. Gradient clock synchronization has been widely studied in the static setting, where the network topology does not change. We show that the asymptotically optimal bounds obtained for the static case also apply to our highly dynamic setting: if two nodes remain at distance $d$ from each other for sufficiently long, it is possible to upper bound the difference between their clock values by $O(d \log (D / d))$, where $D$ is the diameter of the network. This is known to be optimal even for static networks. Furthermore, we show that our algorithm has optimal stabilization time: when a path of length $d$ appears between two nodes, the time required until the clock skew between the two nodes is reduced to $O(d \log (D / d))$ is $O(D)$, which we prove to be optimal. Finally, the techniques employed for the more intricate analysis of the algorithm for dynamic graphs provide additional insights that are also of interest for the static setting. In particular, we establish self-stabilization of the gradient property within $O(D)$ time. %K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC,Computer Science, Data Structures and Algorithms, cs.DS
[103]
M. Künnemann, “On Nondeterministic Derandomization of Freivalds’ Algorithm: Consequences, Avenues and Algorithmic Progress,” in 26th Annual European Symposium on Algorithms (ESA 2018), Helsinki, Finland, 2018.
Export
BibTeX
@inproceedings{Kuennemann_ESA2018, TITLE = {On Nondeterministic Derandomization of {F}reivalds' Algorithm: {C}onsequences, Avenues and Algorithmic Progress}, AUTHOR = {K{\"u}nnemann, Marvin}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-081-1}, URL = {urn:nbn:de:0030-drops-95195}, DOI = {10.4230/LIPIcs.ESA.2018.56}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {26th Annual European Symposium on Algorithms (ESA 2018)}, EDITOR = {Azar, Yossi and Bast, Hannah and Herman, Grzegorz}, PAGES = {1--16}, EID = {56}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {112}, ADDRESS = {Helsinki, Finland}, }
Endnote
%0 Conference Proceedings %A K&#252;nnemann, Marvin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On Nondeterministic Derandomization of Freivalds' Algorithm: Consequences, Avenues and Algorithmic Progress : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A8F1-0 %R 10.4230/LIPIcs.ESA.2018.56 %U urn:nbn:de:0030-drops-95195 %D 2018 %B 26th Annual European Symposium on Algorithms %Z date of event: 2018-08-20 - 2018-08-22 %C Helsinki, Finland %B 26th Annual European Symposium on Algorithms %E Azar, Yossi; Bast, Hannah; Herman, Grzegorz %P 1 - 16 %Z sequence number: 56 %I Schloss Dagstuhl %@ 978-3-95977-081-1 %B Leibniz International Proceedings in Informatics %N 112 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9519/http://drops.dagstuhl.de/doku/urheberrecht1.html
[104]
M. Künnemann, “On Nondeterministic Derandomization of Freivalds’ Algorithm: Consequences, Avenues and Algorithmic Progress,” 2018. [Online]. Available: http://arxiv.org/abs/1806.09189. (arXiv: 1806.09189)
Abstract
Motivated by studying the power of randomness, certifying algorithms and barriers for fine-grained reductions, we investigate the question whether the multiplication of two $n\times n$ matrices can be performed in near-optimal nondeterministic time $\tilde{O}(n^2)$. Since a classic algorithm due to Freivalds verifies correctness of matrix products probabilistically in time $O(n^2)$, our question is a relaxation of the open problem of derandomizing Freivalds' algorithm. We discuss consequences of a positive or negative resolution of this problem and provide potential avenues towards resolving it. Particularly, we show that sufficiently fast deterministic verifiers for 3SUM or univariate polynomial identity testing yield faster deterministic verifiers for matrix multiplication. Furthermore, we present the partial algorithmic progress that distinguishing whether an integer matrix product is correct or contains between 1 and $n$ erroneous entries can be performed in time $\tilde{O}(n^2)$ -- interestingly, the difficult case of deterministic matrix product verification is not a problem of "finding a needle in the haystack", but rather cancellation effects in the presence of many errors. Our main technical contribution is a deterministic algorithm that corrects an integer matrix product containing at most $t$ errors in time $\tilde{O}(\sqrt{t} n^2 + t^2)$. To obtain this result, we show how to compute an integer matrix product with at most $t$ nonzeroes in the same running time. This improves upon known deterministic output-sensitive integer matrix multiplication algorithms for $t = \Omega(n^{2/3})$ nonzeroes, which is of independent interest.
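For context only: the abstract builds on Freivalds' classic probabilistic verifier for matrix products, which runs in O(n²) time per round. The following is a minimal illustrative sketch of that classic randomized check, not the deterministic algorithm developed in the paper; the function name freivalds_verify and the plain-list matrix representation are our own choices for the example.

import random

def freivalds_verify(A, B, C, rounds=20):
    """Probabilistically check whether A * B == C for n x n integer matrices.

    Each round multiplies by a random 0/1 vector r and compares A(Br) with Cr,
    which costs O(n^2) time. A wrong product is caught with probability at least
    1/2 per round, so the overall error probability is at most 2**(-rounds).
    """
    n = len(A)
    for _ in range(rounds):
        r = [random.randint(0, 1) for _ in range(n)]
        # Compute B r and C r (two matrix-vector products, O(n^2) each).
        Br = [sum(B[i][j] * r[j] for j in range(n)) for i in range(n)]
        Cr = [sum(C[i][j] * r[j] for j in range(n)) for i in range(n)]
        # Compute A (B r) and compare against C r.
        ABr = [sum(A[i][j] * Br[j] for j in range(n)) for i in range(n)]
        if ABr != Cr:
            return False  # the product is definitely incorrect
    return True  # the product is correct with high probability

Derandomizing this check, i.e., achieving the same near-quadratic verification time deterministically (or nondeterministically), is the open problem the paper relaxes and studies.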
Export
BibTeX
@online{Kuennemann_arXiv1806.09189, TITLE = {On Nondeterministic Derandomization of {F}reivalds' Algorithm: {C}onsequences, Avenues and Algorithmic Progress}, AUTHOR = {K{\"u}nnemann, Marvin}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1806.09189}, EPRINT = {1806.09189}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Motivated by studying the power of randomness, certifying algorithms and barriers for fine-grained reductions, we investigate the question whether the multiplication of two $n\times n$ matrices can be performed in near-optimal nondeterministic time $\tilde{O}(n^2)$. Since a classic algorithm due to Freivalds verifies correctness of matrix products probabilistically in time $O(n^2)$, our question is a relaxation of the open problem of derandomizing Freivalds' algorithm. We discuss consequences of a positive or negative resolution of this problem and provide potential avenues towards resolving it. Particularly, we show that sufficiently fast deterministic verifiers for 3SUM or univariate polynomial identity testing yield faster deterministic verifiers for matrix multiplication. Furthermore, we present the partial algorithmic progress that distinguishing whether an integer matrix product is correct or contains between 1 and $n$ erroneous entries can be performed in time $\tilde{O}(n^2)$ -- interestingly, the difficult case of deterministic matrix product verification is not a problem of "finding a needle in the haystack", but rather cancellation effects in the presence of many errors. Our main technical contribution is a deterministic algorithm that corrects an integer matrix product containing at most $t$ errors in time $\tilde{O}(\sqrt{t} n^2 + t^2)$. To obtain this result, we show how to compute an integer matrix product with at most $t$ nonzeroes in the same running time. This improves upon known deterministic output-sensitive integer matrix multiplication algorithms for $t = \Omega(n^{2/3})$ nonzeroes, which is of independent interest.}, }
Endnote
%0 Report %A K&#252;nnemann, Marvin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On Nondeterministic Derandomization of Freivalds' Algorithm: Consequences, Avenues and Algorithmic Progress : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A8F5-C %U http://arxiv.org/abs/1806.09189 %D 2018 %X Motivated by studying the power of randomness, certifying algorithms and barriers for fine-grained reductions, we investigate the question whether the multiplication of two $n\times n$ matrices can be performed in near-optimal nondeterministic time $\tilde{O}(n^2)$. Since a classic algorithm due to Freivalds verifies correctness of matrix products probabilistically in time $O(n^2)$, our question is a relaxation of the open problem of derandomizing Freivalds' algorithm. We discuss consequences of a positive or negative resolution of this problem and provide potential avenues towards resolving it. Particularly, we show that sufficiently fast deterministic verifiers for 3SUM or univariate polynomial identity testing yield faster deterministic verifiers for matrix multiplication. Furthermore, we present the partial algorithmic progress that distinguishing whether an integer matrix product is correct or contains between 1 and $n$ erroneous entries can be performed in time $\tilde{O}(n^2)$ -- interestingly, the difficult case of deterministic matrix product verification is not a problem of "finding a needle in the haystack", but rather cancellation effects in the presence of many errors. Our main technical contribution is a deterministic algorithm that corrects an integer matrix product containing at most $t$ errors in time $\tilde{O}(\sqrt{t} n^2 + t^2)$. To obtain this result, we show how to compute an integer matrix product with at most $t$ nonzeroes in the same running time. This improves upon known deterministic output-sensitive integer matrix multiplication algorithms for $t = \Omega(n^{2/3})$ nonzeroes, which is of independent interest. %K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Computational Complexity, cs.CC
[105]
A. Kurpisz, M. Mastrolilli, C. Mathieu, T. Mömke, V. Verdugo, and A. Wiese, “Semidefinite and Linear Programming Integrality Gaps for Scheduling Identical Machines,” Mathematical Programming / B, vol. 172, no. 1–2, 2018.
Export
BibTeX
@article{Kurpisz2018, TITLE = {Semidefinite and Linear Programming Integrality Gaps for Scheduling Identical Machines}, AUTHOR = {Kurpisz, Adam and Mastrolilli, Monaldo and Mathieu, Claire and M{\"o}mke, Tobias and Verdugo, Victor and Wiese, Andreas}, LANGUAGE = {eng}, ISSN = {0025-5610}, DOI = {10.1007/s10107-017-1152-5}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Mathematical Programming / B}, VOLUME = {172}, NUMBER = {1-2}, PAGES = {231--248}, }
Endnote
%0 Journal Article %A Kurpisz, Adam %A Mastrolilli, Monaldo %A Mathieu, Claire %A Mömke, Tobias %A Verdugo, Victor %A Wiese, Andreas %+ External Organizations External Organizations External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Semidefinite and Linear Programming Integrality Gaps for Scheduling Identical Machines : %G eng %U http://hdl.handle.net/21.11116/0000-0002-6BCF-E %R 10.1007/s10107-017-1152-5 %7 2017 %D 2018 %J Mathematical Programming / B %V 172 %N 1-2 %& 231 %P 231 - 248 %@ false
[106]
J.-H. Lange, A. Karrenbauer, and B. Andres, “Partial Optimality and Fast Lower Bounds for Weighted Correlation Clustering,” in Proceedings of the 35th International Conference on Machine Learning (ICML 2018), Stockholm, Sweden, 2018.
Export
BibTeX
@inproceedings{pmlr-v80-lange18a, TITLE = {Partial Optimality and Fast Lower Bounds for Weighted Correlation Clustering}, AUTHOR = {Lange, Jan-Hendrik and Karrenbauer, Andreas and Andres, Bjoern}, LANGUAGE = {eng}, ISSN = {1938-7228}, URL = {http://proceedings.mlr.press/v80/lange18a.html}, PUBLISHER = {PMLR}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Proceedings of the 35th International Conference on Machine Learning (ICML 2018)}, EDITOR = {Dy, Jennifer and Krause, Andreas}, PAGES = {2898--2907}, SERIES = {Proceedings of Machine Learning Research}, VOLUME = {80}, ADDRESS = {Stockholm, Sweden}, }
Endnote
%0 Conference Proceedings %A Lange, Jan-Hendrik %A Karrenbauer, Andreas %A Andres, Bjoern %+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society %T Partial Optimality and Fast Lower Bounds for Weighted Correlation Clustering : %G eng %U http://hdl.handle.net/21.11116/0000-0001-A71C-4 %U http://proceedings.mlr.press/v80/lange18a.html %D 2018 %B 35th International Conference on Machine Learning %Z date of event: 2018-07-10 - 2018-07-15 %C Stockholm, Sweden %B Proceedings of the 35th International Conference on Machine Learning %E Dy, Jennifer; Krause, Andreas %P 2898 - 2907 %I PMLR %B Proceedings of Machine Learning Research %N 80 %@ false %U http://proceedings.mlr.press/v80/lange18a/lange18a.pdf
[107]
C. Lenzen and R. Levi, “A Centralized Local Algorithm for the Sparse Spanning Graph Problem,” in 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), Prague, Czech Republic, 2018.
Export
BibTeX
@inproceedings{Lenzen_ICALP2018, TITLE = {A Centralized Local Algorithm for the Sparse Spanning Graph Problem}, AUTHOR = {Lenzen, Christoph and Levi, Reut}, LANGUAGE = {eng}, ISBN = {978-3-95977-076-7}, URL = {urn:nbn:de:0030-drops-90919}, DOI = {10.4230/LIPIcs.ICALP.2018.87}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {45th International Colloquium on Automata, Languages, and Programming (ICALP 2018)}, EDITOR = {Chatzigiannakis, Ioannis and Kaklamanis, Christos and Marx, D{\'a}niel and Sannella, Donald}, PAGES = {1--47}, EID = {87}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {107}, ADDRESS = {Prague, Czech Republic}, }
Endnote
%0 Conference Proceedings %A Lenzen, Christoph %A Levi, Reut %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T A Centralized Local Algorithm for the Sparse Spanning Graph Problem : %G eng %U http://hdl.handle.net/21.11116/0000-0002-17EF-8 %R 10.4230/LIPIcs.ICALP.2018.87 %U urn:nbn:de:0030-drops-90919 %D 2018 %B 45th International Colloquium on Automata, Languages, and Programming %Z date of event: 2018-07-09 - 2018-07-13 %C Prague, Czech Republic %B 45th International Colloquium on Automata, Languages, and Programming %E Chatzigiannakis, Ioannis; Kaklamanis, Christos; Marx, D&#225;niel; Sannella, Donald %P 1 - 47 %Z sequence number: 87 %I Schloss Dagstuhl %@ 978-3-95977-076-7 %B Leibniz International Proceedings in Informatics %N 107 %U http://drops.dagstuhl.de/opus/volltexte/2018/9091/http://drops.dagstuhl.de/doku/urheberrecht1.html
[108]
N. Mustafa, K. Dutta, and A. Ghosh, “Simple Proof of Optimal Epsilon Nets,” Combinatorica, vol. 38, no. 5, 2018.
Export
BibTeX
@article{mustafa:hal-01360452, TITLE = {Simple Proof of Optimal Epsilon Nets}, AUTHOR = {Mustafa, Nabil and Dutta, Kunal and Ghosh, Arijit}, LANGUAGE = {eng}, ISSN = {0209-9683}, DOI = {10.1007/s00493-017-3564-5}, PUBLISHER = {Springer}, ADDRESS = {Heidelberg}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Combinatorica}, VOLUME = {38}, NUMBER = {5}, PAGES = {1269--1277}, }
Endnote
%0 Journal Article %A Mustafa, Nabil %A Dutta, Kunal %A Ghosh, Arijit %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Simple Proof of Optimal Epsilon Nets : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-65CA-7 %R 10.1007/s00493-017-3564-5 %7 2017 %D 2018 %J Combinatorica %V 38 %N 5 %& 1269 %P 1269 - 1277 %I Springer %C Heidelberg %@ false
[109]
E. Natale, “On the Computational Power of Simple Dynamics,” Bulletin of the EATCS, vol. 124, 2018.
Export
BibTeX
@article{Natale_EATCS2018, TITLE = {On the Computational Power of Simple Dynamics}, AUTHOR = {Natale, Emanuele}, LANGUAGE = {eng}, PUBLISHER = {EATCS}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, JOURNAL = {Bulletin of the EATCS}, VOLUME = {124}, EID = {526}, }
Endnote
%0 Journal Article %A Natale, Emanuele %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On the Computational Power of Simple Dynamics : %G eng %U http://hdl.handle.net/21.11116/0000-0002-A93E-B %7 2018 %D 2018 %J Bulletin of the EATCS %V 124 %Z sequence number: 526 %I EATCS %U http://eatcs.org/beatcs/index.php/beatcs/article/view/526
[110]
E. Oh, “Point Location in Incremental Planar Subdivisions,” in 29th International Symposium on Algorithms and Computation (ISAAC 2018), Jiaoxi, Yilan, Taiwan, 2018.
Export
BibTeX
@inproceedings{Oh_ISAAC2018, TITLE = {Point Location in Incremental Planar Subdivisions}, AUTHOR = {Oh, Eunjin}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-094-1}, URL = {urn:nbn:de:0030-drops-99991}, DOI = {10.4230/LIPIcs.ISAAC.2018.51}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {29th International Symposium on Algorithms and Computation (ISAAC 2018)}, EDITOR = {Hsu, Wen-Lian and Lee, Der-Tsai and Liao, Chung-Shou}, PAGES = {1--12}, EID = {51}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {123}, ADDRESS = {Jiaoxi, Yilan, Taiwan}, }
Endnote
%0 Conference Proceedings %A Oh, Eunjin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Point Location in Incremental Planar Subdivisions : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AA7B-5 %R 10.4230/LIPIcs.ISAAC.2018.51 %U urn:nbn:de:0030-drops-99991 %D 2018 %B 29th International Symposium on Algorithms and Computation %Z date of event: 2018-12-16 - 2018-12-19 %C Jiaoxi, Yilan, Taiwan %B 29th International Symposium on Algorithms and Computation %E Hsu, Wen-Lian; Lee, Der-Tsai; Liao, Chung-Shou %P 1 - 12 %Z sequence number: 51 %I Schloss Dagstuhl %@ 978-3-95977-094-1 %B Leibniz International Proceedings in Informatics %N 123 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9999/http://drops.dagstuhl.de/doku/urheberrecht1.html
[111]
E. Oh and H.-K. Ahn, “Point Location in Dynamic Planar Subdivisions,” in 34th International Symposium on Computational Geometry (SoCG 2018), Budapest, Hungary, 2018.
Export
BibTeX
@inproceedings{Oh_SoCG2018b, TITLE = {Point Location in Dynamic Planar Subdivisions}, AUTHOR = {Oh, Eunjin and Ahn, Hee-Kap}, LANGUAGE = {eng}, ISBN = {978-3-95977-066-8}, URL = {urn:nbn:de:0030-drops-87769}, DOI = {10.4230/LIPIcs.SoCG.2018.63}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {34th International Symposium on Computational Geometry (SoCG 2018)}, EDITOR = {Speckmann, Bettina and T{\'o}th, Csaba D.}, PAGES = {1--14}, EID = {63}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {99}, ADDRESS = {Budapest, Hungary}, }
Endnote
%0 Conference Proceedings %A Oh, Eunjin %A Ahn, Hee-Kap %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Point Location in Dynamic Planar Subdivisions : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AA86-7 %R 10.4230/LIPIcs.SoCG.2018.63 %U urn:nbn:de:0030-drops-87769 %D 2018 %B 34th International Symposium on Computational Geometry %Z date of event: 2018-06-11 - 2018-06-14 %C Budapest, Hungary %B 34th International Symposium on Computational Geometry %E Speckmann, Bettina; T&#243;th, Csaba D. %P 1 - 14 %Z sequence number: 63 %I Schloss Dagstuhl %@ 978-3-95977-066-8 %B Leibniz International Proceedings in Informatics %N 99 %U http://drops.dagstuhl.de/opus/volltexte/2018/8776/http://drops.dagstuhl.de/doku/urheberrecht1.html
[112]
E. Oh, “Minimizing Distance-to-Sight in Polygonal Domains,” in 29th International Symposium on Algorithms and Computation (ISAAC 2018), Jiaoxi, Yilan, Taiwan, 2018.
Export
BibTeX
@inproceedings{Oh_ISAAC2018b, TITLE = {Minimizing Distance-to-Sight in Polygonal Domains}, AUTHOR = {Oh, Eunjin}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-094-1}, URL = {urn:nbn:de:0030-drops-100073}, DOI = {10.4230/LIPIcs.ISAAC.2018.59}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {29th International Symposium on Algorithms and Computation (ISAAC 2018)}, EDITOR = {Hsu, Wen-Lian and Lee, Der-Tsai and Liao, Chung-Shou}, PAGES = {1--12}, EID = {59}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {123}, ADDRESS = {Jiaoxi, Yilan, Taiwan}, }
Endnote
%0 Conference Proceedings %A Oh, Eunjin %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Minimizing Distance-to-Sight in Polygonal Domains : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AA7D-3 %R 10.4230/LIPIcs.ISAAC.2018.59 %U urn:nbn:de:0030-drops-100073 %D 2018 %B 29th International Symposium on Algorithms and Computation %Z date of event: 2018-12-16 - 2018-12-19 %C Jiaoxi, Yilan, Taiwan %B 29th International Symposium on Algorithms and Computation %E Hsu, Wen-Lian; Lee, Der-Tsai; Liao, Chung-Shou %P 1 - 12 %Z sequence number: 59 %I Schloss Dagstuhl %@ 978-3-95977-094-1 %B Leibniz International Proceedings in Informatics %N 123 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/10007/http://drops.dagstuhl.de/doku/urheberrecht1.html
[113]
E. Oh and H.-K. Ahn, “Approximate Range Queries for Clustering,” in 34th International Symposium on Computational Geometry (SoCG 2018), Budapest, Hungary, 2018.
Export
BibTeX
@inproceedings{Oh_SoCG2018, TITLE = {Approximate Range Queries for Clustering}, AUTHOR = {Oh, Eunjin and Ahn, Hee-Kap}, LANGUAGE = {eng}, ISBN = {978-3-95977-066-8}, URL = {urn:nbn:de:0030-drops-87755}, DOI = {10.4230/LIPIcs.SoCG.2018.62}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {34th International Symposium on Computational Geometry (SoCG 2018)}, EDITOR = {Speckmann, Bettina and T{\'o}th, Csaba D.}, PAGES = {1--14}, EID = {62}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {99}, ADDRESS = {Budapest, Hungary}, }
Endnote
%0 Conference Proceedings %A Oh, Eunjin %A Ahn, Hee-Kap %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Approximate Range Queries for Clustering : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AA83-A %R 10.4230/LIPIcs.SoCG.2018.62 %U urn:nbn:de:0030-drops-87755 %D 2018 %B 34th International Symposium on Computational Geometry %Z date of event: 2018-06-11 - 2018-06-14 %C Budapest, Hungary %B 34th International Symposium on Computational Geometry %E Speckmann, Bettina; T&#243;th, Csaba D. %P 1 - 14 %Z sequence number: 62 %I Schloss Dagstuhl %@ 978-3-95977-066-8 %B Leibniz International Proceedings in Informatics %N 99 %U http://drops.dagstuhl.de/opus/volltexte/2018/8775/http://drops.dagstuhl.de/doku/urheberrecht1.html
[114]
N. Olver, K. Pruhs, K. Schewior, R. Sitters, and L. Stougie, “The Itinerant List Update Problem,” in Approximation and Online Algorithms (WAOA 2018), Helsinki, Finland, 2018.
Export
BibTeX
@inproceedings{Olver_WAOA2018, TITLE = {The Itinerant List Update Problem}, AUTHOR = {Olver, Neil and Pruhs, Kirk and Schewior, Kevin and Sitters, Rene and Stougie, Leen}, LANGUAGE = {eng}, ISBN = {978-3-030-04692-7}, PUBLISHER = {Springer}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {Approximation and Online Algorithms (WAOA 2018)}, EDITOR = {Epstein, Leah and Erlebach, Thomas}, PAGES = {310--326}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {11312}, ADDRESS = {Helsinki, Finland}, }
Endnote
%0 Conference Proceedings %A Olver, Neil %A Pruhs, Kirk %A Schewior, Kevin %A Sitters, Rene %A Stougie, Leen %+ External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T The Itinerant List Update Problem : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F4BA-9 %D 2018 %B 16th Workshop on Approximation and Online Algorithms %Z date of event: 2018-08-23 - 2018-08-24 %C Helsinki, Finland %B Approximation and Online Algorithms %E Epstein, Leah; Erlebach, Thomas %P 310 - 326 %I Springer %@ 978-3-030-04692-7 %B Lecture Notes in Computer Science %N 11312
[115]
A. Oulasvirta and A. Karrenbauer, “Combinatorial Optimization for UI Design,” in Computational Interaction, Oxford, UK: Oxford University Press, 2018.
Export
BibTeX
@incollection{Oulasvirta2018OUP, TITLE = {Combinatorial Optimization for {UI} Design}, AUTHOR = {Oulasvirta, Antti and Karrenbauer, Andreas}, LANGUAGE = {eng}, ISBN = {9780198799610}, PUBLISHER = {Oxford University Press}, ADDRESS = {Oxford, UK}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, BOOKTITLE = {Computational Interaction}, EDITOR = {Oulasvirta, Antti and Kristensson, Per Ola and Bi, Xiaojun and Howes, Andrew}, PAGES = {97--120}, }
Endnote
%0 Book Section %A Oulasvirta, Antti %A Karrenbauer, Andreas %+ External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Combinatorial Optimization for UI Design : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AA65-D %D 2018 %B Computational Interaction %E Oulasvirta, Antti; Kristensson, Per Ola; Bi, Xiaojun; Howes, Andrew %P 97 - 120 %I Oxford University Press %C Oxford, UK %@ 9780198799610
[116]
A. Pandey, N. Saxena, and A. Sinhababu, “Algebraic Independence over Positive Characteristic: New Criterion and Applications to Locally Low Algebraic Rank Circuits,” Computational Complexity, vol. 27, no. 4, 2018.
Export
BibTeX
@article{Pandey2018, TITLE = {Algebraic Independence over Positive Characteristic: {N}ew Criterion and Applications to Locally Low Algebraic Rank Circuits}, AUTHOR = {Pandey, Anurag and Saxena, Nitin and Sinhababu, Amit}, LANGUAGE = {eng}, ISSN = {1016-3328}, DOI = {10.1007/s00037-018-0167-5}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Computational Complexity}, VOLUME = {27}, NUMBER = {4}, PAGES = {617--670}, }
Endnote
%0 Journal Article %A Pandey, Anurag %A Saxena, Nitin %A Sinhababu, Amit %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Algebraic Independence over Positive Characteristic: New Criterion and Applications to Locally Low Algebraic Rank Circuits : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F6EF-C %R 10.1007/s00037-018-0167-5 %7 2018 %D 2018 %J Computational Complexity %V 27 %N 4 %& 617 %P 617 - 670 %I Springer %C New York, NY %@ false
[117]
M. Pilipczuk, M. Pilipczuk, P. Sankowski, and E. J. van Leeuwen, “Network Sparsification for Steiner Problems on Planar and Bounded-Genus Graphs,” ACM Transactions on Algorithms, vol. 14, no. 4, 2018.
Export
BibTeX
@article{PilipzukTOA2018, TITLE = {Network Sparsification for {Steiner} Problems on Planar and Bounded-Genus Graphs}, AUTHOR = {Pilipczuk, Marcin and Pilipczuk, Micha{\l} and Sankowski, Piotr and van Leeuwen, Erik Jan}, LANGUAGE = {eng}, ISSN = {1549-6325}, DOI = {10.1145/3239560}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {ACM Transactions on Algorithms}, VOLUME = {14}, NUMBER = {4}, EID = {53}, }
Endnote
%0 Journal Article %A Pilipczuk, Marcin %A Pilipczuk, Michał %A Sankowski, Piotr %A van Leeuwen, Erik Jan %+ External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Network Sparsification for Steiner Problems on Planar and Bounded-Genus Graphs : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F70C-B %R 10.1145/3239560 %7 2018 %D 2018 %J ACM Transactions on Algorithms %V 14 %N 4 %Z sequence number: 53 %I ACM %C New York, NY %@ false
[118]
B. Ray Chaudhury, Y. K. Cheung, J. Garg, N. Garg, M. Hoefer, and K. Mehlhorn, “On Fair Division of Indivisible Items,” 2018. [Online]. Available: http://arxiv.org/abs/1805.06232. (arXiv: 1805.06232)
Abstract
We consider the task of assigning indivisible goods to a set of agents in a fair manner. Our notion of fairness is Nash social welfare, i.e., the goal is to maximize the geometric mean of the utilities of the agents. Each good comes in multiple items or copies, and the utility of an agent diminishes as it receives more items of the same good. The utility of a bundle of items for an agent is the sum of the utilities of the items in the bundle. Each agent has a utility cap beyond which he does not value additional items. We give a polynomial time approximation algorithm that maximizes Nash social welfare up to a factor of $e^{1/e} \approx 1.445$.
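For orientation only (notation ours, not taken verbatim from the paper): with $n$ agents, bundles $x_1,\dots,x_n$, and capped additive utilities $u_i$, the Nash social welfare objective and the stated approximation guarantee read
\[ \mathrm{NSW}(x) \;=\; \Bigl(\prod_{i=1}^{n} u_i(x_i)\Bigr)^{1/n}, \qquad \mathrm{NSW}(x^{*}) \;\le\; e^{1/e}\,\mathrm{NSW}(x^{\mathrm{ALG}}) \;\approx\; 1.445\,\mathrm{NSW}(x^{\mathrm{ALG}}), \]
where $x^{*}$ is an NSW-optimal allocation and $x^{\mathrm{ALG}}$ is the allocation returned by the polynomial-time algorithm.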
Export
BibTeX
@online{Chaudhury_arXiv1805.06232, TITLE = {On Fair Division of Indivisible Items}, AUTHOR = {Ray Chaudhury, Bhaskar and Cheung, Yun Kuen and Garg, Jugal and Garg, Naveen and Hoefer, Martin and Mehlhorn, Kurt}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1805.06232}, EPRINT = {1805.06232}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We consider the task of assigning indivisible goods to a set of agents in a fair manner. Our notion of fairness is Nash social welfare, i.e., the goal is to maximize the geometric mean of the utilities of the agents. Each good comes in multiple items or copies, and the utility of an agent diminishes as it receives more items of the same good. The utility of a bundle of items for an agent is the sum of the utilities of the items in the bundle. Each agent has a utility cap beyond which he does not value additional items. We give a polynomial time approximation algorithm that maximizes Nash social welfare up to a factor of $e^{1/e} \approx 1.445$.}, }
Endnote
%0 Report %A Ray Chaudhury, Bhaskar %A Cheung, Yun Kuen %A Garg, Jugal %A Garg, Naveen %A Hoefer, Martin %A Mehlhorn, Kurt %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On Fair Division of Indivisible Items : %G eng %U http://hdl.handle.net/21.11116/0000-0002-05E7-4 %U http://arxiv.org/abs/1805.06232 %D 2018 %X We consider the task of assigning indivisible goods to a set of agents in a fair manner. Our notion of fairness is Nash social welfare, i.e., the goal is to maximize the geometric mean of the utilities of the agents. Each good comes in multiple items or copies, and the utility of an agent diminishes as it receives more items of the same good. The utility of a bundle of items for an agent is the sum of the utilities of the items in the bundle. Each agent has a utility cap beyond which he does not value additional items. We give a polynomial time approximation algorithm that maximizes Nash social welfare up to a factor of $e^{1/e} \approx 1.445$. %K Computer Science, Data Structures and Algorithms, cs.DS
[119]
B. Ray Chaudhury and K. Mehlhorn, “Combinatorial Algorithms for General Linear Arrow-Debreu Markets,” 2018. [Online]. Available: http://arxiv.org/abs/1810.01237. (arXiv: 1810.01237)
Abstract
We present a combinatorial algorithm for determining the market clearing prices of a general linear Arrow-Debreu market, where every agent can own multiple goods. The existing combinatorial algorithms for linear Arrow-Debreu markets consider the case where each agent can own all of one good only. We present an $\tilde{\mathcal{O}}((n+m)^7 \log^3(UW))$ algorithm where $n$, $m$, $U$ and $W$ refer to the number of agents, the number of goods, the maximal integral utility and the maximum quantity of any good in the market respectively. The algorithm refines the iterative algorithm of Duan, Garg and Mehlhorn using several new ideas. We also identify the hard instances for existing combinatorial algorithms for linear Arrow-Debreu markets. In particular we find instances where the ratio of the maximum to the minimum equilibrium price of a good is $U^{\Omega(n)}$ and the number of iterations required by the existing iterative combinatorial algorithms of Duan, and Mehlhorn and Duan, Garg, and Mehlhorn are high. Our instances also separate the two algorithms.
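As a reminder of the setting (a standard formulation of the linear Arrow-Debreu model, not quoted from the paper): agents $i$ own endowments $w_{ij} \ge 0$ of goods $j$ and have linear utilities $u_{ij}$; prices $p = (p_1,\dots,p_m)$ are market clearing if every agent spends exactly its budget $b_i = \sum_j p_j w_{ij}$ on goods of maximum bang per buck and every good is exactly sold, i.e.
\[ x_{ij} > 0 \;\Rightarrow\; \frac{u_{ij}}{p_j} = \max_{k} \frac{u_{ik}}{p_k}, \qquad \sum_j p_j x_{ij} = b_i \ \text{for all } i, \qquad \sum_i x_{ij} = \sum_i w_{ij} \ \text{for all } j. \]
The cited bound $\tilde{\mathcal{O}}((n+m)^7 \log^3(UW))$ is on the time to compute such clearing prices combinatorially.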
Export
BibTeX
@online{RayChaudhury_arxiv1810.01237, TITLE = {Combinatorial Algorithms for General Linear {Arrow}-{Debreu} Markets}, AUTHOR = {Ray Chaudhury, Bhaskar and Mehlhorn, Kurt}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1810.01237}, EPRINT = {1810.01237}, EPRINTTYPE = {arXiv}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present a combinatorial algorithm for determining the market clearing prices of a general linear Arrow-Debreu market, where every agent can own multiple goods. The existing combinatorial algorithms for linear Arrow-Debreu markets consider the case where each agent can own all of one good only. We present an $\tilde{\mathcal{O}}((n+m)^7 \log^3(UW))$ algorithm where $n$, $m$, $U$ and $W$ refer to the number of agents, the number of goods, the maximal integral utility and the maximum quantity of any good in the market respectively. The algorithm refines the iterative algorithm of Duan, Garg and Mehlhorn using several new ideas. We also identify the hard instances for existing combinatorial algorithms for linear Arrow-Debreu markets. In particular we find instances where the ratio of the maximum to the minimum equilibrium price of a good is $U^{\Omega(n)}$ and the number of iterations required by the existing iterative combinatorial algorithms of Duan, and Mehlhorn and Duan, Garg, and Mehlhorn are high. Our instances also separate the two algorithms.}, }
Endnote
%0 Report %A Ray Chaudhury, Bhaskar %A Mehlhorn, Kurt %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Combinatorial Algorithms for General Linear Arrow-Debreu Markets : %G eng %U http://hdl.handle.net/21.11116/0000-0002-57B5-0 %U http://arxiv.org/abs/1810.01237 %D 2018 %8 02.10.2018 %X We present a combinatorial algorithm for determining the market clearing prices of a general linear Arrow-Debreu market, where every agent can own multiple goods. The existing combinatorial algorithms for linear Arrow-Debreu markets consider the case where each agent can own all of one good only. We present an $\tilde{\mathcal{O}}((n+m)^7 \log^3(UW))$ algorithm where $n$, $m$, $U$ and $W$ refer to the number of agents, the number of goods, the maximal integral utility and the maximum quantity of any good in the market respectively. The algorithm refines the iterative algorithm of Duan, Garg and Mehlhorn using several new ideas. We also identify the hard instances for existing combinatorial algorithms for linear Arrow-Debreu markets. In particular we find instances where the ratio of the maximum to the minimum equilibrium price of a good is $U^{\Omega(n)}$ and the number of iterations required by the existing iterative combinatorial algorithms of Duan, and Mehlhorn and Duan, Garg, and Mehlhorn are high. Our instances also separate the two algorithms. %K Computer Science, Computer Science and Game Theory, cs.GT,
[120]
B. Ray Chaudhury, Y. K. Cheung, J. Garg, N. Garg, M. Hoefer, and K. Mehlhorn, “On Fair Division for Indivisible Items,” in 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2018), Ahmedabad, India, 2018.
Export
BibTeX
@inproceedings{Chaudhury_FSTTCS2018b, TITLE = {On Fair Division for Indivisible Items}, AUTHOR = {Ray Chaudhury, Bhaskar and Cheung, Yun Kuen and Garg, Jugal and Garg, Naveen and Hoefer, Martin and Mehlhorn, Kurt}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-093-4}, URL = {urn:nbn:de:0030-drops-99242}, DOI = {10.4230/LIPIcs.FSTTCS.2018.25}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2018)}, EDITOR = {Ganguly, Sumit and Pandya, Paritosh}, PAGES = {1--17}, EID = {25}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {122}, ADDRESS = {Ahmedabad, India}, }
Endnote
%0 Conference Proceedings %A Ray Chaudhury, Bhaskar %A Cheung, Yun Kuen %A Garg, Jugal %A Garg, Naveen %A Hoefer, Martin %A Mehlhorn, Kurt %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Algorithms and Complexity, MPI for Informatics, Max Planck Society %T On Fair Division for Indivisible Items : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AAE1-0 %R 10.4230/LIPIcs.FSTTCS.2018.25 %U urn:nbn:de:0030-drops-99242 %D 2018 %B 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science %Z date of event: 2018-12-11 - 2018-12-13 %C Ahmedabad, India %B 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science %E Ganguly, Sumit; Pandya, Paritosh %P 1 - 17 %Z sequence number: 25 %I Schloss Dagstuhl %@ 978-3-95977-093-4 %B Leibniz International Proceedings in Informatics %N 122 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9924/
[121]
B. Ray Chaudhury and K. Mehlhorn, “Combinatorial Algorithms for General Linear Arrow-Debreu Markets,” in 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2018), Ahmedabad, India, 2018.
Export
BibTeX
@inproceedings{Chaudhury_FSTTCS2018, TITLE = {Combinatorial Algorithms for General Linear {A}rrow-{D}ebreu Markets}, AUTHOR = {Ray Chaudhury, Bhaskar and Mehlhorn, Kurt}, LANGUAGE = {eng}, ISSN = {1868-8969}, ISBN = {978-3-95977-093-4}, URL = {urn:nbn:de:0030-drops-99255}, DOI = {10.4230/LIPIcs.FSTTCS.2018.26}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2018)}, EDITOR = {Ganguly, Sumit and Pandya, Paritosh}, PAGES = {1--16}, EID = {26}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {122}, ADDRESS = {Ahmedabad, India}, }
Endnote
%0 Conference Proceedings %A Ray Chaudhury, Bhaskar %A Mehlhorn, Kurt %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Combinatorial Algorithms for General Linear Arrow-Debreu Markets : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AADC-7 %R 10.4230/LIPIcs.FSTTCS.2018.26 %U urn:nbn:de:0030-drops-99255 %D 2018 %B 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science %Z date of event: 2018-12-11 - 2018-12-13 %C Ahmedabad, India %B 38th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science %E Ganguly, Sumit; Pandya, Paritosh %P 1 - 16 %Z sequence number: 26 %I Schloss Dagstuhl %@ 978-3-95977-093-4 %B Leibniz International Proceedings in Informatics %N 122 %@ false %U http://drops.dagstuhl.de/opus/volltexte/2018/9925/
[122]
A. Schmid and J. M. Schmidt, “Computing 2-Walks in Polynomial Time,” ACM Transactions on Algorithms, vol. 14, no. 2, 2018.
Export
BibTeX
@article{Schmid2018, TITLE = {Computing 2-Walks in Polynomial Time}, AUTHOR = {Schmid, Andreas and Schmidt, Jens M.}, LANGUAGE = {eng}, ISSN = {1549-6325}, DOI = {10.1145/3183368}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {ACM Transactions on Algorithms}, VOLUME = {14}, NUMBER = {2}, EID = {22}, }
Endnote
%0 Journal Article %A Schmid, Andreas %A Schmidt, Jens M. %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Computing 2-Walks in Polynomial Time : %G eng %U http://hdl.handle.net/21.11116/0000-0001-949E-6 %R 10.1145/3183368 %7 2018 %D 2018 %J ACM Transactions on Algorithms %V 14 %N 2 %Z sequence number: 22 %I ACM %C New York, NY %@ false
[123]
A. Schmid and J. M. Schmidt, “Computing Tutte Paths,” in 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), Prague, Czech Republic, 2018.
Export
BibTeX
@inproceedings{Schmid_ICALP2018, TITLE = {Computing {T}utte Paths}, AUTHOR = {Schmid, Andreas and Schmidt, Jens M.}, LANGUAGE = {eng}, ISBN = {978-3-95977-076-7}, URL = {urn:nbn:de:0030-drops-91029}, DOI = {10.4230/LIPIcs.ICALP.2018.98}, PUBLISHER = {Schloss Dagstuhl}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {45th International Colloquium on Automata, Languages, and Programming (ICALP 2018)}, EDITOR = {Chatzigiannakis, Ioannis and Kaklamanis, Christos and Marx, D{\'a}niel and Sannella, Donald}, PAGES = {1--14}, EID = {98}, SERIES = {Leibniz International Proceedings in Informatics}, VOLUME = {107}, ADDRESS = {Prague, Czech Republic}, }
Endnote
%0 Conference Proceedings %A Schmid, Andreas %A Schmidt, Jens M. %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society External Organizations %T Computing Tutte Paths : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AB32-5 %R 10.4230/LIPIcs.ICALP.2018.98 %U urn:nbn:de:0030-drops-91029 %D 2018 %B 45th International Colloquium on Automata, Languages, and Programming %Z date of event: 2018-07-09 - 2018-07-13 %C Prague, Czech Republic %B 45th International Colloquium on Automata, Languages, and Programming %E Chatzigiannakis, Ioannis; Kaklamanis, Christos; Marx, Dániel; Sannella, Donald %P 1 - 14 %Z sequence number: 98 %I Schloss Dagstuhl %@ 978-3-95977-076-7 %B Leibniz International Proceedings in Informatics %N 107 %U http://drops.dagstuhl.de/opus/volltexte/2018/9102/
[124]
A. Wiese, “Independent Set of Convex Polygons: From n^ϵ to 1+ϵ via Shrinking,” Algorithmica, vol. 80, no. 3, 2018.
Export
BibTeX
@article{Wiese2017, TITLE = {Independent Set of Convex Polygons: From $n^{\epsilon}$ to 1+$\epsilon$ via Shrinking}, AUTHOR = {Wiese, Andreas}, LANGUAGE = {eng}, ISSN = {0178-4617}, DOI = {10.1007/s00453-017-0347-8}, PUBLISHER = {Springer-Verlag}, ADDRESS = {New York}, YEAR = {2018}, MARGINALMARK = {$\bullet$}, DATE = {2018}, JOURNAL = {Algorithmica}, VOLUME = {80}, NUMBER = {3}, PAGES = {918--934}, }
Endnote
%0 Journal Article %A Wiese, Andreas %+ Algorithms and Complexity, MPI for Informatics, Max Planck Society %T Independent Set of Convex Polygons: From n^ϵ to 1+ϵ via Shrinking : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-2602-4 %R 10.1007/s00453-017-0347-8 %7 2017 %D 2018 %J Algorithmica %V 80 %N 3 %& 918 %P 918 - 934 %I Springer-Verlag %C New York %@ false