Last Year
[1]
A. Abboud, K. Censor-Hillel, S. Khoury, and C. Lenzen, “Fooling Views: A New Lower Bound Technique for Distributed Computations under Congestion,” Distributed Computing, 2020.
Export
BibTeX
@article{Abboud2020,
  title        = {Fooling Views: A New Lower Bound Technique for Distributed Computations under Congestion},
  author       = {Abboud, Amir and Censor-Hillel, Keren and Khoury, Seri and Lenzen, Christoph},
  language     = {eng},
  issn         = {0178-2770},
  doi          = {10.1007/s00446-020-00373-4},
  publisher    = {Springer},
  address      = {New York, NY},
  year         = {2020},
  marginalmark = {$\bullet$},
  journal      = {Distributed Computing},
}
Endnote
%0 Journal Article
%A Abboud, Amir
%A Censor-Hillel, Keren
%A Khoury, Seri
%A Lenzen, Christoph
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Fooling Views: A New Lower Bound Technique for Distributed Computations under Congestion :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-F28E-9
%R 10.1007/s00446-020-00373-4
%7 2020
%D 2020
%J Distributed Computing
%I Springer
%C New York, NY
%@ false
[2]
A. Abboud, K. Bringmann, D. Hermelin, and D. Shabtay, “Scheduling Lower Bounds via AND Subset Sum,” in 47th International Colloquium on Automata, Languages, and Programming (ICALP 2020), Saarbrücken, Germany (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Abboud_ICALP2020,
  title        = {Scheduling Lower Bounds via {AND} Subset Sum},
  author       = {Abboud, Amir and Bringmann, Karl and Hermelin, Danny and Shabtay, Dvir},
  language     = {eng},
  issn         = {1868-8969},
  isbn         = {978-3-95977-138-2},
  url          = {urn:nbn:de:0030-drops-124119},
  doi          = {10.4230/LIPIcs.ICALP.2020.4},
  publisher    = {Schloss Dagstuhl},
  year         = {2020},
  marginalmark = {$\bullet$},
  booktitle    = {47th International Colloquium on Automata, Languages, and Programming (ICALP 2020)},
  editor       = {Czumaj, Artur and Dawar, Anuj and Merelli, Emanuela},
  eid          = {4},
  series       = {Leibniz International Proceedings in Informatics},
  volume       = {168},
  address      = {Saarbr{\"u}cken, Germany (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Abboud, Amir
%A Bringmann, Karl
%A Hermelin, Danny
%A Shabtay, Dvir
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Scheduling Lower Bounds via AND Subset Sum :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2826-2
%R 10.4230/LIPIcs.ICALP.2020.4
%U urn:nbn:de:0030-drops-124119
%D 2020
%B 47th International Colloquium on Automata, Languages, and Programming
%Z date of event: 2020-07-08 - 2020-07-11
%C Saarbrücken, Germany (Virtual Conference)
%B 47th International Colloquium on Automata, Languages, and Programming
%E Czumaj, Artur; Dawar, Anuj; Merelli, Emanuela
%Z sequence number: 4
%I Schloss Dagstuhl
%@ 978-3-95977-138-2
%B Leibniz International Proceedings in Informatics
%N 168
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12411/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[3]
A. Abboud, K. Bringmann, D. Hermelin, and D. Shabtay, “Scheduling Lower Bounds via AND Subset Sum,” 2020. [Online]. Available: https://arxiv.org/abs/2003.07113. (arXiv: 2003.07113)
Abstract
Given $N$ instances $(X_1,t_1),\ldots,(X_N,t_N)$ of Subset Sum, the AND
Subset Sum problem asks to determine whether all of these instances are
yes-instances; that is, whether each set of integers $X_i$ has a subset that
sums up to the target integer $t_i$. We prove that this problem cannot be
solved in time $\tilde{O}((N \cdot t_{max})^{1-\epsilon})$, for $t_{max}=\max_i
t_i$ and any $\epsilon > 0$, assuming the $\forall \exists$ Strong Exponential
Time Hypothesis ($\forall \exists$-SETH). We then use this result to exclude
$\tilde{O}(n+P_{max} \cdot n^{1-\epsilon})$-time algorithms for several
scheduling problems on $n$ jobs with maximum processing time $P_{max}$, based
on $\forall \exists$-SETH. These include classical problems such as $1||\sum
w_jU_j$, the problem of minimizing the total weight of tardy jobs on a single
machine, and $P_2||\sum U_j$, the problem of minimizing the number of tardy
jobs on two identical parallel machines.
Export
BibTeX
@online{Abboud_arXIv2003.07113,
  title        = {Scheduling Lower Bounds via {AND} Subset Sum},
  author       = {Abboud, Amir and Bringmann, Karl and Hermelin, Danny and Shabtay, Dvir},
  language     = {eng},
  url          = {https://arxiv.org/abs/2003.07113},
  eprint       = {2003.07113},
  eprinttype   = {arXiv},
  year         = {2020},
  marginalmark = {$\bullet$},
  abstract     = {Given $N$ instances $(X_1,t_1),\ldots,(X_N,t_N)$ of Subset Sum, the AND Subset Sum problem asks to determine whether all of these instances are yes-instances; that is, whether each set of integers $X_i$ has a subset that sums up to the target integer $t_i$. We prove that this problem cannot be solved in time $\tilde{O}((N \cdot t_{max})^{1-\epsilon})$, for $t_{max}=\max_i t_i$ and any $\epsilon > 0$, assuming the $\forall \exists$ Strong Exponential Time Hypothesis ($\forall \exists$-SETH). We then use this result to exclude $\tilde{O}(n+P_{max} \cdot n^{1-\epsilon})$-time algorithms for several scheduling problems on $n$ jobs with maximum processing time $P_{max}$, based on $\forall \exists$-SETH. These include classical problems such as $1||\sum w_jU_j$, the problem of minimizing the total weight of tardy jobs on a single machine, and $P_2||\sum U_j$, the problem of minimizing the number of tardy jobs on two identical parallel machines.},
}
Endnote
%0 Report
%A Abboud, Amir
%A Bringmann, Karl
%A Hermelin, Danny
%A Shabtay, Dvir
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Scheduling Lower Bounds via AND Subset Sum :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2A52-E
%U https://arxiv.org/abs/2003.07113
%D 2020
%X Given $N$ instances $(X_1,t_1),\ldots,(X_N,t_N)$ of Subset Sum, the AND
Subset Sum problem asks to determine whether all of these instances are
yes-instances; that is, whether each set of integers $X_i$ has a subset that
sums up to the target integer $t_i$. We prove that this problem cannot be
solved in time $\tilde{O}((N \cdot t_{max})^{1-\epsilon})$, for $t_{max}=\max_i
t_i$ and any $\epsilon > 0$, assuming the $\forall \exists$ Strong Exponential
Time Hypothesis ($\forall \exists$-SETH). We then use this result to exclude
$\tilde{O}(n+P_{max} \cdot n^{1-\epsilon})$-time algorithms for several
scheduling problems on $n$ jobs with maximum processing time $P_{max}$, based
on $\forall \exists$-SETH. These include classical problems such as $1||\sum
w_jU_j$, the problem of minimizing the total weight of tardy jobs on a single
machine, and $P_2||\sum U_j$, the problem of minimizing the number of tardy
jobs on two identical parallel machines.
%K Computer Science, Data Structures and Algorithms, cs.DS
[4]
A. Abboud, A. Backurs, K. Bringmann, and M. Künnemann, “Impossibility Results for Grammar-Compressed Linear Algebra,” in Advances in Neural Information Processing Systems 33 (NeurIPS 2020), Virtual Event, 2020.
Export
BibTeX
@inproceedings{Abboud_NeurIPS20,
  title        = {Impossibility Results for Grammar-Compressed Linear Algebra},
  author       = {Abboud, Amir and Backurs, Arturs and Bringmann, Karl and K{\"u}nnemann, Marvin},
  language     = {eng},
  publisher    = {Curran Associates, Inc.},
  year         = {2020},
  marginalmark = {$\bullet$},
  booktitle    = {Advances in Neural Information Processing Systems 33 (NeurIPS 2020)},
  editor       = {Larochelle, Hugo and Ranzato, Marc'Aurelio and Hadsell, Raia and Balcan, Maria-Florina and Lin, Hsuan-Tien},
  address      = {Virtual Event},
}
Endnote
%0 Conference Proceedings
%A Abboud, Amir
%A Backurs, Arturs
%A Bringmann, Karl
%A Künnemann, Marvin
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Impossibility Results for Grammar-Compressed Linear Algebra :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-90DF-B
%D 2020
%B 34th Conference on Neural Information Processing Systems
%Z date of event: 2020-12-06 - 2020-12-12
%C Virtual Event
%B Advances in Neural Information Processing Systems 33
%E Larochelle, H.; Ranzato, M.; Hadsell, R.; Balcan, M. F.; Lin, H.
%I Curran Associates, Inc.
%U https://proceedings.neurips.cc/paper/2020/hash/645e6bfdd05d1a69c5e47b20f0a91d46-Abstract.html
[5]
D. Achlioptas, T. Gouleakis, and F. Iliopoulos, “Simple Local Computation Algorithms for the General Lovász Local Lemma,” in SPAA’20, 32nd ACM Symposium on Parallelism in Algorithms and Architectures, Virtual Event, USA, 2020.
Export
BibTeX
@inproceedings{Achlioptas_SPAA20,
  title        = {Simple Local Computation Algorithms for the General Lov{\'a}sz Local Lemma},
  author       = {Achlioptas, Dimitris and Gouleakis, Themis and Iliopoulos, Fotis},
  language     = {eng},
  isbn         = {978-1-4503-6935-0},
  doi          = {10.1145/3350755.3400250},
  publisher    = {ACM},
  year         = {2020},
  marginalmark = {$\bullet$},
  booktitle    = {SPAA'20, 32nd ACM Symposium on Parallelism in Algorithms and Architectures},
  pages        = {1--10},
  address      = {Virtual Event, USA},
}
Endnote
%0 Conference Proceedings
%A Achlioptas, Dimitris
%A Gouleakis, Themis
%A Iliopoulos, Fotis
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Simple Local Computation Algorithms for the General Lovász Local Lemma :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8B6D-3
%R 10.1145/3350755.3400250
%D 2020
%B 32nd ACM Symposium on Parallelism in Algorithms and Architectures
%Z date of event: 2020-07-15 - 2020-07-17
%C Virtual Event, USA
%B SPAA'20
%P 1 - 10
%I ACM
%@ 9781450369350
[6]
A. Agrawal, D. Lokshtanov, P. Misra, S. Saurabh, and M. Zehavi, “Polylogarithmic Approximation Algorithms for Weighted-F-deletion,” ACM Transactions on Algorithms, vol. 16, no. 4, 2020.
Export
BibTeX
@article{Agrawal2020,
  title        = {Polylogarithmic Approximation Algorithms for Weighted-F-deletion},
  author       = {Agrawal, Akanksha and Lokshtanov, Daniel and Misra, Pranabendu and Saurabh, Saket and Zehavi, Meirav},
  language     = {eng},
  issn         = {1549-6325},
  doi          = {10.1145/3389338},
  publisher    = {ACM},
  address      = {New York, NY},
  year         = {2020},
  marginalmark = {$\bullet$},
  journal      = {ACM Transactions on Algorithms},
  volume       = {16},
  number       = {4},
  eid          = {51},
}
Endnote
%0 Journal Article
%A Agrawal, Akanksha
%A Lokshtanov, Daniel
%A Misra, Pranabendu
%A Saurabh, Saket
%A Zehavi, Meirav
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Polylogarithmic Approximation Algorithms for Weighted-F-deletion :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-4903-4
%R 10.1145/3389338
%7 2020
%D 2020
%J ACM Transactions on Algorithms
%V 16
%N 4
%Z sequence number: 51
%I ACM
%C New York, NY
%@ false
[7]
H. Alkema, M. de Berg, and S. Kisfaludi-Bak, “Euclidean TSP in Narrow Strips,” in 36th International Symposium on Computational Geometry (SoCG 2020), Zürich, Switzerland (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{AlkemaBK20,
  title        = {Euclidean {TSP} in Narrow Strips},
  author       = {Alkema, Henk and de Berg, Mark and Kisfaludi-Bak, S{\'a}ndor},
  language     = {eng},
  isbn         = {978-3-95977-143-6},
  url          = {urn:nbn:de:0030-drops-121628},
  doi          = {10.4230/LIPIcs.SoCG.2020.4},
  publisher    = {Schloss Dagstuhl},
  year         = {2020},
  marginalmark = {$\bullet$},
  booktitle    = {36th International Symposium on Computational Geometry (SoCG 2020)},
  editor       = {Cabello, Sergio and Chen, Danny Z.},
  pages        = {1--16},
  eid          = {4},
  series       = {Leibniz International Proceedings in Informatics},
  volume       = {164},
  address      = {Z{\"u}rich, Switzerland (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Alkema, Henk
%A de Berg, Mark
%A Kisfaludi-Bak, Sándor
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Euclidean TSP in Narrow Strips :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-76E5-2
%R 10.4230/LIPIcs.SoCG.2020.4
%U urn:nbn:de:0030-drops-121628
%D 2020
%B 36th International Symposium on Computational Geometry
%Z date of event: 2020-06-23 - 2020-06-26
%C Zürich, Switzerland (Virtual Conference)
%B 36th International Symposium on Computational Geometry
%E Cabello, Sergio; Chen, Danny Z.
%P 1 - 16
%Z sequence number: 4
%I Schloss Dagstuhl
%@ 978-3-95977-143-6
%B Leibniz International Proceedings in Informatics
%N 164
%U https://drops.dagstuhl.de/opus/volltexte/2020/12162/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[8]
H. Alkema, M. de Berg, and S. Kisfaludi-Bak, “Euclidean TSP in Narrow Strips,” 2020. [Online]. Available: https://arxiv.org/abs/2003.09948. (arXiv: 2003.09948)
Abstract
We investigate how the complexity of Euclidean TSP for point sets $P$ inside
the strip $(-\infty,+\infty)\times [0,\delta]$ depends on the strip width
$\delta$. We obtain two main results. First, for the case where the points have
distinct integer $x$-coordinates, we prove that a shortest bitonic tour (which
can be computed in $O(n\log^2 n)$ time using an existing algorithm) is
guaranteed to be a shortest tour overall when $\delta\leq 2\sqrt{2}$, a bound
which is best possible. Second, we present an algorithm that is fixed-parameter
tractable with respect to $\delta$. More precisely, our algorithm has running
time $2^{O(\sqrt{\delta})} n^2$ for sparse point sets, where each
$1\times\delta$ rectangle inside the strip contains $O(1)$ points. For random
point sets, where the points are chosen uniformly at random from the
rectangle~$[0,n]\times [0,\delta]$, it has an expected running time of
$2^{O(\sqrt{\delta})} n^2 + O(n^3)$.
Export
BibTeX
@online{Alkema_arXiv2003.09948,
  title        = {Euclidean {TSP} in Narrow Strips},
  author       = {Alkema, Henk and de Berg, Mark and Kisfaludi-Bak, S{\'a}ndor},
  language     = {eng},
  url          = {https://arxiv.org/abs/2003.09948},
  eprint       = {2003.09948},
  eprinttype   = {arXiv},
  year         = {2020},
  marginalmark = {$\bullet$},
  abstract     = {We investigate how the complexity of Euclidean TSP for point sets $P$ inside the strip $(-\infty,+\infty)\times [0,\delta]$ depends on the strip width $\delta$. We obtain two main results. First, for the case where the points have distinct integer $x$-coordinates, we prove that a shortest bitonic tour (which can be computed in $O(n\log^2 n)$ time using an existing algorithm) is guaranteed to be a shortest tour overall when $\delta\leq 2\sqrt{2}$, a bound which is best possible. Second, we present an algorithm that is fixed-parameter tractable with respect to $\delta$. More precisely, our algorithm has running time $2^{O(\sqrt{\delta})} n^2$ for sparse point sets, where each $1\times\delta$ rectangle inside the strip contains $O(1)$ points. For random point sets, where the points are chosen uniformly at random from the rectangle~$[0,n]\times [0,\delta]$, it has an expected running time of $2^{O(\sqrt{\delta})} n^2 + O(n^3)$.},
}
Endnote
%0 Report
%A Alkema, Henk
%A de Berg, Mark
%A Kisfaludi-Bak, Sándor
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Euclidean TSP in Narrow Strips :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-77A3-B
%U https://arxiv.org/abs/2003.09948
%D 2020
%X We investigate how the complexity of Euclidean TSP for point sets $P$ inside
the strip $(-\infty,+\infty)\times [0,\delta]$ depends on the strip width
$\delta$. We obtain two main results. First, for the case where the points have
distinct integer $x$-coordinates, we prove that a shortest bitonic tour (which
can be computed in $O(n\log^2 n)$ time using an existing algorithm) is
guaranteed to be a shortest tour overall when $\delta\leq 2\sqrt{2}$, a bound
which is best possible. Second, we present an algorithm that is fixed-parameter
tractable with respect to $\delta$. More precisely, our algorithm has running
time $2^{O(\sqrt{\delta})} n^2$ for sparse point sets, where each
$1\times\delta$ rectangle inside the strip contains $O(1)$ points. For random
point sets, where the points are chosen uniformly at random from the
rectangle~$[0,n]\times [0,\delta]$, it has an expected running time of
$2^{O(\sqrt{\delta})} n^2 + O(n^3)$.
%K Computer Science, Computational Geometry, cs.CG
[9]
G. Amanatidis and P. Kleer, “Rapid Mixing of the Switch Markov Chain for Strongly Stable Degree Sequences,” Random Structures and Algorithms, vol. 57, no. 3, 2020.
Export
BibTeX
@article{Amanatidis2020,
  title        = {Rapid Mixing of the Switch {Markov} Chain for Strongly Stable Degree Sequences},
  author       = {Amanatidis, Georgios and Kleer, Pieter},
  language     = {eng},
  issn         = {1042-9832},
  doi          = {10.1002/rsa.20949},
  publisher    = {Wiley},
  address      = {New York, N.Y.},
  year         = {2020},
  marginalmark = {$\bullet$},
  journal      = {Random Structures and Algorithms},
  volume       = {57},
  number       = {3},
  pages        = {637--657},
}
Endnote
%0 Journal Article
%A Amanatidis, Georgios
%A Kleer, Pieter
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Rapid Mixing of the Switch Markov Chain for Strongly Stable Degree Sequences :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-DC7A-A
%R 10.1002/rsa.20949
%7 2020
%D 2020
%J Random Structures and Algorithms
%V 57
%N 3
%& 637
%P 637 - 657
%I Wiley
%C New York, N.Y.
%@ false
[10]
S. A. Amiri and B. Wiederhake, “Distributed Distance-r Dominating Set on Sparse High-Girth Graphs,” 2020. [Online]. Available: https://arxiv.org/abs/1910.02794. (arXiv: 1910.02794)
Abstract
The dominating set problem and its generalization, the distance-$r$
dominating set problem, are among the well-studied problems in the sequential
settings. In distributed models of computation, unlike for domination, not much
is known about distance-r domination. This is actually the case for other
important closely-related covering problem, namely, the distance-$r$
independent set problem. By result of Kuhn et al. we know the distributed
domination problem is hard on high girth graphs; we study the problem on a
slightly restricted subclass of these graphs: graphs of bounded expansion with
high girth, i.e. their girth should be at least $4r + 3$. We show that in such
graphs, for every constant $r$, a simple greedy CONGEST algorithm provides a
constant-factor approximation of the minimum distance-$r$ dominating set
problem, in a constant number of rounds. More precisely, our constants are
dependent to $r$, not to the size of the graph. This is the first algorithm
that shows there are non-trivial constant factor approximations in constant
number of rounds for any distance $r$-covering problem in distributed settings.
To show the dependency on r is inevitable, we provide an unconditional lower
bound showing the same problem is hard already on rings. We also show that our
analysis of the algorithm is relatively tight, that is any significant
improvement to the approximation factor requires new algorithmic ideas.
Export
BibTeX
@online{Amiri_arXiv1910.02794,
  title        = {Distributed Distance-$r$ Dominating Set on Sparse High-Girth Graphs},
  author       = {Amiri, Saeed Akhoondian and Wiederhake, Ben},
  language     = {eng},
  url          = {https://arxiv.org/abs/1910.02794},
  eprint       = {1910.02794},
  eprinttype   = {arXiv},
  year         = {2020},
  marginalmark = {$\bullet$},
  abstract     = {The dominating set problem and its generalization, the distance-$r$ dominating set problem, are among the well-studied problems in the sequential settings. In distributed models of computation, unlike for domination, not much is known about distance-r domination. This is actually the case for other important closely-related covering problem, namely, the distance-$r$ independent set problem. By result of Kuhn et al. we know the distributed domination problem is hard on high girth graphs; we study the problem on a slightly restricted subclass of these graphs: graphs of bounded expansion with high girth, i.e. their girth should be at least $4r + 3$. We show that in such graphs, for every constant $r$, a simple greedy CONGEST algorithm provides a constant-factor approximation of the minimum distance-$r$ dominating set problem, in a constant number of rounds. More precisely, our constants are dependent to $r$, not to the size of the graph. This is the first algorithm that shows there are non-trivial constant factor approximations in constant number of rounds for any distance $r$-covering problem in distributed settings. To show the dependency on r is inevitable, we provide an unconditional lower bound showing the same problem is hard already on rings. We also show that our analysis of the algorithm is relatively tight, that is any significant improvement to the approximation factor requires new algorithmic ideas.},
}
Endnote
%0 Report
%A Amiri, Saeed Akhoondian
%A Wiederhake, Ben
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Distributed Distance-r Dominating Set on Sparse High-Girth Graphs :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-905B-0
%U https://arxiv.org/abs/1910.02794
%D 2020
%X The dominating set problem and its generalization, the distance-$r$
dominating set problem, are among the well-studied problems in the sequential
settings. In distributed models of computation, unlike for domination, not much
is known about distance-r domination. This is actually the case for other
important closely-related covering problem, namely, the distance-$r$
independent set problem. By result of Kuhn et al. we know the distributed
domination problem is hard on high girth graphs; we study the problem on a
slightly restricted subclass of these graphs: graphs of bounded expansion with
high girth, i.e. their girth should be at least $4r + 3$. We show that in such
graphs, for every constant $r$, a simple greedy CONGEST algorithm provides a
constant-factor approximation of the minimum distance-$r$ dominating set
problem, in a constant number of rounds. More precisely, our constants are
dependent to $r$, not to the size of the graph. This is the first algorithm
that shows there are non-trivial constant factor approximations in constant
number of rounds for any distance $r$-covering problem in distributed settings.
To show the dependency on r is inevitable, we provide an unconditional lower
bound showing the same problem is hard already on rings. We also show that our
analysis of the algorithm is relatively tight, that is any significant
improvement to the approximation factor requires new algorithmic ideas.
%K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC,Computer Science, Discrete Mathematics, cs.DM,Mathematics, Combinatorics, math.CO
[11]
S. A. Amiri, A. Popa, M. Roghani, G. Shahkarami, R. Soltani, and H. Vahidi, “Complexity of Computing the Anti-Ramsey Numbers for Paths,” in 45th International Symposium on Mathematical Foundations of Computer Science (MFCS 2020), Prague, Czech Republic (Virtual Event), 2020.
Export
BibTeX
@inproceedings{Amiri_MFCS20,
  title        = {Complexity of Computing the Anti-{Ramsey} Numbers for Paths},
  author       = {Amiri, Saeed Akhoondian and Popa, Alexandru and Roghani, Mohammad and Shahkarami, Golnoosh and Soltani, Reza and Vahidi, Hossein},
  language     = {eng},
  issn         = {1868-8969},
  isbn         = {978-3-95977-159-7},
  url          = {urn:nbn:de:0030-drops-126781},
  doi          = {10.4230/LIPIcs.MFCS.2020.6},
  publisher    = {Schloss Dagstuhl},
  year         = {2020},
  marginalmark = {$\bullet$},
  booktitle    = {45th International Symposium on Mathematical Foundations of Computer Science (MFCS 2020)},
  editor       = {Esparza, Javier and Kr{\'a}l', Daniel},
  eid          = {6},
  series       = {Leibniz International Proceedings in Informatics},
  volume       = {170},
  address      = {Prague, Czech Republic (Virtual Event)},
}
Endnote
%0 Conference Proceedings
%A Amiri, Saeed Akhoondian
%A Popa, Alexandru
%A Roghani, Mohammad
%A Shahkarami, Golnoosh
%A Soltani, Reza
%A Vahidi, Hossein
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Complexity of Computing the Anti-Ramsey Numbers for Paths :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9422-B
%R 10.4230/LIPIcs.MFCS.2020.6
%U urn:nbn:de:0030-drops-126781
%D 2020
%B 45th International Symposium on Mathematical Foundations of Computer Science
%Z date of event: 2020-08-25 - 2020-08-26
%C Prague, Czech Republic (Virtual Event)
%B 45th International Symposium on Mathematical Foundations of Computer Science
%E Esparza, Javier; Král', Daniel
%Z sequence number: 6
%I Schloss Dagstuhl
%@ 978-3-95977-159-7
%B Leibniz International Proceedings in Informatics
%N 170
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12678/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[12]
I. Anagnostides, T. Gouleakis, and A. Marashian, “Robust Learning under Strong Noise via SQs,” 2020. [Online]. Available: https://arxiv.org/abs/2010.09106. (arXiv: 2010.09106)
Abstract
This work provides several new insights on the robustness of Kearns'
statistical query framework against challenging label-noise models. First, we
build on a recent result by \cite{DBLP:journals/corr/abs-2006-04787} that
showed noise tolerance of distribution-independently evolvable concept classes
under Massart noise. Specifically, we extend their characterization to more
general noise models, including the Tsybakov model which considerably
generalizes the Massart condition by allowing the flipping probability to be
arbitrarily close to $\frac{1}{2}$ for a subset of the domain. As a corollary,
we employ an evolutionary algorithm by \cite{DBLP:conf/colt/KanadeVV10} to
obtain the first polynomial time algorithm with arbitrarily small excess error
for learning linear threshold functions over any spherically symmetric
distribution in the presence of spherically symmetric Tsybakov noise. Moreover,
we posit access to a stronger oracle, in which for every labeled example we
additionally obtain its flipping probability. In this model, we show that every
SQ learnable class admits an efficient learning algorithm with OPT + $\epsilon$
misclassification error for a broad class of noise models. This setting
substantially generalizes the widely-studied problem of classification under
RCN with known noise rate, and corresponds to a non-convex optimization problem
even when the noise function -- i.e. the flipping probabilities of all points
-- is known in advance.
Export
BibTeX
@online{Anagnostides_arXiv2010.09106,
  title        = {Robust Learning under Strong Noise via {SQs}},
  author       = {Anagnostides, Ioannis and Gouleakis, Themis and Marashian, Ali},
  language     = {eng},
  url          = {https://arxiv.org/abs/2010.09106},
  eprint       = {2010.09106},
  eprinttype   = {arXiv},
  year         = {2020},
  marginalmark = {$\bullet$},
  abstract     = {This work provides several new insights on the robustness of Kearns' statistical query framework against challenging label-noise models. First, we build on a recent result by \cite{DBLP:journals/corr/abs-2006-04787} that showed noise tolerance of distribution-independently evolvable concept classes under Massart noise. Specifically, we extend their characterization to more general noise models, including the Tsybakov model which considerably generalizes the Massart condition by allowing the flipping probability to be arbitrarily close to $\frac{1}{2}$ for a subset of the domain. As a corollary, we employ an evolutionary algorithm by \cite{DBLP:conf/colt/KanadeVV10} to obtain the first polynomial time algorithm with arbitrarily small excess error for learning linear threshold functions over any spherically symmetric distribution in the presence of spherically symmetric Tsybakov noise. Moreover, we posit access to a stronger oracle, in which for every labeled example we additionally obtain its flipping probability. In this model, we show that every SQ learnable class admits an efficient learning algorithm with OPT + $\epsilon$ misclassification error for a broad class of noise models. This setting substantially generalizes the widely-studied problem of classification under RCN with known noise rate, and corresponds to a non-convex optimization problem even when the noise function -- i.e. the flipping probabilities of all points -- is known in advance.},
}
Endnote
%0 Report
%A Anagnostides, Ioannis
%A Gouleakis, Themis
%A Marashian, Ali
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Robust Learning under Strong Noise via SQs :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8B5D-5
%U https://arxiv.org/abs/2010.09106
%D 2020
%X This work provides several new insights on the robustness of Kearns'
statistical query framework against challenging label-noise models. First, we
build on a recent result by \cite{DBLP:journals/corr/abs-2006-04787} that
showed noise tolerance of distribution-independently evolvable concept classes
under Massart noise. Specifically, we extend their characterization to more
general noise models, including the Tsybakov model which considerably
generalizes the Massart condition by allowing the flipping probability to be
arbitrarily close to $\frac{1}{2}$ for a subset of the domain. As a corollary,
we employ an evolutionary algorithm by \cite{DBLP:conf/colt/KanadeVV10} to
obtain the first polynomial time algorithm with arbitrarily small excess error
for learning linear threshold functions over any spherically symmetric
distribution in the presence of spherically symmetric Tsybakov noise. Moreover,
we posit access to a stronger oracle, in which for every labeled example we
additionally obtain its flipping probability. In this model, we show that every
SQ learnable class admits an efficient learning algorithm with OPT + $\epsilon$
misclassification error for a broad class of noise models. This setting
substantially generalizes the widely-studied problem of classification under
RCN with known noise rate, and corresponds to a non-convex optimization problem
even when the noise function -- i.e. the flipping probabilities of all points
-- is known in advance.
%K Statistics, Machine Learning, stat.ML,Computer Science, Learning, cs.LG
[13]
A. Antoniadis, N. Garg, G. Kumar, and N. Kumar, “Parallel Machine Scheduling to Minimize Energy Consumption,” in Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{Antoniadis_SODA20,
  title        = {Parallel Machine Scheduling to Minimize Energy Consumption},
  author       = {Antoniadis, Antonios and Garg, Naveen and Kumar, Gunjan and Kumar, Nikhil},
  language     = {eng},
  isbn         = {978-1-61197-599-4},
  doi          = {10.5555/3381089.3381257},
  publisher    = {SIAM},
  year         = {2020},
  marginalmark = {$\bullet$},
  booktitle    = {Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020)},
  editor       = {Chawla, Shuchi},
  pages        = {2758--2769},
  address      = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Antoniadis, Antonios
%A Garg, Naveen
%A Kumar, Gunjan
%A Kumar, Nikhil
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T Parallel Machine Scheduling to Minimize Energy Consumption :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-F26A-2
%R 10.5555/3381089.3381257
%D 2020
%B 31st Annual ACM-SIAM Symposium on Discrete Algorithms
%Z date of event: 2020-01-05 - 2020-01-08
%C Salt Lake City, UT, USA
%B Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms
%E Chawla, Shuchi
%P 2758 - 2769
%I SIAM
%@ 978-1-61197-599-4
[14]
A. Antoniadis, S. Kisfaludi-Bak, B. Laekhanukit, and D. Vaz, “On the Approximability of the Traveling Salesman Problem with Line Neighborhoods,” 2020. [Online]. Available: https://arxiv.org/abs/2008.12075. (arXiv: 2008.12075)
Abstract
We study the variant of the Euclidean Traveling Salesman problem where
instead of a set of points, we are given a set of lines as input, and the goal
is to find the shortest tour that visits each line. The best known upper and
lower bounds for the problem in $\mathbb{R}^d$, with $d\ge 3$, are
$\mathrm{NP}$-hardness and an $O(\log^3 n)$-approximation algorithm which is
based on a reduction to the group Steiner tree problem.
We show that TSP with lines in $\mathbb{R}^d$ is APX-hard for any $d\ge 3$.
More generally, this implies that TSP with $k$-dimensional flats does not admit
a PTAS for any $1\le k \leq d-2$ unless $\mathrm{P}=\mathrm{NP}$, which gives a
complete classification of the approximability of these problems, as there are
known PTASes for $k=0$ (i.e., points) and $k=d-1$ (hyperplanes). We are able to
give a stronger inapproximability factor for $d=O(\log n)$ by showing that TSP
with lines does not admit a $(2-\epsilon)$-approximation in $d$ dimensions
under the unique games conjecture. On the positive side, we leverage recent
results on restricted variants of the group Steiner tree problem in order to
give an $O(\log^2 n)$-approximation algorithm for the problem, albeit with a
running time of $n^{O(\log\log n)}$.
Export
BibTeX
@online{Antoniadis_arXiv2008.12075,
TITLE = {On the Approximability of the Traveling Salesman Problem with Line Neighborhoods},
AUTHOR = {Antoniadis, Antonios and Kisfaludi-Bak, S{\'a}ndor and Laekhanukit, Bundit and Vaz, Daniel},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2008.12075},
EPRINT = {2008.12075},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.DS},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We study the variant of the Euclidean Traveling Salesman problem where instead of a set of points, we are given a set of lines as input, and the goal is to find the shortest tour that visits each line. The best known upper and lower bounds for the problem in $\mathbb{R}^d$, with $d\ge 3$, are $\mathrm{NP}$-hardness and an $O(\log^3 n)$-approximation algorithm which is based on a reduction to the group Steiner tree problem. We show that TSP with lines in $\mathbb{R}^d$ is APX-hard for any $d\ge 3$. More generally, this implies that TSP with $k$-dimensional flats does not admit a PTAS for any $1\le k \leq d-2$ unless $\mathrm{P}=\mathrm{NP}$, which gives a complete classification of the approximability of these problems, as there are known PTASes for $k=0$ (i.e., points) and $k=d-1$ (hyperplanes). We are able to give a stronger inapproximability factor for $d=O(\log n)$ by showing that TSP with lines does not admit a $(2-\epsilon)$-approximation in $d$ dimensions under the unique games conjecture. On the positive side, we leverage recent results on restricted variants of the group Steiner tree problem in order to give an $O(\log^2 n)$-approximation algorithm for the problem, albeit with a running time of $n^{O(\log\log n)}$.},
}
Endnote
%0 Report
%A Antoniadis, Antonios
%A Kisfaludi-Bak, Sándor
%A Laekhanukit, Bundit
%A Vaz, Daniel
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T On the Approximability of the Traveling Salesman Problem with Line
Neighborhoods :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-77AD-1
%U https://arxiv.org/abs/2008.12075
%D 2020
%X We study the variant of the Euclidean Traveling Salesman problem where
instead of a set of points, we are given a set of lines as input, and the goal
is to find the shortest tour that visits each line. The best known upper and
lower bounds for the problem in $\mathbb{R}^d$, with $d\ge 3$, are
$\mathrm{NP}$-hardness and an $O(\log^3 n)$-approximation algorithm which is
based on a reduction to the group Steiner tree problem.
We show that TSP with lines in $\mathbb{R}^d$ is APX-hard for any $d\ge 3$.
More generally, this implies that TSP with $k$-dimensional flats does not admit
a PTAS for any $1\le k \leq d-2$ unless $\mathrm{P}=\mathrm{NP}$, which gives a
complete classification of the approximability of these problems, as there are
known PTASes for $k=0$ (i.e., points) and $k=d-1$ (hyperplanes). We are able to
give a stronger inapproximability factor for $d=O(\log n)$ by showing that TSP
with lines does not admit a $(2-\epsilon)$-approximation in $d$ dimensions
under the unique games conjecture. On the positive side, we leverage recent
results on restricted variants of the group Steiner tree problem in order to
give an $O(\log^2 n)$-approximation algorithm for the problem, albeit with a
running time of $n^{O(\log\log n)}$.
%K Computer Science, Data Structures and Algorithms, cs.DS
[15]
A. Antoniadis, T. Gouleakis, P. Kleer, and P. Kolev, “Secretary and Online Matching Problems with Machine Learned Advice,” 2020. [Online]. Available: https://arxiv.org/abs/2006.01026. (arXiv: 2006.01026)
Abstract
The classical analysis of online algorithms, due to its worst-case nature,
can be quite pessimistic when the input instance at hand is far from
worst-case. Often this is not an issue with machine learning approaches, which
shine in exploiting patterns in past inputs in order to predict the future.
However, such predictions, although usually accurate, can be arbitrarily poor.
Inspired by a recent line of work, we augment three well-known online settings
with machine learned predictions about the future, and develop algorithms that
take them into account. In particular, we study the following online selection
problems: (i) the classical secretary problem, (ii) online bipartite matching
and (iii) the graphic matroid secretary problem. Our algorithms still come with
a worst-case performance guarantee in the case that predictions are subpar
while obtaining an improved competitive ratio (over the best-known classical
online algorithm for each problem) when the predictions are sufficiently
accurate. For each algorithm, we establish a trade-off between the competitive
ratios obtained in the two respective cases.
Export
BibTeX
@online{Antoniadis_arXiv2006.01026,
TITLE = {Secretary and Online Matching Problems with Machine Learned Advice},
AUTHOR = {Antoniadis, Antonios and Gouleakis, Themis and Kleer, Pieter and Kolev, Pavel},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2006.01026},
EPRINT = {2006.01026},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.DS},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {The classical analysis of online algorithms, due to its worst-case nature, can be quite pessimistic when the input instance at hand is far from worst-case. Often this is not an issue with machine learning approaches, which shine in exploiting patterns in past inputs in order to predict the future. However, such predictions, although usually accurate, can be arbitrarily poor. Inspired by a recent line of work, we augment three well-known online settings with machine learned predictions about the future, and develop algorithms that take them into account. In particular, we study the following online selection problems: (i) the classical secretary problem, (ii) online bipartite matching and (iii) the graphic matroid secretary problem. Our algorithms still come with a worst-case performance guarantee in the case that predictions are subpar while obtaining an improved competitive ratio (over the best-known classical online algorithm for each problem) when the predictions are sufficiently accurate. For each algorithm, we establish a trade-off between the competitive ratios obtained in the two respective cases.},
}
Endnote
%0 Report
%A Antoniadis, Antonios
%A Gouleakis, Themis
%A Kleer, Pieter
%A Kolev, Pavel
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Secretary and Online Matching Problems with Machine Learned Advice :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8B7A-4
%U https://arxiv.org/abs/2006.01026
%D 2020
%X The classical analysis of online algorithms, due to its worst-case nature,
can be quite pessimistic when the input instance at hand is far from
worst-case. Often this is not an issue with machine learning approaches, which
shine in exploiting patterns in past inputs in order to predict the future.
However, such predictions, although usually accurate, can be arbitrarily poor.
Inspired by a recent line of work, we augment three well-known online settings
with machine learned predictions about the future, and develop algorithms that
take them into account. In particular, we study the following online selection
problems: (i) the classical secretary problem, (ii) online bipartite matching
and (iii) the graphic matroid secretary problem. Our algorithms still come with
a worst-case performance guarantee in the case that predictions are subpar
while obtaining an improved competitive ratio (over the best-known classical
online algorithm for each problem) when the predictions are sufficiently
accurate. For each algorithm, we establish a trade-off between the competitive
ratios obtained in the two respective cases.
%K Computer Science, Data Structures and Algorithms, cs.DS
[16]
A. Antoniadis, A. Cristi, T. Oosterwijk, and A. Sgouritsa, “A General Framework for Energy-Efficient Cloud Computing Mechanisms,” in AAMAS’20, 19th International Conference on Autonomous Agents and MultiAgent Systems, Auckland, New Zealand (Virtual), 2020.
Export
BibTeX
@inproceedings{Antoniadis_AAMAS20,
TITLE = {A General Framework for Energy-Efficient Cloud Computing Mechanisms},
AUTHOR = {Antoniadis, Antonios and Cristi, Andr{\'e}s and Oosterwijk, Tim and Sgouritsa, Alkmini},
LANGUAGE = {eng},
ISBN = {978-1-4503-7518-4},
URL = {https://dl.acm.org/doi/10.5555/3398761.3398775},
PUBLISHER = {ACM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {AAMAS'20, 19th International Conference on Autonomous Agents and MultiAgent Systems},
EDITOR = {El Fallah Seghrouchni, Amal and Sukthankar, Gita and An, Bo and Yorke-Smith, Neil},
PAGES = {70--78},
ADDRESS = {Auckland, New Zealand (Virtual)},
}
Endnote
%0 Conference Proceedings
%A Antoniadis, Antonios
%A Cristi, Andrés
%A Oosterwijk, Tim
%A Sgouritsa, Alkmini
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T A General Framework for Energy-Efficient Cloud Computing Mechanisms :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-93A5-8
%U https://dl.acm.org/doi/10.5555/3398761.3398775
%D 2020
%B 19th International Conference on Autonomous Agents and MultiAgent Systems
%Z date of event: 2020-05-09 - 2020-05-13
%C Auckland, New Zealand (Virtual)
%B AAMAS'20
%E El Fallah Seghrouchni, Amal; Sukthankar, Gita; An, Bo; Yorke-Smith, Neil
%P 70 - 78
%I ACM
%@ 978-1-4503-7518-4
[17]
A. Antoniadis, C. Coester, M. Elias, A. Polak, and B. Simon, “Online Metric Algorithms with Untrusted Predictions,” in Proceedings of the 37th International Conference on Machine Learning (ICML 2020), Virtual Conference, 2020.
Export
BibTeX
@inproceedings{Antoniadis_ICML2020,
  author       = {Antoniadis, Antonios and Coester, Christian and Elias, Marek and Polak, Adam and Simon, Bertrand},
  title        = {Online Metric Algorithms with Untrusted Predictions},
  booktitle    = {Proceedings of the 37th International Conference on Machine Learning (ICML 2020)},
  editor       = {Daum{\'e}, Hal and Singh, Aarti},
  series       = {Proceedings of Machine Learning Research},
  volume       = {119},
  pages        = {345--355},
  address      = {Virtual Conference},
  year         = {2020},
  language     = {eng},
  issn         = {1938-7228},
  url          = {http://proceedings.mlr.press/v119/antoniadis20a.html},
  marginalmark = {$\bullet$},
}
Endnote
%0 Conference Proceedings
%A Antoniadis, Antonios
%A Coester, Christian
%A Elias, Marek
%A Polak, Adam
%A Simon, Bertrand
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
%T Online Metric Algorithms with Untrusted Predictions :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-93BF-C
%U http://proceedings.mlr.press/v119/antoniadis20a.html
%D 2020
%B 37th International Conference on Machine Learning
%Z date of event: 2020-07-13 - 2020-07-18
%C Virtual Conference
%B Proceedings of the 37th International Conference on Machine Learning
%E Daumé, Hal; Singh, Aarti
%P 345 - 355
%B Proceedings of Machine Learning Research
%N 119
%@ false
%U http://proceedings.mlr.press/v119/antoniadis20a/antoniadis20a.pdf
[18]
A. Antoniadis, T. Gouleakis, P. Kleer, and P. Kolev, “Secretary and Online Matching Problems with Machine Learned Advice,” in Advances in Neural Information Processing Systems 33 (NIPS 2020), Virtual Event, 2020.
Export
BibTeX
@inproceedings{Antoniadis_NeurIPS20,
  author       = {Antoniadis, Antonios and Gouleakis, Themis and Kleer, Pieter and Kolev, Pavel},
  title        = {Secretary and Online Matching Problems with Machine Learned Advice},
  booktitle    = {Advances in Neural Information Processing Systems 33 (NIPS 2020)},
  editor       = {Larochelle, H. and Ranzato, M. and Hadsell, R. and Balcan, M. F. and Lin, H.},
  publisher    = {Curran Associates, Inc.},
  address      = {Virtual Event},
  year         = {2020},
  language     = {eng},
  marginalmark = {$\bullet$},
}
Endnote
%0 Conference Proceedings
%A Antoniadis, Antonios
%A Gouleakis, Themis
%A Kleer, Pieter
%A Kolev, Pavel
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Secretary and Online Matching Problems with Machine Learned Advice :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-93CA-F
%D 2020
%B 34th Conference on Neural Information Processing Systems
%Z date of event: 2020-12-06 - 2020-12-12
%C Virtual Event
%B Advances in Neural Information Processing Systems 33
%E Larochelle, H.; Ranzato, M.; Hadsell, R.; Balcan, M. F.; Lin, H.
%I Curran Associates, Inc.
[19]
S. Arunachalam, S. Chakraborty, M. Koucký, N. Saurabh, and R. de Wolf, “Improved Bounds on Fourier Entropy and Min-entropy,” in 37th International Symposium on Theoretical Aspects of Computer Science (STACS 2020), Montpellier, France, 2020.
Export
BibTeX
@inproceedings{Arunachalam_STACS2020,
  author       = {Arunachalam, Srinivasan and Chakraborty, Sourav and Kouck{\'y}, Michal and Saurabh, Nitin and de Wolf, Ronald},
  title        = {Improved Bounds on {Fourier} Entropy and Min-entropy},
  booktitle    = {37th International Symposium on Theoretical Aspects of Computer Science (STACS 2020)},
  editor       = {Paul, Christophe and Bl{\"a}ser, Markus},
  series       = {Leibniz International Proceedings in Informatics},
  volume       = {154},
  pages        = {1--19},
  eid          = {45},
  publisher    = {Schloss Dagstuhl},
  address      = {Montpellier, France},
  year         = {2020},
  language     = {eng},
  issn         = {1868-8969},
  isbn         = {978-3-95977-140-5},
  url          = {urn:nbn:de:0030-drops-119062},
  doi          = {10.4230/LIPIcs.STACS.2020.45},
  marginalmark = {$\bullet$},
}
Endnote
%0 Conference Proceedings
%A Arunachalam, Srinivasan
%A Chakraborty, Sourav
%A Koucký, Michal
%A Saurabh, Nitin
%A de Wolf, Ronald
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Improved Bounds on Fourier Entropy and Min-entropy :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-97AB-F
%R 10.4230/LIPIcs.STACS.2020.45
%U urn:nbn:de:0030-drops-119062
%D 2020
%B 37th International Symposium on Theoretical Aspects of Computer Science
%Z date of event: 2020-03-10 - 2020-03-13
%C Montpellier, France
%B 37th International Symposium on Theoretical Aspects of Computer Science
%E Paul, Christophe; Bläser, Markus
%P 1 - 19
%Z sequence number: 45
%I Schloss Dagstuhl
%@ 978-3-95977-140-5
%B Leibniz International Proceedings in Informatics
%N 154
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/11906/
[20]
K. Axiotis, A. Backurs, K. Bringmann, C. Jin, V. Nakos, C. Tzamos, and H. Wu, “Fast and Simple Modular Subset Sum,” 2020. [Online]. Available: https://arxiv.org/abs/2008.10577. (arXiv: 2008.10577)
Abstract
We revisit the Subset Sum problem over the finite cyclic group $\mathbb{Z}_m$
for some given integer $m$. A series of recent works has provided
asymptotically optimal algorithms for this problem under the Strong Exponential
Time Hypothesis. Koiliaris and Xu (SODA'17, TALG'19) gave a deterministic
algorithm running in time $\tilde{O}(m^{5/4})$, which was later improved to
$O(m \log^7 m)$ randomized time by Axiotis et al. (SODA'19). In this work, we
present two simple algorithms for the Modular Subset Sum problem running in
near-linear time in $m$, both efficiently implementing Bellman's iteration over
$\mathbb{Z}_m$. The first one is a randomized algorithm running in time
$O(m\log^2 m)$, that is based solely on rolling hash and an elementary
data-structure for prefix sums; to illustrate its simplicity we provide a short
and efficient implementation of the algorithm in Python. Our second solution is
a deterministic algorithm running in time $O(m\ \mathrm{polylog}\ m)$, that
uses dynamic data structures for string manipulation. We further show that the
techniques developed in this work can also lead to simple algorithms for the
All Pairs Non-Decreasing Paths Problem (APNP) on undirected graphs, matching
the asymptotically optimal running time of $\tilde{O}(n^2)$ provided in the
recent work of Duan et al. (ICALP'19).
Export
BibTeX
@online{Axiotis_arXiv2008.10577,
TITLE = {Fast and Simple Modular Subset Sum},
AUTHOR = {Axiotis, Kyriakos and Backurs, Arturs and Bringmann, Karl and Jin, Ce and Nakos, Vasileios and Tzamos, Christos and Wu, Hongxun},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2008.10577},
EPRINT = {2008.10577},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.DS},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We revisit the Subset Sum problem over the finite cyclic group $\mathbb{Z}_m$ for some given integer $m$. A series of recent works has provided asymptotically optimal algorithms for this problem under the Strong Exponential Time Hypothesis. Koiliaris and Xu (SODA'17, TALG'19) gave a deterministic algorithm running in time $\tilde{O}(m^{5/4})$, which was later improved to $O(m \log^7 m)$ randomized time by Axiotis et al. (SODA'19). In this work, we present two simple algorithms for the Modular Subset Sum problem running in near-linear time in $m$, both efficiently implementing Bellman's iteration over $\mathbb{Z}_m$. The first one is a randomized algorithm running in time $O(m\log^2 m)$, that is based solely on rolling hash and an elementary data-structure for prefix sums; to illustrate its simplicity we provide a short and efficient implementation of the algorithm in Python. Our second solution is a deterministic algorithm running in time $O(m\ \mathrm{polylog}\ m)$, that uses dynamic data structures for string manipulation. We further show that the techniques developed in this work can also lead to simple algorithms for the All Pairs Non-Decreasing Paths Problem (APNP) on undirected graphs, matching the asymptotically optimal running time of $\tilde{O}(n^2)$ provided in the recent work of Duan et al. (ICALP'19).},
}
Endnote
%0 Report
%A Axiotis, Kyriakos
%A Backurs, Arturs
%A Bringmann, Karl
%A Jin, Ce
%A Nakos, Vasileios
%A Tzamos, Christos
%A Wu, Hongxun
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Fast and Simple Modular Subset Sum :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2A5B-5
%U https://arxiv.org/abs/2008.10577
%D 2020
%X We revisit the Subset Sum problem over the finite cyclic group $\mathbb{Z}_m$
for some given integer $m$. A series of recent works has provided
asymptotically optimal algorithms for this problem under the Strong Exponential
Time Hypothesis. Koiliaris and Xu (SODA'17, TALG'19) gave a deterministic
algorithm running in time $\tilde{O}(m^{5/4})$, which was later improved to
$O(m \log^7 m)$ randomized time by Axiotis et al. (SODA'19). In this work, we
present two simple algorithms for the Modular Subset Sum problem running in
near-linear time in $m$, both efficiently implementing Bellman's iteration over
$\mathbb{Z}_m$. The first one is a randomized algorithm running in time
$O(m\log^2 m)$, that is based solely on rolling hash and an elementary
data-structure for prefix sums; to illustrate its simplicity we provide a short
and efficient implementation of the algorithm in Python. Our second solution is
a deterministic algorithm running in time $O(m\ \mathrm{polylog}\ m)$, that
uses dynamic data structures for string manipulation. We further show that the
techniques developed in this work can also lead to simple algorithms for the
All Pairs Non-Decreasing Paths Problem (APNP) on undirected graphs, matching
the asymptotically optimal running time of $\tilde{O}(n^2)$ provided in the
recent work of Duan et al. (ICALP'19).
%K Computer Science, Data Structures and Algorithms, cs.DS
[21]
R. Becker, Y. Emek, and C. Lenzen, “Low Diameter Graph Decompositions by Approximate Distance Computation,” in 11th Innovations in Theoretical Computer Science Conference (ITCS 2020), Seattle, WA, USA, 2020.
Export
BibTeX
@inproceedings{Becker_ITCS2020,
TITLE = {Low Diameter Graph Decompositions by Approximate Distance Computation},
AUTHOR = {Becker, Ruben and Emek, Yuval and Lenzen, Christoph},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-134-4},
URL = {urn:nbn:de:0030-drops-117355},
DOI = {10.4230/LIPIcs.ITCS.2020.50},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {11th Innovations in Theoretical Computer Science Conference (ITCS 2020)},
EDITOR = {Vidick, Thomas},
EID = {50},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {151},
ADDRESS = {Seattle, WA, USA},
}
Endnote
%0 Conference Proceedings
%A Becker, Ruben
%A Emek, Yuval
%A Lenzen, Christoph
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Low Diameter Graph Decompositions by Approximate Distance Computation :
%G eng
%U http://hdl.handle.net/21.11116/0000-0005-A7A7-2
%R 10.4230/LIPIcs.ITCS.2020.50
%U urn:nbn:de:0030-drops-117355
%D 2020
%B 11th Innovations in Theoretical Computer Science Conference
%Z date of event: 2020-01-12 - 2020-01-14
%C Seattle, WA, USA
%B 11th Innovations in Theoretical Computer Science Conference
%E Vidick, Thomas
%Z sequence number: 50
%I Schloss Dagstuhl
%@ 978-3-95977-134-4
%B Leibniz International Proceedings in Informatics
%N 151
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/11735/
[22]
D. Bilò, L. Gualà, S. Leucci, and G. Proietti, “Tracking Routes in Communication Networks,” Theoretical Computer Science, vol. 844, 2020.
Export
BibTeX
@article{Bilo_2020,
  author       = {Bil{\`o}, Davide and Gual{\`a}, Luciano and Leucci, Stefano and Proietti, Guido},
  title        = {Tracking Routes in Communication Networks},
  journal      = {Theoretical Computer Science},
  volume       = {844},
  pages        = {1--15},
  publisher    = {Elsevier},
  address      = {Amsterdam},
  year         = {2020},
  date         = {2020},
  language     = {eng},
  issn         = {0304-3975},
  doi          = {10.1016/j.tcs.2020.07.012},
  marginalmark = {$\bullet$},
}
Endnote
%0 Journal Article
%A Bilò, Davide
%A Gualà, Luciano
%A Leucci, Stefano
%A Proietti, Guido
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Tracking Routes in Communication Networks :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-72EF-C
%R 10.1016/j.tcs.2020.07.012
%7 2020
%D 2020
%J Theoretical Computer Science
%V 844
%& 1
%P 1 - 15
%I Elsevier
%C Amsterdam
%@ false
[23]
V. Bonifaci, E. Facca, F. Folz, A. Karrenbauer, P. Kolev, K. Mehlhorn, G. Morigi, G. Shahkarami, and Q. Vermande, “Physarum Multi-Commodity Flow Dynamics,” 2020. [Online]. Available: https://arxiv.org/abs/2009.01498. (arXiv: 2009.01498)
Abstract
In wet-lab experiments \cite{Nakagaki-Yamada-Toth,Tero-Takagi-etal}, the
slime mold Physarum polycephalum has demonstrated its ability to solve shortest
path problems and to design efficient networks, see Figure \ref{Wet-Lab
Experiments} for illustrations. Physarum polycephalum is a slime mold in the
Mycetozoa group. For the shortest path problem, a mathematical model for the
evolution of the slime was proposed in \cite{Tero-Kobayashi-Nakagaki} and its
biological relevance was argued. The model was shown to solve shortest path
problems, first in computer simulations and then by mathematical proof. It was
later shown that the slime mold dynamics can solve more general linear programs
and that many variants of the dynamics have similar convergence behavior. In
this paper, we introduce a dynamics for the network design problem. We
formulate network design as the problem of constructing a network that
efficiently supports a multi-commodity flow problem. We investigate the
dynamics in computer simulations and analytically. The simulations show that
the dynamics is able to construct efficient and elegant networks. In the
theoretical part we show that the dynamics minimizes an objective combining the
cost of the network and the cost of routing the demands through the network. We
also give alternative characterization of the optimum solution.
Export
BibTeX
@online{Bonifaci_arXiv2009.01498,
TITLE = {Physarum Multi-Commodity Flow Dynamics},
AUTHOR = {Bonifaci, Vincenzo and Facca, Enrico and Folz, Frederic and Karrenbauer, Andreas and Kolev, Pavel and Mehlhorn, Kurt and Morigi, Giovanna and Shahkarami, Golnoosh and Vermande, Quentin},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2009.01498},
EPRINT = {2009.01498},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.DS},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {In wet-lab experiments \cite{Nakagaki-Yamada-Toth,Tero-Takagi-etal}, the slime mold Physarum polycephalum has demonstrated its ability to solve shortest path problems and to design efficient networks, see Figure \ref{Wet-Lab Experiments} for illustrations. Physarum polycephalum is a slime mold in the Mycetozoa group. For the shortest path problem, a mathematical model for the evolution of the slime was proposed in \cite{Tero-Kobayashi-Nakagaki} and its biological relevance was argued. The model was shown to solve shortest path problems, first in computer simulations and then by mathematical proof. It was later shown that the slime mold dynamics can solve more general linear programs and that many variants of the dynamics have similar convergence behavior. In this paper, we introduce a dynamics for the network design problem. We formulate network design as the problem of constructing a network that efficiently supports a multi-commodity flow problem. We investigate the dynamics in computer simulations and analytically. The simulations show that the dynamics is able to construct efficient and elegant networks. In the theoretical part we show that the dynamics minimizes an objective combining the cost of the network and the cost of routing the demands through the network. We also give alternative characterization of the optimum solution.},
}
Endnote
%0 Report
%A Bonifaci, Vincenzo
%A Facca, Enrico
%A Folz, Frederic
%A Karrenbauer, Andreas
%A Kolev, Pavel
%A Mehlhorn, Kurt
%A Morigi, Giovanna
%A Shahkarami, Golnoosh
%A Vermande, Quentin
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Physarum Multi-Commodity Flow Dynamics :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2312-D
%U https://arxiv.org/abs/2009.01498
%D 2020
%X In wet-lab experiments \cite{Nakagaki-Yamada-Toth,Tero-Takagi-etal}, the
slime mold Physarum polycephalum has demonstrated its ability to solve shortest
path problems and to design efficient networks, see Figure \ref{Wet-Lab
Experiments} for illustrations. Physarum polycephalum is a slime mold in the
Mycetozoa group. For the shortest path problem, a mathematical model for the
evolution of the slime was proposed in \cite{Tero-Kobayashi-Nakagaki} and its
biological relevance was argued. The model was shown to solve shortest path
problems, first in computer simulations and then by mathematical proof. It was
later shown that the slime mold dynamics can solve more general linear programs
and that many variants of the dynamics have similar convergence behavior. In
this paper, we introduce a dynamics for the network design problem. We
formulate network design as the problem of constructing a network that
efficiently supports a multi-commodity flow problem. We investigate the
dynamics in computer simulations and analytically. The simulations show that
the dynamics is able to construct efficient and elegant networks. In the
theoretical part we show that the dynamics minimizes an objective combining the
cost of the network and the cost of routing the demands through the network. We
also give alternative characterization of the optimum solution.
%K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Neural and Evolutionary Computing, cs.NE
[24]
M. Brankovic, K. Buchin, K. Klaren, A. Nusser, A. Popov, and S. Wong, “(k, l)-Medians Clustering of Trajectories Using Continuous Dynamic Time Warping,” in 28th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL GIS 2020), Seattle, WA, USA (Online), 2020.
Export
BibTeX
@inproceedings{Brankovic_._SIGSPATIAL2020,
TITLE = {{(k, l)}-Medians Clustering of Trajectories Using Continuous Dynamic Time Warping},
AUTHOR = {Brankovic, Milutin and Buchin, Kevin and Klaren, Koen and Nusser, Andr{\'e} and Popov, Aleksandr and Wong, Sampson},
LANGUAGE = {eng},
ISBN = {9781450380195},
DOI = {10.1145/3397536.3422245},
PUBLISHER = {ACM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {28th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM SIGSPATIAL GIS 2020)},
EDITOR = {Lu, Chang-Tien and Wang, Fusheng and Trajcevski, Goce and Huang, Yan and Newsam, Shawn and Xiong, Li},
PAGES = {99--110},
ADDRESS = {Seattle, WA, USA (Online)},
}
Endnote
%0 Conference Proceedings
%A Brankovic, Milutin
%A Buchin, Kevin
%A Klaren, Koen
%A Nusser, André
%A Popov, Aleksandr
%A Wong, Sampson
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T (k, l)-Medians Clustering of Trajectories Using Continuous Dynamic Time Warping :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9068-1
%R 10.1145/3397536.3422245
%D 2020
%B 28th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems
%Z date of event: 2020-11-03 - 2020-11-06
%C Seattle, WA, USA (Online)
%B 28th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems
%E Lu, Chang-Tien; Wang, Fusheng; Trajcevski, Goce; Huang, Yan; Newsam, Shawn; Xiong, Li
%P 99 - 110
%I ACM
%@ 9781450380195
[25]
K. Bringmann, T. Husfeldt, and M. Magnusson, “Multivariate Analysis of Orthogonal Range Searching and Graph Distances Parameterized by Treewidth,” Algorithmica, vol. 82, no. 8, 2020.
Export
BibTeX
@article{Bringmann_2020a,
  author       = {Bringmann, Karl and Husfeldt, Thore and Magnusson, M{\aa}ns},
  title        = {Multivariate Analysis of Orthogonal Range Searching and Graph Distances Parameterized by Treewidth},
  journal      = {Algorithmica},
  volume       = {82},
  number       = {8},
  pages        = {2292--2315},
  publisher    = {Springer},
  address      = {New York},
  year         = {2020},
  date         = {2020},
  language     = {eng},
  issn         = {0178-4617},
  doi          = {10.1007/s00453-020-00680-z},
  marginalmark = {$\bullet$},
}
Endnote
%0 Journal Article
%A Bringmann, Karl
%A Husfeldt, Thore
%A Magnusson, Måns
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Multivariate Analysis of Orthogonal Range Searching and Graph Distances Parameterized by Treewidth :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-F289-E
%R 10.1007/s00453-020-00680-z
%7 2020
%D 2020
%J Algorithmica
%V 82
%N 8
%& 2292
%P 2292 - 2315
%I Springer
%C New York
%@ false
[26]
K. Bringmann, P. Gawrychowski, S. Mozes, and O. Weimann, “Tree Edit Distance Cannot be Computed in Strongly Subcubic Time (unless APSP can),” ACM Transactions on Algorithms, vol. 16, no. 4, 2020.
Export
BibTeX
@article{Bringmann_ToA2020,
TITLE = {Tree Edit Distance Cannot be Computed in Strongly Subcubic Time (unless {APSP} can)},
AUTHOR = {Bringmann, Karl and Gawrychowski, Pawe{\l} and Mozes, Shay and Weimann, Oren},
LANGUAGE = {eng},
ISSN = {1549-6325},
DOI = {10.1145/3381878},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Algorithms},
VOLUME = {16},
NUMBER = {4},
EID = {48},
}
Endnote
%0 Journal Article
%A Bringmann, Karl
%A Gawrychowski, Paweł
%A Mozes, Shay
%A Weimann, Oren
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T Tree Edit Distance Cannot be Computed in Strongly Subcubic Time (unless APSP can)
:
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2502-D
%R 10.1145/3381878
%7 2020
%D 2020
%J ACM Transactions on Algorithms
%V 16
%N 4
%Z sequence number: 48
%I ACM
%C New York, NY
%@ false
[27]
K. Bringmann, M. Künnemann, and A. Nusser, “When Lipschitz Walks Your Dog: Algorithm Engineering of the Discrete Fréchet Distance Under Translation,” in 28th Annual European Symposium on Algorithms (ESA 2020), Pisa, Italy (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Bringmann_ESA2020,
TITLE = {When {L}ipschitz Walks Your Dog: {A}lgorithm Engineering of the Discrete {F}r\'{e}chet Distance Under Translation},
AUTHOR = {Bringmann, Karl and K{\"u}nnemann, Marvin and Nusser, Andr{\'e}},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-162-7},
URL = {urn:nbn:de:0030-drops-128912},
DOI = {10.4230/LIPIcs.ESA.2020.25},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {28th Annual European Symposium on Algorithms (ESA 2020)},
EDITOR = {Grandoni, Fabrizio and Herman, Grzegorz and Sanders, Peter},
EID = {25},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {173},
ADDRESS = {Pisa, Italy (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Bringmann, Karl
%A Künnemann, Marvin
%A Nusser, André
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T When Lipschitz Walks Your Dog: Algorithm Engineering of the Discrete Fréchet Distance Under Translation :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2791-9
%R 10.4230/LIPIcs.ESA.2020.25
%U urn:nbn:de:0030-drops-128912
%D 2020
%B 28th Annual European Symposium on Algorithms
%Z date of event: 2020-09-07 - 2020-09-09
%C Pisa, Italy (Virtual Conference)
%B 28th Annual European Symposium on Algorithms
%E Grandoni, Fabrizio; Herman, Grzegorz; Sanders, Peter
%Z sequence number: 25
%I Schloss Dagstuhl
%@ 978-3-95977-162-7
%B Leibniz International Proceedings in Informatics
%N 173
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12891/https://creativecommons.org/licenses/by/3.0/legalcodehttps://gitlab.com/anusser/frechet_distance_under_translation
[28]
K. Bringmann, N. Fischer, D. Hermelin, D. Shabtay, and P. Wellnitz, “Faster Minimization of Tardy Processing Time on a Single Machine,” 2020. [Online]. Available: https://arxiv.org/abs/2003.07104. (arXiv: 2003.07104)
Abstract
This paper is concerned with the $1||\sum p_jU_j$ problem, the problem of
minimizing the total processing time of tardy jobs on a single machine. This is
not only a fundamental scheduling problem, but also a very important problem
from a theoretical point of view as it generalizes the Subset Sum problem and
is closely related to the 0/1-Knapsack problem. The problem is well-known to be
NP-hard, but only in a weak sense, meaning it admits pseudo-polynomial time
algorithms. The fastest known pseudo-polynomial time algorithm for the problem
is the famous Lawler and Moore algorithm which runs in $O(P \cdot n)$ time,
where $P$ is the total processing time of all $n$ jobs in the input. This
algorithm has been developed in the late 60s, and has yet to be improved to
date.
In this paper we develop two new algorithms for $1||\sum p_jU_j$, each
improving on Lawler and Moore's algorithm in a different scenario. Both
algorithms rely on basic primitive operations between sets of integers and
vectors of integers for the speedup in their running times. The second
algorithm relies on fast polynomial multiplication as its main engine, while
for the first algorithm we define a new "skewed" version of
$(\max,\min)$-convolution which is interesting in its own right.
Export
BibTeX
@online{Bringmann_arXiv2003.07104,
TITLE = {Faster Minimization of Tardy Processing Time on a Single Machine},
AUTHOR = {Bringmann, Karl and Fischer, Nick and Hermelin, Danny and Shabtay, Dvir and Wellnitz, Philip},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2003.07104},
EPRINT = {2003.07104},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {This paper is concerned with the $1||\sum p_jU_j$ problem, the problem of minimizing the total processing time of tardy jobs on a single machine. This is not only a fundamental scheduling problem, but also a very important problem from a theoretical point of view as it generalizes the Subset Sum problem and is closely related to the 0/1-Knapsack problem. The problem is well-known to be NP-hard, but only in a weak sense, meaning it admits pseudo-polynomial time algorithms. The fastest known pseudo-polynomial time algorithm for the problem is the famous Lawler and Moore algorithm which runs in $O(P \cdot n)$ time, where $P$ is the total processing time of all $n$ jobs in the input. This algorithm has been developed in the late 60s, and has yet to be improved to date. In this paper we develop two new algorithms for $1||\sum p_jU_j$, each improving on Lawler and Moore's algorithm in a different scenario. Both algorithms rely on basic primitive operations between sets of integers and vectors of integers for the speedup in their running times. The second algorithm relies on fast polynomial multiplication as its main engine, while for the first algorithm we define a new "skewed" version of $(\max,\min)$-convolution which is interesting in its own right.},
}
Endnote
%0 Report
%A Bringmann, Karl
%A Fischer, Nick
%A Hermelin, Danny
%A Shabtay, Dvir
%A Wellnitz, Philip
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Faster Minimization of Tardy Processing Time on a Single Machine :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2A4E-4
%U https://arxiv.org/abs/2003.07104
%D 2020
%X This paper is concerned with the $1||\sum p_jU_j$ problem, the problem of
minimizing the total processing time of tardy jobs on a single machine. This is
not only a fundamental scheduling problem, but also a very important problem
from a theoretical point of view as it generalizes the Subset Sum problem and
is closely related to the 0/1-Knapsack problem. The problem is well-known to be
NP-hard, but only in a weak sense, meaning it admits pseudo-polynomial time
algorithms. The fastest known pseudo-polynomial time algorithm for the problem
is the famous Lawler and Moore algorithm which runs in $O(P \cdot n)$ time,
where $P$ is the total processing time of all $n$ jobs in the input. This
algorithm has been developed in the late 60s, and has yet to be improved to
date.
In this paper we develop two new algorithms for $1||\sum p_jU_j$, each
improving on Lawler and Moore's algorithm in a different scenario. Both
algorithms rely on basic primitive operations between sets of integers and
vectors of integers for the speedup in their running times. The second
algorithm relies on fast polynomial multiplication as its main engine, while
for the first algorithm we define a new "skewed" version of
$(\max,\min)$-convolution which is interesting in its own right.
%K Computer Science, Data Structures and Algorithms, cs.DS
[29]
K. Bringmann, N. Fischer, D. Hermelin, D. Shabtay, and P. Wellnitz, “Faster Minimization of Tardy Processing Time on a Single Machine,” in 47th International Colloquium on Automata, Languages, and Programming (ICALP 2020), Saarbrücken, Germany (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Bringmann_ICALP2020,
TITLE = {Faster Minimization of Tardy Processing Time on a Single Machine},
AUTHOR = {Bringmann, Karl and Fischer, Nick and Hermelin, Danny and Shabtay, Dvir and Wellnitz, Philip},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-138-2},
URL = {urn:nbn:de:0030-drops-124269},
DOI = {10.4230/LIPIcs.ICALP.2020.19},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {47th International Colloquium on Automata, Languages, and Programming (ICALP 2020)},
EDITOR = {Czumaj, Artur and Dawar, Anuj and Merelli, Emanuela},
EID = {19},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {168},
ADDRESS = {Saarbr{\"u}cken, Germany (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Bringmann, Karl
%A Fischer, Nick
%A Hermelin, Danny
%A Shabtay, Dvir
%A Wellnitz, Philip
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Faster Minimization of Tardy Processing Time on a Single Machine :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-287E-0
%R 10.4230/LIPIcs.ICALP.2020.19
%U urn:nbn:de:0030-drops-124269
%D 2020
%B 47th International Colloquium on Automata, Languages, and Programming
%Z date of event: 2020-07-08 - 2020-07-11
%C Saarbrücken, Germany (Virtual Conference)
%B 47th International Colloquium on Automata, Languages, and Programming
%E Czumaj, Artur; Dawar, Anuj; Merelli, Emanuela
%Z sequence number: 19
%I Schloss Dagstuhl
%@ 978-3-95977-138-2
%B Leibniz International Proceedings in Informatics
%N 168
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12426/https://creativecommons.org/licenses/by/3.0/legalcode
[30]
K. Bringmann and V. Nakos, “Top-k-convolution and the Quest for Near-linear Output-sensitive Subset Sum,” in STOC ’20, Chicago, IL, USA, 2020.
Export
BibTeX
@inproceedings{Bringmann_STOC2020,
TITLE = {Top-$k$-convolution and the Quest for Near-linear Output-sensitive Subset Sum},
AUTHOR = {Bringmann, Karl and Nakos, Vasileios},
LANGUAGE = {eng},
ISBN = {978-1-4503-6979-4},
DOI = {10.1145/3357713.3384308},
PUBLISHER = {ACM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {STOC '20},
EDITOR = {Makarychev, Konstantin and Makarychev, Yury and Tulsiani, Madhur and Kamath, Gautam and Chuzhoy, Julia},
PAGES = {982--995},
ADDRESS = {Chicago, IL, USA},
}
Endnote
%0 Conference Proceedings
%A Bringmann, Karl
%A Nakos, Vasileios
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Top-k-convolution and the Quest for Near-linear Output-sensitive Subset Sum :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-299D-B
%R 10.1145/3357713.3384308
%D 2020
%B 52nd Annual ACM SIGACT Symposium on Theory of Computing
%Z date of event: 2020-06-22 - 2020-06-26
%C Chicago, IL, USA
%B STOC '20
%E Makarychev, Konstantin; Makarychev, Yury; Tulsiani, Madhur; Kamath, Gautam; Chuzhoy, Julia
%P 982 - 995
%I ACM
%@ 978-1-4503-6979-4
[31]
K. Bringmann, M. Künnemann, and A. Nusser, “When Lipschitz Walks Your Dog: Algorithm Engineering of the Discrete Fréchet Distance under Translation,” 2020. [Online]. Available: https://arxiv.org/abs/2008.07510. (arXiv: 2008.07510)
Abstract
Consider the natural question of how to measure the similarity of curves in
the plane by a quantity that is invariant under translations of the curves.
Such a measure is justified whenever we aim to quantify the similarity of the
curves' shapes rather than their positioning in the plane, e.g., to compare the
similarity of handwritten characters. Perhaps the most natural such notion is
the (discrete) Fr\'echet distance under translation. Unfortunately, the
algorithmic literature on this problem yields a very pessimistic view: On
polygonal curves with $n$ vertices, the fastest algorithm runs in time
$O(n^{4.667})$ and cannot be improved below $n^{4-o(1)}$ unless the Strong
Exponential Time Hypothesis fails. Can we still obtain an implementation that
is efficient on realistic datasets?
Spurred by the surprising performance of recent implementations for the
Fr\'echet distance, we perform algorithm engineering for the Fr\'echet distance
under translation. Our solution combines fast, but inexact tools from
continuous optimization (specifically, branch-and-bound algorithms for global
Lipschitz optimization) with exact, but expensive algorithms from computational
geometry (specifically, problem-specific algorithms based on an arrangement
construction). We combine these two ingredients to obtain an exact decision
algorithm for the Fr\'echet distance under translation. For the related task of
computing the distance value up to a desired precision, we engineer and compare
different methods. On a benchmark set involving handwritten characters and
route trajectories, our implementation answers a typical query for either task
in the range of a few milliseconds up to a second on standard desktop hardware.
We believe that our implementation will enable the use of the Fr\'echet
distance under translation in applications, whereas previous approaches would
have been computationally infeasible.
Export
BibTeX
@online{Bringmann_arXiv2008.07510,
TITLE = {When {L}ipschitz Walks Your Dog: {A}lgorithm Engineering of the Discrete {F}r\'{e}chet Distance Under Translation},
AUTHOR = {Bringmann, Karl and K{\"u}nnemann, Marvin and Nusser, Andr{\'e}},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2008.07510},
EPRINT = {2008.07510},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Consider the natural question of how to measure the similarity of curves in the plane by a quantity that is invariant under translations of the curves. Such a measure is justified whenever we aim to quantify the similarity of the curves' shapes rather than their positioning in the plane, e.g., to compare the similarity of handwritten characters. Perhaps the most natural such notion is the (discrete) Fr\'echet distance under translation. Unfortunately, the algorithmic literature on this problem yields a very pessimistic view: On polygonal curves with $n$ vertices, the fastest algorithm runs in time $O(n^{4.667})$ and cannot be improved below $n^{4-o(1)}$ unless the Strong Exponential Time Hypothesis fails. Can we still obtain an implementation that is efficient on realistic datasets? Spurred by the surprising performance of recent implementations for the Fr\'echet distance, we perform algorithm engineering for the Fr\'echet distance under translation. Our solution combines fast, but inexact tools from continuous optimization (specifically, branch-and-bound algorithms for global Lipschitz optimization) with exact, but expensive algorithms from computational geometry (specifically, problem-specific algorithms based on an arrangement construction). We combine these two ingredients to obtain an exact decision algorithm for the Fr\'echet distance under translation. For the related task of computing the distance value up to a desired precision, we engineer and compare different methods. On a benchmark set involving handwritten characters and route trajectories, our implementation answers a typical query for either task in the range of a few milliseconds up to a second on standard desktop hardware. We believe that our implementation will enable the use of the Fr\'echet distance under translation in applications, whereas previous approaches would have been computationally infeasible.},
}
Endnote
%0 Report
%A Bringmann, Karl
%A Künnemann, Marvin
%A Nusser, André
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T When Lipschitz Walks Your Dog: Algorithm Engineering of the Discrete Fréchet Distance under Translation :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-2A56-A
%U https://arxiv.org/abs/2008.07510
%D 2020
%X Consider the natural question of how to measure the similarity of curves in
the plane by a quantity that is invariant under translations of the curves.
Such a measure is justified whenever we aim to quantify the similarity of the
curves' shapes rather than their positioning in the plane, e.g., to compare the
similarity of handwritten characters. Perhaps the most natural such notion is
the (discrete) Fr\'echet distance under translation. Unfortunately, the
algorithmic literature on this problem yields a very pessimistic view: On
polygonal curves with $n$ vertices, the fastest algorithm runs in time
$O(n^{4.667})$ and cannot be improved below $n^{4-o(1)}$ unless the Strong
Exponential Time Hypothesis fails. Can we still obtain an implementation that
is efficient on realistic datasets?
Spurred by the surprising performance of recent implementations for the
Fr\'echet distance, we perform algorithm engineering for the Fr\'echet distance
under translation. Our solution combines fast, but inexact tools from
continuous optimization (specifically, branch-and-bound algorithms for global
Lipschitz optimization) with exact, but expensive algorithms from computational
geometry (specifically, problem-specific algorithms based on an arrangement
construction). We combine these two ingredients to obtain an exact decision
algorithm for the Fr\'echet distance under translation. For the related task of
computing the distance value up to a desired precision, we engineer and compare
different methods. On a benchmark set involving handwritten characters and
route trajectories, our implementation answers a typical query for either task
in the range of a few milliseconds up to a second on standard desktop hardware.
We believe that our implementation will enable the use of the Fr\'echet
distance under translation in applications, whereas previous approaches would
have been computationally infeasible.
%K Computer Science, Computational Geometry, cs.CG,Computer Science, Data Structures and Algorithms, cs.DS
[32]
K. Bringmann and P. Wellnitz, “On Near-Linear-Time Algorithms for Dense Subset Sum,” 2020. [Online]. Available: https://arxiv.org/abs/2010.09096. (arXiv: 2010.09096)
Abstract
In the Subset Sum problem we are given a set of $n$ positive integers $X$ and
a target $t$ and are asked whether some subset of $X$ sums to $t$. Natural
parameters for this problem that have been studied in the literature are $n$
and $t$ as well as the maximum input number $\rm{mx}_X$ and the sum of all
input numbers $\Sigma_X$. In this paper we study the dense case of Subset Sum,
where all these parameters are polynomial in $n$. In this regime, standard
pseudo-polynomial algorithms solve Subset Sum in polynomial time $n^{O(1)}$.
Our main question is: When can dense Subset Sum be solved in near-linear time
$\tilde{O}(n)$? We provide an essentially complete dichotomy by designing
improved algorithms and proving conditional lower bounds, thereby determining
essentially all settings of the parameters $n,t,\rm{mx}_X,\Sigma_X$ for which
dense Subset Sum is in time $\tilde{O}(n)$. For notational convenience we
assume without loss of generality that $t \ge \rm{mx}_X$ (as larger numbers can
be ignored) and $t \le \Sigma_X/2$ (using symmetry). Then our dichotomy reads
as follows:
- By reviving and improving an additive-combinatorics-based approach by Galil
and Margalit [SICOMP'91], we show that Subset Sum is in near-linear time
$\tilde{O}(n)$ if $t \gg \rm{mx}_X \Sigma_X/n^2$.
- We prove a matching conditional lower bound: If Subset Sum is in
near-linear time for any setting with $t \ll \rm{mx}_X \Sigma_X/n^2$, then the
Strong Exponential Time Hypothesis and the Strong k-Sum Hypothesis fail.
We also generalize our algorithm from sets to multi-sets, albeit with
non-matching upper and lower bounds.
Export
BibTeX
@online{Bringmann_arXiv2010.09096,
TITLE = {On Near-Linear-Time Algorithms for Dense Subset Sum},
AUTHOR = {Bringmann, Karl and Wellnitz, Philip},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2010.09096},
EPRINT = {2010.09096},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {In the Subset Sum problem we are given a set of $n$ positive integers $X$ and a target $t$ and are asked whether some subset of $X$ sums to $t$. Natural parameters for this problem that have been studied in the literature are $n$ and $t$ as well as the maximum input number $\rm{mx}_X$ and the sum of all input numbers $\Sigma_X$. In this paper we study the dense case of Subset Sum, where all these parameters are polynomial in $n$. In this regime, standard pseudo-polynomial algorithms solve Subset Sum in polynomial time $n^{O(1)}$. Our main question is: When can dense Subset Sum be solved in near-linear time $\tilde{O}(n)$? We provide an essentially complete dichotomy by designing improved algorithms and proving conditional lower bounds, thereby determining essentially all settings of the parameters $n,t,\rm{mx}_X,\Sigma_X$ for which dense Subset Sum is in time $\tilde{O}(n)$. For notational convenience we assume without loss of generality that $t \ge \rm{mx}_X$ (as larger numbers can be ignored) and $t \le \Sigma_X/2$ (using symmetry). Then our dichotomy reads as follows: -- By reviving and improving an additive-combinatorics-based approach by Galil and Margalit [SICOMP'91], we show that Subset Sum is in near-linear time $\tilde{O}(n)$ if $t \gg \rm{mx}_X \Sigma_X/n^2$. -- We prove a matching conditional lower bound: If Subset Sum is in near-linear time for any setting with $t \ll \rm{mx}_X \Sigma_X/n^2$, then the Strong Exponential Time Hypothesis and the Strong k-Sum Hypothesis fail. We also generalize our algorithm from sets to multi-sets, albeit with non-matching upper and lower bounds.},
}
Endnote
%0 Report
%A Bringmann, Karl
%A Wellnitz, Philip
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T On Near-Linear-Time Algorithms for Dense Subset Sum :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8C97-1
%U https://arxiv.org/abs/2010.09096
%D 2020
%X In the Subset Sum problem we are given a set of $n$ positive integers $X$ and
a target $t$ and are asked whether some subset of $X$ sums to $t$. Natural
parameters for this problem that have been studied in the literature are $n$
and $t$ as well as the maximum input number $\rm{mx}_X$ and the sum of all
input numbers $\Sigma_X$. In this paper we study the dense case of Subset Sum,
where all these parameters are polynomial in $n$. In this regime, standard
pseudo-polynomial algorithms solve Subset Sum in polynomial time $n^{O(1)}$.
Our main question is: When can dense Subset Sum be solved in near-linear time
$\tilde{O}(n)$? We provide an essentially complete dichotomy by designing
improved algorithms and proving conditional lower bounds, thereby determining
essentially all settings of the parameters $n,t,\rm{mx}_X,\Sigma_X$ for which
dense Subset Sum is in time $\tilde{O}(n)$. For notational convenience we
assume without loss of generality that $t \ge \rm{mx}_X$ (as larger numbers can
be ignored) and $t \le \Sigma_X/2$ (using symmetry). Then our dichotomy reads
as follows:
- By reviving and improving an additive-combinatorics-based approach by Galil
and Margalit [SICOMP'91], we show that Subset Sum is in near-linear time
$\tilde{O}(n)$ if $t \gg \rm{mx}_X \Sigma_X/n^2$.
- We prove a matching conditional lower bound: If Subset Sum is in
near-linear time for any setting with $t \ll \rm{mx}_X \Sigma_X/n^2$, then the
Strong Exponential Time Hypothesis and the Strong k-Sum Hypothesis fail.
We also generalize our algorithm from sets to multi-sets, albeit with
non-matching upper and lower bounds.
%K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Discrete Mathematics, cs.DM
[33]
J. Bund, C. Lenzen, and M. Medina, “Optimal Metastability-Containing Sorting via Parallel Prefix Computation,” IEEE Transactions on Computers, vol. 69, no. 2, 2020.
Export
BibTeX
@article{Bund_IEEETOC2020,
TITLE = {Optimal Metastability-Containing Sorting via Parallel Prefix Computation},
AUTHOR = {Bund, Johannes and Lenzen, Christoph and Medina, Moti},
LANGUAGE = {eng},
ISSN = {0018-9340},
DOI = {10.1109/TC.2019.2939818},
PUBLISHER = {IEEE},
ADDRESS = {Piscataway, NJ},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {IEEE Transactions on Computers},
VOLUME = {69},
NUMBER = {2},
PAGES = {198--211},
}
Endnote
%0 Journal Article
%A Bund, Johannes
%A Lenzen, Christoph
%A Medina, Moti
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Optimal Metastability-Containing Sorting via Parallel Prefix Computation :
%G eng
%U http://hdl.handle.net/21.11116/0000-0005-9E7F-C
%R 10.1109/TC.2019.2939818
%7 2020
%D 2020
%J IEEE Transactions on Computers
%V 69
%N 2
%& 198
%P 198 - 211
%I IEEE
%C Piscataway, NJ
%@ false
[34]
J. Bund, M. Függer, C. Lenzen, and M. Medina, “Synchronizer-Free Digital Link Controller,” IEEE Transactions on Circuits and Systems / I, Regular Papers, vol. 67, no. 10, 2020.
Export
BibTeX
@article{Bund2020,
TITLE = {Synchronizer-Free Digital Link Controller},
AUTHOR = {Bund, Johannes and F{\"u}gger, Matthias and Lenzen, Christoph and Medina, Moti},
LANGUAGE = {eng},
ISSN = {1057-7122},
DOI = {10.1109/TCSI.2020.2989552},
PUBLISHER = {Institute of Electrical and Electronics Engineers},
ADDRESS = {Piscataway, NJ},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {IEEE Transactions on Circuits and Systems / I, Regular Papers},
VOLUME = {67},
NUMBER = {10},
PAGES = {3562--3573},
}
Endnote
%0 Journal Article
%A Bund, Johannes
%A Függer, Matthias
%A Lenzen, Christoph
%A Medina, Moti
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Synchronizer-Free Digital Link Controller
:
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-35FE-0
%R 10.1109/TCSI.2020.2989552
%7 2020
%D 2020
%J IEEE Transactions on Circuits and Systems / I, Regular Papers
%V 67
%N 10
%& 3562
%P 3562 - 3573
%I Institute of Electrical and Electronics Engineers
%C Piscataway, NJ
%@ false
[35]
J. Bund, M. Függer, C. Lenzen, M. Medina, and W. Rosenbaum, “PALS: Plesiochronous and Locally Synchronous Systems,” in 26th IEEE International Symposium on Asynchronous Circuits and Systems, Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{Bund_ASYNC2020,
TITLE = {{PALS}: {P}lesiochronous and Locally Synchronous Systems},
AUTHOR = {Bund, Johannes and F{\"u}gger, Matthias and Lenzen, Christoph and Medina, Moti and Rosenbaum, Will},
LANGUAGE = {eng},
ISBN = {978-1-7281-5495-4},
DOI = {10.1109/ASYNC49171.2020.00013},
PUBLISHER = {IEEE},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {26th IEEE International Symposium on Asynchronous Circuits and Systems},
PAGES = {36--43},
ADDRESS = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Bund, Johannes
%A Függer, Matthias
%A Lenzen, Christoph
%A Medina, Moti
%A Rosenbaum, Will
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T PALS: Plesiochronous and Locally Synchronous Systems :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-46B8-B
%R 10.1109/ASYNC49171.2020.00013
%D 2020
%B 26th IEEE International Symposium on Asynchronous Circuits and Systems
%Z date of event: 2020-05-17 - 2020-05-20
%C Salt Lake City, UT, USA
%B 26th IEEE International Symposium on Asynchronous Circuits and Systems
%P 36 - 43
%I IEEE
%@ 978-1-7281-5495-4
[36]
J. Bund, M. Függer, C. Lenzen, M. Medina, and W. Rosenbaum, “PALS: Plesiochronous and Locally Synchronous Systems,” 2020. [Online]. Available: https://arxiv.org/abs/2003.05542. (arXiv: 2003.05542)
Abstract
Consider an arbitrary network of communicating modules on a chip, each
requiring a local signal telling it when to execute a computational step. There
are three common solutions to generating such a local clock signal: (i) by
deriving it from a single, central clock source, (ii) by local, free-running
oscillators, or (iii) by handshaking between neighboring modules. Conceptually,
each of these solutions is the result of a perceived dichotomy in which
(sub)systems are either clocked or fully asynchronous, suggesting that the
designer's choice is limited to deciding where to draw the line between
synchronous and asynchronous design. In contrast, we take the view that the
better question to ask is how synchronous the system can and should be. Based
on a distributed clock synchronization algorithm, we present a novel design
providing modules with local clocks whose frequency bounds are almost as good
as those of corresponding free-running oscillators, yet neighboring modules are
guaranteed to have a phase offset substantially smaller than one clock cycle.
Concretely, parameters obtained from a 15nm ASIC implementation running at 2GHz
yield mathematical worst-case bounds of 30ps on phase offset for a 32x32 node
grid network.
Export
BibTeX
@online{Bund_arXiv2003.05542,
TITLE = {{PALS}: Plesiochronous and Locally Synchronous Systems},
AUTHOR = {Bund, Johannes and F{\"u}gger, Matthias and Lenzen, Christoph and Medina, Moti and Rosenbaum, Will},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2003.05542},
EPRINT = {2003.05542},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Consider an arbitrary network of communicating modules on a chip, each requiring a local signal telling it when to execute a computational step. There are three common solutions to generating such a local clock signal: (i) by deriving it from a single, central clock source, (ii) by local, free-running oscillators, or (iii) by handshaking between neighboring modules. Conceptually, each of these solutions is the result of a perceived dichotomy in which (sub)systems are either clocked or fully asynchronous, suggesting that the designer's choice is limited to deciding where to draw the line between synchronous and asynchronous design. In contrast, we take the view that the better question to ask is how synchronous the system can and should be. Based on a distributed clock synchronization algorithm, we present a novel design providing modules with local clocks whose frequency bounds are almost as good as those of corresponding free-running oscillators, yet neighboring modules are guaranteed to have a phase offset substantially smaller than one clock cycle. Concretely, parameters obtained from a 15nm ASIC implementation running at 2GHz yield mathematical worst-case bounds of 30ps on phase offset for a 32x32 node grid network.},
}
Endnote
%0 Report
%A Bund, Johannes
%A Függer, Matthias
%A Lenzen, Christoph
%A Medina, Moti
%A Rosenbaum, Will
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T PALS: Plesiochronous and Locally Synchronous Systems :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-475C-3
%U https://arxiv.org/abs/2003.05542
%D 2020
%X Consider an arbitrary network of communicating modules on a chip, each
requiring a local signal telling it when to execute a computational step. There
are three common solutions to generating such a local clock signal: (i) by
deriving it from a single, central clock source, (ii) by local, free-running
oscillators, or (iii) by handshaking between neighboring modules. Conceptually,
each of these solutions is the result of a perceived dichotomy in which
(sub)systems are either clocked or fully asynchronous, suggesting that the
designer's choice is limited to deciding where to draw the line between
synchronous and asynchronous design. In contrast, we take the view that the
better question to ask is how synchronous the system can and should be. Based
on a distributed clock synchronization algorithm, we present a novel design
providing modules with local clocks whose frequency bounds are almost as good
as those of corresponding free-running oscillators, yet neighboring modules are
guaranteed to have a phase offset substantially smaller than one clock cycle.
Concretely, parameters obtained from a 15nm ASIC implementation running at 2GHz
yield mathematical worst-case bounds of 30ps on phase offset for a 32x32 node
grid network.
%K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[37]
P. Chalermsook, M. Cygan, G. Kortsarz, B. Laekhanukit, P. Manurangsi, D. Nanongkai, and L. Trevisan, “From Gap-Exponential Time Hypothesis to Fixed Parameter Tractable Inapproximability: Clique, Dominating Set, and More,” SIAM Journal on Computing, vol. 49, no. 4, 2020.
Export
BibTeX
@article{Chalermsook2020,
TITLE = {From Gap-Exponential Time Hypothesis to Fixed Parameter Tractable Inapproximability: {C}lique, Dominating Set, and More},
AUTHOR = {Chalermsook, Parinya and Cygan, Marek and Kortsarz, Guy and Laekhanukit, Bundit and Manurangsi, Pasin and Nanongkai, Danupon and Trevisan, Luca},
LANGUAGE = {eng},
ISSN = {0097-5397},
DOI = {10.1137/18M1166869},
PUBLISHER = {SIAM},
ADDRESS = {Philadelphia, PA},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {SIAM Journal on Computing},
VOLUME = {49},
NUMBER = {4},
PAGES = {772--810},
}
Endnote
%0 Journal Article
%A Chalermsook, Parinya
%A Cygan, Marek
%A Kortsarz, Guy
%A Laekhanukit, Bundit
%A Manurangsi, Pasin
%A Nanongkai, Danupon
%A Trevisan, Luca
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T From Gap-Exponential Time Hypothesis to Fixed Parameter Tractable Inapproximability: Clique, Dominating Set, and More
:
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-1D05-4
%R 10.1137/18M1166869
%7 2020
%D 2020
%J SIAM Journal on Computing
%V 49
%N 4
%& 772
%P 772 - 810
%I SIAM
%C Philadelphia, PA
%@ false
[38]
P. Charalampopoulos, T. Kociumaka, and P. Wellnitz, “Faster Approximate Pattern Matching: A Unified Approach,” 2020. [Online]. Available: https://arxiv.org/abs/2004.08350. (arXiv: 2004.08350)
Abstract
Approximate pattern matching is a natural and well-studied problem on
strings: Given a text $T$, a pattern $P$, and a threshold $k$, find (the
starting positions of) all substrings of $T$ that are at distance at most $k$
from $P$. We consider the two most fundamental string metrics: the Hamming
distance and the edit distance. Under the Hamming distance, we search for
substrings of $T$ that have at most $k$ mismatches with $P$, while under the
edit distance, we search for substrings of $T$ that can be transformed to $P$
with at most $k$ edits.
Exact occurrences of $P$ in $T$ have a very simple structure: If we assume
for simplicity that $|T| \le 3|P|/2$ and trim $T$ so that $P$ occurs both as a
prefix and as a suffix of $T$, then both $P$ and $T$ are periodic with a common
period. However, an analogous characterization for the structure of occurrences
with up to $k$ mismatches was proved only recently by Bringmann et al.
[SODA'19]: Either there are $O(k^2)$ $k$-mismatch occurrences of $P$ in $T$, or
both $P$ and $T$ are at Hamming distance $O(k)$ from strings with a common
period $O(m/k)$. We tighten this characterization by showing that there are
$O(k)$ $k$-mismatch occurrences in the case when the pattern is not
(approximately) periodic, and we lift it to the edit distance setting, where we
tightly bound the number of $k$-edit occurrences by $O(k^2)$ in the
non-periodic case. Our proofs are constructive and let us obtain a unified
framework for approximate pattern matching for both considered distances. We
showcase the generality of our framework with results for the fully-compressed
setting (where $T$ and $P$ are given as a straight-line program) and for the
dynamic setting (where we extend a data structure of Gawrychowski et al.
[SODA'18]).
Export
BibTeX
@online{Charalampopoulos_arXiv2004.08350,
TITLE = {Faster Approximate Pattern Matching: {A} Unified Approach},
AUTHOR = {Charalampopoulos, Panagiotis and Kociumaka, Tomasz and Wellnitz, Philip},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2004.08350},
EPRINT = {2004.08350},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Approximate pattern matching is a natural and well-studied problem on strings: Given a text $T$, a pattern $P$, and a threshold $k$, find (the starting positions of) all substrings of $T$ that are at distance at most $k$ from $P$. We consider the two most fundamental string metrics: the Hamming distance and the edit distance. Under the Hamming distance, we search for substrings of $T$ that have at most $k$ mismatches with $P$, while under the edit distance, we search for substrings of $T$ that can be transformed to $P$ with at most $k$ edits. Exact occurrences of $P$ in $T$ have a very simple structure: If we assume for simplicity that $|T| \le 3|P|/2$ and trim $T$ so that $P$ occurs both as a prefix and as a suffix of $T$, then both $P$ and $T$ are periodic with a common period. However, an analogous characterization for the structure of occurrences with up to $k$ mismatches was proved only recently by Bringmann et al. [SODA'19]: Either there are $O(k^2)$ $k$-mismatch occurrences of $P$ in $T$, or both $P$ and $T$ are at Hamming distance $O(k)$ from strings with a common period $O(m/k)$. We tighten this characterization by showing that there are $O(k)$ $k$-mismatch occurrences in the case when the pattern is not (approximately) periodic, and we lift it to the edit distance setting, where we tightly bound the number of $k$-edit occurrences by $O(k^2)$ in the non-periodic case. Our proofs are constructive and let us obtain a unified framework for approximate pattern matching for both considered distances. We showcase the generality of our framework with results for the fully-compressed setting (where $T$ and $P$ are given as a straight-line program) and for the dynamic setting (where we extend a data structure of Gawrychowski et al. [SODA'18]).},
}
Endnote
%0 Report
%A Charalampopoulos, Panagiotis
%A Kociumaka, Tomasz
%A Wellnitz, Philip
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Faster Approximate Pattern Matching: A Unified Approach :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8C63-C
%U https://arxiv.org/abs/2004.08350
%D 2020
%X Approximate pattern matching is a natural and well-studied problem on
strings: Given a text $T$, a pattern $P$, and a threshold $k$, find (the
starting positions of) all substrings of $T$ that are at distance at most $k$
from $P$. We consider the two most fundamental string metrics: the Hamming
distance and the edit distance. Under the Hamming distance, we search for
substrings of $T$ that have at most $k$ mismatches with $P$, while under the
edit distance, we search for substrings of $T$ that can be transformed to $P$
with at most $k$ edits.
Exact occurrences of $P$ in $T$ have a very simple structure: If we assume
for simplicity that $|T| \le 3|P|/2$ and trim $T$ so that $P$ occurs both as a
prefix and as a suffix of $T$, then both $P$ and $T$ are periodic with a common
period. However, an analogous characterization for the structure of occurrences
with up to $k$ mismatches was proved only recently by Bringmann et al.
[SODA'19]: Either there are $O(k^2)$ $k$-mismatch occurrences of $P$ in $T$, or
both $P$ and $T$ are at Hamming distance $O(k)$ from strings with a common
period $O(m/k)$. We tighten this characterization by showing that there are
$O(k)$ $k$-mismatch occurrences in the case when the pattern is not
(approximately) periodic, and we lift it to the edit distance setting, where we
tightly bound the number of $k$-edit occurrences by $O(k^2)$ in the
non-periodic case. Our proofs are constructive and let us obtain a unified
framework for approximate pattern matching for both considered distances. We
showcase the generality of our framework with results for the fully-compressed
setting (where $T$ and $P$ are given as a straight-line program) and for the
dynamic setting (where we extend a data structure of Gawrychowski et al.
[SODA'18]).
%K Computer Science, Data Structures and Algorithms, cs.DS
[39]
P. Charalampopoulos, T. Kociumaka, and P. Wellnitz, “Faster Approximate Pattern Matching: A Unified Approach,” in FOCS 2020, Durham, NC, USA (Virtual Conference). (Accepted/in press)
Export
BibTeX
@inproceedings{Charalampopoulos_FOCS2020,
TITLE = {Faster Approximate Pattern Matching: {A} Unified Approach},
AUTHOR = {Charalampopoulos, Panagiotis and Kociumaka, Tomasz and Wellnitz, Philip},
LANGUAGE = {eng},
PUBLISHER = {IEEE},
YEAR = {2020},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {FOCS 2020},
ADDRESS = {Durham, NC, USA (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Charalampopoulos, Panagiotis
%A Kociumaka, Tomasz
%A Wellnitz, Philip
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Faster Approximate Pattern Matching: A Unified Approach :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8C66-9
%D 2020
%B 61st Annual IEEE Symposium on Foundations of Computer Science
%Z date of event: 2020-11-16 - 2020-11-19
%C Durham, NC, USA (Virtual Conference)
%B FOCS 2020
%I IEEE
[40]
M. Cheraghchi and V. Nakos, “Combinatorial Group Testing and Sparse Recovery Schemes with Near-Optimal Decoding Time,” in FOCS 2020, Durham, NC, USA (Virtual Conference). (Accepted/in press)
Export
BibTeX
@inproceedings{Cheraghchi_FOCS2020,
TITLE = {Combinatorial Group Testing and Sparse Recovery Schemes with Near-Optimal Decoding Time},
AUTHOR = {Cheraghchi, Mahdi and Nakos, Vasileios},
LANGUAGE = {eng},
PUBLISHER = {IEEE},
YEAR = {2020},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {FOCS 2020},
ADDRESS = {Durham, NC, USA (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Cheraghchi, Mahdi
%A Nakos, Vasileios
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Combinatorial Group Testing and Sparse Recovery Schemes with Near-Optimal Decoding Time :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-56C6-9
%D 2020
%B 61st Annual IEEE Symposium on Foundations of Computer Science
%Z date of event: 2020-11-16 - 2020-11-19
%C Durham, NC, USA (Virtual Conference)
%B FOCS 2020
%I IEEE
[41]
R. H. Chitnis, A. E. Feldmann, M. HajiAghayi, and D. Marx, “Tight Bounds for Planar Strongly Connected Steiner Subgraph with Fixed Number of Terminals (and Extensions),” SIAM Journal on Computing, vol. 49, no. 2, 2020.
Export
BibTeX
@article{Chitnis2020,
TITLE = {Tight Bounds for Planar Strongly Connected {Steiner} Subgraph with Fixed Number of Terminals (and Extensions)},
AUTHOR = {Chitnis, Rajesh H. and Feldmann, Andreas E. and HajiAghayi, MohammadTaghi and Marx, Daniel},
LANGUAGE = {eng},
ISSN = {0097-5397},
DOI = {10.1137/18M122371X},
PUBLISHER = {Society for Industrial and Applied Mathematics},
ADDRESS = {Philadelphia},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {SIAM Journal on Computing},
VOLUME = {49},
NUMBER = {2},
PAGES = {318--364},
}
Endnote
%0 Journal Article
%A Chitnis, Rajesh H.
%A Feldmann, Andreas E.
%A HajiAghayi, MohammadTaghi
%A Marx, Daniel
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Tight Bounds for Planar Strongly Connected Steiner Subgraph with Fixed Number of Terminals (and Extensions)
:
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-E002-A
%R 10.1137/18M122371X
%7 2020
%D 2020
%J SIAM Journal on Computing
%V 49
%N 2
%& 318
%P 318 - 364
%I Society for Industrial and Applied Mathematics
%C Philadelphia
%@ false
[42]
G. Christodoulou, V. Gkatzelis, M. Latifian, and A. Sgouritsa, “Resource-Aware Protocols for Network Cost-Sharing Games,” in EC’20, 21st ACM Conference on Economics and Computation, Virtual Event, Hungary, 2020.
Export
BibTeX
@inproceedings{Christodoulou_EC2020,
TITLE = {Resource-Aware Protocols for Network Cost-Sharing Games},
AUTHOR = {Christodoulou, George and Gkatzelis, Vasilis and Latifian, Mohamad and Sgouritsa, Alkmini},
LANGUAGE = {eng},
ISBN = {9781450379755},
DOI = {10.1145/3391403.3399528},
PUBLISHER = {ACM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {EC'20, 21st ACM Conference on Economics and Computation},
EDITOR = {Bir{\'o}, P{\'e}ter and Hartline, Jason},
PAGES = {81--107},
ADDRESS = {Virtual Event, Hungary},
}
Endnote
%0 Conference Proceedings
%A Christodoulou, George
%A Gkatzelis, Vasilis
%A Latifian, Mohamad
%A Sgouritsa, Alkmini
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Resource-Aware Protocols for Network Cost-Sharing Games :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-938C-5
%R 10.1145/3391403.3399528
%D 2020
%B 21st ACM Conference on Economics and Computation
%Z date of event: 2020-07-13 - 2020-07-17
%C Virtual Event, Hungary
%B EC'20
%E Biró, Péter; Hartline, Jason
%P 81 - 107
%I ACM
%@ 9781450379755
[43]
V. Cohen-Addad, P. N. Klein, and D. Marx, “On the Computational Tractability of a Geographic Clustering Problem Arising in Redistricting,” 2020. [Online]. Available: https://arxiv.org/abs/2009.00188. (arXiv: 2009.00188)
Abstract
Redistricting is the problem of dividing a state into a number $k$ of
regions, called districts. Voters in each district elect a representative. The
primary criteria are: each district is connected, district populations are
equal (or nearly equal), and districts are "compact". There are multiple
competing definitions of compactness, usually minimizing some quantity.
One measure that has been recently promoted by Duchin and others is number of
cut edges. In redistricting, one is given atomic regions out of which each
district must be built. The populations of the atomic regions are given.
Consider the graph with one vertex per atomic region (with weight equal to the
region's population) and an edge between atomic regions that share a boundary.
A districting plan is a partition of vertices into $k$ parts, each connected,
of nearly equal weight. The districts are considered compact to the extent that
the plan minimizes the number of edges crossing between different parts.
Consider two problems: find the most compact districting plan, and sample
districting plans under a compactness constraint uniformly at random. Both
problems are NP-hard so we restrict the input graph to have branchwidth at most
$w$. (A planar graph's branchwidth is bounded by its diameter.) If both $k$ and
$w$ are bounded by constants, the problems are solvable in polynomial time.
Assume vertices have weight~1. One would like algorithms whose running times
are of the form $O(f(k,w) n^c)$ for some constant $c$ independent of $k$ and
$w$, in which case the problems are said to be fixed-parameter tractable with
respect to $k$ and $w$). We show that, under a complexity-theoretic assumption,
no such algorithms exist. However, we do give algorithms with running time
$O(c^wn^{k+1})$. Thus if the diameter of the graph is moderately small and the
number of districts is very small, our algorithm is useable.
Export
BibTeX
@online{Cohen-Addad_arXiv2009.00188,
TITLE = {On the Computational Tractability of a Geographic Clustering Problem Arising in Redistricting},
AUTHOR = {Cohen-Addad, Vincent and Klein, Philip N. and Marx, D{\'a}niel},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2009.00188},
EPRINT = {2009.00188},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Redistricting is the problem of dividing a state into a number $k$ of regions, called districts. Voters in each district elect a representative. The primary criteria are: each district is connected, district populations are equal (or nearly equal), and districts are "compact". There are multiple competing definitions of compactness, usually minimizing some quantity. One measure that has been recently promoted by Duchin and others is number of cut edges. In redistricting, one is given atomic regions out of which each district must be built. The populations of the atomic regions are given. Consider the graph with one vertex per atomic region (with weight equal to the region's population) and an edge between atomic regions that share a boundary. A districting plan is a partition of vertices into $k$ parts, each connected, of nearly equal weight. The districts are considered compact to the extent that the plan minimizes the number of edges crossing between different parts. Consider two problems: find the most compact districting plan, and sample districting plans under a compactness constraint uniformly at random. Both problems are NP-hard so we restrict the input graph to have branchwidth at most $w$. (A planar graph's branchwidth is bounded by its diameter.) If both $k$ and $w$ are bounded by constants, the problems are solvable in polynomial time. Assume vertices have weight~1. One would like algorithms whose running times are of the form $O(f(k,w) n^c)$ for some constant $c$ independent of $k$ and $w$, in which case the problems are said to be fixed-parameter tractable with respect to $k$ and $w$). We show that, under a complexity-theoretic assumption, no such algorithms exist. However, we do give algorithms with running time $O(c^wn^{k+1})$. Thus if the diameter of the graph is moderately small and the number of districts is very small, our algorithm is useable.},
}
Endnote
%0 Report
%A Cohen-Addad, Vincent
%A Klein, Philip N.
%A Marx, Dániel
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T On the Computational Tractability of a Geographic Clustering Problem Arising in Redistricting :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-495A-3
%U https://arxiv.org/abs/2009.00188
%D 2020
%X Redistricting is the problem of dividing a state into a number $k$ of
regions, called districts. Voters in each district elect a representative. The
primary criteria are: each district is connected, district populations are
equal (or nearly equal), and districts are "compact". There are multiple
competing definitions of compactness, usually minimizing some quantity.
One measure that has been recently promoted by Duchin and others is number of
cut edges. In redistricting, one is given atomic regions out of which each
district must be built. The populations of the atomic regions are given.
Consider the graph with one vertex per atomic region (with weight equal to the
region's population) and an edge between atomic regions that share a boundary.
A districting plan is a partition of vertices into $k$ parts, each connected,
of nearly equal weight. The districts are considered compact to the extent that
the plan minimizes the number of edges crossing between different parts.
Consider two problems: find the most compact districting plan, and sample
districting plans under a compactness constraint uniformly at random. Both
problems are NP-hard so we restrict the input graph to have branchwidth at most
$w$. (A planar graph's branchwidth is bounded by its diameter.) If both $k$ and
$w$ are bounded by constants, the problems are solvable in polynomial time.
Assume vertices have weight~1. One would like algorithms whose running times
are of the form $O(f(k,w) n^c)$ for some constant $c$ independent of $k$ and
$w$, in which case the problems are said to be fixed-parameter tractable with
respect to $k$ and $w$). We show that, under a complexity-theoretic assumption,
no such algorithms exist. However, we do give algorithms with running time
$O(c^wn^{k+1})$. Thus if the diameter of the graph is moderately small and the
number of districts is very small, our algorithm is useable.
%K Computer Science, Data Structures and Algorithms, cs.DS
[44]
C. Coupette and C. Lenzen, “A Breezing Proof of the KMW Bound,” 2020. [Online]. Available: https://arxiv.org/abs/2002.06005. (arXiv: 2002.06005)
Abstract
In their seminal paper from 2004, Kuhn, Moscibroda, and Wattenhofer (KMW)
proved a hardness result for several fundamental graph problems in the LOCAL
model: For any (randomized) algorithm, there are input graphs with $n$ nodes
and maximum degree $\Delta$ on which $\Omega(\min\{\sqrt{\log n/\log \log
n},\log \Delta/\log \log \Delta\})$ (expected) communication rounds are
required to obtain polylogarithmic approximations to a minimum vertex cover,
minimum dominating set, or maximum matching. Via reduction, this hardness
extends to symmetry breaking tasks like finding maximal independent sets or
maximal matchings. Today, more than $15$ years later, there is still no proof
of this result that is easy on the reader. Setting out to change this, in this
work, we provide a fully self-contained and $\mathit{simple}$ proof of the KMW
lower bound. The key argument is algorithmic, and it relies on an invariant
that can be readily verified from the generation rules of the lower bound
graphs.
Export
BibTeX
@online{Coupette_arXiv2002.06005,
TITLE = {A Breezing Proof of the {KMW} Bound},
AUTHOR = {Coupette, Corinna and Lenzen, Christoph},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2002.06005},
EPRINT = {2002.06005},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {In their seminal paper from 2004, Kuhn, Moscibroda, and Wattenhofer (KMW) proved a hardness result for several fundamental graph problems in the LOCAL model: For any (randomized) algorithm, there are input graphs with $n$ nodes and maximum degree $\Delta$ on which $\Omega(\min\{\sqrt{\log n/\log \log n},\log \Delta/\log \log \Delta\})$ (expected) communication rounds are required to obtain polylogarithmic approximations to a minimum vertex cover, minimum dominating set, or maximum matching. Via reduction, this hardness extends to symmetry breaking tasks like finding maximal independent sets or maximal matchings. Today, more than $15$ years later, there is still no proof of this result that is easy on the reader. Setting out to change this, in this work, we provide a fully self-contained and $\mathit{simple}$ proof of the KMW lower bound. The key argument is algorithmic, and it relies on an invariant that can be readily verified from the generation rules of the lower bound graphs.},
}
Endnote
%0 Report
%A Coupette, Corinna
%A Lenzen, Christoph
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T A Breezing Proof of the KMW Bound :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-46DC-3
%U https://arxiv.org/abs/2002.06005
%D 2020
%X In their seminal paper from 2004, Kuhn, Moscibroda, and Wattenhofer (KMW)
proved a hardness result for several fundamental graph problems in the LOCAL
model: For any (randomized) algorithm, there are input graphs with $n$ nodes
and maximum degree $\Delta$ on which $\Omega(\min\{\sqrt{\log n/\log \log
n},\log \Delta/\log \log \Delta\})$ (expected) communication rounds are
required to obtain polylogarithmic approximations to a minimum vertex cover,
minimum dominating set, or maximum matching. Via reduction, this hardness
extends to symmetry breaking tasks like finding maximal independent sets or
maximal matchings. Today, more than $15$ years later, there is still no proof
of this result that is easy on the reader. Setting out to change this, in this
work, we provide a fully self-contained and $\mathit{simple}$ proof of the KMW
lower bound. The key argument is algorithmic, and it relies on an invariant
that can be readily verified from the generation rules of the lower bound
graphs.
%K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC,Computer Science, Computational Complexity, cs.CC,Computer Science, Discrete Mathematics, cs.DM,Computer Science, Data Structures and Algorithms, cs.DS
[45]
N. R. Dayama, M. Shiripour, A. Oulasvirta, E. Ivanko, and A. Karrenbauer, “Foraging-based Optimization of Menu Systems,” 2020. [Online]. Available: https://arxiv.org/abs/2005.01292. (arXiv: 2005.01292)
Abstract
Computational design of menu systems has been solved in limited cases such as
the linear menu (list) as an assignment task, where commands are assigned to
menu positions while optimizing for users' selection performance and
distance of associated items. We show that this approach falls short with
larger, hierarchically organized menu systems, where one must also take into
account how users navigate hierarchical structures. This paper presents a novel
integer programming formulation that models hierarchical menus as a combination
of the exact set covering problem and the assignment problem. It organizes
commands into ordered groups of ordered groups via a novel objective function
based on information foraging theory. It minimizes, on the one hand, the time
required to select a command whose location is known from previous usage and,
on the other, the time wasted in irrelevant parts of the menu while searching
for commands whose location is not known. The convergence of these two factors
yields usable, well-ordered command hierarchies from a single model. In
generated menus, the lead (first) elements of a group or tab are good
indicators of the remaining contents, thereby facilitating the search process.
In a controlled usability evaluation, the performance of computationally
designed menus was 25% faster than existing commercial designs with respect to
selection time. The algorithm is efficient for large, representative instances
of the problem. We further show applications in personalization and adaptation
of menu systems.
Export
BibTeX
@online{Dayama_arXiv2005.01292,
TITLE = {Foraging-based Optimization of Menu Systems},
AUTHOR = {Dayama, Niraj Ramesh and Shiripour, Morteza and Oulasvirta, Antti and Ivanko, Evgeny and Karrenbauer, Andreas},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2005.01292},
EPRINT = {2005.01292},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Computational design of menu systems has been solved in limited cases such as the linear menu (list) as an assignment task, where commands are assigned to menu positions while optimizing for users' selection performance and distance of associated items. We show that this approach falls short with larger, hierarchically organized menu systems, where one must also take into account how users navigate hierarchical structures. This paper presents a novel integer programming formulation that models hierarchical menus as a combination of the exact set covering problem and the assignment problem. It organizes commands into ordered groups of ordered groups via a novel objective function based on information foraging theory. It minimizes, on the one hand, the time required to select a command whose location is known from previous usage and, on the other, the time wasted in irrelevant parts of the menu while searching for commands whose location is not known. The convergence of these two factors yields usable, well-ordered command hierarchies from a single model. In generated menus, the lead (first) elements of a group or tab are good indicators of the remaining contents, thereby facilitating the search process. In a controlled usability evaluation, the performance of computationally designed menus was 25\% faster than existing commercial designs with respect to selection time. The algorithm is efficient for large, representative instances of the problem. We further show applications in personalization and adaptation of menu systems.},
}
Endnote
%0 Report
%A Dayama, Niraj Ramesh
%A Shiripour, Morteza
%A Oulasvirta, Antti
%A Ivanko, Evgeny
%A Karrenbauer, Andreas
%+ External Organizations
External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Foraging-based Optimization of Menu Systems :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-4689-0
%U https://arxiv.org/abs/2005.01292
%D 2020
%X Computational design of menu systems has been solved in limited cases such as
the linear menu (list) as an assignment task, where commands are assigned to
menu positions while optimizing for users' selection performance and
distance of associated items. We show that this approach falls short with
larger, hierarchically organized menu systems, where one must also take into
account how users navigate hierarchical structures. This paper presents a novel
integer programming formulation that models hierarchical menus as a combination
of the exact set covering problem and the assignment problem. It organizes
commands into ordered groups of ordered groups via a novel objective function
based on information foraging theory. It minimizes, on the one hand, the time
required to select a command whose location is known from previous usage and,
on the other, the time wasted in irrelevant parts of the menu while searching
for commands whose location is not known. The convergence of these two factors
yields usable, well-ordered command hierarchies from a single model. In
generated menus, the lead (first) elements of a group or tab are good
indicators of the remaining contents, thereby facilitating the search process.
In a controlled usability evaluation, the performance of computationally
designed menus was 25% faster than existing commercial designs with respect to
selection time. The algorithm is efficient for large, representative instances
of the problem. We further show applications in personalization and adaptation
of menu systems.
%K Computer Science, Human-Computer Interaction, cs.HC,Mathematics, Optimization and Control, math.OC
[46]
M. de Berg and S. Kisfaludi-Bak, “Lower Bounds for Dominating Set in Ball Graphs and for Weighted Dominating Set in Unit-Ball Graphs,” in Treewidth, Kernels, and Algorithms, Berlin: Springer, 2020.
Export
BibTeX
@incollection{BergK20,
TITLE = {Lower Bounds for Dominating Set in Ball Graphs and for Weighted Dominating Set in Unit-Ball Graphs},
AUTHOR = {de Berg, Mark and Kisfaludi-Bak, S{\'a}ndor},
LANGUAGE = {eng},
ISBN = {978-3-030-42070-3},
DOI = {10.1007/978-3-030-42071-0_5},
PUBLISHER = {Springer},
ADDRESS = {Berlin},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
BOOKTITLE = {Treewidth, Kernels, and Algorithms},
EDITOR = {Fomin, Fedor V. and Kratsch, Stefan and van Leeuwen, Erik Jan},
PAGES = {31--48},
SERIES = {Lecture Notes in Computer Science},
VOLUME = {12160},
}
Endnote
%0 Book Section
%A de Berg, Mark
%A Kisfaludi-Bak, Sándor
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Lower Bounds for Dominating Set in Ball Graphs and for Weighted Dominating Set in Unit-Ball Graphs :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-76CB-0
%R 10.1007/978-3-030-42071-0_5
%D 2020
%B Treewidth, Kernels, and Algorithms
%E Fomin, Fedor V.; Kratsch, Stefan; van Leeuwen, Erik Jan
%P 31 - 48
%I Springer
%C Berlin
%@ 978-3-030-42070-3
%S Lecture Notes in Computer Science
%N 12160
[47]
M. de Berg, H. L. Bodlaender, S. Kisfaludi-Bak, D. Marx, and T. C. van der Zanden, “A Framework for Exponential-Time-Hypothesis-Tight Algorithms and Lower Bounds in Geometric Intersection Graphs,” SIAM Journal on Computing, vol. 49, no. 6, 2020.
Export
BibTeX
@article{BergBKMZ20,
TITLE = {A Framework for Exponential-Time-Hypothesis-Tight Algorithms and Lower Bounds in Geometric Intersection Graphs},
AUTHOR = {de Berg, Mark and Bodlaender, Hans L. and Kisfaludi-Bak, S{\'a}ndor and Marx, D{\'a}niel and van der Zanden, Tom C.},
LANGUAGE = {eng},
ISSN = {0097-5397},
DOI = {10.1137/20M1320870},
PUBLISHER = {SIAM},
ADDRESS = {Philadelphia, PA},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {SIAM Journal on Computing},
VOLUME = {49},
NUMBER = {6},
PAGES = {1291--1331},
}
Endnote
%0 Journal Article
%A de Berg, Mark
%A Bodlaender, Hans L.
%A Kisfaludi-Bak, Sándor
%A Marx, Dániel
%A van der Zanden, Tom C.
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T A Framework for Exponential-Time-Hypothesis-Tight Algorithms and Lower Bounds in Geometric Intersection Graphs
:
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-A582-B
%R 10.1137/20M1320870
%7 2020
%D 2020
%J SIAM Journal on Computing
%V 49
%N 6
%& 1291
%P 1291 - 1331
%I SIAM
%C Philadelphia, PA
%@ false
[48]
I. Diakonikolas, T. Gouleakis, D. M. Kane, J. Peebles, and E. Price, “Optimal Testing of Discrete Distributions with High Probability,” 2020. [Online]. Available: https://arxiv.org/abs/2009.06540. (arXiv: 2009.06540)
Abstract
We study the problem of testing discrete distributions with a focus on the
high probability regime. Specifically, given samples from one or more discrete
distributions, a property $\mathcal{P}$, and parameters $0< \epsilon, \delta
<1$, we want to distinguish {\em with probability at least $1-\delta$} whether
these distributions satisfy $\mathcal{P}$ or are $\epsilon$-far from
$\mathcal{P}$ in total variation distance. Most prior work in distribution
testing studied the constant confidence case (corresponding to $\delta =
\Omega(1)$), and provided sample-optimal testers for a range of properties.
While one can always boost the confidence probability of any such tester by
black-box amplification, this generic boosting method typically leads to
sub-optimal sample bounds.
Here we study the following broad question: For a given property
$\mathcal{P}$, can we {\em characterize} the sample complexity of testing
$\mathcal{P}$ as a function of all relevant problem parameters, including the
error probability $\delta$? Prior to this work, uniformity testing was the only
statistical task whose sample complexity had been characterized in this
setting. As our main results, we provide the first algorithms for closeness and
independence testing that are sample-optimal, within constant factors, as a
function of all relevant parameters. We also show matching
information-theoretic lower bounds on the sample complexity of these problems.
Our techniques naturally extend to give optimal testers for related problems.
To illustrate the generality of our methods, we give optimal algorithms for
testing collections of distributions and testing closeness with unequal sized
samples.
Export
BibTeX
@online{Diakonikolas_arXiv2009.06540,
TITLE = {Optimal Testing of Discrete Distributions with High Probability},
AUTHOR = {Diakonikolas, Ilias and Gouleakis, Themis and Kane, Daniel M. and Peebles, John and Price, Eric},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2009.06540},
EPRINT = {2009.06540},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We study the problem of testing discrete distributions with a focus on the high probability regime. Specifically, given samples from one or more discrete distributions, a property $\mathcal{P}$, and parameters $0< \epsilon, \delta <1$, we want to distinguish {\em with probability at least $1-\delta$} whether these distributions satisfy $\mathcal{P}$ or are $\epsilon$-far from $\mathcal{P}$ in total variation distance. Most prior work in distribution testing studied the constant confidence case (corresponding to $\delta = \Omega(1)$), and provided sample-optimal testers for a range of properties. While one can always boost the confidence probability of any such tester by black-box amplification, this generic boosting method typically leads to sub-optimal sample bounds. Here we study the following broad question: For a given property $\mathcal{P}$, can we {\em characterize} the sample complexity of testing $\mathcal{P}$ as a function of all relevant problem parameters, including the error probability $\delta$? Prior to this work, uniformity testing was the only statistical task whose sample complexity had been characterized in this setting. As our main results, we provide the first algorithms for closeness and independence testing that are sample-optimal, within constant factors, as a function of all relevant parameters. We also show matching information-theoretic lower bounds on the sample complexity of these problems. Our techniques naturally extend to give optimal testers for related problems. To illustrate the generality of our methods, we give optimal algorithms for testing collections of distributions and testing closeness with unequal sized samples.},
}
Endnote
%0 Report
%A Diakonikolas, Ilias
%A Gouleakis, Themis
%A Kane, Daniel M.
%A Peebles, John
%A Price, Eric
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T Optimal Testing of Discrete Distributions with High Probability :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8B62-E
%U https://arxiv.org/abs/2009.06540
%D 2020
%X We study the problem of testing discrete distributions with a focus on the
high probability regime. Specifically, given samples from one or more discrete
distributions, a property $\mathcal{P}$, and parameters $0< \epsilon, \delta
<1$, we want to distinguish {\em with probability at least $1-\delta$} whether
these distributions satisfy $\mathcal{P}$ or are $\epsilon$-far from
$\mathcal{P}$ in total variation distance. Most prior work in distribution
testing studied the constant confidence case (corresponding to $\delta =
\Omega(1)$), and provided sample-optimal testers for a range of properties.
While one can always boost the confidence probability of any such tester by
black-box amplification, this generic boosting method typically leads to
sub-optimal sample bounds.
Here we study the following broad question: For a given property
$\mathcal{P}$, can we {\em characterize} the sample complexity of testing
$\mathcal{P}$ as a function of all relevant problem parameters, including the
error probability $\delta$? Prior to this work, uniformity testing was the only
statistical task whose sample complexity had been characterized in this
setting. As our main results, we provide the first algorithms for closeness and
independence testing that are sample-optimal, within constant factors, as a
function of all relevant parameters. We also show matching
information-theoretic lower bounds on the sample complexity of these problems.
Our techniques naturally extend to give optimal testers for related problems.
To illustrate the generality of our methods, we give optimal algorithms for
testing collections of distributions and testing closeness with unequal sized
samples.
%K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Learning, cs.LG,Mathematics, Statistics, math.ST,Statistics, Machine Learning, stat.ML,Statistics, Statistics Theory, stat.TH
[49]
B. Doerr and M. Künnemann, “Improved Protocols and Hardness Results for the Two-Player Cryptogenography Problem,” IEEE Transactions on Information Theory, vol. 66, no. 9, 2020.
Export
BibTeX
@article{Doerr2020,
TITLE = {Improved Protocols and Hardness Results for the Two-Player Cryptogenography Problem},
AUTHOR = {Doerr, Benjamin and K{\"u}nnemann, Marvin},
LANGUAGE = {eng},
ISSN = {0018-9448},
DOI = {10.1109/TIT.2020.2978385},
PUBLISHER = {IEEE},
ADDRESS = {Piscataway, NJ},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {IEEE Transactions on Information Theory},
VOLUME = {66},
NUMBER = {9},
PAGES = {5729--5741},
}
Endnote
%0 Journal Article
%A Doerr, Benjamin
%A Künnemann, Marvin
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Improved Protocols and Hardness Results for the Two-Player Cryptogenography Problem :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-FAC1-6
%R 10.1109/TIT.2020.2978385
%7 2020
%D 2020
%J IEEE Transactions on Information Theory
%V 66
%N 9
%& 5729
%P 5729 - 5741
%I IEEE
%C Piscataway, NJ
%@ false
[50]
M. Dyer, C. Greenhill, P. Kleer, J. Ross, and L. Stougie, “Sampling Hypergraphs with Given Degrees,” 2020. [Online]. Available: https://arxiv.org/abs/2006.12021. (arXiv: 2006.12021)
Abstract
There is a well-known connection between hypergraphs and bipartite graphs,
obtained by treating the incidence matrix of the hypergraph as the biadjacency
matrix of a bipartite graph. We use this connection to describe and analyse a
rejection sampling algorithm for sampling simple uniform hypergraphs with a
given degree sequence. Our algorithm uses, as a black box, an algorithm
$\mathcal{A}$ for sampling bipartite graphs with given degrees, uniformly or
nearly uniformly, in (expected) polynomial time. The expected runtime of the
hypergraph sampling algorithm depends on the (expected) runtime of the
bipartite graph sampling algorithm $\mathcal{A}$, and the probability that a
uniformly random bipartite graph with given degrees corresponds to a simple
hypergraph. We give some conditions on the hypergraph degree sequence which
guarantee that this probability is bounded below by a constant.
Export
BibTeX
@online{Dyer_arXiv2006.12021,
TITLE = {Sampling Hypergraphs with Given Degrees},
AUTHOR = {Dyer, Martin and Greenhill, Catherine and Kleer, Pieter and Ross, James and Stougie, Leen},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2006.12021},
EPRINT = {2006.12021},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {There is a well-known connection between hypergraphs and bipartite graphs, obtained by treating the incidence matrix of the hypergraph as the biadjacency matrix of a bipartite graph. We use this connection to describe and analyse a rejection sampling algorithm for sampling simple uniform hypergraphs with a given degree sequence. Our algorithm uses, as a black box, an algorithm $\mathcal{A}$ for sampling bipartite graphs with given degrees, uniformly or nearly uniformly, in (expected) polynomial time. The expected runtime of the hypergraph sampling algorithm depends on the (expected) runtime of the bipartite graph sampling algorithm $\mathcal{A}$, and the probability that a uniformly random bipartite graph with given degrees corresponds to a simple hypergraph. We give some conditions on the hypergraph degree sequence which guarantee that this probability is bounded below by a constant.},
}
Endnote
%0 Report
%A Dyer, Martin
%A Greenhill, Catherine
%A Kleer, Pieter
%A Ross, James
%A Stougie, Leen
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Sampling Hypergraphs with Given Degrees :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9152-8
%U https://arxiv.org/abs/2006.12021
%D 2020
%X There is a well-known connection between hypergraphs and bipartite graphs,
obtained by treating the incidence matrix of the hypergraph as the biadjacency
matrix of a bipartite graph. We use this connection to describe and analyse a
rejection sampling algorithm for sampling simple uniform hypergraphs with a
given degree sequence. Our algorithm uses, as a black box, an algorithm
$\mathcal{A}$ for sampling bipartite graphs with given degrees, uniformly or
nearly uniformly, in (expected) polynomial time. The expected runtime of the
hypergraph sampling algorithm depends on the (expected) runtime of the
bipartite graph sampling algorithm $\mathcal{A}$, and the probability that a
uniformly random bipartite graph with given degrees corresponds to a simple
hypergraph. We give some conditions on the hypergraph degree sequence which
guarantee that this probability is bounded below by a constant.
%K Computer Science, Discrete Mathematics, cs.DM
[51]
E. Facca, A. Karrenbauer, P. Kolev, and K. Mehlhorn, “Convergence of the Non-Uniform Directed Physarum Model,” Theoretical Computer Science, vol. 816, 2020.
Export
BibTeX
@article{FaccaTCS2020,
TITLE = {Convergence of the Non-Uniform Directed Physarum Model},
AUTHOR = {Facca, Enrico and Karrenbauer, Andreas and Kolev, Pavel and Mehlhorn, Kurt},
LANGUAGE = {eng},
ISSN = {0304-3975},
DOI = {10.1016/j.tcs.2020.01.034},
PUBLISHER = {Elsevier},
ADDRESS = {Amsterdam},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {Theoretical Computer Science},
VOLUME = {816},
PAGES = {184--194},
}
Endnote
%0 Journal Article
%A Facca, Enrico
%A Karrenbauer, Andreas
%A Kolev, Pavel
%A Mehlhorn, Kurt
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Convergence of the Non-Uniform Directed Physarum Model :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-97B9-F
%R 10.1016/j.tcs.2020.01.034
%7 2020
%D 2020
%J Theoretical Computer Science
%V 816
%& 184
%P 184 - 194
%I Elsevier
%C Amsterdam
%@ false
[52]
Y. Faenza and T. Kavitha, “Quasi-popular Matchings, Optimality, and Extended Formulations,” in Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{Faenza_SODA20,
TITLE = {Quasi-popular Matchings, Optimality, and Extended Formulations},
AUTHOR = {Faenza, Yuri and Kavitha, Telikepalli},
LANGUAGE = {eng},
ISBN = {978-1-61197-599-4},
DOI = {10.5555/3381089.3381109},
PUBLISHER = {SIAM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020)},
EDITOR = {Chawla, Shuchi},
PAGES = {325--344},
ADDRESS = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Faenza, Yuri
%A Kavitha, Telikepalli
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Quasi-popular Matchings, Optimality, and Extended Formulations :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-F26C-0
%R 10.5555/3381089.3381109
%D 2020
%B 31st Annual ACM-SIAM Symposium on Discrete Algorithms
%Z date of event: 2020-01-05 - 2020-01-08
%C Salt Lake City, UT, USA
%B Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms
%E Chawla, Shuchi
%P 325 - 344
%I SIAM
%@ 978-1-61197-599-4
[53]
N. Fischer and C. Ikenmeyer, “The Computational Complexity of Plethysm Coefficients,” Computational Complexity, vol. 29, no. 2, 2020.
Export
BibTeX
@article{Fischer_2020,
TITLE = {The Computational Complexity of Plethysm Coefficients},
AUTHOR = {Fischer, Nick and Ikenmeyer, Christian},
LANGUAGE = {eng},
DOI = {10.1007/s00037-020-00198-4},
PUBLISHER = {Springer},
ADDRESS = {New York, NY},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {Computational Complexity},
VOLUME = {29},
NUMBER = {2},
EID = {8},
}
Endnote
%0 Journal Article
%A Fischer, Nick
%A Ikenmeyer, Christian
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T The Computational Complexity of Plethysm Coefficients :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-72D0-D
%R 10.1007/s00037-020-00198-4
%7 2020
%D 2020
%J Computational Complexity
%V 29
%N 2
%Z sequence number: 8
%I Springer
%C New York, NY
[54]
F. V. Fomin, P. A. Golovach, W. Lochet, P. Misra, S. Saurabh, and R. Sharma, “Parameterized Complexity of Directed Spanner Problems,” in 15th International Symposium on Parameterized and Exact Computation (IPEC 2020), Hong Kong, China (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Fomin_IPEC20,
TITLE = {Parameterized Complexity of Directed Spanner Problems},
AUTHOR = {Fomin, Fedor V. and Golovach, Petr A. and Lochet, William and Misra, Pranabendu and Saurabh, Saket and Sharma, Roohani},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-172-6},
URL = {urn:nbn:de:0030-drops-133156},
DOI = {10.4230/LIPIcs.IPEC.2020.12},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {15th International Symposium on Parameterized and Exact Computation (IPEC 2020)},
EDITOR = {Cao, Yixin and Pilipczuk, Marcin},
EID = {12},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {180},
ADDRESS = {Hong Kong, China (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Fomin, Fedor V.
%A Golovach, Petr A.
%A Lochet, William
%A Misra, Pranabendu
%A Saurabh, Saket
%A Sharma, Roohani
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Parameterized Complexity of Directed Spanner Problems :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9373-1
%R 10.4230/LIPIcs.IPEC.2020.12
%U urn:nbn:de:0030-drops-133156
%D 2020
%B 15th International Symposium on Parameterized and Exact Computation
%Z date of event: 2020-12-14 - 2020-12-18
%C Hong Kong, China (Virtual Conference)
%B 15th International Symposium on Parameterized and Exact Computation
%E Cao, Yixin; Pilipczuk, Marcin
%Z sequence number: 12
%I Schloss Dagstuhl
%@ 978-3-95977-172-6
%B Leibniz International Proceedings in Informatics
%N 180
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/13315/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[55]
S. Forster, D. Nanongkai, L. Yang, T. Saranurak, and S. Yingchareonthawornchai, “Computing and Testing Small Connectivity in Near-Linear Time and Queries via Fast Local Cut Algorithms,” in Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{Forster_SODA20,
TITLE = {Computing and Testing Small Connectivity in Near-Linear Time and Queries via Fast Local Cut Algorithms},
AUTHOR = {Forster, Sebastian and Nanongkai, Danupon and Yang, Liu and Saranurak, Thatchaphol and Yingchareonthawornchai, Sorrachai},
LANGUAGE = {eng},
ISBN = {978-1-61197-599-4},
DOI = {10.5555/3381089.3381215},
PUBLISHER = {SIAM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020)},
EDITOR = {Chawla, Shuchi},
PAGES = {2046--2065},
ADDRESS = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Forster, Sebastian
%A Nanongkai, Danupon
%A Yang, Liu
%A Saranurak, Thatchaphol
%A Yingchareonthawornchai, Sorrachai
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
%T Computing and Testing Small Connectivity in Near-Linear Time and Queries via Fast Local Cut Algorithms :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-F274-6
%R 10.5555/3381089.3381215
%D 2020
%B 31st Annual ACM-SIAM Symposium on Discrete Algorithms
%Z date of event: 2020-01-05 - 2020-01-08
%C Salt Lake City, UT, USA
%B Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms
%E Chawla, Shuchi
%P 2046 - 2065
%I SIAM
%@ 978-1-61197-599-4
[56]
A. Göke, D. Marx, and M. Mnich, “Hitting Long Directed Cycles Is Fixed-Parameter Tractable,” in 47th International Colloquium on Automata, Languages, and Programming (ICALP 2020), Saarbrücken, Germany (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Goeke_ICALP2020,
TITLE = {Hitting Long Directed Cycles Is Fixed-Parameter Tractable},
AUTHOR = {G{\"o}ke, Alexander and Marx, D{\'a}niel and Mnich, Matthias},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-138-2},
URL = {urn:nbn:de:0030-drops-124664},
DOI = {10.4230/LIPIcs.ICALP.2020.59},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {47th International Colloquium on Automata, Languages, and Programming (ICALP 2020)},
EDITOR = {Czumaj, Artur and Dawar, Anuj and Merelli, Emanuela},
EID = {59},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {168},
ADDRESS = {Saarbr{\"u}cken, Germany (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Göke, Alexander
%A Marx, Dániel
%A Mnich, Matthias
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Hitting Long Directed Cycles Is Fixed-Parameter Tractable :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-491E-7
%R 10.4230/LIPIcs.ICALP.2020.59
%U urn:nbn:de:0030-drops-124664
%D 2020
%B 47th International Colloquium on Automata, Languages, and Programming
%Z date of event: 2020-07-08 - 2020-07-11
%C Saarbrücken, Germany (Virtual Conference)
%B 47th International Colloquium on Automata, Languages, and Programming
%E Czumaj, Artur; Dawar, Anuj; Merelli, Emanuela
%Z sequence number: 59
%I Schloss Dagstuhl
%@ 978-3-95977-138-2
%B Leibniz International Proceedings in Informatics
%N 168
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12466/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[57]
A. Göke, D. Marx, and M. Mnich, “Hitting Long Directed Cycles is Fixed-Parameter Tractable,” 2020. [Online]. Available: https://arxiv.org/abs/2003.05267. (arXiv: 2003.05267)
Abstract
In the Directed Long Cycle Hitting Set problem we are given a directed graph
$G$, and the task is to find a set $S$ of at most $k$ vertices/arcs such that
$G-S$ has no cycle of length longer than $\ell$. We show that the problem can
be solved in time $2^{\mathcal O(\ell k^3\log k + k^5\log k\log\ell)}\cdot
n^{\mathcal O(1)}$, that is, it is fixed-parameter tractable (FPT)
parameterized by $k$ and $\ell$. This algorithm can be seen as a far-reaching
generalization of the fixed-parameter tractability of {\sc Mixed Graph Feedback
Vertex Set} [Bonsma and Lokshtanov WADS 2011], which is already a common
generalization of the fixed-parameter tractability of (undirected) {\sc
Feedback Vertex Set} and the {\sc Directed Feedback Vertex Set} problems, two
classic results in parameterized algorithms. The algorithm requires significant
insights into the structure of graphs without directed cycles length longer
than $\ell$ and can be seen as an exact version of the approximation algorithm
following from the Erd{\H{o}}s-P{\'o}sa property for long cycles in directed
graphs proved by Kreutzer and Kawarabayashi [STOC 2015].
Export
BibTeX
@online{Goeke_arXiv2003.05267,
TITLE = {Hitting Long Directed Cycles is Fixed-Parameter Tractable},
AUTHOR = {G{\"o}ke, Alexander and Marx, D{\'a}niel and Mnich, Matthias},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2003.05267},
EPRINT = {2003.05267},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {In the {Directed Long Cycle Hitting Set} problem we are given a directed graph $G$, and the task is to find a set $S$ of at most $k$ vertices/arcs such that $G-S$ has no cycle of length longer than $\ell$. We show that the problem can be solved in time $2^{\mathcal O(\ell k^3\log k + k^5\log k\log\ell)}\cdot n^{\mathcal O(1)}$, that is, it is fixed-parameter tractable (FPT) parameterized by $k$ and $\ell$. This algorithm can be seen as a far-reaching generalization of the fixed-parameter tractability of {\sc Mixed Graph Feedback Vertex Set} [Bonsma and Lokshtanov WADS 2011], which is already a common generalization of the fixed-parameter tractability of (undirected) {\sc Feedback Vertex Set} and the {\sc Directed Feedback Vertex Set} problems, two classic results in parameterized algorithms. The algorithm requires significant insights into the structure of graphs without directed cycles length longer than $\ell$ and can be seen as an exact version of the approximation algorithm following from the Erd{\H{o}}s-P{\'o}sa property for long cycles in directed graphs proved by Kreutzer and Kawarabayashi [STOC 2015].},
}
Endnote
%0 Report
%A Göke, Alexander
%A Marx, Dániel
%A Mnich, Matthias
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Hitting Long Directed Cycles is Fixed-Parameter Tractable :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-4923-0
%U https://arxiv.org/abs/2003.05267
%D 2020
%X In the Directed Long Cycle Hitting Set problem we are given a directed graph
$G$, and the task is to find a set $S$ of at most $k$ vertices/arcs such that
$G-S$ has no cycle of length longer than $\ell$. We show that the problem can
be solved in time $2^{\mathcal O(\ell k^3\log k + k^5\log k\log\ell)}\cdot
n^{\mathcal O(1)}$, that is, it is fixed-parameter tractable (FPT)
parameterized by $k$ and $\ell$. This algorithm can be seen as a far-reaching
generalization of the fixed-parameter tractability of {\sc Mixed Graph Feedback
Vertex Set} [Bonsma and Lokshtanov WADS 2011], which is already a common
generalization of the fixed-parameter tractability of (undirected) {\sc
Feedback Vertex Set} and the {\sc Directed Feedback Vertex Set} problems, two
classic results in parameterized algorithms. The algorithm requires significant
insights into the structure of graphs without directed cycles length longer
than $\ell$ and can be seen as an exact version of the approximation algorithm
following from the Erd{\H{o}}s-P{\'o}sa property for long cycles in directed
graphs proved by Kreutzer and Kawarabayashi [STOC 2015].
%K Computer Science, Data Structures and Algorithms, cs.DS
[58]
A. Göke, D. Marx, and M. Mnich, “Parameterized Algorithms for Generalizations of Directed Feedback Vertex Set,” 2020. [Online]. Available: https://arxiv.org/abs/2003.02483. (arXiv: 2003.02483)
Abstract
The Directed Feedback Vertex Set (DFVS) problem takes as input a directed
graph~$G$ and seeks a smallest vertex set~$S$ that hits all cycles in $G$. This
is one of Karp's 21 $\mathsf{NP}$-complete problems. Resolving the
parameterized complexity status of DFVS was a long-standing open problem until
Chen et al. [STOC 2008, J. ACM 2008] showed its fixed-parameter tractability
via a $4^kk! n^{\mathcal{O}(1)}$-time algorithm, where $k = |S|$.
Here we show fixed-parameter tractability of two generalizations of DFVS:
- Find a smallest vertex set $S$ such that every strong component of $G - S$
has size at most~$s$: we give an algorithm solving this problem in time
$4^k(ks+k+s)!\cdot n^{\mathcal{O}(1)}$. This generalizes an algorithm by Xiao
[JCSS 2017] for the undirected version of the problem.
- Find a smallest vertex set $S$ such that every non-trivial strong component
of $G - S$ is 1-out-regular: we give an algorithm solving this problem in time
$2^{\mathcal{O}(k^3)}\cdot n^{\mathcal{O}(1)}$.
We also solve the corresponding arc versions of these problems by
fixed-parameter algorithms.
Export
BibTeX
@online{Goeke_arXiv2003.02483,
TITLE = {Parameterized Algorithms for Generalizations of Directed Feedback Vertex Set},
AUTHOR = {G{\"o}ke, Alexander and Marx, D{\'a}niel and Mnich, Matthias},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2003.02483},
EPRINT = {2003.02483},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {The Directed Feedback Vertex Set (DFVS) problem takes as input a directed graph~$G$ and seeks a smallest vertex set~$S$ that hits all cycles in $G$. This is one of Karp's 21 $\mathsf{NP}$-complete problems. Resolving the parameterized complexity status of DFVS was a long-standing open problem until Chen et al. [STOC 2008, J. ACM 2008] showed its fixed-parameter tractability via a $4^kk! n^{\mathcal{O}(1)}$-time algorithm, where $k = |S|$. Here we show fixed-parameter tractability of two generalizations of DFVS: -- Find a smallest vertex set $S$ such that every strong component of $G - S$ has size at most~$s$: we give an algorithm solving this problem in time $4^k(ks+k+s)!\cdot n^{\mathcal{O}(1)}$. This generalizes an algorithm by Xiao [JCSS 2017] for the undirected version of the problem. -- Find a smallest vertex set $S$ such that every non-trivial strong component of $G - S$ is 1-out-regular: we give an algorithm solving this problem in time $2^{\mathcal{O}(k^3)}\cdot n^{\mathcal{O}(1)}$. We also solve the corresponding arc versions of these problems by fixed-parameter algorithms.},
}
Endnote
%0 Report
%A Göke, Alexander
%A Marx, Dániel
%A Mnich, Matthias
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Parameterized Algorithms for Generalizations of Directed Feedback Vertex Set :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-4920-3
%U https://arxiv.org/abs/2003.02483
%D 2020
%X The Directed Feedback Vertex Set (DFVS) problem takes as input a directed
graph~$G$ and seeks a smallest vertex set~$S$ that hits all cycles in $G$. This
is one of Karp's 21 $\mathsf{NP}$-complete problems. Resolving the
parameterized complexity status of DFVS was a long-standing open problem until
Chen et al. [STOC 2008, J. ACM 2008] showed its fixed-parameter tractability
via a $4^kk! n^{\mathcal{O}(1)}$-time algorithm, where $k = |S|$.
Here we show fixed-parameter tractability of two generalizations of DFVS:
- Find a smallest vertex set $S$ such that every strong component of $G - S$
has size at most~$s$: we give an algorithm solving this problem in time
$4^k(ks+k+s)!\cdot n^{\mathcal{O}(1)}$. This generalizes an algorithm by Xiao
[JCSS 2017] for the undirected version of the problem.
- Find a smallest vertex set $S$ such that every non-trivial strong component
of $G - S$ is 1-out-regular: we give an algorithm solving this problem in time
$2^{\mathcal{O}(k^3)}\cdot n^{\mathcal{O}(1)}$.
We also solve the corresponding arc versions of these problems by
fixed-parameter algorithms.
%K Computer Science, Data Structures and Algorithms, cs.DS
[59]
M. Grohe, D. Neuen, and D. Wiebking, “Isomorphism Testing for Graphs Excluding Small Minors,” 2020. [Online]. Available: https://arxiv.org/abs/2004.07671. (arXiv: 2004.07671)
Abstract
We prove that there is a graph isomorphism test running in time
$n^{\operatorname{polylog}(h)}$ on $n$-vertex graphs excluding some $h$-vertex
graph as a minor. Previously known bounds were $n^{\operatorname{poly}(h)}$
(Ponomarenko, 1988) and $n^{\operatorname{polylog}(n)}$ (Babai, STOC 2016). For
the algorithm we combine recent advances in the group-theoretic graph
isomorphism machinery with new graph-theoretic arguments.
Export
BibTeX
@online{Grohe_arXiv2004.07671,
TITLE = {Isomorphism Testing for Graphs Excluding Small Minors},
AUTHOR = {Grohe, Martin and Neuen, Daniel and Wiebking, Daniel},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2004.07671},
EPRINT = {2004.07671},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We prove that there is a graph isomorphism test running in time $n^{\operatorname{polylog}(h)}$ on $n$-vertex graphs excluding some $h$-vertex graph as a minor. Previously known bounds were $n^{\operatorname{poly}(h)}$ (Ponomarenko, 1988) and $n^{\operatorname{polylog}(n)}$ (Babai, STOC 2016). For the algorithm we combine recent advances in the group-theoretic graph isomorphism machinery with new graph-theoretic arguments.},
}
Endnote
%0 Report
%A Grohe, Martin
%A Neuen, Daniel
%A Wiebking, Daniel
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Isomorphism Testing for Graphs Excluding Small Minors :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9943-1
%U https://arxiv.org/abs/2004.07671
%D 2020
%X We prove that there is a graph isomorphism test running in time
$n^{\operatorname{polylog}(h)}$ on $n$-vertex graphs excluding some $h$-vertex
graph as a minor. Previously known bounds were $n^{\operatorname{poly}(h)}$
(Ponomarenko, 1988) and $n^{\operatorname{polylog}(n)}$ (Babai, STOC 2016). For
the algorithm we combine recent advances in the group-theoretic graph
isomorphism machinery with new graph-theoretic arguments.
%K Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Discrete Mathematics, cs.DM,Mathematics, Combinatorics, math.CO
[60]
M. Grohe, D. Neuen, and D. Wiebking, “Isomorphism Testing for Graphs Excluding Small Minors,” in FOCS 2020, Durham, NC, USA (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Grohe_FOCS2020,
TITLE = {Isomorphism Testing for Graphs Excluding Small Minors},
AUTHOR = {Grohe, Martin and Neuen, Daniel and Wiebking, Daniel},
LANGUAGE = {eng},
PUBLISHER = {IEEE},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {FOCS 2020},
ADDRESS = {Durham, NC, USA (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Grohe, Martin
%A Neuen, Daniel
%A Wiebking, Daniel
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Isomorphism Testing for Graphs Excluding Small Minors :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9947-D
%D 2020
%B 61st Annual IEEE Symposium on Foundations of Computer Science
%Z date of event: 2020-11-16 - 2020-11-19
%C Durham, NC, USA (Virtual Conference)
%B FOCS 2020
%I IEEE
[61]
S. Gunda, P. Jain, D. Lokshtanov, S. Saurabh, and P. Tale, “On the Parameterized Approximability of Contraction to Classes of Chordal Graphs,” in Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques (APPROX/RANDOM 2020), Virtual Conference, 2020.
Export
BibTeX
@inproceedings{Gunda_APPROXRANDOM20,
TITLE = {On the Parameterized Approximability of Contraction to Classes of Chordal Graphs},
AUTHOR = {Gunda, Spoorthy and Jain, Pallavi and Lokshtanov, Daniel and Saurabh, Saket and Tale, Prafullkumar},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-164-1},
URL = {urn:nbn:de:0030-drops-126545},
DOI = {10.4230/LIPIcs.APPROX/RANDOM.2020.51},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques (APPROX/RANDOM 2020)},
EDITOR = {Byrka, Jaros{\l}aw and Meka, Raghu},
PAGES = {1--19},
EID = {51},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {176},
ADDRESS = {Virtual Conference},
}
Endnote
%0 Conference Proceedings
%A Gunda, Spoorthy
%A Jain, Pallavi
%A Lokshtanov, Daniel
%A Saurabh, Saket
%A Tale, Prafullkumar
%+ External Organizations
External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T On the Parameterized Approximability of Contraction to Classes of Chordal Graphs :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-874F-9
%R 10.4230/LIPIcs.APPROX/RANDOM.2020.51
%U urn:nbn:de:0030-drops-126545
%D 2020
%B 23rd International Conference on Approximation Algorithms for Combinatorial Optimization Problems / 24th International Conference on Randomization and Computation
%Z date of event: 2020-08-17 - 2020-08-19
%C Virtual Conference
%B Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques
%E Byrka, Jarosław; Meka, Raghu
%P 1 - 19
%Z sequence number: 51
%I Schloss Dagstuhl
%@ 978-3-95977-164-1
%B Leibniz International Proceedings in Informatics
%N 176
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12654/
[62]
D. Halperin, S. Har-Peled, K. Mehlhorn, E. Oh, and M. Sharir, “The Maximum-Level Vertex in an Arrangement of Lines,” 2020. [Online]. Available: http://arxiv.org/abs/2003.00518. (arXiv: 2003.00518)
Abstract
Let $L$ be a set of $n$ lines in the plane, not necessarily in general
position. We present an efficient algorithm for finding all the vertices of the
arrangement $A(L)$ of maximum level, where the level of a vertex $v$ is the
number of lines of $L$ that pass strictly below $v$. The problem, posed in
Exercise~8.13 in de Berg etal [BCKO08], appears to be much harder than it
seems, as this vertex might not be on the upper envelope of the lines.
We first assume that all the lines of $L$ are distinct, and distinguish
between two cases, depending on whether or not the upper envelope of $L$
contains a bounded edge. In the former case, we show that the number of lines
of $L$ that pass above any maximum level vertex $v_0$ is only $O(\log n)$. In
the latter case, we establish a similar property that holds after we remove
some of the lines that are incident to the single vertex of the upper envelope.
We present algorithms that run, in both cases, in optimal $O(n\log n)$ time.
We then consider the case where the lines of $L$ are not necessarily
distinct. This setup is more challenging, and the best we have is an algorithm
that computes all the maximum-level vertices in time $O(n^{4/3}\log^{3}n)$.
Finally, we consider a related combinatorial question for degenerate
arrangements, where many lines may intersect in a single point, but all the
lines are distinct: We bound the complexity of the weighted $k$-level in such
an arrangement, where the weight of a vertex is the number of lines that pass
through the vertex. We show that the bound in this case is $O(n^{4/3})$, which
matches the corresponding bound for non-degenerate arrangements, and we use
this bound in the analysis of one of our algorithms.
Export
BibTeX
@online{Halperin_arXiv2003.00518,
TITLE = {The Maximum-Level Vertex in an Arrangement of Lines},
AUTHOR = {Halperin, Dan and Har-Peled, Sariel and Mehlhorn, Kurt and Oh, Eunjin and Sharir, Micha},
LANGUAGE = {eng},
URL = {http://arxiv.org/abs/2003.00518},
EPRINT = {2003.00518},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Let $L$ be a set of $n$ lines in the plane, not necessarily in general position. We present an efficient algorithm for finding all the vertices of the arrangement $A(L)$ of maximum level, where the level of a vertex $v$ is the number of lines of $L$ that pass strictly below $v$. The problem, posed in Exercise~8.13 in de Berg etal [BCKO08], appears to be much harder than it seems, as this vertex might not be on the upper envelope of the lines. We first assume that all the lines of $L$ are distinct, and distinguish between two cases, depending on whether or not the upper envelope of $L$ contains a bounded edge. In the former case, we show that the number of lines of $L$ that pass above any maximum level vertex $v_0$ is only $O(\log n)$. In the latter case, we establish a similar property that holds after we remove some of the lines that are incident to the single vertex of the upper envelope. We present algorithms that run, in both cases, in optimal $O(n\log n)$ time. We then consider the case where the lines of $L$ are not necessarily distinct. This setup is more challenging, and the best we have is an algorithm that computes all the maximum-level vertices in time $O(n^{4/3}\log^{3}n)$. Finally, we consider a related combinatorial question for degenerate arrangements, where many lines may intersect in a single point, but all the lines are distinct: We bound the complexity of the weighted $k$-level in such an arrangement, where the weight of a vertex is the number of lines that pass through the vertex. We show that the bound in this case is $O(n^{4/3})$, which matches the corresponding bound for non-degenerate arrangements, and we use this bound in the analysis of one of our algorithms.},
}
Endnote
%0 Report
%A Halperin, Dan
%A Har-Peled, Sariel
%A Mehlhorn, Kurt
%A Oh, Eunjin
%A Sharir, Micha
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T The Maximum-Level Vertex in an Arrangement of Lines :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-AFB1-D
%U http://arxiv.org/abs/2003.00518
%D 2020
%X Let $L$ be a set of $n$ lines in the plane, not necessarily in general
position. We present an efficient algorithm for finding all the vertices of the
arrangement $A(L)$ of maximum level, where the level of a vertex $v$ is the
number of lines of $L$ that pass strictly below $v$. The problem, posed in
Exercise~8.13 in de Berg etal [BCKO08], appears to be much harder than it
seems, as this vertex might not be on the upper envelope of the lines.
We first assume that all the lines of $L$ are distinct, and distinguish
between two cases, depending on whether or not the upper envelope of $L$
contains a bounded edge. In the former case, we show that the number of lines
of $L$ that pass above any maximum level vertex $v_0$ is only $O(\log n)$. In
the latter case, we establish a similar property that holds after we remove
some of the lines that are incident to the single vertex of the upper envelope.
We present algorithms that run, in both cases, in optimal $O(n\log n)$ time.
We then consider the case where the lines of $L$ are not necessarily
distinct. This setup is more challenging, and the best we have is an algorithm
that computes all the maximum-level vertices in time $O(n^{4/3}\log^{3}n)$.
Finally, we consider a related combinatorial question for degenerate
arrangements, where many lines may intersect in a single point, but all the
lines are distinct: We bound the complexity of the weighted $k$-level in such
an arrangement, where the weight of a vertex is the number of lines that pass
through the vertex. We show that the bound in this case is $O(n^{4/3})$, which
matches the corresponding bound for non-degenerate arrangements, and we use
this bound in the analysis of one of our algorithms.
%K Computer Science, Computational Geometry, cs.CG
[63]
P. Jain, L. Kanesh, and P. Misra, “Conflict Free Version of Covering Problems on Graphs: Classical and Parameterized,” Theory of Computing Systems, 2020.
Export
BibTeX
@article{Jain2020,
TITLE = {Conflict Free Version of Covering Problems on Graphs: {C}lassical and Parameterized},
AUTHOR = {Jain, Pallavi and Kanesh, Lawqueen and Misra, Pranabendu},
LANGUAGE = {eng},
ISSN = {1432-4350},
DOI = {10.1007/s00224-019-09964-6},
PUBLISHER = {Springer},
ADDRESS = {New York, NY},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {Theory of Computing Systems},
}
Endnote
%0 Journal Article
%A Jain, Pallavi
%A Kanesh, Lawqueen
%A Misra, Pranabendu
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Conflict Free Version of Covering Problems on Graphs: Classical and Parameterized :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-90BA-5
%R 10.1007/s00224-019-09964-6
%7 2020
%D 2020
%J Theory of Computing Systems
%I Springer
%C New York, NY
%@ false
[64]
M. John, “Of keyboards and beyond,” Universität des Saarlandes, Saarbrücken, 2020.
Export
BibTeX
@phdthesis{John_2019,
TITLE = {Of keyboards and beyond},
AUTHOR = {John, Maximilian},
DOI = {10.22028/D291-30635},
SCHOOL = {Universit{\"a}t des Saarlandes},
ADDRESS = {Saarbr{\"u}cken},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
}
Endnote
%0 Thesis
%A John, Maximilian
%Y Karrenbauer, Andreas
%A referee: Mehlhorn, Kurt
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Of keyboards and beyond : optimization in human-computer interaction
%U http://hdl.handle.net/21.11116/0000-0007-7152-D
%R 10.22028/D291-30635
%I Universität des Saarlandes
%C Saarbrücken
%D 2020
%P 91 p.
%V phd
%9 phd
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28954
[65]
A. Karrenbauer, P. Kolev, and K. Mehlhorn, “Convergence of the Non-Uniform Physarum Dynamics,” Theoretical Computer Science, vol. 816, 2020.
Export
BibTeX
@article{KarrenbauerTCS2020,
TITLE = {Convergence of the Non-Uniform Physarum Dynamics},
AUTHOR = {Karrenbauer, Andreas and Kolev, Pavel and Mehlhorn, Kurt},
LANGUAGE = {eng},
ISSN = {0304-3975},
DOI = {10.1016/j.tcs.2020.02.032},
PUBLISHER = {Elsevier},
ADDRESS = {Amsterdam},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {Theoretical Computer Science},
VOLUME = {816},
PAGES = {260--269},
}
Endnote
%0 Journal Article
%A Karrenbauer, Andreas
%A Kolev, Pavel
%A Mehlhorn, Kurt
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Convergence of the Non-Uniform Physarum Dynamics :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-97C1-5
%R 10.1016/j.tcs.2020.02.032
%7 2020
%D 2020
%J Theoretical Computer Science
%V 816
%& 260
%P 260 - 269
%I Elsevier
%C Amsterdam
%@ false
[66]
D. M. Katz, C. Coupette, J. Beckedorf, and D. Hartung, “Complex Societies and the Growth of the Law,” Scientific Reports, vol. 10, 2020.
Export
BibTeX
@article{Katz2020,
TITLE = {Complex Societies and the Growth of the Law},
AUTHOR = {Katz, Daniel Martin and Coupette, Corinna and Beckedorf, Janis and Hartung, Dirk},
LANGUAGE = {eng},
ISSN = {2045-2322},
DOI = {10.1038/s41598-020-73623-x},
PUBLISHER = {Nature Publishing Group},
ADDRESS = {London, UK},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {Scientific Reports},
VOLUME = {10},
EID = {18737},
}
Endnote
%0 Journal Article
%A Katz, Daniel Martin
%A Coupette, Corinna
%A Beckedorf, Janis
%A Hartung, Dirk
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Complex Societies and the Growth of the Law :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-5C0B-7
%R 10.1038/s41598-020-73623-x
%7 2020
%D 2020
%J Scientific Reports
%O Sci. Rep.
%V 10
%Z sequence number: 18737
%I Nature Publishing Group
%C London, UK
%@ false
[67]
S. Kisfaludi-Bak, “A Quasi-Polynomial Algorithm for Well-Spaced Hyperbolic TSP,” in 36th International Symposium on Computational Geometry (SoCG 2020), Zürich, Switzerland (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{SoCG/Kisfaludi-Bak20,
TITLE = {A Quasi-Polynomial Algorithm for Well-Spaced Hyperbolic {TSP}},
AUTHOR = {Kisfaludi-Bak, S{\'a}ndor},
LANGUAGE = {eng},
ISBN = {978-3-95977-143-6},
URL = {urn:nbn:de:0030-drops-122135},
DOI = {10.4230/LIPIcs.SoCG.2020.55},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {36th International Symposium on Computational Geometry (SoCG 2020)},
EDITOR = {Cabello, Sergio and Chen, Danny Z.},
PAGES = {1--15},
EID = {55},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {164},
ADDRESS = {Z{\"u}rich, Switzerland (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Kisfaludi-Bak, Sándor
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T A Quasi-Polynomial Algorithm for Well-Spaced Hyperbolic TSP :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-76E8-F
%R 10.4230/LIPIcs.SoCG.2020.55
%U urn:nbn:de:0030-drops-122135
%D 2020
%B 36th International Symposium on Computational Geometry
%Z date of event: 2020-06-23 - 2020-06-26
%C Zürich, Switzerland (Virtual Conference)
%B 36th International Symposium on Computational Geometry
%E Cabello, Sergio; Chen, Danny Z.
%P 1 - 15
%Z sequence number: 55
%I Schloss Dagstuhl
%@ 978-3-95977-143-6
%B Leibniz International Proceedings in Informatics
%N 164
%U https://drops.dagstuhl.de/opus/volltexte/2020/12213/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[68]
S. Kisfaludi-Bak, “Hyperbolic Intersection Graphs and (Quasi)-Polynomial Time,” in Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{SODA/Kisfaludi-Bak20,
TITLE = {Hyperbolic Intersection Graphs and (Quasi)-Polynomial Time},
AUTHOR = {Kisfaludi-Bak, S{\'a}ndor},
LANGUAGE = {eng},
ISBN = {978-1-61197-599-4},
DOI = {10.1137/1.9781611975994.100},
PUBLISHER = {SIAM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020)},
EDITOR = {Chawla, Shuchi},
PAGES = {1621--1638},
ADDRESS = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Kisfaludi-Bak, Sándor
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Hyperbolic Intersection Graphs and (Quasi)-Polynomial Time :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-76EB-C
%R 10.1137/1.9781611975994.100
%D 2020
%B 31st Annual ACM-SIAM Symposium on Discrete Algorithms
%Z date of event: 2020-01-05 - 2020-01-08
%C Salt Lake City, UT, USA
%B Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms
%E Chawla, Shuchi
%P 1621 - 1638
%I SIAM
%@ 978-1-61197-599-4
[69]
S. Kisfaludi-Bak, J. Nederlof, and K. Węgrzycki, “A Gap-ETH-Tight Approximation Scheme for Euclidean TSP,” 2020. [Online]. Available: https://arxiv.org/abs/2011.03778. (arXiv: 2011.03778)
Abstract
We revisit the classic task of finding the shortest tour of $n$ points in
$d$-dimensional Euclidean space, for any fixed constant $d \geq 2$. We
determine the optimal dependence on $\varepsilon$ in the running time of an
algorithm that computes a $(1+\varepsilon)$-approximate tour, under a plausible
assumption. Specifically, we give an algorithm that runs in
$2^{\mathcal{O}(1/\varepsilon^{d-1})} n\log n$ time. This improves the
previously smallest dependence on $\varepsilon$ in the running time
$(1/\varepsilon)^{\mathcal{O}(1/\varepsilon^{d-1})}n \log n$ of the algorithm
by Rao and Smith (STOC 1998). We also show that a
$2^{o(1/\varepsilon^{d-1})}\text{poly}(n)$ algorithm would violate the
Gap-Exponential Time Hypothesis (Gap-ETH).
Our new algorithm builds upon the celebrated quadtree-based methods initially
proposed by Arora (J. ACM 1998), but it adds a simple new idea that we call
\emph{sparsity-sensitive patching}. On a high level this lets the granularity
with which we simplify the tour depend on how sparse it is locally. Our
approach is (arguably) simpler than the one by Rao and Smith since it can work
without geometric spanners. We demonstrate the technique extends easily to
other problems, by showing as an example that it also yields a Gap-ETH-tight
approximation scheme for Rectilinear Steiner Tree.
Export
BibTeX
@online{Kisfaludi-BakNW20,
TITLE = {A Gap-{ETH}-Tight Approximation Scheme for Euclidean {TSP}},
AUTHOR = {Kisfaludi-Bak, S{\'a}ndor and Nederlof, Jesper and W{\k e}grzycki, Karol},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2011.03778},
EPRINT = {2011.03778},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We revisit the classic task of finding the shortest tour of $n$ points in $d$-dimensional Euclidean space, for any fixed constant $d \geq 2$. We determine the optimal dependence on $\varepsilon$ in the running time of an algorithm that computes a $(1+\varepsilon)$-approximate tour, under a plausible assumption. Specifically, we give an algorithm that runs in $2^{\mathcal{O}(1/\varepsilon^{d-1})} n\log n$ time. This improves the previously smallest dependence on $\varepsilon$ in the running time $(1/\varepsilon)^{\mathcal{O}(1/\varepsilon^{d-1})}n \log n$ of the algorithm by Rao and Smith (STOC 1998). We also show that a $2^{o(1/\varepsilon^{d-1})}\text{poly}(n)$ algorithm would violate the Gap-Exponential Time Hypothesis (Gap-ETH). Our new algorithm builds upon the celebrated quadtree-based methods initially proposed by Arora (J. ACM 1998), but it adds a simple new idea that we call \emph{sparsity-sensitive patching}. On a high level this lets the granularity with which we simplify the tour depend on how sparse it is locally. Our approach is (arguably) simpler than the one by Rao and Smith since it can work without geometric spanners. We demonstrate the technique extends easily to other problems, by showing as an example that it also yields a Gap-ETH-tight approximation scheme for Rectilinear Steiner Tree.},
}
Endnote
%0 Report
%A Kisfaludi-Bak, Sándor
%A Nederlof, Jesper
%A Węgrzycki, Karol
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T A Gap-ETH-Tight Approximation Scheme for Euclidean TSP :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-7774-1
%U https://arxiv.org/abs/2011.03778
%D 2020
%X We revisit the classic task of finding the shortest tour of $n$ points in
$d$-dimensional Euclidean space, for any fixed constant $d \geq 2$. We
determine the optimal dependence on $\varepsilon$ in the running time of an
algorithm that computes a $(1+\varepsilon)$-approximate tour, under a plausible
assumption. Specifically, we give an algorithm that runs in
$2^{\mathcal{O}(1/\varepsilon^{d-1})} n\log n$ time. This improves the
previously smallest dependence on $\varepsilon$ in the running time
$(1/\varepsilon)^{\mathcal{O}(1/\varepsilon^{d-1})}n \log n$ of the algorithm
by Rao and Smith (STOC 1998). We also show that a
$2^{o(1/\varepsilon^{d-1})}\text{poly}(n)$ algorithm would violate the
Gap-Exponential Time Hypothesis (Gap-ETH).
Our new algorithm builds upon the celebrated quadtree-based methods initially
proposed by Arora (J. ACM 1998), but it adds a simple new idea that we call
\emph{sparsity-sensitive patching}. On a high level this lets the granularity
with which we simplify the tour depend on how sparse it is locally. Our
approach is (arguably) simpler than the one by Rao and Smith since it can work
without geometric spanners. We demonstrate the technique extends easily to
other problems, by showing as an example that it also yields a Gap-ETH-tight
approximation scheme for Rectilinear Steiner Tree.
%K Computer Science, Computational Geometry, cs.CG,Computer Science, Computational Complexity, cs.CC,Computer Science, Data Structures and Algorithms, cs.DS
[70]
S. Kisfaludi-Bak, J. Nederlof, and E. J. van Leeuwen, “Nearly ETH-tight Algorithms for Planar Steiner Tree with Terminals on Few Faces,” ACM Transactions on Algorithms, 2020.
Export
BibTeX
@article{Kisfaludi-BakNL20b,
TITLE = {Nearly {ETH}-tight Algorithms for Planar {Steiner} Tree with Terminals on Few Faces},
AUTHOR = {Kisfaludi-Bak, S{\'a}ndor and Nederlof, Jesper and van Leeuwen, Erik Jan},
LANGUAGE = {eng},
ISSN = {1549-6325},
DOI = {10.1145/3371389},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Algorithms},
EID = {28},
}
Endnote
%0 Journal Article
%A Kisfaludi-Bak, Sándor
%A Nederlof, Jesper
%A van Leeuwen, Erik Jan
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Nearly ETH-tight Algorithms for Planar Steiner Tree with Terminals on Few Faces :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-7796-A
%R 10.1145/3371389
%7 2020
%D 2020
%J ACM Transactions on Algorithms
%Z sequence number: 28
%I ACM
%C New York, NY
%@ false
[71]
P. Kleer and G. Schäfer, “Computation and Efficiency of Potential Function Minimizers of Combinatorial Congestion Games,” Mathematical Programming, 2020.
Export
BibTeX
@article{Kleer2020,
TITLE = {Computation and Efficiency of Potential Function Minimizers of Combinatorial Congestion Games},
AUTHOR = {Kleer, Pieter and Sch{\"a}fer, Guido},
LANGUAGE = {eng},
DOI = {10.1007/s10107-020-01546-6},
PUBLISHER = {Springer},
ADDRESS = {New York, NY},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {Mathematical Programming},
}
Endnote
%0 Journal Article
%A Kleer, Pieter
%A Schäfer, Guido
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Computation and Efficiency of Potential Function Minimizers of Combinatorial Congestion Games :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-F285-2
%R 10.1007/s10107-020-01546-6
%7 2020
%D 2020
%J Mathematical Programming
%I Springer
%C New York, NY
[72]
P. Kleer, V. Patel, and F. Stroh, “Switch-Based Markov Chains for Sampling Hamiltonian Cycles in Dense Graphs,” The Electronic Journal of Combinatorics, vol. 27, no. 4, 2020.
Export
BibTeX
@article{Kleer_Patel2020,
TITLE = {Switch-Based {Markov} Chains for Sampling {Hamiltonian} Cycles in Dense Graphs},
AUTHOR = {Kleer, Pieter and Patel, Viresh and Stroh, Fabian},
LANGUAGE = {eng},
ISSN = {1077-8926},
DOI = {10.37236/9503},
PUBLISHER = {N.J. Calkin and H.S. Wilf},
ADDRESS = {Atlanta, Ga.},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
JOURNAL = {The Electronic Journal of Combinatorics},
VOLUME = {27},
NUMBER = {4},
EID = {P4.29},
}
Endnote
%0 Journal Article
%A Kleer, Pieter
%A Patel, Viresh
%A Stroh, Fabian
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Switch-Based Markov Chains for Sampling Hamiltonian Cycles in Dense Graphs :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-914C-0
%R 10.37236/9503
%7 2020
%D 2020
%J The Electronic Journal of Combinatorics
%V 27
%N 4
%Z sequence number: P4.29
%I N.J. Calkin and H.S. Wilf
%C Atlanta, Ga.
%@ false
%U https://doi.org/10.37236/9503
[73]
P. Kleer and G. Schäfer, “Topological Price of Anarchy Bounds for Clustering Games on Networks,” 2020. [Online]. Available: https://arxiv.org/abs/2011.09717. (arXiv: 2011.09717)
Abstract
We consider clustering games in which the players are embedded in a network
and want to coordinate (or anti-coordinate) their strategy with their
neighbors. The goal of a player is to choose a strategy that maximizes her
utility given the strategies of her neighbors. Recent studies show that even
very basic variants of these games exhibit a large Price of Anarchy: A large
inefficiency between the total utility generated in centralized outcomes and
equilibrium outcomes in which players selfishly try to maximize their utility.
Our main goal is to understand how structural properties of the network
topology impact the inefficiency of these games. We derive topological bounds
on the Price of Anarchy for different classes of clustering games. These
topological bounds provide a more informative assessment of the inefficiency of
these games than the corresponding (worst-case) Price of Anarchy bounds. As one
of our main results, we derive (tight) bounds on the Price of Anarchy for
clustering games on Erd\H{o}s-R\'enyi random graphs (where every possible edge
in the network is present with a fixed probability), which, depending on the
graph density, stand in stark contrast to the known Price of Anarchy bounds.
Export
BibTeX
@online{Kleer_arXiv2011.09717,
TITLE = {Topological Price of Anarchy Bounds for Clustering Games on Networks},
AUTHOR = {Kleer, Pieter and Sch{\"a}fer, Guido},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2011.09717},
DOI = {10.1007/978-3-030-35389-6_18},
EPRINT = {2011.09717},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We consider clustering games in which the players are embedded in a network and want to coordinate (or anti-coordinate) their strategy with their neighbors. The goal of a player is to choose a strategy that maximizes her utility given the strategies of her neighbors. Recent studies show that even very basic variants of these games exhibit a large Price of Anarchy: A large inefficiency between the total utility generated in centralized outcomes and equilibrium outcomes in which players selfishly try to maximize their utility. Our main goal is to understand how structural properties of the network topology impact the inefficiency of these games. We derive topological bounds on the Price of Anarchy for different classes of clustering games. These topological bounds provide a more informative assessment of the inefficiency of these games than the corresponding (worst-case) Price of Anarchy bounds. As one of our main results, we derive (tight) bounds on the Price of Anarchy for clustering games on Erd\H{o}s-R\'enyi random graphs (where every possible edge in the network is present with a fixed probability), which, depending on the graph density, stand in stark contrast to the known Price of Anarchy bounds.},
}
Endnote
%0 Report
%A Kleer, Pieter
%A Schäfer, Guido
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Topological Price of Anarchy Bounds for Clustering Games on Networks :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-915F-B
%R 10.1007/978-3-030-35389-6_18
%U https://arxiv.org/abs/2011.09717
%D 2020
%X We consider clustering games in which the players are embedded in a network
and want to coordinate (or anti-coordinate) their strategy with their
neighbors. The goal of a player is to choose a strategy that maximizes her
utility given the strategies of her neighbors. Recent studies show that even
very basic variants of these games exhibit a large Price of Anarchy: A large
inefficiency between the total utility generated in centralized outcomes and
equilibrium outcomes in which players selfishly try to maximize their utility.
Our main goal is to understand how structural properties of the network
topology impact the inefficiency of these games. We derive topological bounds
on the Price of Anarchy for different classes of clustering games. These
topological bounds provide a more informative assessment of the inefficiency of
these games than the corresponding (worst-case) Price of Anarchy bounds. As one
of our main results, we derive (tight) bounds on the Price of Anarchy for
clustering games on Erd\H{o}s-R\'enyi random graphs (where every possible edge
in the network is present with a fixed probability), which, depending on the
graph density, stand in stark contrast to the known Price of Anarchy bounds.
%K Computer Science, Computer Science and Game Theory, cs.GT
[74]
M. Künnemann and D. Marx, “Finding Small Satisfying Assignments Faster Than Brute Force: A Fine-Grained Perspective into Boolean Constraint Satisfaction,” in 35th Computational Complexity Conference (CCC 2020), Saarbrücken, Germany (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Kuennemann_CCC2020,
TITLE = {Finding Small Satisfying Assignments Faster Than Brute Force: {A} Fine-Grained Perspective into Boolean Constraint Satisfaction},
AUTHOR = {K{\"u}nnemann, Marvin and Marx, D{\'a}niel},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-156-6},
URL = {urn:nbn:de:0030-drops-125791},
DOI = {10.4230/LIPIcs.CCC.2020.27},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {35th Computational Complexity Conference (CCC 2020)},
EDITOR = {Saraf, Shubhangi},
EID = {27},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {169},
ADDRESS = {Saarbr{\"u}cken, Germany (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Künnemann, Marvin
%A Marx, Dániel
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Finding Small Satisfying Assignments Faster Than Brute Force: A Fine-Grained Perspective into Boolean Constraint Satisfaction :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-491C-9
%R 10.4230/LIPIcs.CCC.2020.27
%U urn:nbn:de:0030-drops-125791
%D 2020
%B 35th Computational Complexity Conference
%Z date of event: 2020-07-28 - 2020-07-31
%C Saarbrücken, Germany (Virtual Conference)
%B 35th Computational Complexity Conference
%E Saraf, Shubhangi
%Z sequence number: 27
%I Schloss Dagstuhl
%@ 978-3-95977-156-6
%B Leibniz International Proceedings in Informatics
%N 169
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12579/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[75]
M. Künnemann and D. Marx, “Finding Small Satisfying Assignments Faster Than Brute Force: A Fine-grained Perspective into Boolean Constraint Satisfaction,” 2020. [Online]. Available: https://arxiv.org/abs/2005.11541. (arXiv: 2005.11541)
Abstract
To study the question under which circumstances small solutions can be found
faster than by exhaustive search (and by how much), we study the fine-grained
complexity of Boolean constraint satisfaction with size constraint exactly $k$.
More precisely, we aim to determine, for any finite constraint family, the
optimal running time $f(k)n^{g(k)}$ required to find satisfying assignments
that set precisely $k$ of the $n$ variables to $1$.
Under central hardness assumptions on detecting cliques in graphs and
3-uniform hypergraphs, we give an almost tight characterization of $g(k)$ into
four regimes: (1) Brute force is essentially best-possible, i.e., $g(k) = (1\pm
o(1))k$, (2) the best algorithms are as fast as current $k$-clique algorithms,
i.e., $g(k)=(\omega/3\pm o(1))k$, (3) the exponent has sublinear dependence on
$k$ with $g(k) \in [\Omega(\sqrt[3]{k}), O(\sqrt{k})]$, or (4) the problem is
fixed-parameter tractable, i.e., $g(k) = O(1)$.
This yields a more fine-grained perspective than a previous FPT/W[1]-hardness
dichotomy (Marx, Computational Complexity 2005). Our most interesting technical
contribution is a $f(k)n^{4\sqrt{k}}$-time algorithm for SubsetSum with
precedence constraints parameterized by the target $k$ -- particularly the
approach, based on generalizing a bound on the Frobenius coin problem to a
setting with precedence constraints, might be of independent interest.
Export
BibTeX
@online{Kuennemann_arXiv2005.11541,
TITLE = {Finding Small Satisfying Assignments Faster Than Brute Force: {A} Fine-grained Perspective into {B}oolean Constraint Satisfaction},
AUTHOR = {K{\"u}nnemann, Marvin and Marx, D{\'a}niel},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2005.11541},
EPRINT = {2005.11541},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.CC},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {To study the question under which circumstances small solutions can be found faster than by exhaustive search (and by how much), we study the fine-grained complexity of Boolean constraint satisfaction with size constraint exactly $k$. More precisely, we aim to determine, for any finite constraint family, the optimal running time $f(k)n^{g(k)}$ required to find satisfying assignments that set precisely $k$ of the $n$ variables to $1$. Under central hardness assumptions on detecting cliques in graphs and 3-uniform hypergraphs, we give an almost tight characterization of $g(k)$ into four regimes: (1) Brute force is essentially best-possible, i.e., $g(k) = (1\pm o(1))k$, (2) the best algorithms are as fast as current $k$-clique algorithms, i.e., $g(k)=(\omega/3\pm o(1))k$, (3) the exponent has sublinear dependence on $k$ with $g(k) \in [\Omega(\sqrt[3]{k}), O(\sqrt{k})]$, or (4) the problem is fixed-parameter tractable, i.e., $g(k) = O(1)$. This yields a more fine-grained perspective than a previous FPT/W[1]-hardness dichotomy (Marx, Computational Complexity 2005). Our most interesting technical contribution is a $f(k)n^{4\sqrt{k}}$-time algorithm for SubsetSum with precedence constraints parameterized by the target $k$ -- particularly the approach, based on generalizing a bound on the Frobenius coin problem to a setting with precedence constraints, might be of independent interest.},
}
Endnote
%0 Report
%A Künnemann, Marvin
%A Marx, Dániel
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Finding Small Satisfying Assignments Faster Than Brute Force: A
Fine-grained Perspective into Boolean Constraint Satisfaction :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-492E-5
%U https://arxiv.org/abs/2005.11541
%D 2020
%X To study the question under which circumstances small solutions can be found
faster than by exhaustive search (and by how much), we study the fine-grained
complexity of Boolean constraint satisfaction with size constraint exactly $k$.
More precisely, we aim to determine, for any finite constraint family, the
optimal running time $f(k)n^{g(k)}$ required to find satisfying assignments
that set precisely $k$ of the $n$ variables to $1$.
Under central hardness assumptions on detecting cliques in graphs and
3-uniform hypergraphs, we give an almost tight characterization of $g(k)$ into
four regimes: (1) Brute force is essentially best-possible, i.e., $g(k) = (1\pm
o(1))k$, (2) the best algorithms are as fast as current $k$-clique algorithms,
i.e., $g(k)=(\omega/3\pm o(1))k$, (3) the exponent has sublinear dependence on
$k$ with $g(k) \in [\Omega(\sqrt[3]{k}), O(\sqrt{k})]$, or (4) the problem is
fixed-parameter tractable, i.e., $g(k) = O(1)$.
This yields a more fine-grained perspective than a previous FPT/W[1]-hardness
dichotomy (Marx, Computational Complexity 2005). Our most interesting technical
contribution is a $f(k)n^{4\sqrt{k}}$-time algorithm for SubsetSum with
precedence constraints parameterized by the target $k$ -- particularly the
approach, based on generalizing a bound on the Frobenius coin problem to a
setting with precedence constraints, might be of independent interest.
%K Computer Science, Computational Complexity, cs.CC,Computer Science, Data Structures and Algorithms, cs.DS
[76]
C. Lenzen and B. Wiederhake, “Brief Announcement: TRIX: Low-Skew Pulse Propagation for Fault-Tolerant Hardware,” in Stabilization, Safety, and Security of Distributed Systems (SSS 2020), Austin, TX, USA (Virtual Event), 2020.
Export
BibTeX
@inproceedings{Lenzen_SSS2020,
  title        = {Brief Announcement: {TRIX}: {L}ow-Skew Pulse Propagation for Fault-Tolerant Hardware},
  author       = {Lenzen, Christoph and Wiederhake, Ben},
  language     = {eng},
  isbn         = {978-3-030-64347-8},
  doi          = {10.1007/978-3-030-64348-5_23},
  publisher    = {Springer},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
  booktitle    = {Stabilization, Safety, and Security of Distributed Systems (SSS 2020)},
  editor       = {Devismes, St{\'e}phane and Mittal, Neeraj},
  pages        = {295--300},
  series       = {Lecture Notes in Computer Science},
  volume       = {12514},
  address      = {Austin, TX, USA (Virtual Event)},
}
Endnote
%0 Conference Proceedings
%A Lenzen, Christoph
%A Wiederhake, Ben
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Brief Announcement: TRIX: Low-Skew Pulse Propagation for Fault-Tolerant Hardware :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9058-3
%R 10.1007/978-3-030-64348-5_23
%D 2020
%B 22nd International Symposium on Stabilization, Safety, and Security of Distributed Systems
%Z date of event: 2020-11-18 - 2020-11-21
%C Austin, TX, USA (Virtual Event)
%B Stabilization, Safety, and Security of Distributed Systems
%E Devismes, Stéphane; Mittal, Neeraj
%P 295 - 300
%I Springer
%@ 978-3-030-64347-8
%B Lecture Notes in Computer Science
%N 12514
[77]
C. Lenzen and B. Wiederhake, “TRIX: Low-Skew Pulse Propagation for Fault-Tolerant Hardware,” 2020. [Online]. Available: https://arxiv.org/abs/2010.01415. (arXiv: 2010.01415)
Abstract
The vast majority of hardware architectures use a carefully timed reference
signal to clock their computational logic. However, standard distribution
solutions are not fault-tolerant. In this work, we present a simple grid
structure as a more reliable clock propagation method and study it by means of
simulation experiments. Fault-tolerance is achieved by forwarding clock pulses
on arrival of the second of three incoming signals from the previous layer.
A key question is how well neighboring grid nodes are synchronized, even
without faults. Analyzing the clock skew under typical-case conditions is
highly challenging. Because the forwarding mechanism involves taking the
median, standard probabilistic tools fail, even when modeling link delays just
by unbiased coin flips.
Our statistical approach provides substantial evidence that this system
performs surprisingly well. Specifically, in an "infinitely wide" grid of
height~$H$, the delay at a pre-selected node exhibits a standard deviation of
$O(H^{1/4})$ ($\approx 2.7$ link delay uncertainties for $H=2000$) and skew
between adjacent nodes of $o(\log \log H)$ ($\approx 0.77$ link delay
uncertainties for $H=2000$). We conclude that the proposed system is a very
promising clock distribution method. This leads to the open problem of a
stochastic explanation of the tight concentration of delays and skews. More
generally, we believe that understanding our very simple abstraction of the
system is of mathematical interest in its own right.
Export
BibTeX
@online{Lenzen_arXiv2010.01415,
TITLE = {{TRIX}: {L}ow-Skew Pulse Propagation for Fault-Tolerant Hardware},
AUTHOR = {Lenzen, Christoph and Wiederhake, Ben},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2010.01415},
EPRINT = {2010.01415},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.DC},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {The vast majority of hardware architectures use a carefully timed reference signal to clock their computational logic. However, standard distribution solutions are not fault-tolerant. In this work, we present a simple grid structure as a more reliable clock propagation method and study it by means of simulation experiments. Fault-tolerance is achieved by forwarding clock pulses on arrival of the second of three incoming signals from the previous layer. A key question is how well neighboring grid nodes are synchronized, even without faults. Analyzing the clock skew under typical-case conditions is highly challenging. Because the forwarding mechanism involves taking the median, standard probabilistic tools fail, even when modeling link delays just by unbiased coin flips. Our statistical approach provides substantial evidence that this system performs surprisingly well. Specifically, in an "infinitely wide" grid of height~$H$, the delay at a pre-selected node exhibits a standard deviation of $O(H^{1/4})$ ($\approx 2.7$ link delay uncertainties for $H=2000$) and skew between adjacent nodes of $o(\log \log H)$ ($\approx 0.77$ link delay uncertainties for $H=2000$). We conclude that the proposed system is a very promising clock distribution method. This leads to the open problem of a stochastic explanation of the tight concentration of delays and skews. More generally, we believe that understanding our very simple abstraction of the system is of mathematical interest in its own right.},
}
Endnote
%0 Report
%A Lenzen, Christoph
%A Wiederhake, Ben
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T TRIX: Low-Skew Pulse Propagation for Fault-Tolerant Hardware :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-904F-E
%U https://arxiv.org/abs/2010.01415
%D 2020
%X The vast majority of hardware architectures use a carefully timed reference
signal to clock their computational logic. However, standard distribution
solutions are not fault-tolerant. In this work, we present a simple grid
structure as a more reliable clock propagation method and study it by means of
simulation experiments. Fault-tolerance is achieved by forwarding clock pulses
on arrival of the second of three incoming signals from the previous layer.
A key question is how well neighboring grid nodes are synchronized, even
without faults. Analyzing the clock skew under typical-case conditions is
highly challenging. Because the forwarding mechanism involves taking the
median, standard probabilistic tools fail, even when modeling link delays just
by unbiased coin flips.
Our statistical approach provides substantial evidence that this system
performs surprisingly well. Specifically, in an "infinitely wide" grid of
height~$H$, the delay at a pre-selected node exhibits a standard deviation of
$O(H^{1/4})$ ($\approx 2.7$ link delay uncertainties for $H=2000$) and skew
between adjacent nodes of $o(\log \log H)$ ($\approx 0.77$ link delay
uncertainties for $H=2000$). We conclude that the proposed system is a very
promising clock distribution method. This leads to the open problem of a
stochastic explanation of the tight concentration of delays and skews. More
generally, we believe that understanding our very simple abstraction of the
system is of mathematical interest in its own right.
%K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[78]
W. Liu, F. Lombardi, M. Shulte, D. J. Miller, Z. Xiang, G. Kesidis, A. Oulasvirta, N. R. Dayama, M. Shiripour, M. John, A. Karrenbauer, and A. Allerhand, “Scanning the Issue,” Proceedings of the IEEE, vol. 108, no. 3, 2020.
Export
BibTeX
@article{Liu2020,
  title        = {Scanning the Issue},
  author       = {Liu, Weiqiang and Lombardi, Fabrizio and Shulte, Michael and Miller, David J. and Xiang, Zhen and Kesidis, George and Oulasvirta, Antti and Dayama, Niraj Ramesh and Shiripour, Morteza and John, Maximilian and Karrenbauer, Andreas and Allerhand, Adam},
  language     = {eng},
  issn         = {0018-9219},
  doi          = {10.1109/JPROC.2020.2975522},
  publisher    = {IEEE},
  address      = {New York, NY},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
  journal      = {Proceedings of the IEEE},
  volume       = {108},
  number       = {3},
  pages        = {400--401},
}
Endnote
%0 Journal Article
%A Liu, Weiqiang
%A Lombardi, Fabrizio
%A Shulte, Michael
%A Miller, David J.
%A Xiang, Zhen
%A Kesidis, George
%A Oulasvirta, Antti
%A Dayama, Niraj Ramesh
%A Shiripour, Morteza
%A John, Maximilian
%A Karrenbauer, Andreas
%A Allerhand, Adam
%+ External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Scanning the Issue :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-4670-C
%R 10.1109/JPROC.2020.2975522
%7 2020
%D 2020
%J Proceedings of the IEEE
%O Proc. IEEE
%V 108
%N 3
%& 400
%P 400 - 401
%I IEEE
%C New York, NY
%@ false
[79]
Y. Li and V. Nakos, “Sublinear-Time Algorithms for Compressive Phase Retrieval,” IEEE Transactions on Information Theory, vol. 66, no. 11, 2020.
Export
BibTeX
@article{Li_10.1109/TIT.2020.3020701,
  title        = {Sublinear-Time Algorithms for Compressive Phase Retrieval},
  author       = {Li, Yi and Nakos, Vasileios},
  language     = {eng},
  issn         = {0018-9448},
  doi          = {10.1109/TIT.2020.3020701},
  publisher    = {IEEE},
  address      = {Piscataway, NJ},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
  journal      = {IEEE Transactions on Information Theory},
  volume       = {66},
  number       = {11},
  pages        = {7302--7310},
}
Endnote
%0 Journal Article
%A Li, Yi
%A Nakos, Vasileios
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Sublinear-Time Algorithms for Compressive Phase Retrieval :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-567C-E
%R 10.1109/TIT.2020.3020701
%7 2020
%D 2020
%J IEEE Transactions on Information Theory
%V 66
%N 11
%& 7302
%P 7302 - 7310
%I IEEE
%C Piscataway, NJ
%@ false
[80]
Y. Li and V. Nakos, “Deterministic Sparse Fourier Transform with an ℓ_{∞} Guarantee,” in 47th International Colloquium on Automata, Languages, and Programming (ICALP 2020), Saarbrücken, Germany (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Li_ICALP2020,
TITLE = {Deterministic Sparse {F}ourier Transform with an $\ell_{\infty}$ Guarantee},
AUTHOR = {Li, Yi and Nakos, Vasileios},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-138-2},
URL = {urn:nbn:de:0030-drops-124844},
DOI = {10.4230/LIPIcs.ICALP.2020.77},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {47th International Colloquium on Automata, Languages, and Programming (ICALP 2020)},
EDITOR = {Czumaj, Artur and Dawar, Anuj and Merelli, Emanuela},
EID = {77},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {168},
ADDRESS = {Saarbr{\"u}cken, Germany (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Li, Yi
%A Nakos, Vasileios
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Deterministic Sparse Fourier Transform with an ℓ_{∞} Guarantee :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-56A6-D
%R 10.4230/LIPIcs.ICALP.2020.77
%U urn:nbn:de:0030-drops-124844
%D 2020
%B 47th International Colloquium on Automata, Languages, and Programming
%Z date of event: 2020-07-08 - 2020-07-11
%C Saarbrücken, Germany (Virtual Conference)
%B 47th International Colloquium on Automata, Languages, and Programming
%E Czumaj, Artur; Dawar, Anuj; Merelli, Emanuela
%Z sequence number: 77
%I Schloss Dagstuhl
%@ 978-3-95977-138-2
%B Leibniz International Proceedings in Informatics
%N 168
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12484/
[81]
D. Lokshtanov, P. Misra, J. Mukherjee, F. Panolan, G. Philip, and S. Saurabh, “2-Approximating Feedback Vertex Set in Tournaments,” in Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{Lokshtanov_SODA20,
  title        = {2-Approximating Feedback Vertex Set in Tournaments},
  author       = {Lokshtanov, Daniel and Misra, Pranabendu and Mukherjee, Joydeep and Panolan, Fahad and Philip, Geevarghese and Saurabh, Saket},
  language     = {eng},
  isbn         = {978-1-61197-599-4},
  doi          = {10.5555/3381089.3381150},
  publisher    = {SIAM},
  year         = {2020},
  marginalmark = {$\bullet$},
  booktitle    = {Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020)},
  editor       = {Chawla, Shuchi},
  pages        = {1010--1018},
  address      = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Lokshtanov, Daniel
%A Misra, Pranabendu
%A Mukherjee, Joydeep
%A Panolan, Fahad
%A Philip, Geevarghese
%A Saurabh, Saket
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
%T 2-Approximating Feedback Vertex Set in Tournaments :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-F276-4
%R 10.5555/3381089.3381150
%D 2020
%B 31st Annual ACM-SIAM Symposium on Discrete Algorithms
%Z date of event: 2020-01-05 - 2020-01-08
%C Salt Lake City, UT, USA
%B Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms
%E Chawla, Shuchi
%P 1010 - 1018
%I SIAM
%@ 978-1-61197-599-4
[82]
D. Lokshtanov, P. Misra, F. Panolan, G. Philip, and S. Saurabh, “A (2 + ε)-Factor Approximation Algorithm for Split Vertex Deletion,” in 47th International Colloquium on Automata, Languages, and Programming (ICALP 2020), Saarbrücken, Germany (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Lokshtanov_ICALP2020,
TITLE = {A (2 + $\epsilon$)-Factor Approximation Algorithm for Split Vertex Deletion},
AUTHOR = {Lokshtanov, Daniel and Misra, Pranabendu and Panolan, Fahad and Philip, Geevarghese and Saurabh, Saket},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-138-2},
URL = {urn:nbn:de:0030-drops-124879},
DOI = {10.4230/LIPIcs.ICALP.2020.80},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {47th International Colloquium on Automata, Languages, and Programming (ICALP 2020)},
EDITOR = {Czumaj, Artur and Dawar, Anuj and Merelli, Emanuela},
EID = {80},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {168},
ADDRESS = {Saarbr{\"u}cken, Germany (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Lokshtanov, Daniel
%A Misra, Pranabendu
%A Panolan, Fahad
%A Philip, Geevarghese
%A Saurabh, Saket
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T A (2 + ε)-Factor Approximation Algorithm for Split Vertex Deletion :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8EF5-5
%R 10.4230/LIPIcs.ICALP.2020.80
%U urn:nbn:de:0030-drops-124879
%D 2020
%B 47th International Colloquium on Automata, Languages, and Programming
%Z date of event: 2020-07-08 - 2020-07-11
%C Saarbrücken, Germany (Virtual Conference)
%B 47th International Colloquium on Automata, Languages, and Programming
%E Czumaj, Artur; Dawar, Anuj; Merelli, Emanuela
%Z sequence number: 80
%I Schloss Dagstuhl
%@ 978-3-95977-138-2
%B Leibniz International Proceedings in Informatics
%N 168
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12487/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[83]
D. Marx, “Four Short Stories on Surprising Algorithmic Uses of Treewidth,” in Treewidth, Kernels, and Algorithms, Berlin: Springer, 2020.
Export
BibTeX
@incollection{Marx_Four2020,
TITLE = {Four Short Stories on Surprising Algorithmic Uses of Treewidth},
AUTHOR = {Marx, D{\'a}niel},
LANGUAGE = {eng},
ISBN = {978-3-030-42070-3},
DOI = {10.1007/978-3-030-42071-0_10},
PUBLISHER = {Springer},
ADDRESS = {Berlin},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
BOOKTITLE = {Treewidth, Kernels, and Algorithms},
EDITOR = {Fomin, Fedor V. and Kratsch, Stefan and van Leeuwen, Erik Jan},
PAGES = {129--144},
SERIES = {Lecture Notes in Computer Science},
VOLUME = {12160},
}
Endnote
%0 Book Section
%A Marx, Dániel
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Four Short Stories on Surprising Algorithmic Uses of Treewidth :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-4911-4
%R 10.1007/978-3-030-42071-0_10
%D 2020
%B Treewidth, Kernels, and Algorithms
%E Fomin, Fedor V.; Kratsch, Stefan; van Leeuwen, Erik Jan
%P 129 - 144
%I Springer
%C Berlin
%@ 978-3-030-42070-3
%S Lecture Notes in Computer Science
%N 12160
[84]
D. Marx and R. B. Sandeep, “Incompressibility of H-free Edge Modification Problems: Towards a Dichotomy,” 2020. [Online]. Available: https://arxiv.org/abs/2004.11761. (arXiv: 2004.11761)
Abstract
Given a graph $G$ and an integer $k$, the $H$-free Edge Editing problem is to
find whether there exists at most $k$ pairs of vertices in $G$ such that
changing the adjacency of the pairs in $G$ results in a graph without any
induced copy of $H$. The existence of polynomial kernels for $H$-free Edge
Editing received significant attention in the parameterized complexity
literature. Nontrivial polynomial kernels are known to exist for some graphs
$H$ with at most 4 vertices, but starting from 5 vertices, polynomial kernels
are known only if $H$ is either complete or empty. This suggests the conjecture
that there is no other $H$ with at least 5 vertices were $H$-free Edge Editing
admits a polynomial kernel. Towards this goal, we obtain a set $\mathcal{H}$ of
nine 5-vertex graphs such that if for every $H\in\mathcal{H}$, $H$-free Edge
Editing is incompressible and the complexity assumption $NP \not\subseteq
coNP/poly$ holds, then $H$-free Edge Editing is incompressible for every graph
$H$ with at least five vertices that is neither complete nor empty. That is,
proving incompressibility for these nine graphs would give a complete
classification of the kernelization complexity of $H$-free Edge Editing for
every $H$ with at least 5 vertices.
We obtain similar result also for $H$-free Edge Deletion. Here the picture is
more complicated due to the existence of another infinite family of graphs $H$
where the problem is trivial (graphs with exactly one edge). We obtain a larger
set $\mathcal{H}$ of nineteen graphs whose incompressibility would give a
complete classification of the kernelization complexity of $H$-free Edge
Deletion for every graph $H$ with at least 5 vertices. Analogous results follow
also for the $H$-free Edge Completion problem by simple complementation.
Export
BibTeX
@online{Marx_arXiv2004.11761,
TITLE = {Incompressibility of H-free Edge Modification Problems: {T}owards a Dichotomy},
AUTHOR = {Marx, D{\'a}niel and Sandeep, R. B.},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2004.11761},
EPRINT = {2004.11761},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.DS},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Given a graph $G$ and an integer $k$, the $H$-free Edge Editing problem is to find whether there exists at most $k$ pairs of vertices in $G$ such that changing the adjacency of the pairs in $G$ results in a graph without any induced copy of $H$. The existence of polynomial kernels for $H$-free Edge Editing received significant attention in the parameterized complexity literature. Nontrivial polynomial kernels are known to exist for some graphs $H$ with at most 4 vertices, but starting from 5 vertices, polynomial kernels are known only if $H$ is either complete or empty. This suggests the conjecture that there is no other $H$ with at least 5 vertices were $H$-free Edge Editing admits a polynomial kernel. Towards this goal, we obtain a set $\mathcal{H}$ of nine 5-vertex graphs such that if for every $H\in\mathcal{H}$, $H$-free Edge Editing is incompressible and the complexity assumption $NP \not\subseteq coNP/poly$ holds, then $H$-free Edge Editing is incompressible for every graph $H$ with at least five vertices that is neither complete nor empty. That is, proving incompressibility for these nine graphs would give a complete classification of the kernelization complexity of $H$-free Edge Editing for every $H$ with at least 5 vertices. We obtain similar result also for $H$-free Edge Deletion. Here the picture is more complicated due to the existence of another infinite family of graphs $H$ where the problem is trivial (graphs with exactly one edge). We obtain a larger set $\mathcal{H}$ of nineteen graphs whose incompressibility would give a complete classification of the kernelization complexity of $H$-free Edge Deletion for every graph $H$ with at least 5 vertices. Analogous results follow also for the $H$-free Edge Completion problem by simple complementation.},
}
Endnote
%0 Report
%A Marx, Dániel
%A Sandeep, R. B.
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Incompressibility of H-free Edge Modification Problems: Towards a
Dichotomy :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-492A-9
%U https://arxiv.org/abs/2004.11761
%D 2020
%X Given a graph $G$ and an integer $k$, the $H$-free Edge Editing problem is to
find whether there exists at most $k$ pairs of vertices in $G$ such that
changing the adjacency of the pairs in $G$ results in a graph without any
induced copy of $H$. The existence of polynomial kernels for $H$-free Edge
Editing received significant attention in the parameterized complexity
literature. Nontrivial polynomial kernels are known to exist for some graphs
$H$ with at most 4 vertices, but starting from 5 vertices, polynomial kernels
are known only if $H$ is either complete or empty. This suggests the conjecture
that there is no other $H$ with at least 5 vertices were $H$-free Edge Editing
admits a polynomial kernel. Towards this goal, we obtain a set $\mathcal{H}$ of
nine 5-vertex graphs such that if for every $H\in\mathcal{H}$, $H$-free Edge
Editing is incompressible and the complexity assumption $NP \not\subseteq
coNP/poly$ holds, then $H$-free Edge Editing is incompressible for every graph
$H$ with at least five vertices that is neither complete nor empty. That is,
proving incompressibility for these nine graphs would give a complete
classification of the kernelization complexity of $H$-free Edge Editing for
every $H$ with at least 5 vertices.
We obtain similar result also for $H$-free Edge Deletion. Here the picture is
more complicated due to the existence of another infinite family of graphs $H$
where the problem is trivial (graphs with exactly one edge). We obtain a larger
set $\mathcal{H}$ of nineteen graphs whose incompressibility would give a
complete classification of the kernelization complexity of $H$-free Edge
Deletion for every graph $H$ with at least 5 vertices. Analogous results follow
also for the $H$-free Edge Completion problem by simple complementation.
%K Computer Science, Data Structures and Algorithms, cs.DS
[85]
D. Marx, “Four Short Stories on Surprising Algorithmic Uses of Treewidth,” 2020. [Online]. Available: https://arxiv.org/abs/2008.07968. (arXiv: 2008.07968)
Abstract
This article briefly describes four algorithmic problems where the notion of
treewidth is very useful. Even though the problems themselves have nothing to
do with treewidth, it turns out that combining known results on treewidth
allows us to easily describe very clean and high-level algorithms.
Export
BibTeX
@online{Marx_arXiv2008.07968,
TITLE = {Four Short Stories on Surprising Algorithmic Uses of Treewidth},
AUTHOR = {Marx, D{\'a}niel},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2008.07968},
EPRINT = {2008.07968},
EPRINTTYPE = {arXiv},
EPRINTCLASS = {cs.DS},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {This article briefly describes four algorithmic problems where the notion of treewidth is very useful. Even though the problems themselves have nothing to do with treewidth, it turns out that combining known results on treewidth allows us to easily describe very clean and high-level algorithms.},
}
Endnote
%0 Report
%A Marx, Dániel
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Four Short Stories on Surprising Algorithmic Uses of Treewidth :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-4950-D
%U https://arxiv.org/abs/2008.07968
%D 2020
%X This article briefly describes four algorithmic problems where the notion of
treewidth is very useful. Even though the problems themselves have nothing to
do with treewidth, it turns out that combining known results on treewidth
allows us to easily describe very clean and high-level algorithms.
%K Computer Science, Data Structures and Algorithms, cs.DS
[86]
P. Misra, F. Panolan, A. Rai, S. Saurabh, and R. Sharma, “Quick Separation in Chordal and Split Graphs,” in 45th International Symposium on Mathematical Foundations of Computer Science (MFCS 2020), Prague, Czech Republic (Virtual Event), 2020.
Export
BibTeX
@inproceedings{Misra_MFCS20,
TITLE = {Quick Separation in Chordal and Split Graphs},
AUTHOR = {Misra, Pranabendu and Panolan, Fahad and Rai, Ashutosh and Saurabh, Saket and Sharma, Roohani},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-159-7},
URL = {urn:nbn:de:0030-drops-127391},
DOI = {10.4230/LIPIcs.MFCS.2020.70},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {45th International Symposium on Mathematical Foundations of Computer Science (MFCS 2020)},
EDITOR = {Esparza, Javier and Kr{\'a}l', Daniel},
EID = {70},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {170},
ADDRESS = {Prague, Czech Republic (Virtual Event)},
}
Endnote
%0 Conference Proceedings
%A Misra, Pranabendu
%A Panolan, Fahad
%A Rai, Ashutosh
%A Saurabh, Saket
%A Sharma, Roohani
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
%T Quick Separation in Chordal and Split Graphs :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9380-1
%R 10.4230/LIPIcs.MFCS.2020.70
%U urn:nbn:de:0030-drops-127391
%D 2020
%B 45th International Symposium on Mathematical Foundations of Computer Science
%Z date of event: 2020-08-25 - 2020-08-26
%C Prague, Czech Republic (Virtual Event)
%B 45th International Symposium on Mathematical Foundations of Computer Science
%E Esparza, Javier; Král', Daniel
%Z sequence number: 70
%I Schloss Dagstuhl
%@ 978-3-95977-159-7
%B Leibniz International Proceedings in Informatics
%N 170
%@ false
%U https://drops.dagstuhl.de/opus/frontdoor.php?source_opus=12739
%U https://creativecommons.org/licenses/by/3.0/legalcode
[87]
V. Nakos, “Nearly Optimal Sparse Polynomial Multiplication,” IEEE Transactions on Information Theory, vol. 66, no. 11, 2020.
Export
BibTeX
@article{Nakos_10.1109/TIT.2020.2989385,
  title        = {Nearly Optimal Sparse Polynomial Multiplication},
  author       = {Nakos, Vasileios},
  language     = {eng},
  issn         = {0018-9448},
  doi          = {10.1109/TIT.2020.2989385},
  publisher    = {IEEE},
  address      = {Piscataway, NJ},
  year         = {2020},
  marginalmark = {$\bullet$},
  date         = {2020},
  journal      = {IEEE Transactions on Information Theory},
  volume       = {66},
  number       = {11},
  pages        = {7231--7236},
}
Endnote
%0 Journal Article
%A Nakos, Vasileios
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Nearly Optimal Sparse Polynomial Multiplication :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-567A-0
%R 10.1109/TIT.2020.2989385
%7 2020
%D 2020
%J IEEE Transactions on Information Theory
%V 66
%N 11
%& 7231
%P 7231 - 7236
%I IEEE
%C Piscataway, NJ
%@ false
[88]
D. Neuen, “Hypergraph Isomorphism for Groups with Restricted Composition Factors,” in 47th International Colloquium on Automata, Languages, and Programming (ICALP 2020), Saarbrücken, Germany (Virtual Conference), 2020.
Export
BibTeX
@inproceedings{Neuen_ICALP2020,
TITLE = {Hypergraph Isomorphism for Groups with Restricted Composition Factors},
AUTHOR = {Neuen, Daniel},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-138-2},
URL = {urn:nbn:de:0030-drops-124959},
DOI = {10.4230/LIPIcs.ICALP.2020.88},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {47th International Colloquium on Automata, Languages, and Programming (ICALP 2020)},
EDITOR = {Czumaj, Artur and Dawar, Anuj and Merelli, Emanuela},
EID = {88},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {168},
ADDRESS = {Saarbr{\"u}cken, Germany (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Neuen, Daniel
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Hypergraph Isomorphism for Groups with Restricted Composition Factors :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-6DCA-C
%R 10.4230/LIPIcs.ICALP.2020.88
%U urn:nbn:de:0030-drops-124959
%D 2020
%B 47th International Colloquium on Automata, Languages, and Programming
%Z date of event: 2020-07-08 - 2020-07-11
%C Saarbrücken, Germany (Virtual Conference)
%B 47th International Colloquium on Automata, Languages, and Programming
%E Czumaj, Artur; Dawar, Anuj; Merelli, Emanuela
%Z sequence number: 88
%I Schloss Dagstuhl
%@ 978-3-95977-138-2
%B Leibniz International Proceedings in Informatics
%N 168
%@ false
%U https://drops.dagstuhl.de/opus/volltexte/2020/12495/
%U https://creativecommons.org/licenses/by/3.0/legalcode
[89]
E. Oh and H.-K. Ahn, “Voronoi Diagrams for a Moderate-Sized Point-Set in a Simple Polygon,” Discrete & Computational Geometry, vol. 63, no. 2, 2020.
Export
BibTeX
@article{Oh2020,
TITLE = {Voronoi Diagrams for a Moderate-Sized Point-Set in a Simple Polygon},
AUTHOR = {Oh, Eunjin and Ahn, Hee-Kap},
LANGUAGE = {eng},
ISSN = {0179-5376},
DOI = {10.1007/s00454-019-00063-4},
PUBLISHER = {Springer},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {Discrete \& Computational Geometry},
VOLUME = {63},
NUMBER = {2},
PAGES = {418--454},
}
Endnote
%0 Journal Article
%A Oh, Eunjin
%A Ahn, Hee-Kap
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Voronoi Diagrams for a Moderate-Sized Point-Set in a Simple Polygon :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-8E04-6
%R 10.1007/s00454-019-00063-4
%7 2019
%D 2020
%J Discrete & Computational Geometry
%V 63
%N 2
%& 418
%P 418 - 454
%I Springer
%@ false
[90]
A. Oulasvirta, N. R. Dayama, M. Shiripour, M. John, and A. Karrenbauer, “Combinatorial Optimization of Graphical User Interface Designs,” Proceedings of the IEEE, vol. 108, no. 3, 2020.
Export
BibTeX
@article{Oulasvirta2020,
TITLE = {Combinatorial Optimization of Graphical User Interface Designs},
AUTHOR = {Oulasvirta, Antti and Dayama, Niraj Ramesh and Shiripour, Morteza and John, Maximilian and Karrenbauer, Andreas},
LANGUAGE = {eng},
ISSN = {0018-9219},
DOI = {10.1109/JPROC.2020.2969687},
PUBLISHER = {IEEE},
ADDRESS = {New York, N.Y.},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2020},
JOURNAL = {Proceedings of the IEEE},
VOLUME = {108},
NUMBER = {3},
PAGES = {434--464},
}
Endnote
%0 Journal Article
%A Oulasvirta, Antti
%A Dayama, Niraj Ramesh
%A Shiripour, Morteza
%A John, Maximilian
%A Karrenbauer, Andreas
%+ External Organizations
External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Combinatorial Optimization of Graphical User Interface Designs :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-99BA-C
%R 10.1109/JPROC.2020.2969687
%7 2020
%D 2020
%J Proceedings of the IEEE
%O Proc. IEEE
%V 108
%N 3
%& 434
%P 434 - 464
%I IEEE
%C New York, N.Y.
%@ false
[91]
B. Ray Chaudhury, J. Garg, and K. Mehlhorn, “EFX exists for three agents,” 2020. [Online]. Available: http://arxiv.org/abs/2002.05119. (arXiv: 2002.05119)
Abstract
We study the problem of distributing a set of indivisible items among agents
with additive valuations in a $\mathit{fair}$ manner. The fairness notion under
consideration is Envy-freeness up to any item (EFX). Despite significant
efforts by many researchers for several years, the existence of EFX allocations
has not been settled beyond the simple case of two agents. In this paper, we
show constructively that an EFX allocation always exists for three agents.
Furthermore, we falsify the conjecture by Caragiannis et al. by showing an
instance with three agents for which there is a partial EFX allocation (some
items are not allocated) with higher Nash welfare than that of any complete EFX
allocation.
Export
BibTeX
@online{RayChaudhury_arXiv2002.05119,
TITLE = {{EFX} exists for three agents},
AUTHOR = {Ray Chaudhury, Bhaskar and Garg, Jugal and Mehlhorn, Kurt},
LANGUAGE = {eng},
URL = {http://arxiv.org/abs/2002.05119},
EPRINT = {2002.05119},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We study the problem of distributing a set of indivisible items among agents with additive valuations in a $\mathit{fair}$ manner. The fairness notion under consideration is Envy-freeness up to any item (EFX). Despite significant efforts by many researchers for several years, the existence of EFX allocations has not been settled beyond the simple case of two agents. In this paper, we show constructively that an EFX allocation always exists for three agents. Furthermore, we falsify the conjecture by Caragiannis et al. by showing an instance with three agents for which there is a partial EFX allocation (some items are not allocated) with higher Nash welfare than that of any complete EFX allocation.},
}
Endnote
%0 Report
%A Ray Chaudhury, Bhaskar
%A Garg, Jugal
%A Mehlhorn, Kurt
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T EFX exists for three agents :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-AF99-9
%U http://arxiv.org/abs/2002.05119
%D 2020
%X We study the problem of distributing a set of indivisible items among agents
with additive valuations in a $\mathit{fair}$ manner. The fairness notion under
consideration is Envy-freeness up to any item (EFX). Despite significant
efforts by many researchers for several years, the existence of EFX allocations
has not been settled beyond the simple case of two agents. In this paper, we
show constructively that an EFX allocation always exists for three agents.
Furthermore, we falsify the conjecture by Caragiannis et al. by showing an
instance with three agents for which there is a partial EFX allocation (some
items are not allocated) with higher Nash welfare than that of any complete EFX
allocation.
%K Computer Science, Computer Science and Game Theory, cs.GT,
[92]
B. Ray Chaudhury, T. Kavitha, K. Mehlhorn, and A. Sgouritsa, “A Little Charity Guarantees Almost Envy-Freeness,” in Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{RayChaudhury_SODA20,
TITLE = {A Little Charity Guarantees Almost Envy-Freeness},
AUTHOR = {Ray Chaudhury, Bhaskar and Kavitha, Telikepalli and Mehlhorn, Kurt and Sgouritsa, Alkmini},
LANGUAGE = {eng},
ISBN = {978-1-61197-599-4},
DOI = {10.1137/1.9781611975994.162},
PUBLISHER = {SIAM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020)},
EDITOR = {Chawla, Shuchi},
PAGES = {2658--2672},
ADDRESS = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Ray Chaudhury, Bhaskar
%A Kavitha, Telikepalli
%A Mehlhorn, Kurt
%A Sgouritsa, Alkmini
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T A Little Charity Guarantees Almost Envy-Freeness :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-AF89-B
%R 10.1137/1.9781611975994.162
%D 2020
%B 31st Annual ACM-SIAM Symposium on Discrete Algorithms
%Z date of event: 2020-01-05 - 2020-01-08
%C Salt Lake City, UT, USA
%B Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms
%E Chawla, Shuchi
%P 2658 - 2672
%I SIAM
%@ 978-1-61197-599-4
[93]
B. Ray Chaudhury, J. Garg, and K. Mehlhorn, “EFX Exists for Three Agents,” in EC’20, 21st ACM Conference on Economics and Computation, Virtual Event, Hungary, 2020.
Export
BibTeX
@inproceedings{RayChaudhury_EC2020,
TITLE = {{EFX} Exists for Three Agents},
AUTHOR = {Ray Chaudhury, Bhaskar and Garg, Jugal and Mehlhorn, Kurt},
LANGUAGE = {eng},
ISBN = {978-1-4503-7975-5},
DOI = {10.1145/3391403.3399511},
PUBLISHER = {ACM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {EC'20, 21st ACM Conference on Economics and Computation},
EDITOR = {Bir{\'o}, P{\'e}ter and Hartline, Jason},
PAGES = {1--19},
ADDRESS = {Virtual Event, Hungary},
}
Endnote
%0 Conference Proceedings
%A Ray Chaudhury, Bhaskar
%A Garg, Jugal
%A Mehlhorn, Kurt
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T EFX Exists for Three Agents :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-223A-2
%R 10.1145/3391403.3399511
%D 2020
%B 21st ACM Conference on Economics and Computation
%Z date of event: 2020-07-13 - 2020-07-17
%C Virtual Event, Hungary
%B EC'20
%E Biró, Péter; Hartline, Jason
%P 1 - 19
%I ACM
%@ 978-1-4503-7975-5
[94]
B. Ray Chaudhury, J. Garg, P. McGlaughlin, and R. Mehta, “Competitive Allocation of a Mixed Manna,” 2020. [Online]. Available: https://arxiv.org/abs/2008.02753. (arXiv: 2008.02753)
Abstract
We study the fair division problem of allocating a mixed manna under
additively separable piecewise linear concave (SPLC) utilities. A mixed manna
contains goods that everyone likes and bads that everyone dislikes, as well as
items that some like and others dislike. The seminal work of Bogomolnaia et al.
[Econometrica'17] argue why allocating a mixed manna is genuinely more
complicated than a good or a bad manna, and why competitive equilibrium is the
best mechanism. They also provide the existence of equilibrium and establish
its peculiar properties (e.g., non-convex and disconnected set of equilibria
even under linear utilities), but leave the problem of computing an equilibrium
open. This problem remained unresolved even for only bad manna under linear
utilities.
Our main result is a simplex-like algorithm based on Lemke's scheme for
computing a competitive allocation of a mixed manna under SPLC utilities, a
strict generalization of linear. Experimental results on randomly generated
instances suggest that our algorithm will be fast in practice. The problem is
known to be PPAD-hard for the case of good manna, and we also show a similar
result for the case of bad manna. Given these PPAD-hardness results, designing
such an algorithm is the only non-brute-force (non-enumerative) option known,
e.g., the classic Lemke-Howson algorithm (1964) for computing a Nash
equilibrium in a 2-player game is still one of the most widely used algorithms
in practice.
Our algorithm also yields several new structural properties as simple
corollaries. We obtain a (constructive) proof of existence for a far more
general setting, membership of the problem in PPAD, rational-valued solution,
and odd number of solutions property. The last property also settles the
conjecture of Bogomolnaia et al. in the affirmative.
Export
BibTeX
@online{Chaudhury_arXiv2008.02753,
TITLE = {Competitive Allocation of a Mixed Manna},
AUTHOR = {Ray Chaudhury, Bhaskar and Garg, Jugal and McGlaughlin, Peter and Mehta, Ruta},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2008.02753},
EPRINT = {2008.02753},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We study the fair division problem of allocating a mixed manna under additively separable piecewise linear concave (SPLC) utilities. A mixed manna contains goods that everyone likes and bads that everyone dislikes, as well as items that some like and others dislike. The seminal work of Bogomolnaia et al. [Econometrica'17] argue why allocating a mixed manna is genuinely more complicated than a good or a bad manna, and why competitive equilibrium is the best mechanism. They also provide the existence of equilibrium and establish its peculiar properties (e.g., non-convex and disconnected set of equilibria even under linear utilities), but leave the problem of computing an equilibrium open. This problem remained unresolved even for only bad manna under linear utilities. Our main result is a simplex-like algorithm based on Lemke's scheme for computing a competitive allocation of a mixed manna under SPLC utilities, a strict generalization of linear. Experimental results on randomly generated instances suggest that our algorithm will be fast in practice. The problem is known to be PPAD-hard for the case of good manna, and we also show a similar result for the case of bad manna. Given these PPAD-hardness results, designing such an algorithm is the only non-brute-force (non-enumerative) option known, e.g., the classic Lemke-Howson algorithm (1964) for computing a Nash equilibrium in a 2-player game is still one of the most widely used algorithms in practice. Our algorithm also yields several new structural properties as simple corollaries. We obtain a (constructive) proof of existence for a far more general setting, membership of the problem in PPAD, rational-valued solution, and odd number of solutions property. The last property also settles the conjecture of Bogomolnaia et al. in the affirmative.},
}
Endnote
%0 Report
%A Ray Chaudhury, Bhaskar
%A Garg, Jugal
%A McGlaughlin, Peter
%A Mehta, Ruta
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T Competitive Allocation of a Mixed Manna :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9361-5
%U https://arxiv.org/abs/2008.02753
%D 2020
%X We study the fair division problem of allocating a mixed manna under
additively separable piecewise linear concave (SPLC) utilities. A mixed manna
contains goods that everyone likes and bads that everyone dislikes, as well as
items that some like and others dislike. The seminal work of Bogomolnaia et al.
[Econometrica'17] argue why allocating a mixed manna is genuinely more
complicated than a good or a bad manna, and why competitive equilibrium is the
best mechanism. They also provide the existence of equilibrium and establish
its peculiar properties (e.g., non-convex and disconnected set of equilibria
even under linear utilities), but leave the problem of computing an equilibrium
open. This problem remained unresolved even for only bad manna under linear
utilities.
Our main result is a simplex-like algorithm based on Lemke's scheme for
computing a competitive allocation of a mixed manna under SPLC utilities, a
strict generalization of linear. Experimental results on randomly generated
instances suggest that our algorithm will be fast in practice. The problem is
known to be PPAD-hard for the case of good manna, and we also show a similar
result for the case of bad manna. Given these PPAD-hardness results, designing
such an algorithm is the only non-brute-force (non-enumerative) option known,
e.g., the classic Lemke-Howson algorithm (1964) for computing a Nash
equilibrium in a 2-player game is still one of the most widely used algorithms
in practice.
Our algorithm also yields several new structural properties as simple
corollaries. We obtain a (constructive) proof of existence for a far more
general setting, membership of the problem in PPAD, rational-valued solution,
and odd number of solutions property. The last property also settles the
conjecture of Bogomolnaia et al. in the affirmative.
%K Computer Science, Computer Science and Game Theory, cs.GT,Computer Science, Computational Complexity, cs.CC,Computer Science, Discrete Mathematics, cs.DM,Computer Science, Data Structures and Algorithms, cs.DS,Computer Science, Multiagent Systems, cs.MA
[95]
B. Ray Chaudhury, J. Garg, and R. Mehta, “Fair and Efficient Allocations under Subadditive Valuations,” 2020. [Online]. Available: https://arxiv.org/abs/2005.06511. (arXiv: 2005.06511)
Abstract
We study the problem of allocating a set of indivisible goods among agents
with subadditive valuations in a fair and efficient manner. Envy-Freeness up to
any good (EFX) is the most compelling notion of fairness in the context of
indivisible goods. Although the existence of EFX is not known beyond the simple
case of two agents with subadditive valuations, some good approximations of EFX
are known to exist, namely $\tfrac{1}{2}$-EFX allocation and EFX allocations
with bounded charity.
Nash welfare (the geometric mean of agents' valuations) is one of the most
commonly used measures of efficiency. In case of additive valuations, an
allocation that maximizes Nash welfare also satisfies fairness properties like
Envy-Free up to one good (EF1). Although there is substantial work on
approximating Nash welfare when agents have additive valuations, very little is
known when agents have subadditive valuations. In this paper, we design a
polynomial-time algorithm that outputs an allocation that satisfies either of
the two approximations of EFX as well as achieves an $\mathcal{O}(n)$
approximation to the Nash welfare. Our result also improves the current
best-known approximation of $\mathcal{O}(n \log n)$ and $\mathcal{O}(m)$ to
Nash welfare when agents have submodular and subadditive valuations,
respectively.
Furthermore, our technique also gives an $\mathcal{O}(n)$ approximation to a
family of welfare measures, $p$-mean of valuations for $p\in (-\infty, 1]$,
thereby also matching asymptotically the current best known approximation ratio
for special cases like $p =-\infty$ while also retaining the fairness
properties.
Export
BibTeX
@online{Chaudhury_arXiv2005.06511,
TITLE = {Fair and Efficient Allocations under Subadditive Valuations},
AUTHOR = {Ray Chaudhury, Bhaskar and Garg, Jugal and Mehta, Ruta},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2005.06511},
EPRINT = {2005.06511},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We study the problem of allocating a set of indivisible goods among agents with subadditive valuations in a fair and efficient manner. Envy-Freeness up to any good (EFX) is the most compelling notion of fairness in the context of indivisible goods. Although the existence of EFX is not known beyond the simple case of two agents with subadditive valuations, some good approximations of EFX are known to exist, namely $\tfrac{1}{2}$-EFX allocation and EFX allocations with bounded charity. Nash welfare (the geometric mean of agents' valuations) is one of the most commonly used measures of efficiency. In case of additive valuations, an allocation that maximizes Nash welfare also satisfies fairness properties like Envy-Free up to one good (EF1). Although there is substantial work on approximating Nash welfare when agents have additive valuations, very little is known when agents have subadditive valuations. In this paper, we design a polynomial-time algorithm that outputs an allocation that satisfies either of the two approximations of EFX as well as achieves an $\mathcal{O}(n)$ approximation to the Nash welfare. Our result also improves the current best-known approximation of $\mathcal{O}(n \log n)$ and $\mathcal{O}(m)$ to Nash welfare when agents have submodular and subadditive valuations, respectively. Furthermore, our technique also gives an $\mathcal{O}(n)$ approximation to a family of welfare measures, $p$-mean of valuations for $p\in (-\infty, 1]$, thereby also matching asymptotically the current best known approximation ratio for special cases like $p =-\infty$ while also retaining the fairness properties.},
}
Endnote
%0 Report
%A Ray Chaudhury, Bhaskar
%A Garg, Jugal
%A Mehta, Ruta
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Fair and Efficient Allocations under Subadditive Valuations :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9369-D
%U https://arxiv.org/abs/2005.06511
%D 2020
%X We study the problem of allocating a set of indivisible goods among agents
with subadditive valuations in a fair and efficient manner. Envy-Freeness up to
any good (EFX) is the most compelling notion of fairness in the context of
indivisible goods. Although the existence of EFX is not known beyond the simple
case of two agents with subadditive valuations, some good approximations of EFX
are known to exist, namely $\tfrac{1}{2}$-EFX allocation and EFX allocations
with bounded charity.
Nash welfare (the geometric mean of agents' valuations) is one of the most
commonly used measures of efficiency. In case of additive valuations, an
allocation that maximizes Nash welfare also satisfies fairness properties like
Envy-Free up to one good (EF1). Although there is substantial work on
approximating Nash welfare when agents have additive valuations, very little is
known when agents have subadditive valuations. In this paper, we design a
polynomial-time algorithm that outputs an allocation that satisfies either of
the two approximations of EFX as well as achieves an $\mathcal{O}(n)$
approximation to the Nash welfare. Our result also improves the current
best-known approximation of $\mathcal{O}(n \log n)$ and $\mathcal{O}(m)$ to
Nash welfare when agents have submodular and subadditive valuations,
respectively.
Furthermore, our technique also gives an $\mathcal{O}(n)$ approximation to a
family of welfare measures, $p$-mean of valuations for $p\in (-\infty, 1]$,
thereby also matching asymptotically the current best known approximation ratio
for special cases like $p =-\infty$ while also retaining the fairness
properties.
%K Computer Science, Computer Science and Game Theory, cs.GT,
[96]
W. Rosenbaum and J. Suomela, “Seeing Far vs. Seeing Wide: Volume Complexity of Local Graph Problems,” 2020. [Online]. Available: https://arxiv.org/abs/1907.08160. (arXiv: 1907.08160)
Abstract
Consider a graph problem that is locally checkable but not locally solvable:
given a solution we can check that it is feasible by verifying all
constant-radius neighborhoods, but to find a solution each node needs to
explore the input graph at least up to distance $\Omega(\log n)$ in order to
produce its output. We consider the complexity of such problems from the
perspective of volume: how large a subgraph does a node need to see in order to
produce its output. We study locally checkable graph problems on bounded-degree
graphs. We give a number of constructions that exhibit tradeoffs between
deterministic distance, randomized distance, deterministic volume, and
randomized volume:
- If the deterministic distance is linear, it is also known that randomized
distance is near-linear. In contrast, we show that there are problems with
linear deterministic volume but only logarithmic randomized volume.
- We prove a volume hierarchy theorem for randomized complexity: among
problems with linear deterministic volume complexity, there are infinitely many
distinct randomized volume complexity classes between $\Omega(\log n)$ and
$O(n)$. This hierarchy persists even when restricting to problems whose
randomized and deterministic distance complexities are $\Theta(\log n)$.
- Similar hierarchies exist for polynomial distance complexities: for any $k,
\ell \in N$ with $k \leq \ell$, there are problems whose randomized and
deterministic distance complexities are $\Theta(n^{1/\ell})$, randomized volume
complexities are $\Theta(n^{1/k})$, and whose deterministic volume complexities
are $\Theta(n)$.
Additionally, we consider connections between our volume model and massively
parallel computation (MPC). We give a general simulation argument that any
volume-efficient algorithm can be transformed into a space-efficient MPC
algorithm.
Export
BibTeX
@online{Rosenbaum_arXiv1907.08160,
TITLE = {Seeing Far vs. Seeing Wide: {V}olume Complexity of Local Graph Problems},
AUTHOR = {Rosenbaum, Will and Suomela, Jukka},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/1907.08160},
EPRINT = {1907.08160},
EPRINTTYPE = {arXiv},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Consider a graph problem that is locally checkable but not locally solvable: given a solution we can check that it is feasible by verifying all constant-radius neighborhoods, but to find a solution each node needs to explore the input graph at least up to distance $\Omega(\log n)$ in order to produce its output. We consider the complexity of such problems from the perspective of volume: how large a subgraph does a node need to see in order to produce its output. We study locally checkable graph problems on bounded-degree graphs. We give a number of constructions that exhibit tradeoffs between deterministic distance, randomized distance, deterministic volume, and randomized volume: -- If the deterministic distance is linear, it is also known that randomized distance is near-linear. In contrast, we show that there are problems with linear deterministic volume but only logarithmic randomized volume. -- We prove a volume hierarchy theorem for randomized complexity: among problems with linear deterministic volume complexity, there are infinitely many distinct randomized volume complexity classes between $\Omega(\log n)$ and $O(n)$. This hierarchy persists even when restricting to problems whose randomized and deterministic distance complexities are $\Theta(\log n)$. -- Similar hierarchies exist for polynomial distance complexities: for any $k, \ell \in N$ with $k \leq \ell$, there are problems whose randomized and deterministic distance complexities are $\Theta(n^{1/\ell})$, randomized volume complexities are $\Theta(n^{1/k})$, and whose deterministic volume complexities are $\Theta(n)$. Additionally, we consider connections between our volume model and massively parallel computation (MPC). We give a general simulation argument that any volume-efficient algorithm can be transformed into a space-efficient MPC algorithm.},
}
Endnote
%0 Report
%A Rosenbaum, Will
%A Suomela, Jukka
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Seeing Far vs. Seeing Wide: Volume Complexity of Local Graph Problems :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-98C0-4
%U https://arxiv.org/abs/1907.08160
%D 2020
%X Consider a graph problem that is locally checkable but not locally solvable:
given a solution we can check that it is feasible by verifying all
constant-radius neighborhoods, but to find a solution each node needs to
explore the input graph at least up to distance $\Omega(\log n)$ in order to
produce its output. We consider the complexity of such problems from the
perspective of volume: how large a subgraph does a node need to see in order to
produce its output. We study locally checkable graph problems on bounded-degree
graphs. We give a number of constructions that exhibit tradeoffs between
deterministic distance, randomized distance, deterministic volume, and
randomized volume:
- If the deterministic distance is linear, it is also known that randomized
distance is near-linear. In contrast, we show that there are problems with
linear deterministic volume but only logarithmic randomized volume.
- We prove a volume hierarchy theorem for randomized complexity: among
problems with linear deterministic volume complexity, there are infinitely many
distinct randomized volume complexity classes between $\Omega(\log n)$ and
$O(n)$. This hierarchy persists even when restricting to problems whose
randomized and deterministic distance complexities are $\Theta(\log n)$.
- Similar hierarchies exist for polynomial distance complexities: for any $k,
\ell \in N$ with $k \leq \ell$, there are problems whose randomized and
deterministic distance complexities are $\Theta(n^{1/\ell})$, randomized volume
complexities are $\Theta(n^{1/k})$, and whose deterministic volume complexities
are $\Theta(n)$.
Additionally, we consider connections between our volume model and massively
parallel computation (MPC). We give a general simulation argument that any
volume-efficient algorithm can be transformed into a space-efficient MPC
algorithm.
%K Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
[97]
W. Rosenbaum and J. Suomela, “Seeing Far vs. Seeing Wide: Volume Complexity of Local Graph Problems,” in PODC’20, 39th Symposium on Principles of Distributed Computing, Virtual Event, Italy, 2020.
Export
BibTeX
@inproceedings{Rosenbaum_PODC2020,
TITLE = {Seeing Far vs. Seeing Wide: {V}olume Complexity of Local Graph Problems},
AUTHOR = {Rosenbaum, Will and Suomela, Jukka},
LANGUAGE = {eng},
ISBN = {978-1-4503-7582-5},
DOI = {10.1145/3382734.3405721},
PUBLISHER = {ACM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {PODC'20, 39th Symposium on Principles of Distributed Computing},
PAGES = {89--98},
ADDRESS = {Virtual Event, Italy},
}
Endnote
%0 Conference Proceedings
%A Rosenbaum, Will
%A Suomela, Jukka
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
%T Seeing Far vs. Seeing Wide: Volume Complexity of Local Graph Problems :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-9A54-D
%R 10.1145/3382734.3405721
%D 2020
%B 39th Symposium on Principles of Distributed Computing
%Z date of event: 2020-08-03 - 2020-08-07
%C Virtual Event, Italy
%B PODC'20
%P 89 - 98
%I ACM
%@ 978-1-4503-7582-5
[98]
M. Roth and P. Wellnitz, “Counting and Finding Homomorphisms is Universal for Parameterized Complexity Theory,” in Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), Salt Lake City, UT, USA, 2020.
Export
BibTeX
@inproceedings{Roth_SODA20,
TITLE = {Counting and Finding Homomorphisms is Universal for Parameterized Complexity Theory},
AUTHOR = {Roth, Marc and Wellnitz, Philip},
LANGUAGE = {eng},
ISBN = {978-1-61197-599-4},
DOI = {10.1137/1.9781611975994.133},
PUBLISHER = {SIAM},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms (SODA 2020)},
EDITOR = {Chawla, Shuchi},
PAGES = {2161--2180},
ADDRESS = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Roth, Marc
%A Wellnitz, Philip
%+ External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Counting and Finding Homomorphisms is Universal for Parameterized Complexity Theory :
%G eng
%U http://hdl.handle.net/21.11116/0000-0005-8665-2
%R 10.1137/1.9781611975994.133
%D 2020
%B 31st Annual ACM-SIAM Symposium on Discrete Algorithms
%Z date of event: 2020-01-05 - 2020-01-08
%C Salt Lake City, UT, USA
%B Proceedings of the Thirty-First ACM-SIAM Symposium on Discrete Algorithms
%E Chawla, Shuchi
%P 2161 - 2180
%I SIAM
%@ 978-1-61197-599-4
[99]
M. Roth, J. Schmitt, and P. Wellnitz, “Counting Small Induced Subgraphs Satisfying Monotone Properties,” in FOCS 2020, Durham, NC, USA (Virtual Conference). (Accepted/in press)
Export
BibTeX
@inproceedings{Roth_FOCS2020,
TITLE = {Counting Small Induced Subgraphs Satisfying Monotone Properties},
AUTHOR = {Roth, Marc and Schmitt, Johannes and Wellnitz, Philip},
LANGUAGE = {eng},
PUBLISHER = {IEEE},
YEAR = {2020},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {FOCS 2020},
ADDRESS = {Durham, NC, USA (Virtual Conference)},
}
Endnote
%0 Conference Proceedings
%A Roth, Marc
%A Schmitt, Johannes
%A Wellnitz, Philip
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Counting Small Induced Subgraphs Satisfying Monotone Properties :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8C5E-3
%D 2020
%B 61st Annual IEEE Symposium on Foundations of Computer Science
%Z date of event: 2020-11-16 - 2020-11-19
%C Durham, NC, USA (Virtual Conference)
%B FOCS 2020
%I IEEE
[100]
M. Roth, J. Schmitt, and P. Wellnitz, “Counting Small Induced Subgraphs Satisfying Monotone Properties,” 2020. [Online]. Available: https://arxiv.org/abs/2004.06595. (arXiv: 2004.06595)
Abstract
Given a graph property $\Phi$, the problem $\#\mathsf{IndSub}(\Phi)$ asks, on
input a graph $G$ and a positive integer $k$, to compute the number of induced
subgraphs of size $k$ in $G$ that satisfy $\Phi$. The search for explicit
criteria on $\Phi$ ensuring that $\#\mathsf{IndSub}(\Phi)$ is hard was
initiated by Jerrum and Meeks [J. Comput. Syst. Sci. 15] and is part of the
major line of research on counting small patterns in graphs. However, apart
from an implicit result due to Curticapean, Dell and Marx [STOC 17] proving
that a full classification into "easy" and "hard" properties is possible and
some partial results on edge-monotone properties due to Meeks [Discret. Appl.
Math. 16] and D\"orfler et al. [MFCS 19], not much is known.
In this work, we fully answer and explicitly classify the case of monotone,
that is subgraph-closed, properties: We show that for any non-trivial monotone
property $\Phi$, the problem $\#\mathsf{IndSub}(\Phi)$ cannot be solved in time
$f(k)\cdot |V(G)|^{o(k/ {\log^{1/2}(k)})}$ for any function $f$, unless the
Exponential Time Hypothesis fails. By this, we establish that any significant
improvement over the brute-force approach is unlikely; in the language of
parameterized complexity, we also obtain a $\#\mathsf{W}[1]$-completeness
result.
Export
BibTeX
@online{Roth_arXiv2004.06595,
  title        = {Counting Small Induced Subgraphs Satisfying Monotone Properties},
  author       = {Roth, Marc and Schmitt, Johannes and Wellnitz, Philip},
  language     = {eng},
  url          = {https://arxiv.org/abs/2004.06595},
  eprint       = {2004.06595},
  eprinttype   = {arXiv},
  year         = {2020},
  marginalmark = {$\bullet$},
  abstract     = {Given a graph property $\Phi$, the problem $\#\mathsf{IndSub}(\Phi)$ asks, on input a graph $G$ and a positive integer $k$, to compute the number of induced subgraphs of size $k$ in $G$ that satisfy $\Phi$. The search for explicit criteria on $\Phi$ ensuring that $\#\mathsf{IndSub}(\Phi)$ is hard was initiated by Jerrum and Meeks [J. Comput. Syst. Sci. 15] and is part of the major line of research on counting small patterns in graphs. However, apart from an implicit result due to Curticapean, Dell and Marx [STOC 17] proving that a full classification into "easy" and "hard" properties is possible and some partial results on edge-monotone properties due to Meeks [Discret. Appl. Math. 16] and D\"orfler et al. [MFCS 19], not much is known. In this work, we fully answer and explicitly classify the case of monotone, that is subgraph-closed, properties: We show that for any non-trivial monotone property $\Phi$, the problem $\#\mathsf{IndSub}(\Phi)$ cannot be solved in time $f(k)\cdot |V(G)|^{o(k/ {\log^{1/2}(k)})}$ for any function $f$, unless the Exponential Time Hypothesis fails. By this, we establish that any significant improvement over the brute-force approach is unlikely; in the language of parameterized complexity, we also obtain a $\#\mathsf{W}[1]$-completeness result.},
}
Endnote
%0 Report
%A Roth, Marc
%A Schmitt, Johannes
%A Wellnitz, Philip
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Counting Small Induced Subgraphs Satisfying Monotone Properties :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8C60-F
%U https://arxiv.org/abs/2004.06595
%D 2020
%X Given a graph property $\Phi$, the problem $\#\mathsf{IndSub}(\Phi)$ asks, on
input a graph $G$ and a positive integer $k$, to compute the number of induced
subgraphs of size $k$ in $G$ that satisfy $\Phi$. The search for explicit
criteria on $\Phi$ ensuring that $\#\mathsf{IndSub}(\Phi)$ is hard was
initiated by Jerrum and Meeks [J. Comput. Syst. Sci. 15] and is part of the
major line of research on counting small patterns in graphs. However, apart
from an implicit result due to Curticapean, Dell and Marx [STOC 17] proving
that a full classification into "easy" and "hard" properties is possible and
some partial results on edge-monotone properties due to Meeks [Discret. Appl.
Math. 16] and D\"orfler et al. [MFCS 19], not much is known.
In this work, we fully answer and explicitly classify the case of monotone,
that is subgraph-closed, properties: We show that for any non-trivial monotone
property $\Phi$, the problem $\#\mathsf{IndSub}(\Phi)$ cannot be solved in time
$f(k)\cdot |V(G)|^{o(k/ {\log^{1/2}(k)})}$ for any function $f$, unless the
Exponential Time Hypothesis fails. By this, we establish that any significant
improvement over the brute-force approach is unlikely; in the language of
parameterized complexity, we also obtain a $\#\mathsf{W}[1]$-completeness
result.
%K Computer Science, Computational Complexity, cs.CC
[101]
M. Roth, J. Schmitt, and P. Wellnitz, “Detecting and Counting Small Subgraphs, and Evaluating a Parameterized Tutte Polynomial: Lower Bounds via Toroidal Grids and Cayley Graph Expanders,” 2020. [Online]. Available: https://arxiv.org/abs/2011.03433. (arXiv: 2011.03433)
Abstract
Given a graph property $\Phi$, we consider the problem
$\mathtt{EdgeSub}(\Phi)$, where the input is a pair of a graph $G$ and a
positive integer $k$, and the task is to decide whether $G$ contains a $k$-edge
subgraph that satisfies $\Phi$. Specifically, we study the parameterized
complexity of $\mathtt{EdgeSub}(\Phi)$ and of its counting problem
$\#\mathtt{EdgeSub}(\Phi)$ with respect to both approximate and exact counting.
We obtain a complete picture for minor-closed properties $\Phi$: the decision
problem $\mathtt{EdgeSub}(\Phi)$ always admits an FPT algorithm and the
counting problem $\#\mathtt{EdgeSub}(\Phi)$ always admits an FPTRAS. For exact
counting, we present an exhaustive and explicit criterion on the property
$\Phi$ which, if satisfied, yields fixed-parameter tractability and otherwise
$\#\mathsf{W[1]}$-hardness. Additionally, most of our hardness results come
with an almost tight conditional lower bound under the so-called Exponential
Time Hypothesis, ruling out algorithms for $\#\mathtt{EdgeSub}(\Phi)$ that run
in time $f(k)\cdot|G|^{o(k/\log k)}$ for any computable function $f$.
As a main technical result, we gain a complete understanding of the
coefficients of toroidal grids and selected Cayley graph expanders in the
homomorphism basis of $\#\mathtt{EdgeSub}(\Phi)$. This allows us to establish
hardness of exact counting using the Complexity Monotonicity framework due to
Curticapean, Dell and Marx (STOC'17). Our methods can also be applied to a
parameterized variant of the Tutte Polynomial $T^k_G$ of a graph $G$, to which
many known combinatorial interpretations of values of the (classical) Tutte
Polynomial can be extended. As an example, $T^k_G(2,1)$ corresponds to the
number of $k$-forests in the graph $G$. Our techniques allow us to completely
understand the parametrized complexity of computing the evaluation of $T^k_G$
at every pair of rational coordinates $(x,y)$.
Export
BibTeX
@online{Roth_arXiv2011.03433,
  title        = {Detecting and Counting Small Subgraphs, and Evaluating a Parameterized Tutte Polynomial: Lower Bounds via Toroidal Grids and Cayley Graph Expanders},
  author       = {Roth, Marc and Schmitt, Johannes and Wellnitz, Philip},
  language     = {eng},
  url          = {https://arxiv.org/abs/2011.03433},
  eprint       = {2011.03433},
  eprinttype   = {arXiv},
  year         = {2020},
  marginalmark = {$\bullet$},
  abstract     = {Given a graph property $\Phi$, we consider the problem $\mathtt{EdgeSub}(\Phi)$, where the input is a pair of a graph $G$ and a positive integer $k$, and the task is to decide whether $G$ contains a $k$-edge subgraph that satisfies $\Phi$. Specifically, we study the parameterized complexity of $\mathtt{EdgeSub}(\Phi)$ and of its counting problem $\#\mathtt{EdgeSub}(\Phi)$ with respect to both approximate and exact counting. We obtain a complete picture for minor-closed properties $\Phi$: the decision problem $\mathtt{EdgeSub}(\Phi)$ always admits an FPT algorithm and the counting problem $\#\mathtt{EdgeSub}(\Phi)$ always admits an FPTRAS. For exact counting, we present an exhaustive and explicit criterion on the property $\Phi$ which, if satisfied, yields fixed-parameter tractability and otherwise $\#\mathsf{W[1]}$-hardness. Additionally, most of our hardness results come with an almost tight conditional lower bound under the so-called Exponential Time Hypothesis, ruling out algorithms for $\#\mathtt{EdgeSub}(\Phi)$ that run in time $f(k)\cdot|G|^{o(k/\log k)}$ for any computable function $f$. As a main technical result, we gain a complete understanding of the coefficients of toroidal grids and selected Cayley graph expanders in the homomorphism basis of $\#\mathtt{EdgeSub}(\Phi)$. This allows us to establish hardness of exact counting using the Complexity Monotonicity framework due to Curticapean, Dell and Marx (STOC'17). Our methods can also be applied to a parameterized variant of the Tutte Polynomial $T^k_G$ of a graph $G$, to which many known combinatorial interpretations of values of the (classical) Tutte Polynomial can be extended. As an example, $T^k_G(2,1)$ corresponds to the number of $k$-forests in the graph $G$. Our techniques allow us to completely understand the parametrized complexity of computing the evaluation of $T^k_G$ at every pair of rational coordinates $(x,y)$.},
}
Endnote
%0 Report
%A Roth, Marc
%A Schmitt, Johannes
%A Wellnitz, Philip
%+ External Organizations
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T Detecting and Counting Small Subgraphs, and Evaluating a Parameterized Tutte Polynomial: Lower Bounds via Toroidal Grids and Cayley Graph Expanders :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-8CA1-5
%U https://arxiv.org/abs/2011.03433
%D 2020
%X Given a graph property $\Phi$, we consider the problem
$\mathtt{EdgeSub}(\Phi)$, where the input is a pair of a graph $G$ and a
positive integer $k$, and the task is to decide whether $G$ contains a $k$-edge
subgraph that satisfies $\Phi$. Specifically, we study the parameterized
complexity of $\mathtt{EdgeSub}(\Phi)$ and of its counting problem
$\#\mathtt{EdgeSub}(\Phi)$ with respect to both approximate and exact counting.
We obtain a complete picture for minor-closed properties $\Phi$: the decision
problem $\mathtt{EdgeSub}(\Phi)$ always admits an FPT algorithm and the
counting problem $\#\mathtt{EdgeSub}(\Phi)$ always admits an FPTRAS. For exact
counting, we present an exhaustive and explicit criterion on the property
$\Phi$ which, if satisfied, yields fixed-parameter tractability and otherwise
$\#\mathsf{W[1]}$-hardness. Additionally, most of our hardness results come
with an almost tight conditional lower bound under the so-called Exponential
Time Hypothesis, ruling out algorithms for $\#\mathtt{EdgeSub}(\Phi)$ that run
in time $f(k)\cdot|G|^{o(k/\log k)}$ for any computable function $f$.
As a main technical result, we gain a complete understanding of the
coefficients of toroidal grids and selected Cayley graph expanders in the
homomorphism basis of $\#\mathtt{EdgeSub}(\Phi)$. This allows us to establish
hardness of exact counting using the Complexity Monotonicity framework due to
Curticapean, Dell and Marx (STOC'17). Our methods can also be applied to a
parameterized variant of the Tutte Polynomial $T^k_G$ of a graph $G$, to which
many known combinatorial interpretations of values of the (classical) Tutte
Polynomial can be extended. As an example, $T^k_G(2,1)$ corresponds to the
number of $k$-forests in the graph $G$. Our techniques allow us to completely
understand the parametrized complexity of computing the evaluation of $T^k_G$
at every pair of rational coordinates $(x,y)$.
%K Computer Science, Computational Complexity, cs.CC,Computer Science, Data Structures and Algorithms, cs.DS
[102]
S. Saurabh, U. dos S. Souza, and P. Tale, “On the Parameterized Complexity of Grid Contraction,” in 17th Scandinavian Symposium and Workshops on Algorithm Theory (SWAT 2020), Tórshavn, Faroe Islands, 2020.
Export
BibTeX
@inproceedings{Saket_SWAT2020,
TITLE = {On the Parameterized Complexity of Grid Contraction},
AUTHOR = {Saurabh, Saket and Souza, U{\'e}verton dos Santos and Tale, Prafullkumar},
LANGUAGE = {eng},
ISSN = {1868-8969},
ISBN = {978-3-95977-150-4},
URL = {urn:nbn:de:0030-drops-122810},
DOI = {10.4230/LIPIcs.SWAT.2020.34},
PUBLISHER = {Schloss Dagstuhl},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {17th Scandinavian Symposium and Workshops on Algorithm Theory (SWAT 2020)},
EDITOR = {Albers, Susanne},
EID = {34},
SERIES = {Leibniz International Proceedings in Informatics},
VOLUME = {162},
ADDRESS = {T{\'o}rshavn, Faroe Islands},
}
Endnote
%0 Conference Proceedings
%A Saurabh, Saket
%A Souza, Uéverton dos Santos
%A Tale, Prafullkumar
%+ Algorithms and Complexity, MPI for Informatics, Max Planck Society
External Organizations
Algorithms and Complexity, MPI for Informatics, Max Planck Society
%T On the Parameterized Complexity of Grid Contraction :
%G eng
%U http://hdl.handle.net/21.11116/0000-0006-8BA6-2
%R 10.4230/LIPIcs.SWAT.2020.34
%U urn:nbn:de:0030-drops-122810
%D 2020
%B 17th Scandinavian Symposium and Workshops on Algorithm Theory
%Z date of event: 2020-06-22 - 2020-06-24
%C Tórshavn, Faroe Islands
%B 17th Scandinavian Symposium and Workshops on Algorithm Theory
%E Albers, Susanne
%Z sequence number: 34
%I Schloss Dagstuhl
%@ 978-3-95977-150-4
%B Leibniz International Proceedings in Informatics
%N 162
%U https://drops.dagstuhl.de/opus/volltexte/2020/12281/