Tribhuvanesh Orekondy (PhD Student)

MSc Tribhuvanesh Orekondy

Adresse
Max-Planck-Institut für Informatik
Saarland Informatics Campus
Campus
Standort
-
Telefon
+49 681 9325 2000
Fax
+49 681 9325 2099

Personal Information

Publications

Orekondy, T., Schiele, B., & Fritz, M. (2020). Prediction Poisoning: Towards Defenses Against DNN Model Stealing Attacks. In International Conference on Learning Representations (ICLR 2020). Addis Ababa, Ethiopia: OpenReview.net. Retrieved from https://iclr.cc/Conferences/2020
Export
BibTeX
@inproceedings{orekondy20prediction,
  TITLE     = {Prediction Poisoning: {T}owards Defenses Against {DNN} Model Stealing Attacks},
  AUTHOR    = {Orekondy, Tribhuvanesh and Schiele, Bernt and Fritz, Mario},
  LANGUAGE  = {eng},
  URL       = {https://iclr.cc/Conferences/2020},
  PUBLISHER = {OpenReview.net},
  YEAR      = {2020},
  BOOKTITLE = {International Conference on Learning Representations (ICLR 2020)},
  ADDRESS   = {Addis Ababa, Ethiopia},
}
Endnote
%0 Conference Proceedings %A Orekondy, Tribhuvanesh %A Schiele, Bernt %A Fritz, Mario %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations %T Prediction Poisoning: Towards Defenses Against DNN Model Stealing Attacks : %G eng %U http://hdl.handle.net/21.11116/0000-0003-EC93-D %D 2020 %B 8th International Conference on Learning Representations %Z date of event: 2020-04-26 - 2020-04-30 %C Addis Ababa, Ethiopia %B International Conference on Learning Representations %I OpenReview.net %U https://iclr.cc/Conferences/2020 %U https://openreview.net/pdf?id=SyevYxHtDB
Orekondy, T., Oh, S. J., Zhang, Y., Schiele, B., & Fritz, M. (n.d.). Gradient-Leaks: Understanding Deanonymization in Federated Learning. In The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality (in Conjunction with NeurIPS 2019) (FL-NeurIPS 2019). Vancouver, Canada: federated-learning.org. Retrieved from http://federated-learning.org/fl-neurips-2019/
(Accepted/in press)
Export
BibTeX
@inproceedings{orekondy19gradient,
  TITLE        = {Gradient-Leaks: {U}nderstanding Deanonymization in Federated Learning},
  AUTHOR       = {Orekondy, Tribhuvanesh and Oh, Seong Joon and Zhang, Yang and Schiele, Bernt and Fritz, Mario},
  LANGUAGE     = {eng},
  URL          = {http://federated-learning.org/fl-neurips-2019/},
  PUBLISHER    = {federated-learning.org},
  YEAR         = {2019},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality (in Conjunction with NeurIPS 2019) (FL-NeurIPS 2019)},
  ADDRESS      = {Vancouver, Canada},
}
Endnote
%0 Conference Proceedings %A Orekondy, Tribhuvanesh %A Oh, Seong Joon %A Zhang, Yang %A Schiele, Bernt %A Fritz, Mario %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations %T Gradient-Leaks: Understanding Deanonymization in Federated Learning : %G eng %U http://hdl.handle.net/21.11116/0000-0005-8E47-C %U http://federated-learning.org/fl-neurips-2019/ %D 2019 %B The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality %Z date of event: 2019-12-13 - 2019-12-13 %C Vancouver, Canada %B The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality (in Conjunction with NeurIPS 2019) %I federated-learning.org
Orekondy, T., Schiele, B., & Fritz, M. (2019). Knockoff Nets: Stealing Functionality of Black-Box Models. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019). Long Beach, CA, USA: IEEE. doi:10.1109/CVPR.2019.00509
Export
BibTeX
@inproceedings{orekondy18knockoff,
  TITLE        = {Knockoff Nets: {S}tealing Functionality of Black-Box Models},
  AUTHOR       = {Orekondy, Tribhuvanesh and Schiele, Bernt and Fritz, Mario},
  LANGUAGE     = {eng},
  ISBN         = {978-1-7281-3293-8},
  DOI          = {10.1109/CVPR.2019.00509},
  PUBLISHER    = {IEEE},
  YEAR         = {2019},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019)},
  PAGES        = {4949--4958},
  ADDRESS      = {Long Beach, CA, USA},
}
Endnote
%0 Conference Proceedings %A Orekondy, Tribhuvanesh %A Schiele, Bernt %A Fritz, Mario %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations %T Knockoff Nets: Stealing Functionality of Black-Box Models : %G eng %U http://hdl.handle.net/21.11116/0000-0002-AA57-D %R 10.1109/CVPR.2019.00509 %D 2019 %B 32nd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2019-06-16 - 2019-06-20 %C Long Beach, CA, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 4949 - 4958 %I IEEE %@ 978-1-7281-3293-8
Orekondy, T., Fritz, M., & Schiele, B. (2018). Connecting Pixels to Privacy and Utility: Automatic Redaction of Private Information in Images. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018). Salt Lake City, UT, USA: IEEE. doi:10.1109/CVPR.2018.00883
Export
BibTeX
@inproceedings{orekondy17connect,
  TITLE        = {Connecting Pixels to Privacy and Utility: {A}utomatic Redaction of Private Information in Images},
  AUTHOR       = {Orekondy, Tribhuvanesh and Fritz, Mario and Schiele, Bernt},
  LANGUAGE     = {eng},
  ISBN         = {978-1-5386-6420-9},
  DOI          = {10.1109/CVPR.2018.00883},
  PUBLISHER    = {IEEE},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018)},
  PAGES        = {8466--8475},
  ADDRESS      = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings %A Orekondy, Tribhuvanesh %A Fritz, Mario %A Schiele, Bernt %+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society %T Connecting Pixels to Privacy and Utility: Automatic Redaction of Private Information in Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-7D65-4 %R 10.1109/CVPR.2018.00883 %D 2018 %B 31st IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2018-06-18 - 2018-06-22 %C Salt Lake City, UT, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 8466 - 8475 %I IEEE %@ 978-1-5386-6420-9
Orekondy, T., Oh, S. J., Schiele, B., & Fritz, M. (2018). Understanding and Controlling User Linkability in Decentralized Learning. Retrieved from http://arxiv.org/abs/1805.05838
(arXiv: 1805.05838)
Abstract
Machine Learning techniques are widely used by online services (e.g. Google, Apple) in order to analyze and make predictions on user data. As many of the provided services are user-centric (e.g. personal photo collections, speech recognition, personal assistance), user data generated on personal devices is key to provide the service. In order to protect the data and the privacy of the user, federated learning techniques have been proposed where the data never leaves the user's device and "only" model updates are communicated back to the server. In our work, we propose a new threat model that is not concerned with learning about the content - but rather is concerned with the linkability of users during such decentralized learning scenarios. We show that model updates are characteristic for users and therefore lend themselves to linkability attacks. We show identification and matching of users across devices in closed and open world scenarios. In our experiments, we find our attacks to be highly effective, achieving 20x-175x chance-level performance. In order to mitigate the risks of linkability attacks, we study various strategies. As adding random noise does not offer convincing operation points, we propose strategies based on using calibrated domain-specific data; we find these strategies offers substantial protection against linkability threats with little effect to utility.
Export
BibTeX
@online{orekondy18understand,
  TITLE        = {Understanding and Controlling User Linkability in Decentralized Learning},
  AUTHOR       = {Orekondy, Tribhuvanesh and Oh, Seong Joon and Schiele, Bernt and Fritz, Mario},
  LANGUAGE     = {eng},
  URL          = {http://arxiv.org/abs/1805.05838},
  EPRINT       = {1805.05838},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2018},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Machine Learning techniques are widely used by online services (e.g. Google, Apple) in order to analyze and make predictions on user data. As many of the provided services are user-centric (e.g. personal photo collections, speech recognition, personal assistance), user data generated on personal devices is key to provide the service. In order to protect the data and the privacy of the user, federated learning techniques have been proposed where the data never leaves the user's device and "only" model updates are communicated back to the server. In our work, we propose a new threat model that is not concerned with learning about the content -- but rather is concerned with the linkability of users during such decentralized learning scenarios. We show that model updates are characteristic for users and therefore lend themselves to linkability attacks. We show identification and matching of users across devices in closed and open world scenarios. In our experiments, we find our attacks to be highly effective, achieving 20x-175x chance-level performance. In order to mitigate the risks of linkability attacks, we study various strategies. As adding random noise does not offer convincing operation points, we propose strategies based on using calibrated domain-specific data; we find these strategies offers substantial protection against linkability threats with little effect to utility.},
}
Endnote
%0 Report %A Orekondy, Tribhuvanesh %A Oh, Seong Joon %A Schiele, Bernt %A Fritz, Mario %+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society %T Understanding and Controlling User Linkability in Decentralized Learning : %G eng %U http://hdl.handle.net/21.11116/0000-0001-4BEC-2 %U http://arxiv.org/abs/1805.05838 %D 2018 %X Machine Learning techniques are widely used by online services (e.g. Google, Apple) in order to analyze and make predictions on user data. As many of the provided services are user-centric (e.g. personal photo collections, speech recognition, personal assistance), user data generated on personal devices is key to provide the service. In order to protect the data and the privacy of the user, federated learning techniques have been proposed where the data never leaves the user's device and "only" model updates are communicated back to the server. In our work, we propose a new threat model that is not concerned with learning about the content - but rather is concerned with the linkability of users during such decentralized learning scenarios. We show that model updates are characteristic for users and therefore lend themselves to linkability attacks. We show identification and matching of users across devices in closed and open world scenarios. In our experiments, we find our attacks to be highly effective, achieving 20x-175x chance-level performance. In order to mitigate the risks of linkability attacks, we study various strategies. 
As adding random noise does not offer convincing operation points, we propose strategies based on using calibrated domain-specific data; we find these strategies offers substantial protection against linkability threats with little effect to utility. %K Computer Science, Cryptography and Security, cs.CR,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Learning, cs.LG,Statistics, Machine Learning, stat.ML
Orekondy, T., Schiele, B., & Fritz, M. (2017). Towards a Visual Privacy Advisor: Understanding and Predicting Privacy Risks in Images. In IEEE International Conference on Computer Vision (ICCV 2017). Venice, Italy: IEEE. doi:10.1109/ICCV.2017.398
Export
BibTeX
@inproceedings{orekondy17iccv,
  TITLE        = {Towards a Visual Privacy Advisor: Understanding and Predicting Privacy Risks in Images},
  AUTHOR       = {Orekondy, Tribhuvanesh and Schiele, Bernt and Fritz, Mario},
  LANGUAGE     = {eng},
  ISBN         = {978-1-5386-1032-9},
  DOI          = {10.1109/ICCV.2017.398},
  PUBLISHER    = {IEEE},
  YEAR         = {2017},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE International Conference on Computer Vision (ICCV 2017)},
  PAGES        = {3706--3715},
  ADDRESS      = {Venice, Italy},
}
Endnote
%0 Conference Proceedings %A Orekondy, Tribhuvanesh %A Schiele, Bernt %A Fritz, Mario %+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society %T Towards a Visual Privacy Advisor: Understanding and Predicting Privacy Risks in Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-E65F-8 %R 10.1109/ICCV.2017.398 %D 2017 %B International Conference on Computer Vision %Z date of event: 2017-10-22 - 2017-10-29 %C Venice, Italy %B IEEE International Conference on Computer Vision %P 3706 - 3715 %I IEEE %@ 978-1-5386-1032-9