Tribhuvanesh Orekondy (PhD Student)

Personal Information
Publications
Wang, H.-P., Orekondy, T., & Fritz, M. (2021). InfoScrub: Towards Attribute Privacy by Targeted Obfuscation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021). Virtual Workshop: IEEE. doi:10.1109/CVPRW53098.2021.00366
Export
BibTeX
@inproceedings{Wang_CVPRW2021,
TITLE = {{InfoScrub}: {Towards} Attribute Privacy by Targeted Obfuscation},
AUTHOR = {Wang, Hui-Po and Orekondy, Tribhuvanesh and Fritz, Mario},
LANGUAGE = {eng},
ISBN = {978-1-6654-4899-4},
DOI = {10.1109/CVPRW53098.2021.00366},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021)},
PAGES = {3275--3283},
ADDRESS = {Virtual Workshop},
}
Endnote
%0 Conference Proceedings
%A Wang, Hui-Po
%A Orekondy, Tribhuvanesh
%A Fritz, Mario
%+ External Organizations
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
%T InfoScrub: Towards Attribute Privacy by Targeted Obfuscation :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-186C-5
%R 10.1109/CVPRW53098.2021.00366
%D 2021
%B IEEE CVPR Workshop On Fair, Data-Efficient, and Trusted Computer Vision
%Z date of event: 2021-06-25 - 2021-06-25
%C Virtual Workshop
%B Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops
%P 3275 - 3283
%I IEEE
%@ 978-1-6654-4899-4
Orekondy, T., Schiele, B., & Fritz, M. (2020). Prediction Poisoning: Towards Defenses Against DNN Model Stealing Attacks. In International Conference on Learning Representations (ICLR 2020). Addis Ababa, Ethiopia: OpenReview.net. Retrieved from https://iclr.cc/virtual_2020/poster_SyevYxHtDB.html; https://iclr.cc/Conferences/2020
Export
BibTeX
@inproceedings{orekondy20prediction,
TITLE = {Prediction Poisoning: {Towards} Defenses Against {DNN} Model Stealing Attacks},
AUTHOR = {Orekondy, Tribhuvanesh and Schiele, Bernt and Fritz, Mario},
LANGUAGE = {eng},
URL = {https://iclr.cc/virtual_2020/poster_SyevYxHtDB.html},
NOTE = {Conference website: https://iclr.cc/Conferences/2020},
PUBLISHER = {OpenReview.net},
YEAR = {2020},
BOOKTITLE = {International Conference on Learning Representations (ICLR 2020)},
ADDRESS = {Addis Ababa, Ethiopia},
}
Endnote
%0 Conference Proceedings
%A Orekondy, Tribhuvanesh
%A Schiele, Bernt
%A Fritz, Mario
%+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
%T Prediction Poisoning: Towards Defenses Against DNN Model Stealing Attacks :
%G eng
%U http://hdl.handle.net/21.11116/0000-0003-EC93-D
%U https://iclr.cc/virtual_2020/poster_SyevYxHtDB.html
%D 2020
%B 8th International Conference on Learning Representations
%Z date of event: 2020-04-26 - 2020-04-30
%C Addis Ababa, Ethiopia
%B International Conference on Learning Representations
%I OpenReview.net
%U https://openreview.net/pdf?id=SyevYxHtDB
Orekondy, T. (2020). Understanding and Controlling Leakage in Machine Learning. Universität des Saarlandes, Saarbrücken. Retrieved from urn:nbn:de:bsz:291--ds-335519
Export
BibTeX
@phdthesis{Orekondy_PhD2020,
TITLE = {Understanding and Controlling Leakage in Machine Learning},
AUTHOR = {Orekondy, Tribhuvanesh},
LANGUAGE = {eng},
URL = {urn:nbn:de:bsz:291--ds-335519},
DOI = {10.22028/D291-33551},
SCHOOL = {Universit{\"a}t des Saarlandes},
ADDRESS = {Saarbr{\"u}cken},
YEAR = {2020},
}
Endnote
%0 Thesis
%A Orekondy, Tribhuvanesh
%Y Schiele, Bernt
%A referee: Fritz, Mario
%A referee: Frahm, Jan-Michael
%+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Understanding and Controlling Leakage in Machine Learning :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-4ACC-0
%R 10.22028/D291-33551
%U urn:nbn:de:bsz:291--ds-335519
%F OTHER: hdl:20.500.11880/30989
%I Universität des Saarlandes
%C Saarbrücken
%D 2020
%V phd
%9 phd
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/30989
Chen, D., Orekondy, T., & Fritz, M. (2020). GS-WGAN: A Gradient-Sanitized Approach for Learning Differentially Private Generators. In Advances in Neural Information Processing Systems 33 (NeurIPS 2020). Virtual Event: Curran Associates, Inc.
Export
BibTeX
@inproceedings{Chen_NeurIPS20,
  title     = {{GS-WGAN}: {A} Gradient-Sanitized Approach for Learning Differentially Private Generators},
  author    = {Chen, Dingfan and Orekondy, Tribhuvanesh and Fritz, Mario},
  language  = {eng},
  booktitle = {Advances in Neural Information Processing Systems 33 (NeurIPS 2020)},
  editor    = {Larochelle, H. and Ranzato, M. and Hadsell, R. and Balcan, M. F. and Lin, H.},
  pages     = {12673--12684},
  publisher = {Curran Associates, Inc.},
  address   = {Virtual Event},
  year      = {2020},
}
Endnote
%0 Conference Proceedings
%A Chen, Dingfan
%A Orekondy, Tribhuvanesh
%A Fritz, Mario
%+ External Organizations
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
%T GS-WGAN: A Gradient-Sanitized Approach for Learning Differentially Private Generators :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-1866-B
%D 2020
%B 34th Conference on Neural Information Processing Systems
%Z date of event: 2020-12-06 - 2020-12-12
%C Virtual Event
%B Advances in Neural Information Processing Systems 33
%E Larochelle, H.; Ranzato, M.; Hadsell, R.; Balcan, M. F.; Lin, H.
%P 12673 - 12684
%I Curran Associates, Inc.
Orekondy, T., Oh, S. J., Zhang, Y., Schiele, B., & Fritz, M. (in press). Gradient-Leaks: Understanding Deanonymization in Federated Learning. In The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality (FL-NeurIPS 2019). Vancouver, Canada: federated-learning.org. Retrieved from http://federated-learning.org/fl-neurips-2019/
(Accepted/in press) Export
BibTeX
@inproceedings{orekondy19gradient,
TITLE = {Gradient-Leaks: {Understanding} Deanonymization in Federated Learning},
AUTHOR = {Orekondy, Tribhuvanesh and Oh, Seong Joon and Zhang, Yang and Schiele, Bernt and Fritz, Mario},
LANGUAGE = {eng},
URL = {http://federated-learning.org/fl-neurips-2019/},
PUBLISHER = {federated-learning.org},
YEAR = {2019},
PUBLREMARK = {Accepted},
BOOKTITLE = {The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality (FL-NeurIPS 2019)},
ADDRESS = {Vancouver, Canada},
}
Endnote
%0 Conference Proceedings
%A Orekondy, Tribhuvanesh
%A Oh, Seong Joon
%A Zhang, Yang
%A Schiele, Bernt
%A Fritz, Mario
%+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
%T Gradient-Leaks: Understanding Deanonymization in Federated Learning :
%G eng
%U http://hdl.handle.net/21.11116/0000-0005-8E47-C
%U http://federated-learning.org/fl-neurips-2019/
%D 2019
%B The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality
%Z date of event: 2019-12-13 - 2019-12-13
%C Vancouver, Canada
%B The 2nd International Workshop on Federated Learning for Data Privacy and Confidentiality
%I federated-learning.org
Orekondy, T., Schiele, B., & Fritz, M. (2019). Knockoff Nets: Stealing Functionality of Black-Box Models. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019). Long Beach, CA, USA: IEEE. doi:10.1109/CVPR.2019.00509
Export
BibTeX
@inproceedings{orekondy18knockoff,
TITLE = {Knockoff Nets: {Stealing} Functionality of Black-Box Models},
AUTHOR = {Orekondy, Tribhuvanesh and Schiele, Bernt and Fritz, Mario},
LANGUAGE = {eng},
ISBN = {978-1-7281-3293-8},
DOI = {10.1109/CVPR.2019.00509},
PUBLISHER = {IEEE},
YEAR = {2019},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019)},
PAGES = {4949--4958},
ADDRESS = {Long Beach, CA, USA},
}
Endnote
%0 Conference Proceedings
%A Orekondy, Tribhuvanesh
%A Schiele, Bernt
%A Fritz, Mario
%+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
%T Knockoff Nets: Stealing Functionality of Black-Box Models :
%G eng
%U http://hdl.handle.net/21.11116/0000-0002-AA57-D
%R 10.1109/CVPR.2019.00509
%D 2019
%B 32nd IEEE Conference on Computer Vision and Pattern Recognition
%Z date of event: 2019-06-16 - 2019-06-20
%C Long Beach, CA, USA
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 4949 - 4958
%I IEEE
%@ 978-1-7281-3293-8
Orekondy, T., Oh, S. J., Schiele, B., & Fritz, M. (2018). Understanding and Controlling User Linkability in Decentralized Learning. Retrieved from http://arxiv.org/abs/1805.05838
(arXiv: 1805.05838) Abstract
Machine Learning techniques are widely used by online services (e.g. Google,
Apple) in order to analyze and make predictions on user data. As many of the
provided services are user-centric (e.g. personal photo collections, speech
recognition, personal assistance), user data generated on personal devices is
key to provide the service. In order to protect the data and the privacy of the
user, federated learning techniques have been proposed where the data never
leaves the user's device and "only" model updates are communicated back to the
server. In our work, we propose a new threat model that is not concerned with
learning about the content - but rather is concerned with the linkability of
users during such decentralized learning scenarios.
We show that model updates are characteristic for users and therefore lend
themselves to linkability attacks. We show identification and matching of users
across devices in closed and open world scenarios. In our experiments, we find
our attacks to be highly effective, achieving 20x-175x chance-level
performance.
In order to mitigate the risks of linkability attacks, we study various
strategies. As adding random noise does not offer convincing operation points,
we propose strategies based on using calibrated domain-specific data; we find
these strategies offer substantial protection against linkability threats with
little effect to utility.
Export
BibTeX
@online{orekondy18understand,
  title      = {Understanding and Controlling User Linkability in Decentralized Learning},
  author     = {Orekondy, Tribhuvanesh and Oh, Seong Joon and Schiele, Bernt and Fritz, Mario},
  language   = {eng},
  url        = {http://arxiv.org/abs/1805.05838},
  eprint     = {1805.05838},
  eprinttype = {arXiv},
  year       = {2018},
  abstract   = {Machine Learning techniques are widely used by online services (e.g. Google, Apple) in order to analyze and make predictions on user data. As many of the provided services are user-centric (e.g. personal photo collections, speech recognition, personal assistance), user data generated on personal devices is key to provide the service. In order to protect the data and the privacy of the user, federated learning techniques have been proposed where the data never leaves the user's device and "only" model updates are communicated back to the server. In our work, we propose a new threat model that is not concerned with learning about the content -- but rather is concerned with the linkability of users during such decentralized learning scenarios. We show that model updates are characteristic for users and therefore lend themselves to linkability attacks. We show identification and matching of users across devices in closed and open world scenarios. In our experiments, we find our attacks to be highly effective, achieving 20x-175x chance-level performance. In order to mitigate the risks of linkability attacks, we study various strategies. As adding random noise does not offer convincing operation points, we propose strategies based on using calibrated domain-specific data; we find these strategies offers substantial protection against linkability threats with little effect to utility.},
}
Endnote
%0 Report
%A Orekondy, Tribhuvanesh
%A Oh, Seong Joon
%A Schiele, Bernt
%A Fritz, Mario
%+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
%T Understanding and Controlling User Linkability in Decentralized Learning :
%G eng
%U http://hdl.handle.net/21.11116/0000-0001-4BEC-2
%U http://arxiv.org/abs/1805.05838
%D 2018
%X Machine Learning techniques are widely used by online services (e.g. Google,
Apple) in order to analyze and make predictions on user data. As many of the
provided services are user-centric (e.g. personal photo collections, speech
recognition, personal assistance), user data generated on personal devices is
key to provide the service. In order to protect the data and the privacy of the
user, federated learning techniques have been proposed where the data never
leaves the user's device and "only" model updates are communicated back to the
server. In our work, we propose a new threat model that is not concerned with
learning about the content - but rather is concerned with the linkability of
users during such decentralized learning scenarios.
We show that model updates are characteristic for users and therefore lend
themselves to linkability attacks. We show identification and matching of users
across devices in closed and open world scenarios. In our experiments, we find
our attacks to be highly effective, achieving 20x-175x chance-level
performance.
In order to mitigate the risks of linkability attacks, we study various
strategies. As adding random noise does not offer convincing operation points,
we propose strategies based on using calibrated domain-specific data; we find
these strategies offer substantial protection against linkability threats with
little effect to utility.
%K Computer Science, Cryptography and Security, cs.CR,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Learning, cs.LG,Statistics, Machine Learning, stat.ML
Orekondy, T., Fritz, M., & Schiele, B. (2018). Connecting Pixels to Privacy and Utility: Automatic Redaction of Private Information in Images. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018). Salt Lake City, UT, USA: IEEE. doi:10.1109/CVPR.2018.00883
Export
BibTeX
@inproceedings{orekondy17connect,
TITLE = {Connecting Pixels to Privacy and Utility: {Automatic} Redaction of Private Information in Images},
AUTHOR = {Orekondy, Tribhuvanesh and Fritz, Mario and Schiele, Bernt},
LANGUAGE = {eng},
ISBN = {978-1-5386-6420-9},
DOI = {10.1109/CVPR.2018.00883},
PUBLISHER = {IEEE},
YEAR = {2018},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018)},
PAGES = {8466--8475},
ADDRESS = {Salt Lake City, UT, USA},
}
Endnote
%0 Conference Proceedings
%A Orekondy, Tribhuvanesh
%A Fritz, Mario
%A Schiele, Bernt
%+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
%T Connecting Pixels to Privacy and Utility: Automatic Redaction of Private
Information in Images :
%G eng
%U http://hdl.handle.net/11858/00-001M-0000-002E-7D65-4
%R 10.1109/CVPR.2018.00883
%D 2018
%B 31st IEEE Conference on Computer Vision and Pattern Recognition
%Z date of event: 2018-06-18 - 2018-06-22
%C Salt Lake City, UT, USA
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 8466 - 8475
%I IEEE
%@ 978-1-5386-6420-9
Orekondy, T., Schiele, B., & Fritz, M. (2017). Towards a Visual Privacy Advisor: Understanding and Predicting Privacy Risks in Images. In IEEE International Conference on Computer Vision (ICCV 2017). Venice, Italy: IEEE. doi:10.1109/ICCV.2017.398
Export
BibTeX
@inproceedings{orekondy17iccv,
TITLE = {Towards a {Visual Privacy Advisor}: {Understanding} and Predicting Privacy Risks in Images},
AUTHOR = {Orekondy, Tribhuvanesh and Schiele, Bernt and Fritz, Mario},
LANGUAGE = {eng},
ISBN = {978-1-5386-1032-9},
DOI = {10.1109/ICCV.2017.398},
PUBLISHER = {IEEE},
YEAR = {2017},
BOOKTITLE = {IEEE International Conference on Computer Vision (ICCV 2017)},
PAGES = {3706--3715},
ADDRESS = {Venice, Italy},
}
Endnote
%0 Conference Proceedings
%A Orekondy, Tribhuvanesh
%A Schiele, Bernt
%A Fritz, Mario
%+ Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society
%T Towards a Visual Privacy Advisor: Understanding and Predicting Privacy Risks in Images :
%G eng
%U http://hdl.handle.net/11858/00-001M-0000-002C-E65F-8
%R 10.1109/ICCV.2017.398
%D 2017
%B International Conference on Computer Vision
%Z date of event: 2017-10-22 - 2017-10-29
%C Venice, Italy
%B IEEE International Conference on Computer Vision
%P 3706 - 3715
%I IEEE
%@ 978-1-5386-1032-9