D2
Computer Vision and Machine Learning

Yue Fan (PhD Student)

MSc Yue Fan

Address
Max-Planck-Institut für Informatik
Saarland Informatics Campus
Campus E1 4
66123 Saarbrücken
Location
E1 4 - 608
Phone
+49 681 9325 2138
Fax
+49 681 9325 2099

Personal Information

Publications

Fan, Y., Xian, Y., Losch, M. M., & Schiele, B. (2021). Analyzing the Dependency of ConvNets on Spatial Information. In Pattern Recognition (GCPR 2020). Tübingen, Germany: Springer. doi:10.1007/978-3-030-71278-5_8
Export
BibTeX
@inproceedings{Fan_GCPR2020,
  TITLE        = {Analyzing the Dependency of {ConvNets} on Spatial Information},
  AUTHOR       = {Fan, Yue and Xian, Yongqin and Losch, Max Maria and Schiele, Bernt},
  LANGUAGE     = {eng},
  ISBN         = {978-3-030-71277-8},
  DOI          = {10.1007/978-3-030-71278-5_8},
  PUBLISHER    = {Springer},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Pattern Recognition (GCPR 2020)},
  EDITOR       = {Akata, Zeynep and Geiger, Andreas and Sattler, Torsten},
  PAGES        = {101--115},
  SERIES       = {Lecture Notes in Computer Science},
  VOLUME       = {12544},
  ADDRESS      = {T{\"u}bingen, Germany},
}
Endnote
%0 Conference Proceedings %A Fan, Yue %A Xian, Yongqin %A Losch, Max Maria %A Schiele, Bernt %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Analyzing the Dependency of ConvNets on Spatial Information : %G eng %U http://hdl.handle.net/21.11116/0000-0008-3292-A %R 10.1007/978-3-030-71278-5_8 %D 2021 %B 42nd German Conference on Pattern Recognition %Z date of event: 2020-09-28 - 2020-10-01 %C Tübingen, Germany %B Pattern Recognition %E Akata, Zeynep; Geiger, Andreas; Sattler, Torsten %P 101 - 115 %I Springer %@ 978-3-030-71277-8 %B Lecture Notes in Computer Science %N 12544
Fan, Y., Dai, D., & Schiele, B. (2022). CoSSL: Co-Learning of Representation and Classifier for Imbalanced Semi-Supervised Learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022). New Orleans, LA, USA: IEEE. doi:10.1109/CVPR52688.2022.01417
Abstract
In this paper, we propose a novel co-learning framework (CoSSL) with decoupled representation learning and classifier learning for imbalanced SSL. To handle the data imbalance, we devise Tail-class Feature Enhancement (TFE) for classifier learning. Furthermore, the current evaluation protocol for imbalanced SSL focuses only on balanced test sets, which has limited practicality in real-world scenarios. Therefore, we further conduct a comprehensive evaluation under various shifted test distributions. In experiments, we show that our approach outperforms other methods over a large range of shifted distributions, achieving state-of-the-art performance on benchmark datasets ranging from CIFAR-10, CIFAR-100, ImageNet, to Food-101. Our code will be made publicly available.
Export
BibTeX
@inproceedings{Fan_CVPR2022,
  TITLE        = {{CoSSL}: {C}o-Learning of Representation and Classifier for Imbalanced Semi-Supervised Learning},
  AUTHOR       = {Fan, Yue and Dai, Dengxin and Schiele, Bernt},
  LANGUAGE     = {eng},
  ISBN         = {978-1-6654-6946-3},
  DOI          = {10.1109/CVPR52688.2022.01417},
  PUBLISHER    = {IEEE},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {In this paper, we propose a novel co-learning framework (CoSSL) with
                  decoupled representation learning and classifier learning for imbalanced SSL.
                  To handle the data imbalance, we devise Tail-class Feature Enhancement (TFE)
                  for classifier learning. Furthermore, the current evaluation protocol for
                  imbalanced SSL focuses only on balanced test sets, which has limited
                  practicality in real-world scenarios. Therefore, we further conduct a
                  comprehensive evaluation under various shifted test distributions. In
                  experiments, we show that our approach outperforms other methods over a large
                  range of shifted distributions, achieving state-of-the-art performance on
                  benchmark datasets ranging from CIFAR-10, CIFAR-100, ImageNet, to Food-101.
                  Our code will be made publicly available.},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022)},
  PAGES        = {14554--14564},
  ADDRESS      = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings %A Fan, Yue %A Dai, Dengxin %A Schiele, Bernt %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T CoSSL: Co-Learning of Representation and Classifier for Imbalanced Semi-Supervised Learning : %G eng %U http://hdl.handle.net/21.11116/0000-000A-16BA-C %R 10.1109/CVPR52688.2022.01417 %D 2022 %B 35th IEEE/CVF Conference on Computer Vision and Pattern Recognition %Z date of event: 2022-06-19 - 2022-06-24 %C New Orleans, LA, USA %X In this paper, we propose a novel co-learning framework (CoSSL) with<br>decoupled representation learning and classifier learning for imbalanced SSL.<br>To handle the data imbalance, we devise Tail-class Feature Enhancement (TFE)<br>for classifier learning. Furthermore, the current evaluation protocol for<br>imbalanced SSL focuses only on balanced test sets, which has limited<br>practicality in real-world scenarios. Therefore, we further conduct a<br>comprehensive evaluation under various shifted test distributions. In<br>experiments, we show that our approach outperforms other methods over a large<br>range of shifted distributions, achieving state-of-the-art performance on<br>benchmark datasets ranging from CIFAR-10, CIFAR-100, ImageNet, to Food-101. Our<br>code will be made publicly available.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Learning, cs.LG %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 14554 - 14564 %I IEEE %@ 978-1-6654-6946-3
Fan, Y., Kukleva, A., & Schiele, B. (2022). Revisiting Consistency Regularization for Semi-supervised Learning. In Pattern Recognition (GCPR 2021). Bonn, Germany: Springer. doi:10.1007/978-3-030-92659-5_5
Export
BibTeX
@inproceedings{Fan_GCPR2021,
  TITLE        = {Revisiting Consistency Regularization for Semi-supervised Learning},
  AUTHOR       = {Fan, Yue and Kukleva, Anna and Schiele, Bernt},
  LANGUAGE     = {eng},
  ISBN         = {978-3-030-92659-5; 978-3-030-92658-8},
  DOI          = {10.1007/978-3-030-92659-5_5},
  PUBLISHER    = {Springer},
  YEAR         = {2021},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Pattern Recognition (GCPR 2021)},
  EDITOR       = {Bauckhage, Christian and Gall, J{\"u}rgen and Schwing, Alexander},
  PAGES        = {63--78},
  SERIES       = {Lecture Notes in Computer Science},
  VOLUME       = {13024},
  ADDRESS      = {Bonn, Germany},
}
Endnote
%0 Conference Proceedings %A Fan, Yue %A Kukleva, Anna %A Schiele, Bernt %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Revisiting Consistency Regularization for Semi-supervised Learning : %G eng %U http://hdl.handle.net/21.11116/0000-000C-4358-6 %R 10.1007/978-3-030-92659-5_5 %D 2022 %B 43rd German Conference on Pattern Recognition %Z date of event: 2021-09-28 - 2021-10-01 %C Bonn, Germany %B Pattern Recognition %E Bauckhage, Christian; Gall, Jürgen; Schwing, Alexander %P 63 - 78 %I Springer %@ 978-3-030-92659-5 978-3-030-92658-8 %B Lecture Notes in Computer Science %N 13024
Fan, Y., Xian, Y., Losch, M. M., & Schiele, B. (2020). Analyzing the Dependency of ConvNets on Spatial Information. Retrieved from https://arxiv.org/abs/2002.01827
(arXiv: 2002.01827)
Abstract
Intuitively, image classification should profit from using spatial information. Recent work, however, suggests that this might be overrated in standard CNNs. In this paper, we are pushing the envelope and aim to further investigate the reliance on spatial information. We propose spatial shuffling and GAP+FC to destroy spatial information during both training and testing phases. Interestingly, we observe that spatial information can be deleted from later layers with small performance drops, which indicates spatial information at later layers is not necessary for good performance. For example, test accuracy of VGG-16 only drops by 0.03% and 2.66% with spatial information completely removed from the last 30% and 53% layers on CIFAR100, respectively. Evaluation on several object recognition datasets (CIFAR100, Small-ImageNet, ImageNet) with a wide range of CNN architectures (VGG16, ResNet50, ResNet152) shows an overall consistent pattern.
Export
BibTeX
@online{Fan_arXiv2002.01827,
  TITLE       = {Analyzing the Dependency of {ConvNets} on Spatial Information},
  AUTHOR      = {Fan, Yue and Xian, Yongqin and Losch, Max Maria and Schiele, Bernt},
  LANGUAGE    = {eng},
  URL         = {https://arxiv.org/abs/2002.01827},
  EPRINT      = {2002.01827},
  EPRINTTYPE  = {arXiv},
  EPRINTCLASS = {cs.CV},
  YEAR        = {2020},
  ABSTRACT    = {Intuitively, image classification should profit from using spatial
                 information. Recent work, however, suggests that this might be overrated in
                 standard CNNs. In this paper, we are pushing the envelope and aim to further
                 investigate the reliance on spatial information. We propose spatial shuffling
                 and GAP+FC to destroy spatial information during both training and testing
                 phases. Interestingly, we observe that spatial information can be deleted from
                 later layers with small performance drops, which indicates spatial information
                 at later layers is not necessary for good performance. For example, test
                 accuracy of VGG-16 only drops by 0.03\% and 2.66\% with spatial information
                 completely removed from the last 30\% and 53\% layers on CIFAR100,
                 respectively. Evaluation on several object recognition datasets (CIFAR100,
                 Small-ImageNet, ImageNet) with a wide range of CNN architectures (VGG16,
                 ResNet50, ResNet152) shows an overall consistent pattern.},
}
Endnote
%0 Report %A Fan, Yue %A Xian, Yongqin %A Losch, Max Maria %A Schiele, Bernt %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Analyzing the Dependency of ConvNets on Spatial Information : %G eng %U http://hdl.handle.net/21.11116/0000-0007-80CB-3 %U https://arxiv.org/abs/2002.01827 %D 2020 %X Intuitively, image classification should profit from using spatial<br>information. Recent work, however, suggests that this might be overrated in<br>standard CNNs. In this paper, we are pushing the envelope and aim to further<br>investigate the reliance on spatial information. We propose spatial shuffling<br>and GAP+FC to destroy spatial information during both training and testing<br>phases. Interestingly, we observe that spatial information can be deleted from<br>later layers with small performance drops, which indicates spatial information<br>at later layers is not necessary for good performance. For example, test<br>accuracy of VGG-16 only drops by 0.03% and 2.66% with spatial information<br>completely removed from the last 30% and 53% layers on CIFAR100, respectively.<br>Evaluation on several object recognition datasets (CIFAR100, Small-ImageNet,<br>ImageNet) with a wide range of CNN architectures (VGG16, ResNet50, ResNet152)<br>shows an overall consistent pattern.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Fan, Y., Kukleva, A., Dai, D., & Schiele, B. (2023). Revisiting Consistency Regularization for Semi-supervised Learning. International Journal of Computer Vision, 131. doi:10.1007/s11263-022-01723-4
Export
BibTeX
@article{Fan22,
  TITLE        = {Revisiting Consistency Regularization for Semi-supervised Learning},
  AUTHOR       = {Fan, Yue and Kukleva, Anna and Dai, Dengxin and Schiele, Bernt},
  LANGUAGE     = {eng},
  ISSN         = {0920-5691},
  DOI          = {10.1007/s11263-022-01723-4},
  PUBLISHER    = {Springer},
  ADDRESS      = {New York, NY},
  YEAR         = {2023},
  MARGINALMARK = {$\bullet$},
  DATE         = {2023},
  JOURNAL      = {International Journal of Computer Vision},
  VOLUME       = {131},
  PAGES        = {626--643},
}
Endnote
%0 Journal Article %A Fan, Yue %A Kukleva, Anna %A Dai, Dengxin %A Schiele, Bernt %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Revisiting Consistency Regularization for Semi-supervised Learning : %G eng %U http://hdl.handle.net/21.11116/0000-000C-73A9-4 %R 10.1007/s11263-022-01723-4 %7 2022 %D 2023 %J International Journal of Computer Vision %O Int. J. Comput. Vis. %V 131 %& 626 %P 626 - 643 %I Springer %C New York, NY %@ false
Fan, Y. (2019). Analyzing the Dependency of ConvNets on Spatial Information. Universität des Saarlandes, Saarbrücken.
Export
BibTeX
@mastersthesis{FanMaster2019,
  TITLE    = {Analyzing the Dependency of {ConvNets} on Spatial Information},
  AUTHOR   = {Fan, Yue},
  LANGUAGE = {eng},
  SCHOOL   = {Universit{\"a}t des Saarlandes},
  ADDRESS  = {Saarbr{\"u}cken},
  YEAR     = {2019},
  DATE     = {2019},
}
Endnote
%0 Thesis %A Fan, Yue %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Analyzing the Dependency of ConvNets on Spatial Information : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B435-2 %I Universität des Saarlandes %C Saarbrücken %D 2019 %P 75 p. %V master %9 master
Chen, H., Tao, R., Fan, Y., Wang, Y., Savvides, M., Wang, J., … Schiele, B. (n.d.). SoftMatch: Addressing the Quantity-Quality Tradeoff in Semi-supervised Learning. In Eleventh International Conference on Learning Representations (ICLR 2023). Kigali, Rwanda: OpenReview.net.
(Accepted/in press)
Export
BibTeX
@inproceedings{Chen_ICLR23,
  TITLE        = {{SoftMatch}: {A}ddressing the Quantity-Quality Tradeoff in Semi-supervised Learning},
  AUTHOR       = {Chen, Hao and Tao, Ran and Fan, Yue and Wang, Yidong and Savvides, Marios and Wang, Jindong and Raj, Bhiksha and Xie, Xing and Schiele, Bernt},
  LANGUAGE     = {eng},
  PUBLISHER    = {OpenReview.net},
  YEAR         = {2023},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Eleventh International Conference on Learning Representations (ICLR 2023)},
  ADDRESS      = {Kigali, Rwanda},
}
Endnote
%0 Conference Proceedings %A Chen, H. %A Tao, R. %A Fan, Yue %A Wang, Y. %A Savvides, M. %A Wang, J. %A Raj, B. %A Xie, X. %A Schiele, Bernt %+ External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T SoftMatch: Addressing the Quantity-Quality Tradeoff in Semi-supervised Learning : %G eng %U http://hdl.handle.net/21.11116/0000-000C-73BD-E %D 2023 %B Eleventh International Conference on Learning Representations %Z date of event: 2023-05-01 - 2023-05-05 %C Kigali, Rwanda %B Eleventh International Conference on Learning Representations %I OpenReview.net
Chen, H., Fan, Y., Wang, Y., Wang, J., Schiele, B., Xie, X., … Raj, B. (2022). An Embarrassingly Simple Baseline for Imbalanced Semi-Supervised Learning. Retrieved from https://arxiv.org/abs/2211.11086
(arXiv: 2211.11086)
Abstract
Semi-supervised learning (SSL) has shown great promise in leveraging unlabeled data to improve model performance. While standard SSL assumes uniform data distribution, we consider a more realistic and challenging setting called imbalanced SSL, where imbalanced class distributions occur in both labeled and unlabeled data. Although there are existing endeavors to tackle this challenge, their performance degenerates when facing severe imbalance since they can not reduce the class imbalance sufficiently and effectively. In this paper, we study a simple yet overlooked baseline -- SimiS -- which tackles data imbalance by simply supplementing labeled data with pseudo-labels, according to the difference in class distribution from the most frequent class. Such a simple baseline turns out to be highly effective in reducing class imbalance. It outperforms existing methods by a significant margin, e.g., 12.8%, 13.6%, and 16.7% over previous SOTA on CIFAR100-LT, FOOD101-LT, and ImageNet127 respectively. The reduced imbalance results in faster convergence and better pseudo-label accuracy of SimiS. The simplicity of our method also makes it possible to be combined with other re-balancing techniques to improve the performance further. Moreover, our method shows great robustness to a wide range of data distributions, which holds enormous potential in practice. Code will be publicly available.
Export
BibTeX
@online{Chen2211.11086,
  TITLE        = {An Embarrassingly Simple Baseline for Imbalanced Semi-Supervised Learning},
  AUTHOR       = {Chen, Hao and Fan, Yue and Wang, Yidong and Wang, Jindong and Schiele, Bernt and Xie, Xing and Savvides, Marios and Raj, Bhiksha},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2211.11086},
  EPRINT       = {2211.11086},
  EPRINTTYPE   = {arXiv},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Semi-supervised learning (SSL) has shown great promise in leveraging
                  unlabeled data to improve model performance. While standard SSL assumes
                  uniform data distribution, we consider a more realistic and challenging
                  setting called imbalanced SSL, where imbalanced class distributions occur
                  in both labeled and unlabeled data. Although there are existing endeavors
                  to tackle this challenge, their performance degenerates when facing severe
                  imbalance since they can not reduce the class imbalance sufficiently and
                  effectively. In this paper, we study a simple yet overlooked baseline --
                  SimiS -- which tackles data imbalance by simply supplementing labeled data
                  with pseudo-labels, according to the difference in class distribution from
                  the most frequent class. Such a simple baseline turns out to be highly
                  effective in reducing class imbalance. It outperforms existing methods by a
                  significant margin, e.g., 12.8\%, 13.6\%, and 16.7\% over previous SOTA on
                  CIFAR100-LT, FOOD101-LT, and ImageNet127 respectively. The reduced imbalance
                  results in faster convergence and better pseudo-label accuracy of SimiS.
                  The simplicity of our method also makes it possible to be combined with
                  other re-balancing techniques to improve the performance further. Moreover,
                  our method shows great robustness to a wide range of data distributions,
                  which holds enormous potential in practice. Code will be publicly
                  available.},
}
Endnote
%0 Report %A Chen, Hao %A Fan, Yue %A Wang, Yidong %A Wang, Jindong %A Schiele, Bernt %A Xie, Xing %A Savvides, Marios %A Raj, Bhiksha %+ External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T An Embarrassingly Simple Baseline for Imbalanced Semi-Supervised Learning : %G eng %U http://hdl.handle.net/21.11116/0000-000C-185A-5 %U https://arxiv.org/abs/2211.11086 %D 2022 %X Semi-supervised learning (SSL) has shown great promise in leveraging<br>unlabeled data to improve model performance. While standard SSL assumes uniform<br>data distribution, we consider a more realistic and challenging setting called<br>imbalanced SSL, where imbalanced class distributions occur in both labeled and<br>unlabeled data. Although there are existing endeavors to tackle this challenge,<br>their performance degenerates when facing severe imbalance since they can not<br>reduce the class imbalance sufficiently and effectively. In this paper, we<br>study a simple yet overlooked baseline -- SimiS -- which tackles data imbalance<br>by simply supplementing labeled data with pseudo-labels, according to the<br>difference in class distribution from the most frequent class. Such a simple<br>baseline turns out to be highly effective in reducing class imbalance. It<br>outperforms existing methods by a significant margin, e.g., 12.8%, 13.6%, and<br>16.7% over previous SOTA on CIFAR100-LT, FOOD101-LT, and ImageNet127<br>respectively. The reduced imbalance results in faster convergence and better<br>pseudo-label accuracy of SimiS. The simplicity of our method also makes it<br>possible to be combined with other re-balancing techniques to improve the<br>performance further. Moreover, our method shows great robustness to a wide<br>range of data distributions, which holds enormous potential in practice. Code<br>will be publicly available.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Learning, cs.LG
Wang, Y., Chen, H., Heng, Q., Hou, W., Fan, Y., Wu, Z., … Xie, X. (n.d.). FreeMatch: Self-adaptive Thresholding for Semi-supervised Learning. In Eleventh International Conference on Learning Representations (ICLR 2023). Kigali, Rwanda: OpenReview.net.
(Accepted/in press)
Export
BibTeX
@inproceedings{Wang_ICLR2023,
  TITLE        = {{FreeMatch}: Self-adaptive Thresholding for Semi-supervised Learning},
  AUTHOR       = {Wang, Yidong and Chen, Hao and Heng, Qiang and Hou, Wenxin and Fan, Yue and Wu, Zhen and Wang, Jindong and Savvides, Marios and Shinozaki, Takahiro and Raj, Bhiksha and Schiele, Bernt and Xie, Xing},
  LANGUAGE     = {eng},
  PUBLISHER    = {OpenReview.net},
  YEAR         = {2023},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Eleventh International Conference on Learning Representations (ICLR 2023)},
  ADDRESS      = {Kigali, Rwanda},
}
Endnote
%0 Conference Proceedings %A Wang, Yidong %A Chen, Hao %A Heng, Qiang %A Hou, Wenxin %A Fan, Yue %A Wu, Zhen %A Wang, Jindong %A Savvides, Marios %A Shinozaki, Takahiro %A Raj, Bhiksha %A Schiele, Bernt %A Xie, Xing %+ External Organizations External Organizations External Organizations External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations %T FreeMatch: Self-adaptive Thresholding for Semi-supervised Learning : %G eng %U http://hdl.handle.net/21.11116/0000-000C-1846-B %D 2023 %B Eleventh International Conference on Learning Representations %Z date of event: 2023-05-01 - 2023-05-05 %C Kigali, Rwanda %B Eleventh International Conference on Learning Representations %I OpenReview.net
Wang, Y., Chen, H., Fan, Y., Sun, W., Tao, R., Hou, W., … Zhang, Y. (2022). USB: A Unified Semi-supervised Learning Benchmark for Classification. In Advances in Neural Information Processing Systems 35 (NeurIPS 2022). New Orleans, LA, USA: Curran Associates, Inc.
Export
BibTeX
@inproceedings{Wang_Neurips22,
  TITLE        = {{USB}: {A} Unified Semi-supervised Learning Benchmark for Classification},
  AUTHOR       = {Wang, Yidong and Chen, Hao and Fan, Yue and Sun, Wang and Tao, Ran and Hou, Wenxin and Wang, Renjie and Yang, Linyi and Zhou, Zhi and Guo, Lan-Zhe and Qi, Heli and Wu, Zhen and Li, Yu-Feng and Nakamura, Satoshi and Ye, Wei and Savvides, Marios and Raj, Bhiksha and Shinozaki, Takahiro and Schiele, Bernt and Wang, Jindong and Xie, Xing and Zhang, Yue},
  LANGUAGE     = {eng},
  PUBLISHER    = {Curran Associates, Inc.},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {Advances in Neural Information Processing Systems 35 (NeurIPS 2022)},
  EDITOR       = {Koyejo, S. and Mohamed, S. and Agarwal, A. and Belgrave, D. and Cho, K. and Oh, A.},
  PAGES        = {3938--3961},
  ADDRESS      = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings %A Wang, Yidong %A Chen, Hao %A Fan, Yue %A Sun, Wang %A Tao, Ran %A Hou, Wenxin %A Wang, Renjie %A Yang, Linyi %A Zhou, Zhi %A Guo, Lan-Zhe %A Qi, Heli %A Wu, Zhen %A Li, Yu-Feng %A Nakamura, Satoshi %A Ye, Wei %A Savvides, Marios %A Raj, Bhiksha %A Shinozaki, Takahiro %A Schiele, Bernt %A Wang, Jindong %A Xie, Xing %A Zhang, Yue %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T USB: A Unified Semi-supervised Learning Benchmark for Classification : %G eng %U http://hdl.handle.net/21.11116/0000-000C-184A-7 %D 2022 %B 36th Conference on Neural Information Processing Systems %Z date of event: 2022-11-28 - 2022-12-09 %C New Orleans, LA, USA %B Advances in Neural Information Processing Systems 35 %E Koyejo, S.; Mohamed, S.; Agarwal, A.; Belgrave, D.; Cho, K.; Oh, A. %P 3938 - 3961 %I Curran Associates, Inc. %U https://openreview.net/pdf?id=QeuwINa96C