Abstract
Autoregressive (AR) models have achieved remarkable success in natural
language and image generation, but their application to 3D shape modeling
remains largely unexplored. Unlike diffusion models, AR models enable more
efficient and controllable generation with faster inference times, making them
especially suitable for data-intensive domains. Traditional 3D generative
models using AR approaches often rely on "next-token" predictions at the voxel
or point level. While effective for certain applications, these methods can be
restrictive and computationally expensive when dealing with large-scale 3D
data. To tackle these challenges, we introduce 3D-WAG, an AR model for 3D
implicit distance fields that can perform unconditional shape generation,
class-conditioned and also text-conditioned shape generation. Our key idea is
to encode shapes as multi-scale wavelet token maps and use a Transformer to
predict the "next higher-resolution token map" in an autoregressive manner. By
redefining the 3D AR generation task as "next-scale" prediction, we reduce the
computational cost of generation compared to traditional "next-token"
prediction models, while preserving essential geometric details of 3D shapes in
a more structured and hierarchical manner. We evaluate 3D-WAG to showcase its
benefit by quantitative and qualitative comparisons with state-of-the-art
methods on widely used benchmarks. Our results show 3D-WAG achieves superior
performance in key metrics like Coverage and MMD, generating high-fidelity 3D
shapes that closely match the real data distribution.
BibTeX
@inproceedings{MediBMVC25,
  title        = {{3D-WAG}: Hierarchical Wavelet-Guided Autoregressive Generation for High-Fidelity {3D} Shapes},
  author       = {Medi, Tejaswini and Rampini, Arianna and Reddy, Pradyumna and Jayaraman, Pradeep Kumar and Keuper, Margret},
  language     = {eng},
  url          = {https://bmvc2025.bmva.org/proceedings/1064/},
  publisher    = {BMVA Press},
  year         = {2025},
  marginalmark = {$\bullet$},
  abstract     = {Autoregressive (AR) models have achieved remarkable success in natural language and image generation, but their application to 3D shape modeling remains largely unexplored. Unlike diffusion models, AR models enable more efficient and controllable generation with faster inference times, making them especially suitable for data-intensive domains. Traditional 3D generative models using AR approaches often rely on ``next-token'' predictions at the voxel or point level. While effective for certain applications, these methods can be restrictive and computationally expensive when dealing with large-scale 3D data. To tackle these challenges, we introduce 3D-WAG, an AR model for 3D implicit distance fields that can perform unconditional shape generation, class-conditioned and also text-conditioned shape generation. Our key idea is to encode shapes as multi-scale wavelet token maps and use a Transformer to predict the ``next higher-resolution token map'' in an autoregressive manner. By redefining 3D AR generation task as ``next-scale'' prediction, we reduce the computational cost of generation compared to traditional ``next-token'' prediction models, while preserving essential geometric details of 3D shapes in a more structured and hierarchical manner. We evaluate 3D-WAG to showcase its benefit by quantitative and qualitative comparisons with state-of-the-art methods on widely used benchmarks. Our results show 3D-WAG achieves superior performance in key metrics like Coverage and MMD, generating high-fidelity 3D shapes that closely match the real data distribution.},
  booktitle    = {36th British Machine Vision Conference (BMVC 2025)},
  pages        = {1--19},
  eid          = {1064},
  address      = {Sheffield, UK},
}
Endnote
%0 Conference Proceedings %A Medi, Tejaswini %A Rampini, Arianna %A Reddy, Pradyumna %A Jayaraman, Pradeep Kumar %A Keuper, Margret %+ External Organizations External Organizations External Organizations External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T 3D-WAG: Hierarchical Wavelet-Guided Autoregressive Generation for High-Fidelity 3D Shapes %G eng %U http://hdl.handle.net/21.11116/0000-0010-BF26-D %U https://bmvc2025.bmva.org/proceedings/1064/ %D 2025 %B 36th British Machine Vision Conference %Z date of event: 2025-11-24 - 2025-11-27 %C Sheffield, UK %X Autoregressive (AR) models have achieved remarkable success in natural language and image generation, but their application to 3D shape modeling remains largely unexplored. Unlike diffusion models, AR models enable more efficient and controllable generation with faster inference times, making them especially suitable for data-intensive domains. Traditional 3D generative models using AR approaches often rely on "next-token" predictions at the voxel or point level. While effective for certain applications, these methods can be restrictive and computationally expensive when dealing with large-scale 3D data. To tackle these challenges, we introduce 3D-WAG, an AR model for 3D implicit distance fields that can perform unconditional shape generation, class-conditioned and also text-conditioned shape generation. Our key idea is to encode shapes as multi-scale wavelet token maps and use a Transformer to predict the "next higher-resolution token map" in an autoregressive manner. By redefining 3D AR generation task as "next-scale" prediction, we reduce the computational cost of generation compared to traditional "next-token" prediction models, while preserving essential geometric details of 3D shapes in a more structured and hierarchical manner. We evaluate 3D-WAG to showcase its benefit by quantitative and qualitative comparisons with state-of-the-art methods on widely used benchmarks. Our results show 3D-WAG achieves superior performance in key metrics like Coverage and MMD, generating high-fidelity 3D shapes that closely match the real data distribution. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV %B 36th British Machine Vision Conference %P 1 - 19 %Z sequence number: 1064 %I BMVA Press
