@online{Vutukur2408.05867,
  title        = {{SABER}-{6D}: Shape Representation Based Implicit Object Pose Estimation},
  author       = {Vutukur, Shishir Reddy and Ba, Mengkejiergeli and Busam, Benjamin and Kayser, Matthias and Singh, Gurprit},
  language     = {eng},
  url          = {https://arxiv.org/abs/2408.05867},
  eprint       = {2408.05867},
  eprinttype   = {arXiv},
  year         = {2024},
  marginalmark = {$\bullet$},
  abstract     = {In this paper, we propose a novel encoder-decoder architecture, named SABER, to learn the 6D pose of the object in the embedding space by learning shape representation at a given pose. This model enables us to learn pose by performing shape representation at a target pose from RGB image input. We perform shape representation as an auxiliary task which helps us in learning rotations space for an object based on 2D images. An image encoder predicts the rotation in the embedding space and the DeepSDF based decoder learns to represent the object's shape at the given pose. As our approach is shape based, the pipeline is suitable for any type of object irrespective of the symmetry. Moreover, we need only a CAD model of the objects to train SABER. Our pipeline is synthetic data based and can also handle symmetric objects without symmetry labels and, thus, no additional labeled training data is needed. The experimental evaluation shows that our method achieves close to benchmark results for both symmetric objects and asymmetric objects on Occlusion-LineMOD, and T-LESS datasets.},
}