We propose a vision-proprioception model for planar object pushing that efficiently integrates all necessary information from the environment. A Variational Autoencoder (VAE) extracts compact representations from the task-relevant part of the image. Since the real-time robot state is easily obtained from the hardware system, we fuse the latent representation from the VAE with the robot end-effector position to form the state of a Markov Decision Process. We use Soft Actor-Critic to train the robot to push different objects from random initial poses to target positions in simulation, and apply Hindsight Experience Replay during training to improve sample efficiency. Experiments demonstrate that our algorithm achieves pushing performance superior to a state-based baseline model, which cannot generalize to different objects, and outperforms state-of-the-art policies that operate on raw image observations. Finally, we verify that our trained model generalizes well to unseen objects in the real world.
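The sketch below illustrates the core idea of the vision-proprioception state: a VAE encoder compresses the task-relevant image crop into a latent vector, which is concatenated with the end-effector position to form the MDP state fed to the SAC policy. This is a minimal PyTorch sketch under assumed dimensions; the layer sizes, latent dimension, and function names (`VAEEncoder`, `fuse_state`) are illustrative, not the paper's exact architecture.

```python
import torch
import torch.nn as nn

class VAEEncoder(nn.Module):
    """Encodes the task-relevant image crop into a compact latent vector.
    Architecture and latent_dim are assumptions for illustration."""
    def __init__(self, latent_dim: int = 6):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(3, 32, 4, stride=2), nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(),
            nn.Flatten(),
        )
        # Lazy linear layers infer the flattened size from the first batch.
        self.fc_mu = nn.LazyLinear(latent_dim)
        self.fc_logvar = nn.LazyLinear(latent_dim)

    def forward(self, img: torch.Tensor):
        h = self.conv(img)
        return self.fc_mu(h), self.fc_logvar(h)

def fuse_state(encoder: VAEEncoder, img: torch.Tensor, ee_pos: torch.Tensor):
    """Concatenate the VAE latent with the end-effector position to form
    the MDP state for the SAC policy."""
    with torch.no_grad():        # keep the pretrained encoder frozen during RL
        mu, _ = encoder(img)     # use the latent mean as a deterministic code
    return torch.cat([mu, ee_pos], dim=-1)

# Example: a 64x64 image crop and a planar (x, y) end-effector position.
encoder = VAEEncoder(latent_dim=6)
img = torch.rand(1, 3, 64, 64)
ee_pos = torch.tensor([[0.45, -0.10]])
state = fuse_state(encoder, img, ee_pos)  # shape: (1, 8)
```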
@article{cong2022reinforcement,
author = {Cong, Lin and Liang, Hongzhuo and Ruppel, Philipp and Shi, Yunlei and Görner, Michael and Hendrich, Norman and Zhang, Jianwei},
title = {Reinforcement Learning With Vision-Proprioception Model for Robot Planar Pushing},
journal = {Frontiers in Neurorobotics},
year = {2022},
}