@conference {347, title = {Adaptive Saccade Controller Inspired by the Primates{\textquoteright} Cerebellum}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA)}, year = {2015}, month = {05/2015}, address = {Seattle, Washington, USA}, abstract = {
Saccades are fast eye movements that allow humans and robots to bring a visual target to the center of the visual field. Saccades are open loop with respect to the vision system; their execution therefore requires precise knowledge of the internal model of the oculomotor system. In this work, we modeled saccade control taking inspiration from the recurrent loops between the cerebellum and the brainstem. In this model, the brainstem acts as a fixed inverse model of the oculomotor system, while the cerebellum acts as an adaptive element that learns the internal model of the oculomotor system. The adaptive filter is implemented using a state-of-the-art neural network, called I-SSGPR. The proposed approach, the recurrent architecture, was validated through experiments performed both in simulation and on an anthropomorphic robotic head. Moreover, we compared the recurrent architecture with another model of the cerebellum, feedback error learning. The results show that the recurrent architecture outperforms feedback error learning in terms of accuracy and insensitivity to the choice of the feedback controller.
}, keywords = {Biologically-Inspired Robots, Control Architectures and Programming, Learning and Adaptive Systems}, author = {Antonelli, Marco and Angel J Duran and Eris Chinellato and Angel P. del Pobil} } @conference {374, title = {Tombatossals: A humanoid torso for autonomous sensor-based tasks}, booktitle = {Humanoid Robots (Humanoids), 2015 IEEE-RAS 15th International Conference on}, year = {2015}, publisher = {IEEE}, organization = {IEEE}, author = {Felip, Javier and Angel J Duran and Antonelli, Marco and Morales, Antonio and Angel P. del Pobil} } @conference {289, title = {Bayesian Multimodal Integration in a Robot Replicating Human Head and Eye Movements}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA)}, year = {2014}, keywords = {eye-movements, head-saccades, model, multisensory-integration, neurorobotics, Robotics}, author = {Marco Antonelli and Angel P. del Pobil and Rucci, Michele} } @article {296, title = {A hierarchical system for a distributed representation of the peripersonal space of a humanoid robot}, journal = {IEEE Transactions on Autonomous Mental Development}, year = {2014}, pages = {1{\textendash}15}, doi = {10.1109/TAMD.2014.2332875}, author = {Marco Antonelli and Gibaldi, Agostino and Beuth, Frederik and Angel J Duran and Canessa, Andrea and Chessa, Manuela and Solari, F. and Angel P. del Pobil and Hamker, F. and Eris Chinellato and Sabatini, S. P.} } @article {Antonelli2014, title = {Learning the visual-oculomotor transformation: Effects on saccade control and space representation}, journal = {Robotics and Autonomous Systems}, year = {2014}, abstract = {

Active eye movements can be exploited to build a visuomotor representation of the surrounding environment. Maintaining and improving such a representation requires updating the internal model involved in the generation of eye movements. From this perspective, action and perception are tightly coupled and interdependent. In this work, we encoded the internal model for oculomotor control with an adaptive filter inspired by the functionality of the cerebellum. Recurrent loops between a feedback controller and the internal model allow our system to perform accurate binocular saccades and to create an implicit representation of the nearby space. Simulation results show that this recurrent architecture outperforms classical feedback error learning in terms of both accuracy and insensitivity to system parameters. The proposed approach was validated by implementing the framework on an anthropomorphic robotic head.

}, keywords = {Cerebellum, Gaussian process regression, Humanoid robotics, Sensorimotor transformation, stereo vision}, issn = {0921-8890}, doi = {10.1016/j.robot.2014.11.018}, url = {http://www.sciencedirect.com/science/article/pii/S092188901400311X}, author = {Marco Antonelli and Angel J Duran and Eris Chinellato and Angel P. del Pobil} } @proceedings {103, title = {Application of the Visuo-Oculomotor Transformation to Ballistic and Visually-Guided Eye Movements}, year = {2013}, author = {Marco Antonelli and Angel J Duran and Angel P. del Pobil} } @inbook {101, title = {Depth Estimation during Fixational Head Movements in a Humanoid Robot}, booktitle = {Computer Vision Systems}, series = {Lecture Notes in Computer Science}, volume = {7963}, year = {2013}, pages = {264-273}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {

Under natural viewing conditions, humans are not aware of continually performing small head and eye movements in the periods between voluntary relocations of gaze. It has recently been shown that these fixational head movements provide useful depth information in the form of parallax. Here, we replicate these coordinated head and eye movements in a humanoid robot and describe a method for extracting the resulting depth information. Proprioceptive signals are interpreted by means of a kinematic model of the robot to compute the velocity of the camera. The resulting signal is then optimally integrated with the optic flow to estimate depth in the scene. We present simulation results that validate the proposed approach.

}, isbn = {978-3-642-39401-0}, doi = {10.1007/978-3-642-39402-7_27}, url = {http://dx.doi.org/10.1007/978-3-642-39402-7_27}, author = {Marco Antonelli and Angel P. del Pobil and Rucci, Michele}, editor = {Chen, Mei and Leibe, Bastian and Neumann, Bernd} } @inbook {39, title = {Integration of Visuomotor Learning, Cognitive Grasping and Sensor-Based Physical Interaction in the UJI Humanoid Torso}, booktitle = {Designing Intelligent Robots: Reintegrating AI}, volume = {SS-13-04}, year = {2013}, pages = {6-11}, publisher = {AAAI}, organization = {AAAI}, isbn = {978-1-57735-601-1}, author = {Angel P. del Pobil and Angel J Duran and Marco Antonelli and Javier Felip and Antonio Morales and M. Prats and Eris Chinellato} } @inbook {21, title = {On-Line Learning of the Visuomotor Transformations on a Humanoid Robot}, booktitle = {Intelligent Autonomous Systems 12}, series = {Advances in Intelligent Systems and Computing}, volume = {193}, year = {2013}, pages = {853-861}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-642-33925-7}, doi = {10.1007/978-3-642-33926-4_82}, url = {http://dx.doi.org/10.1007/978-3-642-33926-4_82}, author = {Marco Antonelli and Eris Chinellato and Angel P. del Pobil}, editor = {Lee, Sukhan and Cho, Hyungsuck and Yoon, Kwang-Joon and Lee, Jangmyung} } @inbook {102, title = {Speeding-Up the Learning of Saccade Control}, booktitle = {Biomimetic and Biohybrid Systems}, series = {Lecture Notes in Computer Science}, volume = {8064}, year = {2013}, pages = {12-23}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-642-39801-8}, doi = {10.1007/978-3-642-39802-5_2}, url = {http://dx.doi.org/10.1007/978-3-642-39802-5_2}, author = {Marco Antonelli and Angel J Duran and Eris Chinellato and Angel P. del Pobil}, editor = {Lepora, Nathan F. and Mura, Anna and Krapp, Holger G. and Paul F. M. J. Verschure and Tony J. Prescott} } @conference {26, title = {Augmenting the Reachable Space in the NAO Humanoid Robot}, booktitle = {AAAI Workshops}, year = {2012}, abstract = {

Reaching for a target requires estimating the spatial position of the target and converting that position into a suitable arm-motor command. In the proposed framework, the location of the target is represented implicitly by the gaze direction of the robot and by the distance of the target. The NAO robot is equipped with two cameras, one looking ahead and one looking down, which constitute two independent head-centered coordinate systems. These head-centered frames of reference are converted into reaching commands by two neural networks. The weights of the networks are learned by moving the arm while gazing at the hand, using an on-line learning algorithm that maintains the covariance matrix of the weights. This work adapts a previously proposed model, which worked on a full humanoid robot torso, to the NAO, and is a step toward a more generic framework for the implicit representation of the peripersonal space in humanoid robots.

}, keywords = {autonomous learning, cues integration, humanoid robot, radial basis functions, recursive least square}, url = {http://www.aaai.org/ocs/index.php/WS/AAAIW12/paper/view/5231}, author = {Marco Antonelli and Beata J. Grzyb and Vicente Castell{\'o} and Angel P. del Pobil} } @inbook {22, title = {Integration of Static and Self-motion-Based Depth Cues for Efficient Reaching and Locomotor Actions}, booktitle = {Artificial Neural Networks and Machine Learning {\textendash} ICANN 2012}, series = {Lecture Notes in Computer Science}, volume = {7552}, year = {2012}, pages = {322-329}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, keywords = {depth cue integration, distance perception, embodied perception, reward-mediated learning}, isbn = {978-3-642-33268-5}, doi = {10.1007/978-3-642-33269-2_41}, url = {http://dx.doi.org/10.1007/978-3-642-33269-2_41}, author = {Beata J. Grzyb and Vicente Castell{\'o} and Marco Antonelli and Angel P. del Pobil}, editor = {Villa, Alessandro E. P. and Duch, W{\l}odzis{\l}aw and {\'E}rdi, P{\'e}ter and Masulli, Francesco and Palm, G{\"u}nther} } @inbook {27, title = {A Pilot Study on Saccadic Adaptation Experiments with Robots}, booktitle = {Biomimetic and Biohybrid Systems}, series = {Lecture Notes in Computer Science}, volume = {7375}, year = {2012}, pages = {83-94}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-642-31524-4}, doi = {10.1007/978-3-642-31525-1_8}, url = {http://dx.doi.org/10.1007/978-3-642-31525-1_8}, author = {Eris Chinellato and Marco Antonelli and Angel P. del Pobil}, editor = {Tony J. Prescott and Lepora, Nathan F. and Mura, Anna and Paul F. M. J. Verschure} } @inbook {25, title = {Plastic Representation of the Reachable Space for a Humanoid Robot}, booktitle = {From Animals to Animats 12}, series = {Lecture Notes in Computer Science}, volume = {7426}, year = {2012}, pages = {167-176}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-642-33092-6}, doi = {10.1007/978-3-642-33093-3_17}, url = {http://dx.doi.org/10.1007/978-3-642-33093-3_17}, author = {Marco Antonelli and Beata J. Grzyb and Vicente Castell{\'o} and Angel P. del Pobil}, editor = {Ziemke, Tom and Balkenius, Christian and Hallam, John} } @article {28, title = {Speeding up the log-polar transform with inexpensive parallel hardware: graphics units and multi-core architectures}, journal = {Journal of Real-Time Image Processing}, year = {2012}, pages = {1-18}, keywords = {CUDA, Graphics processors, Log-polar mapping, Multi-core CPUs, Real-time computer vision, Shaders}, issn = {1861-8200}, doi = {10.1007/s11554-012-0281-6}, url = {http://dx.doi.org/10.1007/s11554-012-0281-6}, author = {Marco Antonelli and Igual, Francisco D. and Ramos, Francisco and V. J. Traver} } @conference {24, title = {Implicit mapping of the peripersonal space of a humanoid robot}, booktitle = {Computational Intelligence, Cognitive Algorithms, Mind, and Brain (CCMB), 2011 IEEE Symposium on}, year = {2011}, abstract = {

In this work, taking inspiration from primate visuomotor mechanisms, a humanoid robot builds a sensorimotor map of the environment that is configured and trained through gazing and reaching movements. The map is accessed and modified by two types of information, retinotopic (visual) and proprioceptive (eye and arm movements), and constitutes both a knowledge of the environment and a sensorimotor code for performing movements and evaluating their outcome. By performing direct and inverse transformations between stereo vision, oculomotor, and joint-space representations, the robot learns to perform gazing and reaching movements, which are in turn employed to update the sensorimotor knowledge of the environment. Thus, the robot keeps learning during its normal behavior, interacting with the world and contextually updating its representation of the world itself. Such a representation is never made explicit; rather, it constitutes a visuomotor awareness of space that emerges through the interaction of the agent with its surroundings.

}, keywords = {Head, humanoid robot, joint space representation, Joints, Neurons, oculomotor, peripersonal space, primate visuomotor mechanisms, proprioceptive information, retinotopic information, Robot kinematics, Robot sensing systems, robot vision, Robotics, sensorimotor code, sensorimotor knowledge, stereo image processing, stereo vision, Visualization, visuomotor awareness}, doi = {10.1109/CCMB.2011.5952119}, author = {Marco Antonelli and Eris Chinellato and Angel P. del Pobil} } @article {23, title = {Implicit Sensorimotor Mapping of the Peripersonal Space by Gazing and Reaching}, journal = {IEEE Transactions on Autonomous Mental Development}, volume = {3}, year = {2011}, pages = {43-53}, abstract = {

Primates often perform coordinated eye and arm movements, contextually fixating on and reaching towards nearby objects. This combination of looking and reaching to the same target is used by infants to establish an implicit visuomotor representation of the peripersonal space, useful for both oculomotor and arm motor control. In this work, taking inspiration from such behavior and from primate visuomotor mechanisms, a shared sensorimotor map of the environment, built on a radial basis function framework, is configured and trained by the coordinated control of eye and arm movements. Computational results confirm that the approach is especially suitable for the problem at hand and for implementation on a real humanoid robot. By exploratory gazing and reaching actions, either free or goal-based, the artificial agent learns to perform direct and inverse transformations between stereo vision, oculomotor, and joint-space representations. The integrated sensorimotor map, which allows the peripersonal space to be represented contextually through different visual and motor parameters, is never made explicit; rather, it emerges through the interaction of the agent with the environment.

}, keywords = {arm motor control, arm movement control, artificial agent, control engineering computing, eye movement control, Eye{\textendash}arm coordination, gazing action, humanoid robot, implicit sensorimotor mapping, implicit visuomotor representation, joint-space representation, motion control, oculomotor control, peripersonal space, radial basis function framework, radial basis function networks, reaching actions, Robotics, self-supervised learning, shared sensorimotor map, spatial awareness, stereo vision}, issn = {1943-0604}, doi = {10.1109/TAMD.2011.2106781}, author = {Eris Chinellato and Marco Antonelli and Beata J. Grzyb and Angel P. del Pobil} }
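
Several of the entries above (26, 25, and 23 in particular) describe the same learning scheme: a radial basis function map between gaze and arm coordinates whose readout weights are learned on-line by a recursive least squares rule that maintains the covariance matrix of the weights. The following minimal sketch illustrates that scheme under assumed details; the class name RBFMapRLS, the grid of Gaussian centers, the shared width, and the toy gaze-to-arm mapping are hypothetical illustrations, not code from the papers.

# Illustrative sketch only -- not the authors' implementation. Gaussian RBF
# features of a gaze signal are mapped to an arm command by a linear readout
# trained with recursive least squares (RLS), which maintains the weight
# covariance matrix at every update, as the abstracts above describe.
import numpy as np

class RBFMapRLS:
    def __init__(self, centers, width, out_dim, forgetting=1.0):
        self.centers = centers            # (n_centers, in_dim) fixed RBF centers
        self.width = width                # shared Gaussian width (assumed)
        n = centers.shape[0]
        self.W = np.zeros((n, out_dim))   # linear readout weights
        self.P = np.eye(n) * 1e3          # weight covariance matrix (RLS state)
        self.lam = forgetting             # forgetting factor, 1.0 = none

    def features(self, x):
        # Gaussian activation of each center for input x.
        d2 = np.sum((self.centers - x) ** 2, axis=1)
        return np.exp(-d2 / (2.0 * self.width ** 2))

    def predict(self, x):
        return self.features(x).dot(self.W)

    def update(self, x, y):
        # Standard recursive-least-squares step on the RBF features.
        phi = self.features(x)
        Pphi = self.P.dot(phi)
        k = Pphi / (self.lam + phi.dot(Pphi))   # gain vector
        err = y - phi.dot(self.W)               # prediction error
        self.W += np.outer(k, err)
        self.P = (self.P - np.outer(k, Pphi)) / self.lam

# Toy usage: learn a 2-D gaze-direction -> arm-command mapping by motor
# babbling, i.e., move the arm, gaze at the hand, and use the observed pair
# as a self-supervised sample (dimensions and mapping are made up).
rng = np.random.default_rng(0)
grid = np.linspace(-1, 1, 7)
centers = np.array([[cx, cy] for cx in grid for cy in grid])
net = RBFMapRLS(centers, width=0.4, out_dim=2)
true_map = lambda g: np.array([0.8 * g[0] - 0.2 * g[1], 0.5 * g[1]])
for _ in range(500):
    gaze = rng.uniform(-1, 1, size=2)
    net.update(gaze, true_map(gaze))
print(net.predict(np.array([0.3, -0.5])), true_map(np.array([0.3, -0.5])))

Because the covariance matrix P is carried along, learning remains incremental and sample-efficient, which is what lets the robots in these papers keep refining the map during normal gazing and reaching behavior rather than in a separate batch training phase.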