@conference {347, title = {Adaptive Saccade Controller Inspired by the Primates{\textquoteright} Cerebellum}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA)}, year = {2015}, month = may, address = {Seattle, Washington, USA}, abstract = {
Saccades are fast eye movements that allow humans and robots to bring the visual target in the center of the visual field. Saccades are open loop with respect to the vision system, thus their execution require a precise knowledge of the internal model of the oculomotor system. In this work, we modeled the saccade control, taking inspiration from the recurrent loops between the cerebellum and the brainstem. In this model, the brainstem acts as a fixed-inverse model of the oculomotor system, while the cerebellum acts as an adaptive element that learns the internal model of the oculomotor system. The adaptive filter is implemented using a state-of-the- art neural network, called I-SSGPR. The proposed approach, namely recurrent architecture, was validated through experiments performed both in simulation and on an antropomorphic robotic head. Moreover, we compared the recurrent architecture with another model of the cerebellum, the feedback error learning. Achieved results show that the recurrent architecture outperforms the feedback error learning in terms of accuracy and insensitivity to the choice of the feedback controller.
11:20-11:24, Paper FrA2T5.6
}, keywords = {Biologically-Inspired Robots, Control Architectures and Programming, Learning and Adaptive Systems}, author = {Antonelli, Marco and Angel J Duran and Eris Chinellato and Angel P. del Pobil} } @inbook {315, title = {Animal Social Behaviour: A Visual Analysis}, booktitle = {From Animals to Animats 13}, series = {Lecture Notes in Computer Science}, volume = {8575}, year = {2014}, pages = {320--327}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, isbn = {978-3-319-08863-1}, doi = {10.1007/978-3-319-08864-8_31}, url = {http://dx.doi.org/10.1007/978-3-319-08864-8_31}, author = {Martinez-Martin, Ester and Angel P. del Pobil}, editor = {Angel P. del Pobil and Eris Chinellato and Martinez-Martin, Ester and Hallam, John and Enric Cervera and Antonio Morales} } @article {296, title = {A hierarchical system for a distributed representation of the peripersonal space of a humanoid robot}, journal = {IEEE Transactions on Autonomous Mental Development}, year = {2014}, pages = {1{\textendash}15}, doi = {10.1109/TAMD.2014.2332875}, author = {Marco Antonelli and Gibaldi, Agostino and Beuth, Frederik and Angel J Duran and Canessa, Andrea and Chessa, Manuela and Solari, F and Angel P. del Pobil and Hamker, F and Eris Chinellato and Sabatini, SP} } @article {Antonelli2014, title = {Learning the visual-oculomotor transformation: Effects on saccade control and space representation}, journal = {Robotics and Autonomous Systems}, year = {2014}, abstract = {

Active eye movements can be exploited to build a visuomotor representation of the surrounding environment. Maintaining and improving such representation requires to update the internal model involved in the generation of eye movements. From this perspective, action and perception are thus tightly coupled and interdependent. In this work, we encoded the internal model for oculomotor control with an adaptive filter inspired by the functionality of the cerebellum. Recurrent loops between a feed-back controller and the internal model allow our system to perform accurate binocular saccades and create an implicit representation of the nearby space. Simulations results show that this recurrent architecture outperforms classical feedback-error-learning in terms of both accuracy and sensitivity to system parameters. The proposed approach was validated implementing the framework on an anthropomorphic robotic head.

}, keywords = {Cerebellum, Gaussian process regression, Humanoid robotics, Sensorimotor transformation, stereo vision}, issn = {09218890}, doi = {10.1016/j.robot.2014.11.018}, url = {http://www.sciencedirect.com/science/article/pii/S092188901400311X}, author = {Marco Antonelli and Angel J Duran and Eris Chinellato and Angel P. del Pobil} } @inbook {39, title = {Integration of Visuomotor Learning, Cognitive Grasping and Sensor-Based Physical Interaction in the UJI Humanoid Torso}, booktitle = {Designing Intelligent Robots: Reintegrating AI}, volume = {SS-13-04}, year = {2013}, pages = {6--11}, publisher = {AAAI}, organization = {AAAI}, isbn = {978-1-57735-601-1}, author = {Angel P. del Pobil and Angel J Duran and Marco Antonelli and Javier Felip and Antonio Morales and M. Prats and Eris Chinellato} } @inbook {21, title = {On-Line Learning of the Visuomotor Transformations on a Humanoid Robot}, booktitle = {Intelligent Autonomous Systems 12}, series = {Advances in Intelligent Systems and Computing}, volume = {193}, year = {2013}, pages = {853--861}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-642-33925-7}, doi = {10.1007/978-3-642-33926-4_82}, url = {http://dx.doi.org/10.1007/978-3-642-33926-4_82}, author = {Marco Antonelli and Eris Chinellato and Angel P. del Pobil}, editor = {Lee, Sukhan and Cho, Hyungsuck and Yoon, Kwang-Joon and Lee, Jangmyung} } @inbook {102, title = {Speeding-Up the Learning of Saccade Control}, booktitle = {Biomimetic and Biohybrid Systems}, series = {Lecture Notes in Computer Science}, volume = {8064}, year = {2013}, pages = {12--23}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-642-39801-8}, doi = {10.1007/978-3-642-39802-5_2}, url = {http://dx.doi.org/10.1007/978-3-642-39802-5_2}, author = {Marco Antonelli and Angel J Duran and Eris Chinellato and Angel P. del Pobil}, editor = {Lepora, Nathan F.
and Mura, Anna and Krapp, Holger G. and Paul F. M. J. Verschure and Tony J. Prescott} } @inbook {27, title = {A Pilot Study on Saccadic Adaptation Experiments with Robots}, booktitle = {Biomimetic and Biohybrid Systems}, series = {Lecture Notes in Computer Science}, volume = {7375}, year = {2012}, pages = {83--94}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-642-31524-4}, doi = {10.1007/978-3-642-31525-1_8}, url = {http://dx.doi.org/10.1007/978-3-642-31525-1_8}, author = {Eris Chinellato and Marco Antonelli and Angel P. del Pobil}, editor = {Tony J. Prescott and Lepora, Nathan F. and Mura, Anna and Paul F. M. J. Verschure} } @article {93, title = {Pose Estimation Through Cue Integration: A Neuroscience-Inspired Approach}, journal = {IEEE Transactions on Systems, Man, and Cybernetics, Part B}, volume = {42}, number = {2}, year = {2012}, pages = {530{\textendash}538}, author = {Eris Chinellato and Beata J. Grzyb and Angel P. del Pobil} } @article {63, title = {Pose Estimation Through Cue Integration: A Neuroscience-Inspired Approach}, journal = {IEEE Transactions on Systems, Man, and Cybernetics, Part B: Cybernetics}, volume = {42}, number = {2}, year = {2012}, pages = {530--538}, abstract = {

The aim of this paper is to improve the skills of robotic systems in their interaction with nearby objects. The basic idea is to enhance visual estimation of objects in the world through the merging of different visual estimators of the same stimuli. A neuroscience-inspired model of stereoptic and perspective orientation estimators, merged according to different criteria, is implemented on a robotic setup and tested in different conditions. Experimental results suggest that the integration of multiple monocular and binocular cues can make robot sensory systems more reliable and versatile. The same results, compared with simulations and data from human studies, show that the model is able to reproduce some well-recognized neuropsychological effects.

}, keywords = {binocular cue integration, Biological system modeling, Cameras, Computational modeling, Computer Simulation, Computer-Assisted, Cybernetics, Depth Perception, Estimation, Grasping, grippers, Humans, Image Processing, Intelligent robots, Models, monocular cue integration, Neurological, neuropsychological effects, neuroscience-inspired model, object estimation, perspective orientation estimator, pose estimation, Reliability, Reproducibility of Results, robot sensory systems, robot vision, robot vision systems, Robotics, Robots, stereo image processing, stereo vision, stereoptic orientation estimator, Task Performance and Analysis, visual estimation, visual perception, Visualization}, issn = {1083-4419}, doi = {10.1109/TSMCB.2011.2168952}, author = {Eris Chinellato and Beata J. Grzyb and Angel P. del Pobil} } @article {64, title = {The Dorso-medial visual stream: From neural activation to sensorimotor interaction}, journal = {Neurocomputing}, volume = {74}, year = {2011}, pages = {1203--1212}, abstract = {

The posterior parietal cortex of primates, and more exactly areas of the dorso-medial visual stream, are able to encode the peripersonal space of a subject in a way suitable for gathering visual information and contextually performing purposeful gazing and arm reaching movements. Such sensorimotor knowledge of the environment is not explicit, but rather emerges through the interaction of the subject with nearby objects. In this work, single-cell data regarding the activation of primate dorso-medial stream neurons during gazing and reaching movements is studied, with the purpose of discovering meaningful pattern useful for modeling purposes. The outline of a model of the mechanisms which allow humans and other primates to build dynamical representations of their peripersonal space through active interaction with nearby objects is proposed, and a detailed description of how to employ the results of the data analysis in the model is offered. The application of the model to robotic systems will allow artificial agents to improve their skills in exploring the nearby space, and will at the same time constitute a way to validate modeling assumptions.

}, keywords = {Bio-inspired systems}, issn = {0925-2312}, doi = {10.1016/j.neucom.2010.07.029}, url = {http://www.sciencedirect.com/science/article/pii/S0925231210004212}, author = {Eris Chinellato and Beata J. Grzyb and Nicoletta Marzocchi and A. Bosco and Patrizia Fattori and Angel P. del Pobil} } @conference {32, title = {Hierarchical object recognition inspired by primate brain mechanisms}, booktitle = {Computational Intelligence for Visual Intelligence (CIVI), 2011 IEEE Workshop on}, year = {2011}, keywords = {brain, Estimation, Grasping, hierarchical object recognition, Image color analysis, multimodal integration, mutual projection, neurophysiology, neuroscience hypothesis, object recognition, object weight estimation, primate brain mechanism, real robot, robot vision, Robots, Shape, visual processing, Visualization, visuomotor behavior}, doi = {10.1109/CIVI.2011.5955017}, author = {Eris Chinellato and Javier Felip and Beata J. Grzyb and Antonio Morales and Angel P. del Pobil} } @conference {24, title = {Implicit mapping of the peripersonal space of a humanoid robot}, booktitle = {Computational Intelligence, Cognitive Algorithms, Mind, and Brain (CCMB), 2011 IEEE Symposium on}, year = {2011}, abstract = {

In this work, taking inspiration from primate visuomotor mechanisms, a humanoid robot is able to build a sensorimotor map of the environment that is configured and trained through gazing and reaching movements. The map is accessed and modified by two types of information: retinotopic (visual) and proprioceptive (eye and arm movements), and constitutes both a knowledge of the environment and a sensorimotor code for performing movements and evaluate their outcome. By performing direct and inverse transformations between stereo vision, oculomotor and joint-space representations, the robot learns to perform gazing and reaching movements, which are in turn employed to update the sensorimotor knowledge of the environment. Thus, the robot keeps learning during its normal behavior, by interacting with the world and contextually updating its representation of the world itself. Such representation is never made explicit, but rather constitutes a visuomotor awareness of the space which emerges thanks to the interaction of the agent with the surrounding space.

}, keywords = {Head, humanoid robot, joint space representation, Joints, Neurons, oculomotor, peripersonal space, primate visuomotor mechanisms, proprioceptive information, retinotopic information, Robot kinematics, Robot sensing systems, robot vision, Robotics, sensorimotor code, sensorimotor knowledge, stereo image processing, stereo vision, Visualization, visuomotor awareness}, doi = {10.1109/CCMB.2011.5952119}, author = {Marco Antonelli and Eris Chinellato and Angel P. del Pobil} } @article {23, title = {Implicit Sensorimotor Mapping of the Peripersonal Space by Gazing and Reaching}, journal = {IEEE Transactions on Autonomous Mental Development}, volume = {3}, year = {2011}, pages = {43--53}, abstract = {

Primates often perform coordinated eye and arm movements, contextually fixating and reaching towards nearby objects. This combination of looking and reaching to the same target is used by infants to establish an implicit visuomotor representation of the peripersonal space, useful for both oculomotor and arm motor control. In this work, taking inspiration from such behavior and from primate visuomotor mechanisms, a shared sensorimotor map of the environment, built on a radial basis function framework, is configured and trained by the coordinated control of eye and arm movements. Computational results confirm that the approach seems especially suitable for the problem at hand, and for its implementation on a real humanoid robot. By exploratory gazing and reaching actions, either free or goal-based, the artificial agent learns to perform direct and inverse transformations between stereo vision, oculomotor, and joint-space representations. The integrated sensorimotor map that allows to contextually represent the peripersonal space through different vision and motor parameters is never made explicit, but rather emerges thanks to the interaction of the agent with the environment.

}, keywords = {arm motor control, arm movement control, artificial agent, control engineering computing, eye movement control, Eye{\textendash}arm coordination, gazing action, humanoid robot, implicit sensorimotor mapping, implicit visuomotor representation, joint-space representation, motion control, oculomotor control, peripersonal space, radial basis function framework, radial basis function networks, reaching actions, Robotics, self-supervised learning, shared sensorimotor map, spatial awareness, stereo vision}, issn = {1943-0604}, doi = {10.1109/TAMD.2011.2106781}, author = {Eris Chinellato and Marco Antonelli and Beata J. Grzyb and Angel P. del Pobil} } @article {89, title = {A 3D Grasping System Based on Multimodal Visual and Tactile Processing}, journal = {Industrial Robot Journal}, volume = {36}, year = {2009}, pages = {365--369}, doi = {10.1108/01439910910957138}, author = {Beata J. Grzyb and Eris Chinellato and Antonio Morales and Angel P. del Pobil} } @inbook {91, title = {Eye-Hand Coordination for Reaching in Dorsal Stream Area {V6A}: Computational Lessons}, booktitle = {Bioinspired Applications in Artificial and Natural Computation, LNCS 5602}, year = {2009}, pages = {304{\textendash}313}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-02267-8_33}, author = {Eris Chinellato and Beata J. Grzyb and Nicoletta Marzocchi and A. Bosco and Patrizia Fattori and Angel P. del Pobil}, editor = {J. Mira and J. M. Ferrandez and J.R. Alvarez S{\'a}nchez and F. de la Paz and J. Toledo} } @conference {84, title = {Facial expression recognition based on Liquid State Machines built of alternative neuron models}, booktitle = {Proc. International Joint Conference on Neural Networks IJCNN 2009}, year = {2009}, doi = {10.1109/IJCNN.2009.5179025}, author = {Beata J. Grzyb and Eris Chinellato and Wojcik, G. M. and Kaminski, W.
A.} } @inbook {92, title = {Toward an Integrated Visuomotor Representation of the Peripersonal Space}, booktitle = {Bioinspired Applications in Artificial and Natural Computation, LNCS 5602}, year = {2009}, pages = {314{\textendash}323}, doi = {10.1007/978-3-642-02267-8_34}, author = {Eris Chinellato and Beata J. Grzyb and Patrizia Fattori and Angel P. del Pobil}, editor = {J. Mira and J. M. Ferrandez and J.R. Alvarez S{\'a}nchez and F. de la Paz and J. Toledo} } @conference {83, title = {Which model to use for the Liquid State Machine?}, booktitle = {Proc. International Joint Conference on Neural Networks IJCNN 2009}, year = {2009}, doi = {10.1109/IJCNN.2009.5178822}, author = {Beata J. Grzyb and Eris Chinellato and Wojcik, G. M. and Kaminski, W. A.} } @article {176, title = {Biologically-inspired 3D grasp synthesis based on visual exploration}, journal = {Autonomous Robots}, volume = {25}, year = {2008}, month = jan, pages = {59--70}, abstract = {

}, issn = {0929-5593}, doi = {10.1007/s10514-008-9086-7}, author = {Gabriel Recatala and Eris Chinellato and Angel P. del Pobil and Y. Mezouar and Philippe Martinet} } @conference {90, title = {Brain Mechanisms for Robotic Object Pose Estimation}, year = {2008}, doi = {10.1109/IJCNN.2008.4634262}, author = {Eris Chinellato and Beata J. Grzyb and Angel P. del Pobil} } @conference {85, title = {Robust grasping of 3{D} objects with stereo vision and tactile feedback}, booktitle = {Intl. Conf. on Climbing and Walking Robots, CLAWAR}, year = {2008}, address = {Coimbra, Portugal}, author = {Beata J. Grzyb and Eris Chinellato and Antonio Morales and Angel P. del Pobil} } @article {268, title = {Symbol grounding through robotic manipulation in cognitive systems}, journal = {Robotics and Autonomous Systems}, volume = {55}, number = {12}, year = {2007}, note = {Times Cited: 3 Morales, Antonio/C-4793-2012 Multidisciplinary Conference on Perception and Intelligence (CMPI-2006) Jul 10-14, 2006 Albacete, SPAIN 3}, pages = {851--859}, type = {Journal Article}, issn = {0921-8890}, doi = {10.1016/j.robot.2007.07.011}, author = {Eris Chinellato and Antonio Morales and Enric Cervera and Angel P. del Pobil} } @inbook {188, title = {A visual application for studying cooperative behaviours}, booktitle = {Visualization, Imaging and Image Processing}, year = {2007}, pages = {70{\textendash}75}, publisher = {Acta Press}, organization = {Acta Press}, chapter = {A visual application for studying cooperative behaviours}, isbn = {978-0-88986-691-1}, author = {Ester Martinez-Martin and Eris Chinellato and Angel P. del Pobil} } @conference {169, title = {3D Grasp Synthesis Based on a Visual Cortex Model}, booktitle = {The First IEEE/RAS-EMBS International Conference on Biomedical Robotics and Biomechatronics, 2006. BioRob 2006.}, year = {2006}, month = feb, publisher = {IEEE}, organization = {IEEE}, address = {Pisa, Italy}, abstract = {

}, isbn = {1-4244-0040-6}, doi = {10.1109/BIOROB.2006.1639078}, author = {Gabriel Recatala and Eris Chinellato and Angel P. del Pobil and Y. Mezouar and Philippe Martinet} } @conference {170, title = {3D Grasp Synthesis Based on Object Exploration}, booktitle = {2006 IEEE International Conference on Robotics and Biomimetics}, year = {2006}, month = dec, publisher = {IEEE}, organization = {IEEE}, address = {Kunming, China}, abstract = {

}, isbn = {1-4244-0570-X}, doi = {10.1109/ROBIO.2006.340076}, author = {Eris Chinellato and Gabriel Recatala and Angel P. del Pobil and Y. Mezouar and Philippe Martinet} } @conference {171, title = {Sistema completo de manipulaci{\'o}n aut{\'o}noma basado en fusi{\'o}n de informaci{\'o}n sensorial y t{\'e}cnicas de aprendizaje orientado a la rob{\'o}tica de servicios}, booktitle = {2es Jornades de Recerca en Autom{\`a}tica, Visi{\'o} i Rob{\`o}tica}, year = {2006}, month = jul, abstract = {

}, isbn = {84-7653-885-5}, author = {P.J. Sanz and Angel P. del Pobil and Antonio Morales and Gabriel Recatala and Eris Chinellato and M. Prats and Ester Martinez-Martin} } @conference {78, title = {An active learning approach for assessing robot grasp reliability}, booktitle = {Intelligent Robots and Systems, 2004. (IROS 2004). Proceedings. 2004 IEEE/RSJ International Conference on}, year = {2004}, abstract = {

Learning techniques in robotic grasping applications have usually been concerned with the way a hand approaches to an object, or with improving the motor control of manipulation actions. We present an active learning approach devised to face the problem of visually-guided grasp selection. We want to choose the best hand configuration for grasping a particular object using only visual information. Experimental data from real grasping actions is used, and the experience gathering process is driven by an on-line estimation of the reliability assessment capabilities of the system. The goal is to improve the selection skills of the grasping system, minimizing at the same time the cost and duration of the learning process.

}, keywords = {active learning approach, Costs, Grasping, Haptic interfaces, Intelligent robots, Laboratories, learning (artificial intelligence), manipulators, motor control, Motor drives, online estimation, Reliability, reliability assessment capabilities, robot grasp reliability, Robot sensing systems, Torso, Training data, Uncertainty, visually-guided grasp selection}, doi = {10.1109/IROS.2004.1389399}, author = {Antonio Morales and Eris Chinellato and Fagg, A.H. and Angel P. del Pobil} } @conference {77, title = {Experimental prediction of the performance of grasp tasks from visual features}, booktitle = {Intelligent Robots and Systems, 2003. (IROS 2003). Proceedings. 2003 IEEE/RSJ International Conference on}, year = {2003}, abstract = {

This paper deals with visually guided grasping of unmodeled objects for robots which exhibit an adaptive behavior based on their previous experiences. Nine features are proposed to characterize three-finger grasps. They are computed from the object image and the kinematics of the hand. Real experiments on a humanoid robot with a Barrett hand are carried out to provide experimental data. This data is employed by a classification strategy, based on the k-nearest neighbour estimation rule, to predict the reliability of a grasp configuration in terms of five different performance classes. Prediction results suggest the methodology is adequate.

}, keywords = {adaptive behavior, Barrett hand, dexterous manipulators, estimation rule, feature extraction, Geometry, grasp configuration, Grasping, hand kinematics, humanoid robot, Humans, Image reconstruction, Intelligent robots, Kinematics, Laboratories, manipulator kinematics, object image, performance prediction, prediction theory, Reliability, Robot sensing systems, robot vision, Robustness, Service robots, three finger grasps, unmodeled objects, visual features, visually guided grasping}, doi = {10.1109/IROS.2003.1249685}, author = {Antonio Morales and Eris Chinellato and Fagg, A.H. and Angel P. del Pobil} }