@article{63,
  title    = {Pose Estimation Through Cue Integration: A Neuroscience-Inspired Approach},
  journal  = {IEEE Transactions on Systems, Man, and Cybernetics, Part B: Cybernetics},
  volume   = {42},
  year     = {2012},
  pages    = {530--538},
  abstract = {The aim of this paper is to improve the skills of robotic systems in their interaction with nearby objects. The basic idea is to enhance visual estimation of objects in the world through the merging of different visual estimators of the same stimuli. A neuroscience-inspired model of stereoptic and perspective orientation estimators, merged according to different criteria, is implemented on a robotic setup and tested in different conditions. Experimental results suggest that the integration of multiple monocular and binocular cues can make robot sensory systems more reliable and versatile. The same results, compared with simulations and data from human studies, show that the model is able to reproduce some well-recognized neuropsychological effects.},
  keywords = {binocular cue integration, Biological system modeling, Cameras, Computational modeling, Computer Simulation, Cybernetics, Depth Perception, Estimation, Grasping, grippers, Humans, Image Processing, Computer-Assisted, Intelligent robots, Models, Neurological, monocular cue integration, neuropsychological effects, neuroscience-inspired model, object estimation, perspective orientation estimator, pose estimation, Reliability, Reproducibility of Results, robot sensory systems, robot vision, robot vision systems, Robotics, Robots, stereo image processing, stereo vision, stereoptic orientation estimator, Task Performance and Analysis, visual estimation, visual perception, Visualization},
  issn     = {1083-4419},
  doi      = {10.1109/TSMCB.2011.2168952},
  author   = {Eris Chinellato and Beata J. Grzyb and Angel P. del Pobil}
}
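
The merging criteria in the entry above are described only at the level of the abstract. A common reference point for this kind of integration is the maximum-likelihood rule, in which each estimator is weighted by its reliability (inverse variance); the Python sketch below illustrates that rule for a stereoptic and a perspective orientation estimate. The function name and the numeric variances are illustrative assumptions, not the paper's actual model.

def integrate_cues(theta_stereo, var_stereo, theta_persp, var_persp):
    """Fuse two orientation estimates (radians) by inverse-variance
    weighting: the maximum-likelihood rule for independent Gaussian cues."""
    w_s = 1.0 / var_stereo
    w_p = 1.0 / var_persp
    theta = (w_s * theta_stereo + w_p * theta_persp) / (w_s + w_p)
    var = 1.0 / (w_s + w_p)  # the fused estimate is more reliable than either cue
    return theta, var

# Example: stereo is assumed noisier here, so the perspective cue dominates.
theta, var = integrate_cues(theta_stereo=0.52, var_stereo=0.04,
                            theta_persp=0.48, var_persp=0.01)
print(theta, var)  # 0.488, 0.008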

@conference{32,
  title     = {Hierarchical object recognition inspired by primate brain mechanisms},
  booktitle = {2011 IEEE Workshop on Computational Intelligence for Visual Intelligence (CIVI)},
  year      = {2011},
  keywords  = {brain, Estimation, Grasping, hierarchical object recognition, Image color analysis, multimodal integration, mutual projection, neurophysiology, neuroscience hypothesis, object recognition, object weight estimation, primate brain mechanism, real robot, robot vision, Robots, Shape, visual processing, Visualization, visuomotor behavior},
  doi       = {10.1109/CIVI.2011.5955017},
  author    = {Eris Chinellato and Javier Felip and Beata J. Grzyb and Antonio Morales and Angel P. del Pobil}
}

@conference{24,
  title     = {Implicit mapping of the peripersonal space of a humanoid robot},
  booktitle = {2011 IEEE Symposium on Computational Intelligence, Cognitive Algorithms, Mind, and Brain (CCMB)},
  year      = {2011},
  abstract  = {In this work, taking inspiration from primate visuomotor mechanisms, a humanoid robot builds a sensorimotor map of the environment that is configured and trained through gazing and reaching movements. The map is accessed and modified by two types of information, retinotopic (visual) and proprioceptive (eye and arm movements), and constitutes both knowledge of the environment and a sensorimotor code for performing movements and evaluating their outcomes. By performing direct and inverse transformations between stereo-vision, oculomotor, and joint-space representations, the robot learns to perform gazing and reaching movements, which are in turn employed to update its sensorimotor knowledge of the environment. The robot thus keeps learning during its normal behavior, interacting with the world and contextually updating its representation of the world itself. This representation is never made explicit; rather, it constitutes a visuomotor awareness of space that emerges through the agent's interaction with its surroundings.},
  keywords  = {Head, humanoid robot, joint space representation, Joints, Neurons, oculomotor, peripersonal space, primate visuomotor mechanisms, proprioceptive information, retinotopic information, Robot kinematics, Robot sensing systems, robot vision, Robotics, sensorimotor code, sensorimotor knowledge, stereo image processing, stereo vision, Visualization, visuomotor awareness},
  doi       = {10.1109/CCMB.2011.5952119},
  author    = {Marco Antonelli and Eris Chinellato and Angel P. del Pobil}
}
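
The direct and inverse visuomotor transformations in the entry above are learned through the robot's own gazing and reaching. As a loose illustration of that idea (not the authors' architecture), the sketch below trains a linear map from gaze direction to arm joint angles with an online delta rule: after each reach, proprioceptive feedback corrects the map, so the representation keeps improving during normal behavior. All dimensions, names, and the learning rate are assumptions made for the example.

import numpy as np

class SensorimotorMap:
    """Illustrative online-trained map from gaze direction (pan, tilt,
    vergence) to arm joint angles; a linear model stands in for whatever
    function approximator the real system uses."""

    def __init__(self, gaze_dim=3, joint_dim=4, lr=0.1):
        self.W = np.zeros((joint_dim, gaze_dim + 1))  # +1 column for a bias term
        self.lr = lr

    def _features(self, gaze):
        return np.append(gaze, 1.0)

    def reach_command(self, gaze):
        # Direct transformation: predict joint angles for the fixated target.
        return self.W @ self._features(gaze)

    def update(self, gaze, joints_achieved):
        # Inverse information: the joint angles that actually brought the
        # hand to the fixated point (proprioception) correct the map.
        x = self._features(gaze)
        error = joints_achieved - self.W @ x
        self.W += self.lr * np.outer(error, x)

# One gaze-then-reach episode; every such episode refines the mapping.
m = SensorimotorMap()
gaze = np.array([0.1, -0.2, 0.3])
cmd = m.reach_command(gaze)                          # predicted joint angles
achieved = cmd + np.array([0.05, 0.0, -0.02, 0.01])  # pretend proprioceptive outcome
m.update(gaze, achieved)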

@conference{77,
  title     = {Experimental prediction of the performance of grasp tasks from visual features},
  booktitle = {Proceedings of the 2003 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2003)},
  year      = {2003},
  abstract  = {This paper deals with visually guided grasping of unmodeled objects by robots that exhibit an adaptive behavior based on their previous experiences. Nine features are proposed to characterize three-finger grasps; they are computed from the object image and the kinematics of the hand. Real experiments on a humanoid robot with a Barrett hand are carried out to provide experimental data. These data are employed by a classification strategy, based on the k-nearest-neighbour estimation rule, to predict the reliability of a grasp configuration in terms of five different performance classes. Prediction results suggest that the methodology is adequate.},
  keywords  = {adaptive behavior, Barrett hand, dexterous manipulators, estimation rule, feature extraction, Geometry, grasp configuration, Grasping, hand kinematics, humanoid robot, Humans, Image reconstruction, Intelligent robots, Kinematics, Laboratories, manipulator kinematics, object image, performance prediction, prediction theory, Reliability, Robot sensing systems, robot vision, Robustness, Service robots, three finger grasps, unmodeled objects, visual features, visually guided grasping},
  doi       = {10.1109/IROS.2003.1249685},
  author    = {Antonio Morales and Eris Chinellato and Andrew H. Fagg and Angel P. del Pobil}
}
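
The entry above names its classifier explicitly: a k-nearest-neighbour vote over nine grasp features, mapping to five performance classes. A generic version of that rule is sketched below in plain Python; the feature values, labels, and the choice k=3 are invented purely to exercise the function, not taken from the paper's data.

from collections import Counter
import math

def knn_predict(query, experiences, k=3):
    """Predict a performance class for a grasp feature vector by majority
    vote among the k nearest stored experiences (Euclidean distance)."""
    nearest = sorted(experiences, key=lambda ex: math.dist(ex[0], query))[:k]
    votes = Counter(label for _, label in nearest)
    return votes.most_common(1)[0][0]

# Each stored experience: (nine-dimensional feature vector, class 1 (worst) to 5 (best)).
experiences = [
    ((0.1, 0.9, 0.3, 0.2, 0.8, 0.5, 0.1, 0.4, 0.6), 5),
    ((0.2, 0.8, 0.4, 0.3, 0.7, 0.5, 0.2, 0.4, 0.5), 5),
    ((0.9, 0.1, 0.8, 0.9, 0.2, 0.3, 0.9, 0.8, 0.1), 1),
    ((0.8, 0.2, 0.7, 0.8, 0.3, 0.4, 0.8, 0.7, 0.2), 2),
    ((0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5), 3),
]
print(knn_predict((0.15, 0.85, 0.35, 0.25, 0.75, 0.5, 0.15, 0.4, 0.55), experiences))
# -> 5 (two of the three nearest experiences are class 5)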