BibTeX format
@inproceedings{Zambelli:2016:10.1109/IROS.2016.7759582,
author = {Zambelli, M. and Demiris, Y.},
booktitle = {2016 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
doi = {10.1109/IROS.2016.7759582},
publisher = {IEEE},
title = {Multimodal Imitation using Self-learned Sensorimotor Representations},
url = {http://dx.doi.org/10.1109/IROS.2016.7759582},
year = {2016}
}
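If you need this record in a script rather than a reference manager, it can be parsed directly. A minimal sketch, assuming the third-party bibtexparser package (v1 API) and that the entry above has been saved as reference.bib; both the package choice and the filename are assumptions, not part of the record:

# Minimal sketch: reading the BibTeX record above with the third-party
# bibtexparser package (v1 API). The filename reference.bib is an assumption.
import bibtexparser

with open("reference.bib", encoding="utf-8") as f:
    database = bibtexparser.load(f)

entry = database.entries[0]
print(entry["ID"])     # the citation key
print(entry["title"])  # field values are returned as plain strings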
RIS format (EndNote, RefMan)
TY - CPAPER
AB - Although many tasks intrinsically involve multiple modalities, often only data from a single modality are used to improve complex robots' acquisition of new skills. We present a method to equip robots with multimodal learning skills to achieve multimodal imitation on-the-fly on multiple concurrent task spaces, including vision, touch and proprioception, only using self-learned multimodal sensorimotor relations, without the need of solving inverse kinematic problems or formulating explicit analytical models. We evaluate the proposed method on a humanoid iCub robot learning to interact with a piano keyboard and imitating a human demonstration. Since no assumptions are made on the kinematic structure of the robot, the method can also be applied to different robotic platforms.
AU - Zambelli, M.
AU - Demiris, Y.
DO - 10.1109/IROS.2016.7759582
PB - IEEE
PY - 2016///
SN - 2153-0866
TI - Multimodal Imitation using Self-learned Sensorimotor Representations
UR - http://dx.doi.org/10.1109/IROS.2016.7759582
UR - http://hdl.handle.net/10044/1/39519
ER -
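The RIS record can be read the same way. A minimal sketch, assuming the third-party rispy package and that the record above has been saved as citation.ris; both are assumptions. Printing the parsed fields avoids relying on any particular tag-to-field mapping:

# Minimal sketch: reading the RIS record above with the third-party
# rispy package. The filename citation.ris is an assumption.
import rispy

with open("citation.ris", encoding="utf-8") as f:
    entries = rispy.load(f)

# One record is expected; print every field rispy recovered from the tags.
for field, value in entries[0].items():
    print(f"{field}: {value}")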