Citation

BibTeX format

@article{Gu:2024:10.1109/lra.2024.3396108,
author = {Gu, Y. and Demiris, Y.},
doi = {10.1109/lra.2024.3396108},
journal = {IEEE Robotics and Automation Letters},
pages = {5751--5758},
title = {{VTTB}: a visuo-tactile learning approach for robot-assisted bed bathing},
url = {http://dx.doi.org/10.1109/lra.2024.3396108},
volume = {9},
year = {2024}
}
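
If you cite this entry from LaTeX via BibTeX, a minimal usage sketch follows; the file name references.bib and the IEEEtran bibliography style are illustrative assumptions, not part of the record above.

% main.tex -- assumes the @article entry above is saved in references.bib
\documentclass{article}
\begin{document}
Robot-assisted bed bathing is explored in the VTTB work \cite{Gu:2024:10.1109/lra.2024.3396108}.
\bibliographystyle{IEEEtran} % illustrative; any installed .bst style works
\bibliography{references}    % references.bib, given without the extension
\end{document}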

RIS format (EndNote, RefMan)

TY  - JOUR
AB  - Robot-assisted bed bathing holds the potential to enhance the quality of life for older adults and individuals with mobility impairments. Yet, accurately sensing the human body in a contact-rich manipulation task remains challenging. To address this challenge, we propose a multimodal sensing approach that perceives the 3D contour of body parts using the visual modality while capturing local contact details using the tactile modality. We employ a Transformer-based imitation learning model to utilize the multimodal information and learn to focus on crucial visuo-tactile task features for action prediction. We demonstrate our approach using a Baxter robot and a medical manikin to simulate the robot-assisted bed bathing scenario with bedridden individuals. The robot adeptly follows the contours of the manikin's body parts and cleans the surface based on its curve. Experimental results show that our method can adapt to nonlinear surface curves and generalize across multiple surface geometries, and to human subjects. Overall, our research presents a promising approach for robots to accurately sense the human body through multimodal sensing and perform safe interaction during assistive bed bathing.
AU  - Gu, Y.
AU  - Demiris, Y.
DO  - 10.1109/lra.2024.3396108
EP  - 5758
PY  - 2024///
SN  - 2377-3766
SP  - 5751
TI  - VTTB: a visuo-tactile learning approach for robot-assisted bed bathing
T2  - IEEE Robotics and Automation Letters
UR  - http://dx.doi.org/10.1109/lra.2024.3396108
UR  - http://hdl.handle.net/10044/1/112337
VL  - 9
ER  - 