Citation

BibTeX format

@inproceedings{Zhong:2023:10.1109/icassp49357.2023.10096824,
author = {Zhong, Y. and Zhang, F. and Demiris, Y.},
booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
doi = {10.1109/icassp49357.2023.10096824},
publisher = {IEEE},
title = {Contrastive self-supervised learning for automated multi-modal dance performance assessment},
url = {http://dx.doi.org/10.1109/icassp49357.2023.10096824},
year = {2023}
}

RIS format (EndNote, RefMan)

TY  - CPAPER
AB  - A fundamental challenge in analyzing human motion is to represent human movements effectively, both spatially and temporally. We propose a contrastive self-supervised strategy to tackle this challenge. In particular, we focus on dancing, which demands a high level of physical and intellectual ability. First, we deploy Graph and Residual Neural Networks in a Siamese architecture to represent the dance motion and music features, respectively. Second, we apply the InfoNCE loss to contrastively embed the high-dimensional multimedia signals into the latent space without label supervision. Finally, our proposed framework is evaluated on a multi-modal Dance-Music-Level dataset composed of various dance motions, music, genres and choreographies, with dancers of different expertise levels. Experimental results demonstrate the robustness and improvements of our proposed method over 3 baselines and 6 ablation studies across the tasks of dance genre and choreography classification and dancer expertise level assessment.
AU  - Zhong, Y.
AU  - Zhang, F.
AU  - Demiris, Y.
DO  - 10.1109/icassp49357.2023.10096824
PB  - IEEE
PY  - 2023///
TI  - Contrastive self-supervised learning for automated multi-modal dance performance assessment
T2  - ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)
UR  - http://dx.doi.org/10.1109/icassp49357.2023.10096824
UR  - http://hdl.handle.net/10044/1/107206
ER -
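
For readers who want a concrete sense of the InfoNCE objective named in the abstract, below is a minimal, illustrative PyTorch sketch of a symmetric InfoNCE loss over paired motion and music embeddings. This is not the authors' implementation: the function name info_nce_loss, the (batch, dim) embedding shapes, the symmetric two-way formulation, and the temperature value 0.07 are all assumptions made for illustration.

# Illustrative sketch only; not the code from the cited paper.
import torch
import torch.nn.functional as F

def info_nce_loss(motion_emb, music_emb, temperature=0.07):
    """motion_emb, music_emb: (batch, dim) embeddings from the two
    Siamese branches; matching rows are treated as positive pairs."""
    # L2-normalise so the dot products are cosine similarities.
    motion = F.normalize(motion_emb, dim=1)
    music = F.normalize(music_emb, dim=1)
    # Pairwise similarity matrix; diagonal entries are the positives.
    logits = motion @ music.t() / temperature
    targets = torch.arange(logits.size(0), device=logits.device)
    # Symmetric InfoNCE: each modality must retrieve its counterpart,
    # with every other row in the batch acting as a negative.
    return 0.5 * (F.cross_entropy(logits, targets)
                  + F.cross_entropy(logits.t(), targets))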