BibTeX format
@inproceedings{Dai:2019:10.1007/978-3-030-33391-1_23,
  author        = {Dai, C. and Mo, Y. and Angelini, E. and Guo, Y. and Bai, W.},
  title         = {Transfer Learning from Partial Annotations for Whole Brain Segmentation},
  year          = {2019},
  doi           = {10.1007/978-3-030-33391-1_23},
  eprint        = {1908.10851},
  archiveprefix = {arXiv},
  internal-note = {TODO(review): booktitle/pages/publisher are required for @inproceedings but absent from the source record -- confirm against the DOI landing page. Redundant dx.doi.org URL dropped in favour of the bare doi field; arXiv id taken from the accompanying RIS record.},
}
RIS format (EndNote, RefMan)
TY - CPAPER
AB - Brain MR image segmentation is a key task in neuroimaging studies. It is commonly conducted using standard computational tools, such as FSL, SPM, multi-atlas segmentation etc, which are often registration-based and suffer from expensive computation cost. Recently, there is an increased interest using deep neural networks for brain image segmentation, which have demonstrated advantages in both speed and performance. However, neural networks-based approaches normally require a large amount of manual annotations for optimising the massive amount of network parameters. For 3D networks used in volumetric image segmentation, this has become a particular challenge, as a 3D network consists of many more parameters compared to its 2D counterpart. Manual annotation of 3D brain images is extremely time-consuming and requires extensive involvement of trained experts. To address the challenge with limited manual annotations, here we propose a novel multi-task learning framework for brain image segmentation, which utilises a large amount of automatically generated partial annotations together with a small set of manually created full annotations for network training. Our method yields a high performance comparable to state-of-the-art methods for whole brain segmentation.
AU - Dai, C.
AU - Mo, Y.
AU - Angelini, E.
AU - Guo, Y.
AU - Bai, W.
DO - 10.1007/978-3-030-33391-1_23
PY - 2019///
TI - Transfer learning from partial annotations for whole brain segmentation
UR - http://dx.doi.org/10.1007/978-3-030-33391-1_23
UR - https://arxiv.org/abs/1908.10851
UR - https://link.springer.com/chapter/10.1007/978-3-030-33391-1_23
UR - http://hdl.handle.net/10044/1/73653
ER -