Citation

BibTeX format

@inproceedings{Dong:2019,
  author = {Dong, S. and Gao, Z. and Sun, S. and Wang, X. and Li, M. and Zhang, H. and Yang, G. and Liu, H. and Li, S.},
  title  = {Holistic and deep feature pyramids for saliency detection},
  year   = {2019}
}

RIS format (EndNote, RefMan)

TY  - CPAPER
AB  - Saliency detection has gained increasing research interest in recent years, since many computer vision applications need to derive object attention from images as a first step. Multi-scale awareness of the saliency detector becomes essential for finding thin and small attention regions while preserving high-level semantics. In this paper, we propose a novel holistic and deep feature pyramid neural network architecture that can leverage multi-scale semantics in both the feature encoding stage and the saliency region prediction (decoding) stage. In the encoding stage, we exploit a multi-scale, pyramidal hierarchy of feature maps via a densely connected network with variable-size dilated convolutions as well as pyramid pooling. In the decoding stage, we fuse multi-level feature maps via up-sampling and convolution. In addition, we apply multi-level deep supervision by plugging in loss functions at every feature fusion level. Multi-loss supervision regularizes the weight search space among the different tasks, minimizing over-fitting and enhancing the gradient signal during backpropagation, and thus enables us to train the network from scratch. This architecture builds inherent multi-level semantic pyramidal feature maps at different scales and enhances the model's capability in the saliency detection task. We validated our approach on six benchmark datasets and compared it with eleven state-of-the-art methods. The results demonstrated the effectiveness of the design, and our approach outperformed the compared methods.
AU  - Dong, S.
AU  - Gao, Z.
AU  - Sun, S.
AU  - Wang, X.
AU  - Li, M.
AU  - Zhang, H.
AU  - Yang, G.
AU  - Liu, H.
AU  - Li, S.
PY  - 2019///
TI  - Holistic and deep feature pyramids for saliency detection
ER  -