Citation

BibTeX format

@article{Grinstein:2023:10.1186/s13636-023-00301-x,
  author  = {Grinstein, E. and Neo, V. W. and Naylor, P. A.},
  title   = {Dual Input Neural Networks for Positional Sound Source Localization},
  journal = {{EURASIP} Journal on Audio, Speech, and Music Processing},
  volume  = {2023},
  pages   = {1--12},
  year    = {2023},
  issn    = {1687-4714},
  doi     = {10.1186/s13636-023-00301-x},
  url     = {http://dx.doi.org/10.1186/s13636-023-00301-x},
}

RIS format (EndNote, RefMan)

TY  - JOUR
AB  - In many signal processing applications, metadata may be advantageously used in conjunction with a high dimensional signal to produce a desired output. In the case of classical Sound Source Localization (SSL) algorithms, information from a high dimensional, multichannel audio signals received by many distributed microphones is combined with information describing acoustic properties of the scene, such as the microphones’ coordinates in space, to estimate the position of a sound source. We introduce Dual Input Neural Networks (DI-NNs) as a simple and effective way to model these two data types in a neural network. We train and evaluate our proposed DI-NN on scenarios of varying difficulty and realism and compare it against an alternative architecture, a classical Least-Squares (LS) method as well as a classical Convolutional Recurrent Neural Network (CRNN). Our results show that the DI-NN significantly outperforms the baselines, achieving a five times lower localization error than the LS method and two times lower than the CRNN in a test dataset of real recordings.
AU  - Grinstein, E.
AU  - Neo, V. W.
AU  - Naylor, P. A.
DO  - 10.1186/s13636-023-00301-x
EP  - 12
PY  - 2023///
SN  - 1687-4714
SP  - 1
TI  - Dual input neural networks for positional sound source localization
T2  - EURASIP Journal on Audio, Speech, and Music Processing
UR  - http://dx.doi.org/10.1186/s13636-023-00301-x
UR  - http://hdl.handle.net/10044/1/106532
VL  - 2023
ER  - 