@inproceedings{su2019perceptual,
title = "Perceptually-motivated Environment-specific Speech Enhancement",
abstract = "This paper introduces a deep learning approach to enhance speech recordings made in a specific environment. A single neural network learns to ameliorate several types of recording artifacts, including noise, reverberation, and non-linear equalization. The method relies on a new perceptual loss function that combines adversarial loss with spectrogram features. Both subjective and objective evaluations show that the proposed approach improves on state-of-the-art baseline methods.",
keywords = "Denoising, dereverberation, equalization matching, perceptual loss, speech enhancement",
author = "Jiaqi Su and Adam Finkelstein and Zeyu Jin",
note = "44th IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2019; Conference date: 12-05-2019 through 17-05-2019",
year = "2019",
month = may,
doi = "10.1109/ICASSP.2019.8683654",
language = "English (US)",
series = "ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings",
publisher = "IEEE",
pages = "7015--7019",
booktitle = "2019 IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2019 - Proceedings",
address = "Brighton, United Kingdom",
}
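
For readers implementing something along these lines, below is a minimal PyTorch sketch of a loss that combines a spectrogram-feature term with an adversarial term, as the abstract describes. This is not the paper's exact formulation: the STFT parameters, the log-magnitude L1 distance, the non-saturating GAN loss variant, and the weighting `lam` are all assumptions chosen for illustration.

```python
import torch
import torch.nn.functional as F

def spectrogram_loss(enhanced, clean, n_fft=512, hop=128):
    """L1 distance between log-magnitude spectrograms (hypothetical parameters)."""
    window = torch.hann_window(n_fft, device=clean.device)
    spec_e = torch.stft(enhanced, n_fft, hop_length=hop, window=window,
                        return_complex=True).abs()
    spec_c = torch.stft(clean, n_fft, hop_length=hop, window=window,
                        return_complex=True).abs()
    return F.l1_loss(torch.log1p(spec_e), torch.log1p(spec_c))

def adversarial_loss(disc_scores_fake):
    """Non-saturating generator loss: push discriminator scores toward 'real'.
    One common GAN formulation; the paper's discriminator setup may differ."""
    return F.binary_cross_entropy_with_logits(
        disc_scores_fake, torch.ones_like(disc_scores_fake))

def perceptual_loss(enhanced, clean, disc_scores_fake, lam=1.0):
    """Combined objective: spectrogram term plus weighted adversarial term."""
    return spectrogram_loss(enhanced, clean) + lam * adversarial_loss(disc_scores_fake)
```

In a sketch like this, `lam` trades off spectral fidelity against the adversarial term that encourages perceptually natural output; in practice such a weight is tuned on a validation set.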