@inproceedings{78ad3a3de5314ee186aa28b9ec4e8450,
  title     = {Methodical Design and Trimming of Deep Learning Networks: Enhancing External {BP} Learning with Internal Omnipresent-supervision Training Paradigm},
  abstract  = {Back-propagation (BP) is now a classic learning paradigm whose source of supervision is exclusively from the external (input/output) nodes. Consequently, BP is easily vulnerable to curse-of-depth in (very) Deep Learning Networks (DLNs). This prompts us to advocate Internal Neuron's Learnablility (INL) with (1)internal teacher labels (ITL); and (2)internal optimization metrics (IOM) for evaluating hidden layers/nodes. Conceptually, INL is a step beyond the notion of Internal Neuron's Explainablility (INE), championed by DARPA's XAI (or AI3.0). Practically, INL facilitates a structure/parameter NP-iterative learning for (supervised) deep compression/quantization: simultaneously trimming hidden nodes and raising accuracy. Pursuant to our simulations, the NP-iteration appears to outperform several prominent pruning methods in the literature.},
  keywords  = {(supervised) deep compression/quantization, BPOS NP-iteration, Internal Learning, Internal Optimization Metrics (IOM), structural-parameter learning},
  author    = {Kung, S. Y. and Hou, Zejiang and Liu, Yuchen},
  note      = {Publisher Copyright: {\textcopyright} 2019 IEEE.; 44th IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2019 ; Conference date: 12-05-2019 Through 17-05-2019},
  year      = {2019},
  month     = may,
  doi       = {10.1109/ICASSP.2019.8682208},
  language  = {English (US)},
  series    = {ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {8058--8062},
  booktitle = {2019 IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2019 - Proceedings},
}