% Encoding: UTF-8
@COMMENT{BibTeX export based on data in FAU CRIS: https://cris.fau.de/}
@COMMENT{For any questions please write to cris-support@fau.de}
@inproceedings{faucris.255641015,
  abstract       = {We present an algorithm to reduce the computational effort for the multiplication of a given matrix with an unknown column vector. The algorithm decomposes the given matrix into a product of matrices whose entries are either zero or integer powers of two utilizing the principles of sparse recovery. While classical low resolution quantization achieves an accuracy of 6 dB per bit, our method can achieve many times more than that for large matrices. Numerical and analytical evidence suggests that the improvement actually grows unboundedly with matrix size. Due to sparsity, the algorithm even allows for quantization levels below 1 bit per matrix entry while achieving highly accurate approximations for large matrices. Applications include, but are not limited to, neural networks, as well as fully digital beam-forming for massive MIMO and millimeter wave applications.},
  author         = {Müller, Ralf and Gäde, Bernhard and Bereyhi, Ali},
  booktitle      = {2020 Information Theory and Applications Workshop, ITA 2020},
  eventdate      = {2020-02-02/2020-02-07},
  doi            = {10.1109/ITA50056.2020.9244952},
  faupublication = {yes},
  isbn           = {9781728141909},
  keywords       = {Quantization (signal); Neural networks; Millimeter wave technology; Massive MIMO; Sparse matrices; Matrix decomposition; Gain},
  note           = {CRIS-Team Scopus Importer:2021-04-19},
  peerreviewed   = {unknown},
  publisher      = {Institute of Electrical and Electronics Engineers Inc.},
  title          = {Efficient Matrix Multiplication: The Sparse Power-of-2 Factorization},
  venue          = {San Diego, CA, USA},
  year           = {2020},
}