@article{Shukla_Alam_Bhattacharya_Mitra_Mukhopadhyay_2023,
  title={“Whispering MLaaS”: Exploiting Timing Channels to Compromise User Privacy in Deep Neural Networks},
  volume={2023},
  url={https://tches.iacr.org/index.php/TCHES/article/view/10295},
  DOI={10.46586/tches.v2023.i2.587-613},
  abstractNote={While recent advancements of Deep Learning (DL) in solving complex real-world tasks have spurred their popularity, the usage of privacy-rich data for their training in varied applications has made them an overly-exposed threat surface for privacy violations. Moreover, the rapid adoption of cloud-based Machine-Learning-as-a-Service (MLaaS) has broadened the threat surface to various remote side-channel attacks. In this paper, for the first time, we show one such privacy violation by observing a data-dependent timing side-channel (which we name Class-Leakage) originating from a non-constant-time branching operation in a widely popular DL framework, namely PyTorch. We further escalate this timing variability to a practical inference-time attack where an adversary with user-level privileges and hard-label black-box access to an MLaaS can exploit Class-Leakage to compromise the privacy of MLaaS users. DL models have also been shown to be vulnerable to Membership Inference Attack (MIA), where the primary objective of an adversary is to deduce whether any particular data has been used while training the model. Differential Privacy (DP) has been proposed in recent literature as a popular countermeasure against MIA, where inclusivity and exclusivity of a data-point in a dataset cannot be ascertained by definition. In this paper, we also demonstrate that the existence of a data-point within the training dataset of a DL model secured with DP can still be distinguished using the identified timing side-channel. In addition, we propose an efficient countermeasure to the problem by introducing a constant-time branching operation that alleviates the Class-Leakage. We validate the approach using five pre-trained DL models trained on two standard benchmarking image classification datasets, CIFAR-10 and CIFAR-100, over two different computing environments having Intel Xeon and Intel i7 processors.},
  number={2},
  journal={IACR Transactions on Cryptographic Hardware and Embedded Systems},
  author={Shukla, Shubhi and Alam, Manaar and Bhattacharya, Sarani and Mitra, Pabitra and Mukhopadhyay, Debdeep},
  year={2023},
  month={Mar.},
  pages={587–613}
}