@ARTICLE{Puchala_Dariusz_Execution_2022,
  author       = {Puchala, Dariusz and Stokfiszewski, Kamil and Wieloch, Kamil},
  title        = {Execution time prediction model for parallel GPU realizations of discrete transforms computation algorithms},
  journal      = {Bulletin of the Polish Academy of Sciences Technical Sciences},
  volume       = {70},
  number       = {1},
  pages        = {e139393},
  year         = {2022},
  type         = {Article},
  howpublished = {online},
  doi          = {10.24425/bpasts.2021.139393},
  URL          = {http://ochroma.man.poznan.pl/Content/121345/PDF-MASTER/2152_corr.pdf},
  keywords     = {graphics processing unit (GPU), execution time prediction model, discrete wavelet transform (DWT), lattice structure, convolution-based approach, orthogonal transform, orthogonal filter banks, time effectiveness, prediction accuracy},
  abstract     = {Parallel realizations of discrete transforms (DTs) computation algorithms (DTCAs) performed on graphics processing units (GPUs) play a significant role in many modern data processing methods utilized in numerous areas of human activity. In this paper the authors propose a novel execution time prediction model, which allows for accurate and rapid estimation of execution times of various kinds of structurally different DTCAs performed on GPUs of distinct architectures, without the necessity of conducting the actual experiments on physical hardware. The model can serve as a guide for the system analyst in making the optimal choice of the GPU hardware solution for a given computational task involving a particular DT calculation, or can help in choosing the most appropriate parallel implementation of the selected DT, given the limitations imposed by available hardware. Restricting the model to adhere only to the key common features of DTCAs enables the authors to significantly simplify its structure, leading consequently to its design as a hybrid, analytical–simulational method, which jointly exploits the main advantages of both of the mentioned techniques, namely time-effectiveness and high prediction accuracy, while at the same time mutually eliminating the major weaknesses of both approaches within the proposed solution. The model is validated experimentally on two structurally different parallel methods of discrete wavelet transform (DWT) computation, i.e. the direct convolution-based and lattice structure-based schemes, by comparing its prediction results with the actual measurements taken for 6 different graphics cards, representing a fairly broad spectrum of GPU compute architectures. Experimental results reveal the overall average execution time prediction accuracy of the model to be at a level of 97.2%, with a global maximum prediction error of 14.5% recorded throughout all the conducted experiments, while maintaining a high average evaluation speed of 3.5 ms per single simulation. The results support inferring the model's generality and the possibility of extrapolation to other DTCAs and different GPU architectures, which, along with the proposed model's straightforwardness, time-effectiveness, and ease of practical application, makes it, in the authors' opinion, a very interesting alternative to the related existing solutions.},
}