Qt
Internal/contributor documentation for the Qt SDK. Note: these are NOT the official API docs; the official documentation is at https://doc.qt.io/
Loading...
Searching...
No Matches
qdrawingprimitive_sse2_p.h
Go to the documentation of this file.
1// Copyright (C) 2016 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3// Qt-Security score:significant reason:default
4
5#ifndef QDRAWINGPRIMITIVE_SSE2_P_H
6#define QDRAWINGPRIMITIVE_SSE2_P_H
7
8#include <QtGui/private/qtguiglobal_p.h>
9#include <private/qsimd_p.h>
11#include "qrgba64_p.h"
12
13#ifdef __SSE2__
14
15//
16// W A R N I N G
17// -------------
18//
19// This file is not part of the Qt API. It exists purely as an
20// implementation detail. This header file may change from version to
21// version without notice, or even be removed.
22//
23// We mean it.
24//
25
27
28/*
29 * Multiply the components of pixelVector by alphaChannel
30 * Each 32bits components of alphaChannel must be in the form 0x00AA00AA
31 * colorMask must have 0x00ff00ff on each 32 bits component
32 * half must have the value 128 (0x80) for each 32 bits component
33 */
34#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
35{
36 /* 1. separate the colors in 2 vectors so each color is on 16 bits \
37 (in order to be multiplied by the alpha \
38 each 32 bit of dstVectorAG are in the form 0x00AA00GG \
39 each 32 bit of dstVectorRB are in the form 0x00RR00BB */
40 __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8);
41 __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask);
42
43 /* 2. multiply the vectors by the alpha channel */
44 pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel);
45 pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel);
46
47 /* 3. divide by 255, that's the tricky part. \
48 we do it like for BYTE_MUL(), with bit shift: X/255 ~= (X + X/256 + rounding)/256 */
49 /** so first (X + X/256 + rounding) */
50 pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8));
51 pixelVectorRB = _mm_add_epi16(pixelVectorRB, half);
52 pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8));
53 pixelVectorAG = _mm_add_epi16(pixelVectorAG, half);
54
55 /** second divide by 256 */
56 pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8);
57 /** for AG, we could >> 8 to divide followed by << 8 to put the \
58 bytes in the correct position. By masking instead, we execute \
59 only one instruction */
60 pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG);
61
62 /* 4. combine the 2 pairs of colors */
63 result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
64}
65
66/*
67 * Each 32bits components of alphaChannel must be in the form 0x00AA00AA
68 * oneMinusAlphaChannel must be 255 - alpha for each 32 bits component
69 * colorMask must have 0x00ff00ff on each 32 bits component
70 * half must have the value 128 (0x80) for each 32 bits component
71 */
72#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) {
73 /* interpolate AG */
74 __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8);
75 __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8);
76 __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel);
77 __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel);
78 __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha);
79 finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8));
80 finalAG = _mm_add_epi16(finalAG, half);
81 finalAG = _mm_andnot_si128(colorMask, finalAG);
82
83 /* interpolate RB */
84 __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask);
85 __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask);
86 __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel);
87 __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel);
88 __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha);
89 finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8));
90 finalRB = _mm_add_epi16(finalRB, half);
91 finalRB = _mm_srli_epi16(finalRB, 8);
92
93 /* combine */
94 result = _mm_or_si128(finalAG, finalRB); \
95}
96
97// same as BLEND_SOURCE_OVER_ARGB32_SSE2, but for one vector srcVector
98#define BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask) {
99 const __m128i srcVectorAlpha = _mm_and_si128(srcVector, alphaMask);
100 if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, alphaMask)) == 0xffff) {
101 /* all opaque */
102 _mm_store_si128((__m128i *)&dst[x], srcVector);
103 } else if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, nullVector)) != 0xffff) {
104 /* not fully transparent */
105 /* extract the alpha channel on 2 x 16 bits */
106 /* so we have room for the multiplication */
107 /* each 32 bits will be in the form 0x00AA00AA */
108 /* with A being the 1 - alpha */
109 __m128i alphaChannel = _mm_srli_epi32(srcVector, 24);
110 alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16));
111 alphaChannel = _mm_sub_epi16(one, alphaChannel);
112
113 const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
114 __m128i destMultipliedByOneMinusAlpha;
115 BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half);
116
117 /* result = s + d * (1-alpha) */
118 const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha);
119 _mm_store_si128((__m128i *)&dst[x], result);
120 }
121 }
122
123
124// Basically blend src over dst with the const alpha defined as constAlphaVector.
125// nullVector, half, one, colorMask are constant across the whole image/texture, and should be defined as:
126//const __m128i nullVector = _mm_set1_epi32(0);
127//const __m128i half = _mm_set1_epi16(0x80);
128//const __m128i one = _mm_set1_epi16(0xff);
129//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
130//const __m128i alphaMask = _mm_set1_epi32(0xff000000);
131//
132// The computation being done is:
133// result = s + d * (1-alpha)
134// with shortcuts if fully opaque or fully transparent.
135#define BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask) {
136 int x = 0;
137
138 /* First, get dst aligned. */
139 ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) {
140 blend_pixel(dst[x], src[x]);
141 }
142
143 for (; x < length-3; x += 4) {
144 const __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]);
145 BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask)
146 }
147 SIMD_EPILOGUE(x, length, 3) {
148 blend_pixel(dst[x], src[x]);
149 } \
150}
151
152// Basically blend src over dst with the const alpha defined as constAlphaVector.
153// nullVector, half, one, colorMask are constant across the whole image/texture, and should be defined as:
154//const __m128i nullVector = _mm_set1_epi32(0);
155//const __m128i half = _mm_set1_epi16(0x80);
156//const __m128i one = _mm_set1_epi16(0xff);
157//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
158//
159// The computation being done is:
160// dest = (s + d * sia) * ca + d * cia
161// = s * ca + d * (sia * ca + cia)
162// = s * ca + d * (1 - sa*ca)
163#define BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector) \
164{
165 int x = 0;
166
167 ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) {
168 blend_pixel(dst[x], src[x], const_alpha);
169 }
170
171 for (; x < length-3; x += 4) {
172 __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]);
173 if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) {
174 BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half);
175
176 __m128i alphaChannel = _mm_srli_epi32(srcVector, 24);
177 alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16));
178 alphaChannel = _mm_sub_epi16(one, alphaChannel);
179
180 const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
181 __m128i destMultipliedByOneMinusAlpha;
182 BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half);
183
184 const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha);
185 _mm_store_si128((__m128i *)&dst[x], result);
186 }
187 }
188 SIMD_EPILOGUE(x, length, 3) {
189 blend_pixel(dst[x], src[x], const_alpha);
190 } \
191}
192
193QT_END_NAMESPACE
194
195#endif // __SSE2__
196
197QT_BEGIN_NAMESPACE
198#if QT_COMPILER_SUPPORTS_HERE(SSE4_1)
199QT_FUNCTION_TARGET(SSE2)
200static inline void Q_DECL_VECTORCALL reciprocal_mul_ss(__m128 &ia, const __m128 a, float mul)
201{
202 ia = _mm_rcp_ss(a); // Approximate 1/a
203 // Improve precision of ia using Newton-Raphson
204 ia = _mm_sub_ss(_mm_add_ss(ia, ia), _mm_mul_ss(ia, _mm_mul_ss(ia, a)));
205 ia = _mm_mul_ss(ia, _mm_set_ss(mul));
206 ia = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(0,0,0,0));
207}
208
209QT_FUNCTION_TARGET(SSE4_1)
210static inline QRgb qUnpremultiply_sse4(QRgb p)
211{
212 const uint alpha = qAlpha(p);
213 if (alpha == 255)
214 return p;
215 if (alpha == 0)
216 return 0;
217 const __m128 va = _mm_set1_ps(alpha);
218 __m128 via;
219 reciprocal_mul_ss(via, va, 255.0f); // Approximate 1/a
220 __m128i vl = _mm_cvtepu8_epi32(_mm_cvtsi32_si128(p));
221 vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
222 vl = _mm_packus_epi32(vl, vl);
223 vl = _mm_insert_epi16(vl, alpha, 3);
224 vl = _mm_packus_epi16(vl, vl);
225 return _mm_cvtsi128_si32(vl);
226}
227
228template<enum QtPixelOrder PixelOrder>
229QT_FUNCTION_TARGET(SSE4_1)
230static inline uint qConvertArgb32ToA2rgb30_sse4(QRgb p)
231{
232 const uint alpha = qAlpha(p);
233 if (alpha == 255)
234 return qConvertRgb32ToRgb30<PixelOrder>(p);
235 if (alpha == 0)
236 return 0;
237 constexpr float mult = 1023.0f / (255 >> 6);
238 const uint newalpha = (alpha >> 6);
239 const __m128 va = _mm_set1_ps(alpha);
240 __m128 via;
241 reciprocal_mul_ss(via, va, mult * newalpha);
242 __m128i vl = _mm_cvtsi32_si128(p);
243 vl = _mm_cvtepu8_epi32(vl);
244 vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
245 vl = _mm_packus_epi32(vl, vl);
246 uint rgb30 = (newalpha << 30);
247 rgb30 |= ((uint)_mm_extract_epi16(vl, 1)) << 10;
248 if (PixelOrder == PixelOrderRGB) {
249 rgb30 |= ((uint)_mm_extract_epi16(vl, 2)) << 20;
250 rgb30 |= ((uint)_mm_extract_epi16(vl, 0));
251 } else {
252 rgb30 |= ((uint)_mm_extract_epi16(vl, 0)) << 20;
253 rgb30 |= ((uint)_mm_extract_epi16(vl, 2));
254 }
255 return rgb30;
256}
257
258template<enum QtPixelOrder PixelOrder>
259QT_FUNCTION_TARGET(SSE4_1)
260static inline uint qConvertRgba64ToRgb32_sse4(QRgba64 p)
261{
262 if (p.isTransparent())
263 return 0;
264 __m128i vl = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&p));
265 if (!p.isOpaque()) {
266 const __m128 va = _mm_set1_ps(p.alpha());
267 __m128 via;
268 reciprocal_mul_ss(via, va, 65535.0f);
269 vl = _mm_unpacklo_epi16(vl, _mm_setzero_si128());
270 vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl) , via));
271 vl = _mm_packus_epi32(vl, vl);
272 vl = _mm_insert_epi16(vl, p.alpha(), 3);
273 }
274 if (PixelOrder == PixelOrderBGR)
275 vl = _mm_shufflelo_epi16(vl, _MM_SHUFFLE(3, 0, 1, 2));
276 return toArgb32(vl);
277}
278#endif
279QT_END_NAMESPACE
280
281#endif // QDRAWINGPRIMITIVE_SSE2_P_H
Combined button and popup list for selecting options.
static QT_BEGIN_NAMESPACE const int numCompositionFunctions