#ifndef QDRAWINGPRIMITIVE_SSE2_P_H
#define QDRAWINGPRIMITIVE_SSE2_P_H

#include <QtGui/private/qtguiglobal_p.h>
#include <private/qsimd_p.h>
#include "qdrawhelper_p.h"
#include "qrgba64_p.h"

// This is a private Qt implementation header: it may change from version to
// version without notice, or even be removed.

QT_BEGIN_NAMESPACE
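/*
 * BYTE_MUL_SSE2: multiply the four channels of each pixel in pixelVector by alphaChannel.
 * Each 32-bit lane of alphaChannel must have the form 0x00AA00AA,
 * colorMask must be 0x00ff00ff in each 32-bit lane,
 * and half must be 0x0080 in each 16-bit lane (the rounding term of the /255 approximation).
 */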
#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
{ \
    /* 1. separate the channels into 2 vectors so each component sits in 16 bits \
       (leaving room for the multiplication by alpha): \
       each 32 bits of pixelVectorAG has the form 0x00AA00GG, \
       each 32 bits of pixelVectorRB has the form 0x00RR00BB */ \
    __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
    __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
\
    /* 2. multiply the vectors by the alpha channel */ \
    pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
    pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
\
    /* 3. divide by 255 using the usual approximation: \
       X/255 ~= (X + X/256 + rounding) / 256 */ \
    pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
    pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
    pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
    pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
\
    /* ... then divide by 256 */ \
    pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
    /* for AG, masking the low byte divides by 256 and puts the bytes back \
       into the high position in a single operation */ \
    pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
\
    /* 4. combine the two pairs of channels */ \
    result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
}
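// Illustrative sketch (not part of this header): scaling four ARGB32 pixels by a
// constant alpha of 0x80 with BYTE_MUL_SSE2. The names below are examples only and
// assume src points at four packed 32-bit pixels.
//
//     const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
//     const __m128i half = _mm_set1_epi16(0x80);
//     const __m128i alphaChannel = _mm_set1_epi32(0x00800080); // 0x00AA00AA layout
//     __m128i pixels = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src));
//     __m128i scaled;
//     BYTE_MUL_SSE2(scaled, pixels, alphaChannel, colorMask, half);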
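/*
 * INTERPOLATE_PIXEL_255_SSE2: result = (srcVector * alphaChannel + dstVector * oneMinusAlphaChannel) / 255.
 * Each 32-bit lane of alphaChannel and oneMinusAlphaChannel must have the form 0x00AA00AA,
 * colorMask must be 0x00ff00ff in each 32-bit lane, and half must be 0x0080 in each 16-bit lane.
 */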
#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
    /* interpolate the alpha and green channels */ \
    __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
    __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
    __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
    __m128i dstVectorAGoneMinusAlpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
    __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlpha); \
    finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
    finalAG = _mm_add_epi16(finalAG, half); \
    finalAG = _mm_andnot_si128(colorMask, finalAG); \
\
    /* interpolate the red and blue channels */ \
    __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
    __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
    __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
    __m128i dstVectorRBoneMinusAlpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
    __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlpha); \
    finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
    finalRB = _mm_add_epi16(finalRB, half); \
    finalRB = _mm_srli_epi16(finalRB, 8); \
\
    /* combine the two halves */ \
    result = _mm_or_si128(finalAG, finalRB); \
}
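/*
 * Blend one vector of four premultiplied ARGB32 source pixels over dst[x..x+3].
 * Expects an int index x and a 16-byte-aligned dst in scope; skips the arithmetic
 * when the four source pixels are all opaque (plain copy) or all transparent (no-op).
 */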
#define BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask) { \
        const __m128i srcVectorAlpha = _mm_and_si128(srcVector, alphaMask); \
        if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, alphaMask)) == 0xffff) { \
            /* all four pixels are opaque: just copy the source */ \
            _mm_store_si128((__m128i *)&dst[x], srcVector); \
        } else if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, nullVector)) != 0xffff) { \
            /* not all transparent: blend. Spread each pixel's alpha into the \
               0x00AA00AA layout so it can multiply the 16-bit channels, \
               then take 255 - alpha */ \
            __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
            alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
            alphaChannel = _mm_sub_epi16(one, alphaChannel); \
\
            const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
            __m128i destMultipliedByOneMinusAlpha; \
            BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
\
            /* result = s + d * (1 - alpha) */ \
            const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
            _mm_store_si128((__m128i *)&dst[x], result); \
        } \
    }
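// Blend the premultiplied ARGB32 pixels in src over dst (SourceOver), four pixels at a time.
// The constant vectors are the same for the whole span and are expected to be:
//     const __m128i nullVector = _mm_set1_epi32(0);
//     const __m128i half = _mm_set1_epi16(0x80);
//     const __m128i one = _mm_set1_epi16(0xff);
//     const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
//     const __m128i alphaMask = _mm_set1_epi32(0xff000000);
// The computation is result = s + d * (1 - alpha), with shortcuts when the source
// pixels are fully opaque or fully transparent.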
#define BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask) { \
    int x = 0; \
\
    /* First, bring dst to a 16-byte boundary with the scalar blend. */ \
    ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
        blend_pixel(dst[x], src[x]); \
    } \
\
    for (; x < length - 3; x += 4) { \
        const __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]); \
        BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask) \
    } \
    /* Finish the remaining (at most 3) pixels with the scalar blend. */ \
    SIMD_EPILOGUE(x, length, 3) { \
        blend_pixel(dst[x], src[x]); \
    } \
}
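// Illustrative sketch (not part of this header) of a composition routine built on
// BLEND_SOURCE_OVER_ARGB32_SSE2. The function name and signature are examples only;
// it assumes the scalar blend_pixel() helper is in scope, as it is for Qt's real callers.
//
//     QT_FUNCTION_TARGET(SSE2)
//     static void exampleCompSourceOver_sse2(uint *dst, const uint *src, int length)
//     {
//         const __m128i nullVector = _mm_set1_epi32(0);
//         const __m128i half = _mm_set1_epi16(0x80);
//         const __m128i one = _mm_set1_epi16(0xff);
//         const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
//         const __m128i alphaMask = _mm_set1_epi32(0xff000000);
//         BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask);
//     }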
// As above, but the source is additionally scaled by a constant alpha.
// constAlphaVector must hold const_alpha in the 0x00AA00AA layout; the other constant
// vectors are as for BLEND_SOURCE_OVER_ARGB32_SSE2.
// The computation is dest = s * ca + d * (1 - sa * ca).
#define BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector) \
{ \
    int x = 0; \
\
    ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
        blend_pixel(dst[x], src[x], const_alpha); \
    } \
\
    for (; x < length - 3; x += 4) { \
        __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]); \
        if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) { \
            /* the source is not fully transparent: scale it by the constant alpha */ \
            BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half); \
\
            __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
            alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
            alphaChannel = _mm_sub_epi16(one, alphaChannel); \
\
            const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
            __m128i destMultipliedByOneMinusAlpha; \
            BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
\
            const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
            _mm_store_si128((__m128i *)&dst[x], result); \
        } \
    } \
    SIMD_EPILOGUE(x, length, 3) { \
        blend_pixel(dst[x], src[x], const_alpha); \
    } \
}
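// The SSE4.1 helpers below replace the per-pixel integer division by alpha with a
// multiplication by a refined reciprocal (see reciprocal_mul_ss).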
#if QT_COMPILER_SUPPORTS_HERE(SSE4_1)
// Compute approximately mul / a and broadcast it to all four lanes of ia.
QT_FUNCTION_TARGET(SSE2)
static inline void Q_DECL_VECTORCALL reciprocal_mul_ss(__m128 &ia, const __m128 a, float mul)
{
    ia = _mm_rcp_ss(a); // estimate 1/a (about 12 bits of precision)
    ia = _mm_sub_ss(_mm_add_ss(ia, ia), _mm_mul_ss(ia, _mm_mul_ss(ia, a))); // one Newton-Raphson step: ia = 2*ia - ia*ia*a
    ia = _mm_mul_ss(ia, _mm_set_ss(mul));
    ia = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(0,0,0,0)); // broadcast to all lanes
}
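// Scalar sketch of what reciprocal_mul_ss computes (illustrative only; approx_rcp is a
// placeholder for the _mm_rcp_ss hardware estimate):
//
//     float reciprocal_mul(float a, float mul)
//     {
//         float ia = approx_rcp(a);      // hardware estimate, ~12 bits
//         ia = ia * (2.0f - ia * a);     // Newton-Raphson step doubles the precision
//         return ia * mul;               // fold the constant numerator in
//     }
//
// so the value broadcast to all lanes is approximately mul / a.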
// Unpremultiply a premultiplied ARGB32 pixel: scale each color channel by 255/alpha
// and keep the original alpha.
QT_FUNCTION_TARGET(SSE4_1)
static inline QRgb qUnpremultiply_sse4(QRgb p)
{
    const uint alpha = qAlpha(p);
    if (alpha == 255)
        return p;
    if (alpha == 0)
        return 0;
    const __m128 va = _mm_set1_ps(alpha);
    __m128 via;
    reciprocal_mul_ss(via, va, 255.0f); // via = 255 / alpha in every lane
    __m128i vl = _mm_cvtepu8_epi32(_mm_cvtsi32_si128(p)); // widen the four channels to 32 bits
    vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
    vl = _mm_packus_epi32(vl, vl);
    vl = _mm_insert_epi16(vl, alpha, 3); // restore the original alpha
    vl = _mm_packus_epi16(vl, vl);
    return _mm_cvtsi128_si32(vl);
}
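// Convert a premultiplied ARGB32 pixel to an A2RGB30 value (2-bit alpha, 10 bits per
// color channel): the channels are unpremultiplied from the 8-bit alpha, rescaled to
// 10 bits, and re-premultiplied by the quantized 2-bit alpha.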
template<enum QtPixelOrder PixelOrder>
QT_FUNCTION_TARGET(SSE4_1)
static inline uint qConvertArgb32ToA2rgb30_sse4(QRgb p)
{
    const uint alpha = qAlpha(p);
    if (alpha == 255)
        return qConvertRgb32ToRgb30<PixelOrder>(p);
    if (alpha == 0)
        return 0;
    constexpr float mult = 1023.0f / (255 >> 6);
    const uint newalpha = (alpha >> 6);
    const __m128 va = _mm_set1_ps(alpha);
    __m128 via;
    reciprocal_mul_ss(via, va, mult * newalpha);
    __m128i vl = _mm_cvtsi32_si128(p);
    vl = _mm_cvtepu8_epi32(vl);
    vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
    vl = _mm_packus_epi32(vl, vl);
    uint rgb30 = (newalpha << 30);
    rgb30 |= ((uint)_mm_extract_epi16(vl, 1)) << 10; // green
    if (PixelOrder == PixelOrderRGB) {
        rgb30 |= ((uint)_mm_extract_epi16(vl, 2)) << 20;
        rgb30 |= ((uint)_mm_extract_epi16(vl, 0));
    } else {
        rgb30 |= ((uint)_mm_extract_epi16(vl, 0)) << 20;
        rgb30 |= ((uint)_mm_extract_epi16(vl, 2));
    }
    return rgb30;
}
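// Convert a premultiplied, 16-bit-per-channel QRgba64 pixel to 32-bit ARGB. Unless the
// pixel is opaque (or fully transparent, which short-circuits to 0), the channels are
// unpremultiplied by scaling with 65535/alpha before being packed down to 8 bits.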
template<enum QtPixelOrder PixelOrder>
QT_FUNCTION_TARGET(SSE4_1)
static inline uint qConvertRgba64ToRgb32_sse4(QRgba64 p)
{
    if (p.isTransparent())
        return 0;
    __m128i vl = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&p));
    if (!p.isOpaque()) {
        const __m128 va = _mm_set1_ps(p.alpha());
        __m128 via;
        reciprocal_mul_ss(via, va, 65535.0f);
        vl = _mm_unpacklo_epi16(vl, _mm_setzero_si128());
        vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
        vl = _mm_packus_epi32(vl, vl);
        vl = _mm_insert_epi16(vl, p.alpha(), 3);
    }
    if (PixelOrder == PixelOrderBGR)
        vl = _mm_shufflelo_epi16(vl, _MM_SHUFFLE(3, 0, 1, 2));
    return toArgb32(vl);
}
#endif // QT_COMPILER_SUPPORTS_HERE(SSE4_1)

QT_END_NAMESPACE

#endif // QDRAWINGPRIMITIVE_SSE2_P_H