Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
Loading...
Searching...
No Matches
qdrawhelper_sse2.cpp
Go to the documentation of this file.
1// Copyright (C) 2016 The Qt Company Ltd.
2// Copyright (C) 2016 Intel Corporation.
3// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
4// Qt-Security score:significant reason:default
5
6#include <private/qdrawhelper_x86_p.h>
7
8#ifdef QT_COMPILER_SUPPORTS_SSE2
9
10#include <private/qdrawingprimitive_sse2_p.h>
11#include <private/qpaintengine_raster_p.h>
12
13QT_BEGIN_NAMESPACE
14
15#ifndef QDRAWHELPER_AVX
16// in AVX mode, we'll use the SSSE3 code
// Blend a premultiplied ARGB32 source rectangle over an ARGB32 destination
// (Porter-Duff SourceOver) using SSE2.
//
// destPixels/dbpl: destination base pointer and bytes-per-line
// srcPixels/sbpl:  source base pointer and bytes-per-line
// w, h:            rectangle size in pixels
// const_alpha:     extra constant opacity, 0..256 (256 = fully opaque,
//                  0 = nothing to do)
void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
                                    const uchar *srcPixels, int sbpl,
                                    int w, int h,
                                    int const_alpha)
{
    const quint32 *src = (const quint32 *) srcPixels;
    quint32 *dst = (quint32 *) destPixels;
    if (const_alpha == 256) {
        // Plain per-pixel source-over; one vectorized scanline per iteration
        // via the helper macro from qdrawingprimitive_sse2_p.h.
        const __m128i alphaMask = _mm_set1_epi32(0xff000000);
        const __m128i nullVector = _mm_set1_epi32(0);
        const __m128i half = _mm_set1_epi16(0x80);
        const __m128i one = _mm_set1_epi16(0xff);
        const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
        for (int y = 0; y < h; ++y) {
            BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, w, nullVector, half, one, colorMask, alphaMask);
            // Strides are in bytes, hence the uchar round-trips.
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    } else if (const_alpha != 0) {
        // dest = (s + d * sia) * ca + d * cia
        //      = s * ca + d * (sia * ca + cia)
        //      = s * ca + d * (1 - sa*ca)
        const_alpha = (const_alpha * 255) >> 8; // rescale 0..256 -> 0..255
        const __m128i nullVector = _mm_set1_epi32(0);
        const __m128i half = _mm_set1_epi16(0x80);
        const __m128i one = _mm_set1_epi16(0xff);
        const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
        const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
        for (int y = 0; y < h; ++y) {
            BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, w, nullVector, half, one, colorMask, constAlphaVector)
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    }
}
52#endif
53
54// qblendfunctions.cpp
55void qt_blend_rgb32_on_rgb32(uchar *destPixels, int dbpl,
56 const uchar *srcPixels, int sbpl,
57 int w, int h,
58 int const_alpha);
59
// Blend an opaque RGB32 source rectangle onto an RGB32 destination with a
// constant opacity, using SSE2 interpolation:
//   dest = src * const_alpha + dest * (255 - const_alpha)
//
// For const_alpha == 256 (fully opaque) the work is delegated to the generic
// qt_blend_rgb32_on_rgb32 from qblendfunctions.cpp (a straight copy path);
// for const_alpha == 0 nothing is written.
void qt_blend_rgb32_on_rgb32_sse2(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    const quint32 *src = (const quint32 *) srcPixels;
    quint32 *dst = (quint32 *) destPixels;
    if (const_alpha != 256) {
        if (const_alpha != 0) {
            const __m128i half = _mm_set1_epi16(0x80);
            const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);

            const_alpha = (const_alpha * 255) >> 8; // rescale 0..256 -> 0..255
            int one_minus_const_alpha = 255 - const_alpha;
            const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
            const __m128i oneMinusConstAlpha =  _mm_set1_epi16(one_minus_const_alpha);
            for (int y = 0; y < h; ++y) {
                int x = 0;

                // First, align dest to 16 bytes:
                ALIGNMENT_PROLOGUE_16BYTES(dst, x, w) {
                    dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
                }

                // Main loop: 4 pixels per iteration. Source may be unaligned
                // (loadu); destination is aligned after the prologue.
                for (; x < w-3; x += 4) {
                    __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]);
                    const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
                    __m128i result;
                    INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half);
                    _mm_store_si128((__m128i *)&dst[x], result);
                }
                // Scalar tail for the last w % 4 pixels.
                SIMD_EPILOGUE(x, w, 3)
                    dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
                dst = (quint32 *)(((uchar *) dst) + dbpl);
                src = (const quint32 *)(((const uchar *) src) + sbpl);
            }
        }
    } else {
        qt_blend_rgb32_on_rgb32(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
    }
}
101
// SourceOver composition for a single span of premultiplied ARGB32 pixels.
// const_alpha is 0..255 here (callers handle the fully-opaque 256 case
// separately, hence the assert).
void QT_FASTCALL comp_func_SourceOver_sse2(uint *destPixels, const uint *srcPixels, int length, uint const_alpha)
{
    Q_ASSERT(const_alpha < 256);

    const quint32 *src = (const quint32 *) srcPixels;
    quint32 *dst = (quint32 *) destPixels;

    const __m128i nullVector = _mm_set1_epi32(0);
    const __m128i half = _mm_set1_epi16(0x80);
    const __m128i one = _mm_set1_epi16(0xff);
    const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
    if (const_alpha == 255) {
        // Per-pixel alpha only.
        const __m128i alphaMask = _mm_set1_epi32(0xff000000);
        BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask);
    } else {
        // Per-pixel alpha modulated by the constant opacity.
        const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
        BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector);
    }
}
121
// Plus (additive) composition: dst = saturate(dst + src), optionally
// interpolated back towards dst by const_alpha. The vector path uses
// _mm_adds_epu8 (per-byte saturating add) on 4 pixels at a time.
void QT_FASTCALL comp_func_Plus_sse2(uint *dst, const uint *src, int length, uint const_alpha)
{
    int x = 0;

    if (const_alpha == 255) {
        // 1) Prologue: align destination on 16 bytes
        ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
            dst[x] = comp_func_Plus_one_pixel(dst[x], src[x]);

        // 2) composition with SSE2
        for (; x < length - 3; x += 4) {
            const __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]);
            const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);

            const __m128i result = _mm_adds_epu8(srcVector, dstVector);
            _mm_store_si128((__m128i *)&dst[x], result);
        }

        // 3) Epilogue: scalar tail for the last length % 4 pixels
        SIMD_EPILOGUE(x, length, 3)
            dst[x] = comp_func_Plus_one_pixel(dst[x], src[x]);
    } else {
        // Blend the saturated sum with the original destination:
        // dst = (dst + src) * ca + dst * (1 - ca)
        const int one_minus_const_alpha = 255 - const_alpha;
        const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
        const __m128i oneMinusConstAlpha =  _mm_set1_epi16(one_minus_const_alpha);

        // 1) Prologue: align destination on 16 bytes
        ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
            dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);

        const __m128i half = _mm_set1_epi16(0x80);
        const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
        // 2) composition with SSE2
        for (; x < length - 3; x += 4) {
            const __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]);
            const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);

            __m128i result = _mm_adds_epu8(srcVector, dstVector);
            INTERPOLATE_PIXEL_255_SSE2(result, result, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half)
            _mm_store_si128((__m128i *)&dst[x], result);
        }

        // 3) Epilogue:
        SIMD_EPILOGUE(x, length, 3)
            dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
    }
}
169
// Source composition: replace the destination with the source, optionally
// faded by const_alpha:
//   ca == 255: dst = src            (straight memcpy)
//   otherwise: dst = src * ca + dst * (255 - ca)
void QT_FASTCALL comp_func_Source_sse2(uint *dst, const uint *src, int length, uint const_alpha)
{
    if (const_alpha == 255) {
        ::memcpy(dst, src, length * sizeof(uint));
    } else {
        const int ialpha = 255 - const_alpha;

        int x = 0;

        // 1) prologue, align on 16 bytes
        ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
            dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], ialpha);

        // 2) interpolate pixels with SSE2, 4 per iteration
        const __m128i half = _mm_set1_epi16(0x80);
        const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
        const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
        const __m128i oneMinusConstAlpha =  _mm_set1_epi16(ialpha);
        for (; x < length - 3; x += 4) {
            const __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]);
            __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
            INTERPOLATE_PIXEL_255_SSE2(dstVector, srcVector, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half)
            _mm_store_si128((__m128i *)&dst[x], dstVector);
        }

        // 3) Epilogue: scalar tail
        SIMD_EPILOGUE(x, length, 3)
            dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], ialpha);
    }
}
200
201#ifndef __haswell__
202Q_NEVER_INLINE static
203void Q_DECL_VECTORCALL qt_memfillXX_aligned(void *dest, __m128i value128, quintptr bytecount)
204{
205 __m128i *dst128 = reinterpret_cast<__m128i *>(dest);
206 __m128i *end128 = reinterpret_cast<__m128i *>(static_cast<uchar *>(dest) + bytecount);
207
208 while (dst128 + 4 <= end128) {
209 _mm_store_si128(dst128 + 0, value128);
210 _mm_store_si128(dst128 + 1, value128);
211 _mm_store_si128(dst128 + 2, value128);
212 _mm_store_si128(dst128 + 3, value128);
213 dst128 += 4;
214 }
215
216 bytecount %= 4 * sizeof(__m128i);
217 switch (bytecount / sizeof(__m128i)) {
218 case 3: _mm_store_si128(dst128++, value128); Q_FALLTHROUGH();
219 case 2: _mm_store_si128(dst128++, value128); Q_FALLTHROUGH();
220 case 1: _mm_store_si128(dst128++, value128);
221 }
222}
223
// Fill count quint64 entries at dest with value, using aligned SSE2 stores.
// Handles up to one leading and one trailing element scalar so that the bulk
// fill operates on a 16-byte-aligned pointer and a 16-byte-multiple size.
void qt_memfill64_sse2(quint64 *dest, quint64 value, qsizetype count)
{
    quintptr misaligned = quintptr(dest) % sizeof(__m128i);
    if (misaligned && count) {
#if defined(Q_PROCESSOR_X86_32)
        // Before SSE came out, the alignment of the stack used to be only 4
        // bytes and some OS/ABIs (notably, code generated by MSVC) still only
        // align to that. In any case, we cannot count on the alignment of
        // quint64 to be 8 -- see QtPrivate::AlignOf_WorkaroundForI386Abi in
        // qglobal.h.
        //
        // If the pointer is not aligned to at least 8 bytes, then we'll never
        // in turn hit a multiple of 16 for the qt_memfillXX_aligned call
        // below.
        if (Q_UNLIKELY(misaligned % sizeof(quint64)))
            return qt_memfill_template(dest, value, count);
#endif

        // Write one element scalar to reach 16-byte alignment.
        *dest++ = value;
        --count;
    }

    // Make count even so count * 8 bytes is a multiple of 16.
    if (count % 2) {
        dest[count - 1] = value;
        --count;
    }

    qt_memfillXX_aligned(dest, _mm_set1_epi64x(value), count * sizeof(quint64));
}
253
// Fill count quint32 entries at dest with value, using aligned SSE2 stores.
// Small counts are handled scalar; otherwise up to 3 leading pixels align the
// pointer to 16 bytes and up to 3 trailing pixels cover the size remainder,
// since qt_memfillXX_aligned only writes whole 16-byte vectors.
void qt_memfill32_sse2(quint32 *dest, quint32 value, qsizetype count)
{
    if (count < 4) {
        // this simplifies the code below: the first switch can fall through
        // without checking the value of count
        switch (count) {
        case 3: *dest++ = value; Q_FALLTHROUGH();
        case 2: *dest++ = value; Q_FALLTHROUGH();
        case 1: *dest = value;
        }
        return;
    }

    // align is the byte offset within a 16-byte block: 0, 4, 8 or 12 for a
    // 4-byte-aligned pointer. Fall-through writes exactly the pixels needed
    // to reach the next 16-byte boundary (3, 2 or 1).
    const int align = (quintptr)(dest) & 0xf;
    switch (align) {
    case 4:  *dest++ = value; --count; Q_FALLTHROUGH();
    case 8:  *dest++ = value; --count; Q_FALLTHROUGH();
    case 12: *dest++ = value; --count;
    }

    // Pre-fill the 0..3 trailing pixels the vector fill below will skip.
    const int rest = count & 0x3;
    if (rest) {
        switch (rest) {
        case 3: dest[count - 3] = value; Q_FALLTHROUGH();
        case 2: dest[count - 2] = value; Q_FALLTHROUGH();
        case 1: dest[count - 1] = value;
        }
    }

    qt_memfillXX_aligned(dest, _mm_set1_epi32(value), count * sizeof(quint32));
}
285#endif // !__haswell__
286
// Solid-color Source composition over a span:
//   ca == 255: dst = color           (plain fill)
//   otherwise: dst = color * ca + dst * (255 - ca)
// The scalar and vector paths write through the same memory (destPixels/dst).
void QT_FASTCALL comp_func_solid_Source_sse2(uint *destPixels, int length, uint color, uint const_alpha)
{
    if (const_alpha == 255) {
        qt_memfill32(destPixels, color, length);
    } else {
        const quint32 ialpha = 255 - const_alpha;
        // Pre-multiply the color by the constant alpha once, outside the loop.
        color = BYTE_MUL(color, const_alpha);
        int x = 0;

        quint32 *dst = (quint32 *) destPixels;
        const __m128i colorVector = _mm_set1_epi32(color);
        const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
        const __m128i half = _mm_set1_epi16(0x80);
        const __m128i iAlphaVector = _mm_set1_epi16(ialpha);

        // Align destination on 16 bytes, handling leading pixels scalar.
        ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
            destPixels[x] = color + BYTE_MUL(destPixels[x], ialpha);

        for (; x < length-3; x += 4) {
            __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
            BYTE_MUL_SSE2(dstVector, dstVector, iAlphaVector, colorMask, half);
            dstVector = _mm_add_epi8(colorVector, dstVector);
            _mm_store_si128((__m128i *)&dst[x], dstVector);
        }
        // Scalar tail.
        SIMD_EPILOGUE(x, length, 3)
            destPixels[x] = color + BYTE_MUL(destPixels[x], ialpha);
    }
}
315
// Solid-color SourceOver composition over a span:
//   dst = color + dst * (255 - alpha(color))
// with color pre-multiplied by const_alpha when that is < 255.
// (const_alpha & qAlpha(color)) == 255 holds only when both are 255, i.e.
// the color is fully opaque and no fade is requested -> plain fill.
void QT_FASTCALL comp_func_solid_SourceOver_sse2(uint *destPixels, int length, uint color, uint const_alpha)
{
    if ((const_alpha & qAlpha(color)) == 255) {
        qt_memfill32(destPixels, color, length);
    } else {
        if (const_alpha != 255)
            color = BYTE_MUL(color, const_alpha);

        // 255 - alpha of the (possibly faded) color.
        const quint32 minusAlphaOfColor = qAlpha(~color);
        int x = 0;

        quint32 *dst = (quint32 *) destPixels;
        const __m128i colorVector = _mm_set1_epi32(color);
        const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
        const __m128i half = _mm_set1_epi16(0x80);
        const __m128i minusAlphaOfColorVector = _mm_set1_epi16(minusAlphaOfColor);

        // Align destination on 16 bytes, handling leading pixels scalar.
        ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
            destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);

        for (; x < length-3; x += 4) {
            __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
            BYTE_MUL_SSE2(dstVector, dstVector, minusAlphaOfColorVector, colorMask, half);
            dstVector = _mm_add_epi8(colorVector, dstVector);
            _mm_store_si128((__m128i *)&dst[x], dstVector);
        }
        // Scalar tail.
        SIMD_EPILOGUE(x, length, 3)
            destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
    }
}
346
// Blit a 1-bit-per-pixel monochrome bitmap as a solid 32-bit color into the
// raster buffer: set pixels get `color`, clear pixels are left untouched.
//
// Trick: for a source byte s, _mm_set1_epi8(s) & maskmaskN isolates one bit
// of s per byte lane (0x80,0x40,0x20,0x10 / 0x08..0x01, each repeated 4x for
// the 4 bytes of a pixel). Adding maskaddN (0x00,0x40,0x60,0x70 / 0x78..0x7f)
// turns a set bit into 0x80 in that byte and leaves clear bits below 0x80 --
// exactly the per-byte MSB condition _mm_maskmoveu_si128 uses to decide
// which bytes to store. mask1 covers pixels 0-3 of the byte, mask2 pixels 4-7.
void qt_bitmapblit32_sse2_base(QRasterBuffer *rasterBuffer, int x, int y,
                               quint32 color,
                               const uchar *src, int width, int height, int stride)
{
    quint32 *dest = reinterpret_cast<quint32*>(rasterBuffer->scanLine(y)) + x;
    const int destStride = rasterBuffer->stride<quint32>();

    const __m128i c128 = _mm_set1_epi32(color);
    const __m128i maskmask1 = _mm_set_epi32(0x10101010, 0x20202020,
                                            0x40404040, 0x80808080);
    const __m128i maskadd1 = _mm_set_epi32(0x70707070, 0x60606060,
                                           0x40404040, 0x00000000);

    if (width > 4) {
        // Wide path: 8 pixels (one source byte) per inner iteration.
        const __m128i maskmask2 = _mm_set_epi32(0x01010101, 0x02020202,
                                                0x04040404, 0x08080808);
        const __m128i maskadd2 = _mm_set_epi32(0x7f7f7f7f, 0x7e7e7e7e,
                                               0x7c7c7c7c, 0x78787878);
        while (--height >= 0) {
            for (int x = 0; x < width; x += 8) {
                const quint8 s = src[x >> 3];
                if (!s)
                    continue; // no bits set in this byte, nothing to write
                __m128i mask1 = _mm_set1_epi8(s);
                __m128i mask2 = mask1;

                mask1 = _mm_and_si128(mask1, maskmask1);
                mask1 = _mm_add_epi8(mask1, maskadd1);
                _mm_maskmoveu_si128(c128, mask1, (char*)(dest + x));
                mask2 = _mm_and_si128(mask2, maskmask2);
                mask2 = _mm_add_epi8(mask2, maskadd2);
                _mm_maskmoveu_si128(c128, mask2, (char*)(dest + x + 4));
            }
            dest += destStride;
            src += stride;
        }
    } else {
        // Narrow path: at most 4 pixels per scanline, one masked store.
        while (--height >= 0) {
            const quint8 s = *src;
            if (s) {
                __m128i mask1 = _mm_set1_epi8(s);
                mask1 = _mm_and_si128(mask1, maskmask1);
                mask1 = _mm_add_epi8(mask1, maskadd1);
                _mm_maskmoveu_si128(c128, mask1, (char*)(dest));
            }
            dest += destStride;
            src += stride;
        }
    }
}
397
// ARGB32 entry point: convert the 64-bit color to ARGB32 and delegate.
void qt_bitmapblit32_sse2(QRasterBuffer *rasterBuffer, int x, int y,
                          const QRgba64 &color,
                          const uchar *src, int width, int height, int stride)
{
    qt_bitmapblit32_sse2_base(rasterBuffer, x, y, color.toArgb32(), src, width, height, stride);
}
404
// RGBA8888 entry point: same as above but with the byte order swapped to
// RGBA before delegating.
void qt_bitmapblit8888_sse2(QRasterBuffer *rasterBuffer, int x, int y,
                            const QRgba64 &color,
                            const uchar *src, int width, int height, int stride)
{
    qt_bitmapblit32_sse2_base(rasterBuffer, x, y, ARGB2RGBA(color.toArgb32()), src, width, height, stride);
}
411
412void qt_bitmapblit16_sse2(QRasterBuffer *rasterBuffer, int x, int y,
413 const QRgba64 &color,
414 const uchar *src, int width, int height, int stride)
415{
416 const quint16 c = qConvertRgb32To16(color.toArgb32());
417 quint16 *dest = reinterpret_cast<quint16*>(rasterBuffer->scanLine(y)) + x;
418 const int destStride = rasterBuffer->stride<quint32>();
419
420 const __m128i c128 = _mm_set1_epi16(c);
421QT_WARNING_DISABLE_MSVC(4309) // truncation of constant value
422 const __m128i maskmask = _mm_set_epi16(0x0101, 0x0202, 0x0404, 0x0808,
423 0x1010, 0x2020, 0x4040, 0x8080);
424 const __m128i maskadd = _mm_set_epi16(0x7f7f, 0x7e7e, 0x7c7c, 0x7878,
425 0x7070, 0x6060, 0x4040, 0x0000);
426
427 while (--height >= 0) {
428 for (int x = 0; x < width; x += 8) {
429 const quint8 s = src[x >> 3];
430 if (!s)
431 continue;
432 __m128i mask = _mm_set1_epi8(s);
433 mask = _mm_and_si128(mask, maskmask);
434 mask = _mm_add_epi8(mask, maskadd);
435 _mm_maskmoveu_si128(c128, mask, (char*)(dest + x));
436 }
437 dest += destStride;
438 src += stride;
439 }
440}
441
// SSE2 backend for the generic SIMD radial-gradient fetcher
// (QRadialFetchSimd). Every type, union field and static member here is part
// of the interface the template expects, so names must not change.
class QSimdSse2
{
public:
    typedef __m128i Int32x4;
    typedef __m128 Float32x4;

    // Unions used by the template to move lane values between vector and
    // scalar code.
    union Vect_buffer_i { Int32x4 v; int i[4]; };
    union Vect_buffer_f { Float32x4 v; float f[4]; };

    // Broadcast a scalar to all four lanes. The double overload narrows to
    // float, matching the Float32x4 element type.
    static inline Float32x4 Q_DECL_VECTORCALL v_dup(float x) { return _mm_set1_ps(x); }
    static inline Float32x4 Q_DECL_VECTORCALL v_dup(double x) { return _mm_set1_ps(x); }
    static inline Int32x4 Q_DECL_VECTORCALL v_dup(int x) { return _mm_set1_epi32(x); }
    static inline Int32x4 Q_DECL_VECTORCALL v_dup(uint x) { return _mm_set1_epi32(x); }

    static inline Float32x4 Q_DECL_VECTORCALL v_add(Float32x4 a, Float32x4 b) { return _mm_add_ps(a, b); }
    static inline Int32x4 Q_DECL_VECTORCALL v_add(Int32x4 a, Int32x4 b) { return _mm_add_epi32(a, b); }

    static inline Float32x4 Q_DECL_VECTORCALL v_max(Float32x4 a, Float32x4 b) { return _mm_max_ps(a, b); }
    static inline Float32x4 Q_DECL_VECTORCALL v_min(Float32x4 a, Float32x4 b) { return _mm_min_ps(a, b); }
    // Per-16-bit-lane signed minimum (operates on the Int32x4 as 8x16 bits).
    static inline Int32x4 Q_DECL_VECTORCALL v_min_16(Int32x4 a, Int32x4 b) { return _mm_min_epi16(a, b); }

    static inline Int32x4 Q_DECL_VECTORCALL v_and(Int32x4 a, Int32x4 b) { return _mm_and_si128(a, b); }

    static inline Float32x4 Q_DECL_VECTORCALL v_sub(Float32x4 a, Float32x4 b) { return _mm_sub_ps(a, b); }
    static inline Int32x4 Q_DECL_VECTORCALL v_sub(Int32x4 a, Int32x4 b) { return _mm_sub_epi32(a, b); }

    static inline Float32x4 Q_DECL_VECTORCALL v_mul(Float32x4 a, Float32x4 b) { return _mm_mul_ps(a, b); }

    static inline Float32x4 Q_DECL_VECTORCALL v_sqrt(Float32x4 x) { return _mm_sqrt_ps(x); }

    // Truncating float -> int conversion.
    static inline Int32x4 Q_DECL_VECTORCALL v_toInt(Float32x4 x) { return _mm_cvttps_epi32(x); }

    // NOTE(review): implemented with cmpgt (strictly greater), not cmpge --
    // presumably intentional for the template's use; confirm against the
    // other QSimd backends before changing.
    static inline Int32x4 Q_DECL_VECTORCALL v_greaterOrEqual(Float32x4 a, Float32x4 b) { return _mm_castps_si128(_mm_cmpgt_ps(a, b)); }
};
476
// SSE2 radial-gradient fetch: instantiate the generic template with the
// QSimdSse2 backend above.
const uint * QT_FASTCALL qt_fetch_radial_gradient_sse2(uint *buffer, const Operator *op, const QSpanData *data,
                                                       int y, int x, int length)
{
    return qt_fetch_radial_gradient_template<QRadialFetchSimd<QSimdSse2>,uint>(buffer, op, data, y, x, length);
}
482
// Nearest-neighbor scaled SourceOver blend of a premultiplied ARGB32 image
// onto an ARGB32 destination, using 16.16 fixed-point source stepping and
// SSE2 for 4 destination pixels at a time. Only the fully-opaque case
// (const_alpha == 256) is vectorized here; anything else falls back to the
// generic implementation in qblendfunctions.cpp.
void qt_scale_image_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
                                          const uchar *srcPixels, int sbpl, int srch,
                                          const QRectF &targetRect,
                                          const QRectF &sourceRect,
                                          const QRect &clip,
                                          int const_alpha)
{
    if (const_alpha != 256) {
        // from qblendfunctions.cpp
        extern void qt_scale_image_argb32_on_argb32(uchar *destPixels, int dbpl,
                                                    const uchar *srcPixels, int sbpl, int srch,
                                                    const QRectF &targetRect,
                                                    const QRectF &sourceRect,
                                                    const QRect &clip,
                                                    int const_alpha);
        return qt_scale_image_argb32_on_argb32(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip, const_alpha);
    }

    // Source step per destination pixel, in 16.16 fixed point. Negative for
    // mirrored target rects.
    qreal sx = sourceRect.width() / (qreal)targetRect.width();
    qreal sy = sourceRect.height() / (qreal)targetRect.height();

    const int ix = 0x00010000 * sx;
    const int iy = 0x00010000 * sy;

    QRect tr = targetRect.normalized().toRect();
    tr = tr.intersected(clip);
    if (tr.isEmpty())
        return;
    const int tx1 = tr.left();
    const int ty1 = tr.top();
    int h = tr.height();
    int w = tr.width();

    quint32 basex;
    quint32 srcy;

    // Starting source coordinates (16.16), anchored at the appropriate edge
    // depending on flip direction; the +-1 biases the rounding inward.
    if (sx < 0) {
        int dstx = qFloor((tx1 + qreal(0.5) - targetRect.right()) * sx * 65536) + 1;
        basex = quint32(sourceRect.right() * 65536) + dstx;
    } else {
        int dstx = qCeil((tx1 + qreal(0.5) - targetRect.left()) * sx * 65536) - 1;
        basex = quint32(sourceRect.left() * 65536) + dstx;
    }
    if (sy < 0) {
        int dsty = qFloor((ty1 + qreal(0.5) - targetRect.bottom()) * sy * 65536) + 1;
        srcy = quint32(sourceRect.bottom() * 65536) + dsty;
    } else {
        int dsty = qCeil((ty1 + qreal(0.5) - targetRect.top()) * sy * 65536) - 1;
        srcy = quint32(sourceRect.top() * 65536) + dsty;
    }

    quint32 *dst = ((quint32 *) (destPixels + ty1 * dbpl)) + tx1;

    const __m128i nullVector = _mm_setzero_si128();
    const __m128i half = _mm_set1_epi16(0x80);
    const __m128i one = _mm_set1_epi16(0xff);
    const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
    const __m128i alphaMask = _mm_set1_epi32(0xff000000);
    const __m128i ixVector = _mm_set1_epi32(4*ix);

    // this bounds check here is required as floating point rounding above might in some cases lead to
    // w/h values that are one pixel too large, falling outside of the valid image area.
    const int ystart = srcy >> 16;
    if (ystart >= srch && iy < 0) {
        srcy += iy;
        --h;
    }
    const int xstart = basex >> 16;
    if (xstart >=  (int)(sbpl/sizeof(quint32)) && ix < 0) {
        basex += ix;
        --w;
    }
    int yend = (srcy + iy * (h - 1)) >> 16;
    if (yend < 0 || yend >= srch)
        --h;
    int xend = (basex + ix * (w - 1)) >> 16;
    if (xend < 0 || xend >= (int)(sbpl/sizeof(quint32)))
        --w;

    while (--h >= 0) {
        const uint *src = (const quint32 *) (srcPixels + (srcy >> 16) * sbpl);
        int srcx = basex;
        int x = 0;

        // Align destination on 16 bytes; leading pixels blended scalar.
        ALIGNMENT_PROLOGUE_16BYTES(dst, x, w) {
            uint s = src[srcx >> 16];
            dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            srcx += ix;
        }

        // Four source x coordinates in one vector. _mm_set_epi32 places its
        // first argument in the highest lane; the gather below uses the same
        // reversed order, so pixel 0 still maps to dst[x + 0].
        __m128i srcxVector = _mm_set_epi32(srcx, srcx + ix, srcx + ix + ix, srcx + ix + ix + ix);

        for (; x < (w - 3); x += 4) {
            // Odd epi16 index k extracts the high 16 bits of 32-bit lane k/2,
            // i.e. the integer part of the 16.16 coordinate.
            const int idx0 = _mm_extract_epi16(srcxVector, 1);
            const int idx1 = _mm_extract_epi16(srcxVector, 3);
            const int idx2 = _mm_extract_epi16(srcxVector, 5);
            const int idx3 = _mm_extract_epi16(srcxVector, 7);
            srcxVector = _mm_add_epi32(srcxVector, ixVector);

            const __m128i srcVector = _mm_set_epi32(src[idx0], src[idx1], src[idx2], src[idx3]);
            BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask);
        }

        // Scalar tail, recomputing the coordinate from basex.
        SIMD_EPILOGUE(x, w, 3) {
            uint s = src[(basex + x*ix) >> 16];
            dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
        }
        dst = (quint32 *)(((uchar *) dst) + dbpl);
        srcy += iy;
    }
}
594
595
596QT_END_NAMESPACE
597
598#endif // QT_COMPILER_SUPPORTS_SSE2