Qt SDK internal/contributor documentation view of qdrawhelper_neon.cpp. Note: these are NOT the official API docs; those are found at https://doc.qt.io/
1// Copyright (C) 2016 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3// Qt-Security score:significant reason:default
4
5#include <private/qdrawhelper_neon_p.h>
6#include <private/qblendfunctions_p.h>
7#include <private/qmath_p.h>
8#include <private/qpixellayout_p.h>
9
10#ifdef __ARM_NEON__
11
12#include <private/qpaintengine_raster_p.h>
13
14QT_BEGIN_NAMESPACE
15
// Fills 'count' 32-bit words at 'dest' with 'value'.
// The bulk is written 16 pixels per iteration (NEON intrinsics or inline
// asm, depending on compiler/architecture); the remaining count % 16
// pixels fall through the Duff's-device style switch at the end.
void qt_memfill32(quint32 *dest, quint32 value, qsizetype count)
{
    const int epilogueSize = count % 16;
#if defined(Q_CC_GHS) || defined(Q_CC_MSVC)
    // inline assembler free version:
    if (count >= 16) {
        quint32 *const neonEnd = dest + count - epilogueSize;
        const uint32x4_t valueVector1 = vdupq_n_u32(value);
        const uint32x4x4_t valueVector4 = { valueVector1, valueVector1, valueVector1, valueVector1 };
        do {
            // One 4x4-register store = 16 pixels (64 bytes) per iteration.
            vst4q_u32(dest, valueVector4);
            dest += 16;
        } while (dest != neonEnd);
    }
#elif !defined(Q_PROCESSOR_ARM_64)
    // 32-bit ARM: pin the fill value to q0/q1 so the hand-written vst2.32
    // stores (with post-increment writeback on DST) can reference d0-d3.
    if (count >= 16) {
        quint32 *const neonEnd = dest + count - epilogueSize;
        register uint32x4_t valueVector1 asm ("q0") = vdupq_n_u32(value);
        register uint32x4_t valueVector2 asm ("q1") = valueVector1;
        while (dest != neonEnd) {
            asm volatile (
                "vst2.32 { d0, d1, d2, d3 }, [%[DST]] !\n\t"
                "vst2.32 { d0, d1, d2, d3 }, [%[DST]] !\n\t"
                : [DST]"+r" (dest)
                : [VALUE1]"w"(valueVector1), [VALUE2]"w"(valueVector2)
                : "memory"
            );
        }
    }
#else
    // AArch64: same scheme with v0/v1 and st2 (two stores of 8 pixels each,
    // 32 bytes per store with post-increment).
    if (count >= 16) {
        quint32 *const neonEnd = dest + count - epilogueSize;
        register uint32x4_t valueVector1 asm ("v0") = vdupq_n_u32(value);
        register uint32x4_t valueVector2 asm ("v1") = valueVector1;
        while (dest != neonEnd) {
            asm volatile (
                "st2 { v0.4s, v1.4s }, [%[DST]], #32 \n\t"
                "st2 { v0.4s, v1.4s }, [%[DST]], #32 \n\t"
                : [DST]"+r" (dest)
                : [VALUE1]"w"(valueVector1), [VALUE2]"w"(valueVector2)
                : "memory"
            );
        }
    }
#endif

    // Scalar epilogue: write the remaining 0..15 pixels by falling through.
    switch (epilogueSize)
    {
    case 15: *dest++ = value; Q_FALLTHROUGH();
    case 14: *dest++ = value; Q_FALLTHROUGH();
    case 13: *dest++ = value; Q_FALLTHROUGH();
    case 12: *dest++ = value; Q_FALLTHROUGH();
    case 11: *dest++ = value; Q_FALLTHROUGH();
    case 10: *dest++ = value; Q_FALLTHROUGH();
    case 9: *dest++ = value; Q_FALLTHROUGH();
    case 8: *dest++ = value; Q_FALLTHROUGH();
    case 7: *dest++ = value; Q_FALLTHROUGH();
    case 6: *dest++ = value; Q_FALLTHROUGH();
    case 5: *dest++ = value; Q_FALLTHROUGH();
    case 4: *dest++ = value; Q_FALLTHROUGH();
    case 3: *dest++ = value; Q_FALLTHROUGH();
    case 2: *dest++ = value; Q_FALLTHROUGH();
    case 1: *dest++ = value;
    }
}
81
// Rounded division by 255 on eight u16 lanes:
//   result = (x + (x >> 8) + 0x80) >> 8
// 'half' must hold 0x80 in every lane.
static inline uint16x8_t qvdiv_255_u16(uint16x8_t x, uint16x8_t half)
{
    const uint16x8_t rounded = vaddq_u16(x, half);      // x + 0x80
    const uint16x8_t correction = vshrq_n_u16(x, 8);    // x >> 8
    return vshrq_n_u16(vaddq_u16(rounded, correction), 8);
}
92
// Per-lane t = qRound(x * alpha / 255.0) on eight u16 lanes.
// 'half' must hold 0x80 in every lane (rounding term for qvdiv_255_u16).
static inline uint16x8_t qvbyte_mul_u16(uint16x8_t x, uint16x8_t alpha, uint16x8_t half)
{
    return qvdiv_255_u16(vmulq_u16(x, alpha), half);
}
100
// Per-lane (x * a + y * b) / 255 with rounding, on eight u16 lanes.
// The accumulation uses a fused multiply-accumulate; u16 wraparound
// arithmetic makes it equivalent to two vmul + vadd.
static inline uint16x8_t qvinterpolate_pixel_255(uint16x8_t x, uint16x8_t a, uint16x8_t y, uint16x8_t b, uint16x8_t half)
{
    const uint16x8_t acc = vmlaq_u16(vmulq_u16(x, a), y, b);    // x*a + y*b
    return qvdiv_255_u16(acc, half);
}
110
// Premultiplied SourceOver on two pixels held as eight u16 lanes:
//   result = src + dst * (255 - src.alpha) / 255
// 'half' holds 0x80 per lane, 'full' holds 0xff per lane. The alpha of
// each pixel sits in lane 3 of its 4-lane half.
static inline uint16x8_t qvsource_over_u16(uint16x8_t src16, uint16x8_t dst16, uint16x8_t half, uint16x8_t full)
{
    // Broadcast each pixel's alpha lane across its four channel lanes.
    const uint16x4_t alphaLo = vdup_lane_u16(vget_low_u16(src16), 3);
    const uint16x4_t alphaHi = vdup_lane_u16(vget_high_u16(src16), 3);

    // Per-lane 255 - alpha.
    const uint16x8_t invAlpha = vsubq_u16(full, vcombine_u16(alphaLo, alphaHi));

    return vaddq_u16(src16, qvbyte_mul_u16(dst16, invAlpha, half));
}
120
121#if defined(ENABLE_PIXMAN_DRAWHELPERS)
122extern "C" void
123pixman_composite_over_8888_0565_asm_neon (int32_t w,
124 int32_t h,
125 uint16_t *dst,
126 int32_t dst_stride,
127 uint32_t *src,
128 int32_t src_stride);
129
130extern "C" void
131pixman_composite_over_8888_8888_asm_neon (int32_t w,
132 int32_t h,
133 uint32_t *dst,
134 int32_t dst_stride,
135 uint32_t *src,
136 int32_t src_stride);
137
138extern "C" void
139pixman_composite_src_0565_8888_asm_neon (int32_t w,
140 int32_t h,
141 uint32_t *dst,
142 int32_t dst_stride,
143 uint16_t *src,
144 int32_t src_stride);
145
146extern "C" void
147pixman_composite_over_n_8_0565_asm_neon (int32_t w,
148 int32_t h,
149 uint16_t *dst,
150 int32_t dst_stride,
151 uint32_t src,
152 int32_t unused,
153 uint8_t *mask,
154 int32_t mask_stride);
155
156extern "C" void
157pixman_composite_scanline_over_asm_neon (int32_t w,
158 const uint32_t *dst,
159 const uint32_t *src);
160
161extern "C" void
162pixman_composite_src_0565_0565_asm_neon (int32_t w,
163 int32_t h,
164 uint16_t *dst,
165 int32_t dst_stride,
166 uint16_t *src,
167 int32_t src_stride);
168// qblendfunctions.cpp
169void qt_blend_argb32_on_rgb16_const_alpha(uchar *destPixels, int dbpl,
170 const uchar *srcPixels, int sbpl,
171 int w, int h,
172 int const_alpha);
173
// Blits/blends an RGB16 (r5g6b5) source onto an ARGB32 destination.
// dbpl/sbpl arrive as bytes-per-line and are converted to pixel strides.
// const_alpha == 256 means fully opaque and takes the pixman asm path;
// otherwise each pixel is converted to 32 bit and interpolated in C.
void qt_blend_rgb16_on_argb32_neon(uchar *destPixels, int dbpl,
                                   const uchar *srcPixels, int sbpl,
                                   int w, int h,
                                   int const_alpha)
{
    dbpl /= 4;   // bytes -> quint32 pixels
    sbpl /= 2;   // bytes -> quint16 pixels

    quint32 *dst = (quint32 *) destPixels;
    quint16 *src = (quint16 *) srcPixels;

    if (const_alpha != 256) {
        // Map const_alpha from [0,256] to [0,255] and blend per pixel.
        quint8 a = (255 * const_alpha) >> 8;
        quint8 ia = 255 - a;

        while (--h >= 0) {
            for (int x=0; x<w; ++x)
                dst[x] = INTERPOLATE_PIXEL_255(qConvertRgb16To32(src[x]), a, dst[x], ia);
            dst += dbpl;
            src += sbpl;
        }
        return;
    }

    // Opaque: pixman's NEON conversion routine does the straight copy.
    pixman_composite_src_0565_8888_asm_neon(w, h, dst, dbpl, src, sbpl);
}
200
201// qblendfunctions.cpp
202void qt_blend_rgb16_on_rgb16(uchar *dst, int dbpl,
203 const uchar *src, int sbpl,
204 int w, int h,
205 int const_alpha);
206
207
// Copies N quint16 pixels from src to dst, two pixels at a time via
// quint32 stores (blockBlit16 guarantees 4-byte alignment of dst on this
// path), handling an odd trailing pixel separately.
template <int N>
static inline void scanLineBlit16(quint16 *dst, quint16 *src, int dstride)
{
    if (N >= 2) {
        ((quint32 *)dst)[0] = ((quint32 *)src)[0];
        // Prefetch the next destination scanline for writing (rw=1) with
        // low temporal locality (locality=0).
        __builtin_prefetch(dst + dstride, 1, 0);
    }
    for (int i = 1; i < N/2; ++i)
        ((quint32 *)dst)[i] = ((quint32 *)src)[i];
    if (N & 1)
        dst[N-1] = src[N-1];    // odd trailing pixel
}
220
// Blits a Width x h block of quint16 pixels. If dst is only 2-byte
// aligned, one leading pixel per scanline is copied to reach 4-byte
// alignment before handing off to the quint32-based scanline copy.
template <int Width>
static inline void blockBlit16(quint16 *dst, quint16 *src, int dstride, int sstride, int h)
{
    // Union avoids a cast to inspect the pointer's alignment bits.
    union {
        quintptr address;
        quint16 *pointer;
    } u;

    u.pointer = dst;

    if (u.address & 2) {
        while (--h >= 0) {
            // align dst
            dst[0] = src[0];
            if (Width > 1)
                scanLineBlit16<Width-1>(dst + 1, src + 1, dstride);
            dst += dstride;
            src += sstride;
        }
    } else {
        while (--h >= 0) {
            scanLineBlit16<Width>(dst, src, dstride);

            dst += dstride;
            src += sstride;
        }
    }
}
249
// RGB16-on-RGB16 blit. Narrow opaque blits (w < 150) dispatch to a
// width-specialized template copy; wide or semi-transparent blits fall
// back to the generic C implementation, and anything left goes to the
// pixman asm copy.
void qt_blend_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    // testing show that the default memcpy is faster for widths 150 and up
    if (const_alpha != 256 || w >= 150) {
        qt_blend_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
        return;
    }

    int dstride = dbpl / 2;   // bytes -> quint16 pixels
    int sstride = sbpl / 2;

    quint16 *dst = (quint16 *) destPixels;
    quint16 *src = (quint16 *) srcPixels;

    // Widths 1..15 get a fully unrolled, compile-time-specialized blit.
    switch (w) {
#define BLOCKBLIT(n) case n: blockBlit16<n>(dst, src, dstride, sstride, h); return;
    BLOCKBLIT(1);
    BLOCKBLIT(2);
    BLOCKBLIT(3);
    BLOCKBLIT(4);
    BLOCKBLIT(5);
    BLOCKBLIT(6);
    BLOCKBLIT(7);
    BLOCKBLIT(8);
    BLOCKBLIT(9);
    BLOCKBLIT(10);
    BLOCKBLIT(11);
    BLOCKBLIT(12);
    BLOCKBLIT(13);
    BLOCKBLIT(14);
    BLOCKBLIT(15);
#undef BLOCKBLIT
    default:
        break;
    }

    // Widths 16..149: pixman's NEON rgb16 copy.
    pixman_composite_src_0565_0565_asm_neon (w, h, dst, dstride, src, sstride);
}
291
292extern "C" void blend_8_pixels_argb32_on_rgb16_neon(quint16 *dst, const quint32 *src, int const_alpha);
293
// Blends an ARGB32 source onto an RGB16 destination. The opaque case
// (const_alpha == 256) uses pixman's asm routine; otherwise pixels are
// processed 8 at a time with blend_8_pixels_argb32_on_rgb16_neon, and a
// partial tail is staged through small on-stack buffers so the 8-pixel
// routine never reads or writes past the row.
void qt_blend_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                   const uchar *srcPixels, int sbpl,
                                   int w, int h,
                                   int const_alpha)
{
    quint16 *dst = (quint16 *) destPixels;
    quint32 *src = (quint32 *) srcPixels;

    if (const_alpha != 256) {
        for (int y=0; y<h; ++y) {
            int i = 0;
            for (; i < w-7; i += 8)
                blend_8_pixels_argb32_on_rgb16_neon(&dst[i], &src[i], const_alpha);

            if (i < w) {
                // Fewer than 8 pixels remain: blend via temporaries.
                int tail = w - i;

                quint16 dstBuffer[8];
                quint32 srcBuffer[8];

                for (int j = 0; j < tail; ++j) {
                    dstBuffer[j] = dst[i + j];
                    srcBuffer[j] = src[i + j];
                }

                blend_8_pixels_argb32_on_rgb16_neon(dstBuffer, srcBuffer, const_alpha);

                // Copy back only the valid tail pixels.
                for (int j = 0; j < tail; ++j)
                    dst[i + j] = dstBuffer[j];
            }

            // Advance by bytes-per-line, not pixels.
            dst = (quint16 *)(((uchar *) dst) + dbpl);
            src = (quint32 *)(((uchar *) src) + sbpl);
        }
        return;
    }

    pixman_composite_over_8888_0565_asm_neon(w, h, dst, dbpl / 2, src, sbpl / 4);
}
333#endif
334
// Single-scanline ARGB32 SourceOver helper. Maps a [0,255] const_alpha
// onto the [0,256] convention used by qt_blend_argb32_on_argb32_neon
// (255 -> 256 opaque fast path).
void qt_blend_argb32_on_argb32_scanline_neon(uint *dest, const uint *src, int length, uint const_alpha)
{
    if (const_alpha == 255) {
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
        pixman_composite_scanline_over_asm_neon(length, dest, src);
#else
        // Treat the scanline as a length x 1 image; stride is irrelevant.
        qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, 256);
#endif
    } else {
        qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, (const_alpha * 256) / 255);
    }
}
347
// SourceOver-blends a premultiplied ARGB32 image onto another.
// const_alpha is in [0,256]: 256 = opaque source, 0 = no-op. The NEON
// path widens 8-bit channels to 16 bits, blends four pixels per
// iteration, and skips groups of four fully transparent source pixels.
void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
                                    const uchar *srcPixels, int sbpl,
                                    int w, int h,
                                    int const_alpha)
{
    const uint *src = (const uint *) srcPixels;
    uint *dst = (uint *) destPixels;
    uint16x8_t half = vdupq_n_u16(0x80);    // rounding term for /255
    uint16x8_t full = vdupq_n_u16(0xff);    // for 255 - alpha
    if (const_alpha == 256) {
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
        pixman_composite_over_8888_8888_asm_neon(w, h, (uint32_t *)destPixels, dbpl / 4, (uint32_t *)srcPixels, sbpl / 4);
#else
        for (int y=0; y<h; ++y) {
            int x = 0;
            // Vector body: 4 pixels per iteration.
            for (; x < w-3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    // Split into 8-bit channel vectors and widen to u16
                    // so the blend math cannot overflow.
                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t result16_low = qvsource_over_u16(src16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(src16_high, dst16_high, half, full);

                    // Narrow back to 8-bit channels and store 4 pixels.
                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            // Scalar tail: last w % 4 pixels.
            for (; x<w; ++x) {
                uint s = src[x];
                if (s >= 0xff000000)
                    dst[x] = s;     // opaque source replaces destination
                else if (s != 0)
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
#endif
    } else if (const_alpha != 0) {
        // Scale the source by const_alpha (mapped into [0,255]) before
        // the SourceOver blend.
        const_alpha = (const_alpha * 255) >> 8;
        uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w-3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    // src * const_alpha / 255, then normal SourceOver.
                    const uint16x8_t srcalpha16_low = qvbyte_mul_u16(src16_low, const_alpha16, half);
                    const uint16x8_t srcalpha16_high = qvbyte_mul_u16(src16_high, const_alpha16, half);

                    const uint16x8_t result16_low = qvsource_over_u16(srcalpha16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(srcalpha16_high, dst16_high, half, full);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            for (; x<w; ++x) {
                uint s = src[x];
                if (s != 0) {
                    s = BYTE_MUL(s, const_alpha);
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
                }
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    }
}
452
453// qblendfunctions.cpp
454void qt_blend_rgb32_on_rgb32(uchar *destPixels, int dbpl,
455 const uchar *srcPixels, int sbpl,
456 int w, int h,
457 int const_alpha);
458
// Blends an opaque RGB32 source onto an RGB32 destination with a constant
// alpha. const_alpha is in [0,256]: 256 dispatches to the generic opaque
// copy, 0 is a no-op; otherwise dst = src*a/255 + dst*(255-a)/255 with
// a = const_alpha mapped into [0,255]. Four pixels per NEON iteration
// plus a scalar tail.
void qt_blend_rgb32_on_rgb32_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    if (const_alpha != 256) {
        if (const_alpha != 0) {
            const uint *src = (const uint *) srcPixels;
            uint *dst = (uint *) destPixels;
            uint16x8_t half = vdupq_n_u16(0x80);    // rounding term for /255
            const_alpha = (const_alpha * 255) >> 8; // [0,256] -> [0,255]
            int one_minus_const_alpha = 255 - const_alpha;
            uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
            // Reuse the scalar complement rather than recomputing it.
            uint16x8_t one_minus_const_alpha16 = vdupq_n_u16(one_minus_const_alpha);
            for (int y = 0; y < h; ++y) {
                int x = 0;
                // Vector body: 4 pixels per iteration, channels widened
                // to u16 so the interpolation cannot overflow.
                for (; x < w-3; x += 4) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t result16_low = qvinterpolate_pixel_255(src16_low, const_alpha16, dst16_low, one_minus_const_alpha16, half);
                    const uint16x8_t result16_high = qvinterpolate_pixel_255(src16_high, const_alpha16, dst16_high, one_minus_const_alpha16, half);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
                // Scalar tail: last w % 4 pixels.
                for (; x<w; ++x) {
                    dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
                }
                dst = (quint32 *)(((uchar *) dst) + dbpl);
                src = (const quint32 *)(((const uchar *) src) + sbpl);
            }
        }
    } else {
        qt_blend_rgb32_on_rgb32(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
    }
}
513
514#if defined(ENABLE_PIXMAN_DRAWHELPERS)
515extern void qt_alphamapblit_quint16(QRasterBuffer *rasterBuffer,
516 int x, int y, const QRgba64 &color,
517 const uchar *map,
518 int mapWidth, int mapHeight, int mapStride,
519 const QClipData *clip, bool useGammaCorrection);
520
// Draws an 8-bit alpha map with a solid color onto an RGB16 raster
// buffer via pixman's NEON routine. Clipped or gamma-corrected blits
// fall back to the generic C implementation.
void qt_alphamapblit_quint16_neon(QRasterBuffer *rasterBuffer,
                                  int x, int y, const QRgba64 &color,
                                  const uchar *bitmap,
                                  int mapWidth, int mapHeight, int mapStride,
                                  const QClipData *clip, bool useGammaCorrection)
{
    if (clip || useGammaCorrection) {
        // The asm routine handles neither clipping nor gamma correction.
        qt_alphamapblit_quint16(rasterBuffer, x, y, color, bitmap, mapWidth, mapHeight, mapStride, clip, useGammaCorrection);
        return;
    }

    quint16 *dest = reinterpret_cast<quint16*>(rasterBuffer->scanLine(y)) + x;
    const int destStride = rasterBuffer->bytesPerLine() / sizeof(quint16);

    // pixman's C prototype takes a non-const mask pointer; it does not
    // modify the mask.
    uchar *mask = const_cast<uchar *>(bitmap);
    const uint c = color.toArgb32();

    pixman_composite_over_n_8_0565_asm_neon(mapWidth, mapHeight, dest, destStride, c, 0, mask, mapStride);
}
540
541extern "C" void blend_8_pixels_rgb16_on_rgb16_neon(quint16 *dst, const quint16 *src, int const_alpha);
542
// Pixel-writer functor used by the generic scale/transform templates.
// Queues up to eight SRC pixels, then blends them onto the RGB16
// destination in one call to the supplied NEON 8-pixel blend routine.
template <typename SRC, typename BlendFunc>
struct Blend_on_RGB16_SourceAndConstAlpha_Neon {
    Blend_on_RGB16_SourceAndConstAlpha_Neon(BlendFunc blender, int const_alpha)
        : m_index(0)
        , m_blender(blender)
        , m_const_alpha(const_alpha)
    {
    }

    // Queues one source pixel; 'dst' points at the destination pixel for
    // this write. When 8 pixels are queued, blends them onto the 8
    // destination pixels ending at 'dst' (hence dst - 7).
    inline void write(quint16 *dst, quint32 src)
    {
        srcBuffer[m_index++] = src;

        if (m_index == 8) {
            m_blender(dst - 7, srcBuffer, m_const_alpha);
            m_index = 0;
        }
    }

    // Flushes a partially filled buffer: stages the m_index destination
    // pixels preceding 'dst' in a temporary, runs the 8-pixel blend
    // there, and copies back only the valid results.
    inline void flush(quint16 *dst)
    {
        if (m_index > 0) {
            quint16 dstBuffer[8];
            for (int i = 0; i < m_index; ++i)
                dstBuffer[i] = dst[i - m_index];

            m_blender(dstBuffer, srcBuffer, m_const_alpha);

            for (int i = 0; i < m_index; ++i)
                dst[i - m_index] = dstBuffer[i];

            m_index = 0;
        }
    }

    SRC srcBuffer[8];      // pending source pixels

    int m_index;           // number of pixels currently queued (0..8)
    BlendFunc m_blender;   // NEON 8-pixel blend routine
    int m_const_alpha;
};
584
// Factory helper: deduces BlendFunc from the argument so call sites only
// need to spell out the SRC pixel type.
template <typename SRC, typename BlendFunc>
Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>
Blend_on_RGB16_SourceAndConstAlpha_Neon_create(BlendFunc blender, int const_alpha)
{
    return Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>(blender, const_alpha);
}
591
// Scaled blit of an ARGB32 source onto an RGB16 destination, driven by
// the generic 16-bit scaler with the NEON 8-pixel argb32-on-rgb16
// blender as the pixel writer. const_alpha == 0 is a no-op.
void qt_scale_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                         const uchar *srcPixels, int sbpl, int srch,
                                         const QRectF &targetRect,
                                         const QRectF &sourceRect,
                                         const QRect &clip,
                                         int const_alpha)
{
    if (const_alpha == 0)
        return;

    qt_scale_image_16bit<quint32>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
                                  Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
}
605
606void qt_scale_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
607 const uchar *srcPixels, int sbpl, int srch,
608 const QRectF &targetRect,
609 const QRectF &sourceRect,
610 const QRect &clip,
611 int const_alpha);
612
// Scaled RGB16-on-RGB16 blit. Opaque scales (const_alpha == 256) use the
// generic C path; const_alpha == 0 is a no-op; anything in between goes
// through the NEON 8-pixel rgb16-on-rgb16 blender.
void qt_scale_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                        const uchar *srcPixels, int sbpl, int srch,
                                        const QRectF &targetRect,
                                        const QRectF &sourceRect,
                                        const QRect &clip,
                                        int const_alpha)
{
    if (const_alpha == 0)
        return;

    if (const_alpha == 256) {
        qt_scale_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip, const_alpha);
        return;
    }

    qt_scale_image_16bit<quint16>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
                                  Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
}
631
632extern void qt_transform_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
633 const uchar *srcPixels, int sbpl,
634 const QRectF &targetRect,
635 const QRectF &sourceRect,
636 const QRect &clip,
637 const QTransform &targetRectTransform,
638 int const_alpha);
639
// Transformed RGB16-on-RGB16 blit. Opaque transforms take the generic C
// path; const_alpha == 0 is a no-op; partial alpha runs the generic
// transformer with the NEON 8-pixel rgb16 blender as pixel writer.
void qt_transform_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                            const uchar *srcPixels, int sbpl,
                                            const QRectF &targetRect,
                                            const QRectF &sourceRect,
                                            const QRect &clip,
                                            const QTransform &targetRectTransform,
                                            int const_alpha)
{
    if (const_alpha == 0)
        return;

    if (const_alpha == 256) {
        qt_transform_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, targetRect, sourceRect, clip, targetRectTransform, const_alpha);
        return;
    }

    qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
                       reinterpret_cast<const quint16 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
                       Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
}
660
// Transformed blit of an ARGB32 source onto an RGB16 destination; runs
// the generic transformer with the NEON 8-pixel argb32-on-rgb16 blender
// as pixel writer. const_alpha == 0 is a no-op.
void qt_transform_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                             const uchar *srcPixels, int sbpl,
                                             const QRectF &targetRect,
                                             const QRectF &sourceRect,
                                             const QRect &clip,
                                             const QTransform &targetRectTransform,
                                             int const_alpha)
{
    if (const_alpha == 0)
        return;

    qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
                       reinterpret_cast<const quint32 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
                       Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
}
676
// Converts exactly 8 r5g6b5 pixels at 'src' to fully opaque ARGB32 at
// 'dst' (32-bit ARM inline asm). Both buffers must hold 8 pixels.
static inline void convert_8_pixels_rgb16_to_argb32(quint32 *dst, const quint16 *src)
{
    asm volatile (
        "vld1.16 { d0, d1 }, [%[SRC]]\n\t"

        /* convert 8 r5g6b5 pixel data from {d0, d1} to planar 8-bit format
           and put data into d4 - red, d3 - green, d2 - blue */
        "vshrn.u16 d4, q0, #8\n\t"
        "vshrn.u16 d3, q0, #3\n\t"
        "vsli.u16 q0, q0, #5\n\t"
        "vsri.u8 d4, d4, #5\n\t"
        "vsri.u8 d3, d3, #6\n\t"
        "vshrn.u16 d2, q0, #2\n\t"

        /* fill d5 - alpha with 0xff */
        "mov r2, #255\n\t"
        "vdup.8 d5, r2\n\t"

        "vst4.8 { d2, d3, d4, d5 }, [%[DST]]"
        : : [DST]"r" (dst), [SRC]"r" (src)
        : "memory", "r2", "d0", "d1", "d2", "d3", "d4", "d5"
    );
}
700
// Raster-engine destination fetch: reads 'length' RGB16 pixels at (x, y)
// and expands them to ARGB32 into 'buffer', 8 pixels per asm call. A
// tail shorter than 8 is staged through on-stack buffers so the asm
// routine never reads or writes out of bounds.
uint * QT_FASTCALL qt_destFetchRGB16_neon(uint *buffer, QRasterBuffer *rasterBuffer, int x, int y, int length)
{
    const ushort *data = (const ushort *)rasterBuffer->scanLine(y) + x;

    int i = 0;
    for (; i < length - 7; i += 8)
        convert_8_pixels_rgb16_to_argb32(&buffer[i], &data[i]);

    if (i < length) {
        quint16 srcBuffer[8];
        quint32 dstBuffer[8];

        int tail = length - i;
        for (int j = 0; j < tail; ++j)
            srcBuffer[j] = data[i + j];

        convert_8_pixels_rgb16_to_argb32(dstBuffer, srcBuffer);

        // Copy back only the valid tail pixels.
        for (int j = 0; j < tail; ++j)
            buffer[i + j] = dstBuffer[j];
    }

    return buffer;
}
725
// Converts exactly 8 ARGB32 pixels at 'src' to r5g6b5 at 'dst', dropping
// alpha (32-bit ARM inline asm). Both buffers must hold 8 pixels.
static inline void convert_8_pixels_argb32_to_rgb16(quint16 *dst, const quint32 *src)
{
    asm volatile (
        "vld4.8 { d0, d1, d2, d3 }, [%[SRC]]\n\t"

        /* convert to r5g6b5 and store it into {d28, d29} */
        "vshll.u8 q14, d2, #8\n\t"
        "vshll.u8 q8, d1, #8\n\t"
        "vshll.u8 q9, d0, #8\n\t"
        "vsri.u16 q14, q8, #5\n\t"
        "vsri.u16 q14, q9, #11\n\t"

        "vst1.16 { d28, d29 }, [%[DST]]"
        : : [DST]"r" (dst), [SRC]"r" (src)
        : "memory", "d0", "d1", "d2", "d3", "d16", "d17", "d18", "d19", "d28", "d29"
    );
}
743
// Raster-engine destination store: converts 'length' ARGB32 pixels from
// 'buffer' to RGB16 at (x, y), 8 pixels per asm call, with an on-stack
// staging area for a tail shorter than 8 pixels.
void QT_FASTCALL qt_destStoreRGB16_neon(QRasterBuffer *rasterBuffer, int x, int y, const uint *buffer, int length)
{
    quint16 *data = (quint16*)rasterBuffer->scanLine(y) + x;

    int i = 0;
    for (; i < length - 7; i += 8)
        convert_8_pixels_argb32_to_rgb16(&data[i], &buffer[i]);

    if (i < length) {
        quint32 srcBuffer[8];
        quint16 dstBuffer[8];

        int tail = length - i;
        for (int j = 0; j < tail; ++j)
            srcBuffer[j] = buffer[i + j];

        convert_8_pixels_argb32_to_rgb16(dstBuffer, srcBuffer);

        // Write back only the valid tail pixels.
        for (int j = 0; j < tail; ++j)
            data[i + j] = dstBuffer[j];
    }
}
766#endif
767
// Solid-color SourceOver composition over 'length' ARGB32 pixels.
// A fully opaque color (and full const_alpha) degenerates to a memfill;
// otherwise dst = color + dst * (255 - color.alpha) / 255, 4 pixels per
// NEON iteration with a scalar epilogue.
void QT_FASTCALL comp_func_solid_SourceOver_neon(uint *destPixels, int length, uint color, uint const_alpha)
{
    if ((const_alpha & qAlpha(color)) == 255) {
        // Opaque color at full alpha: plain fill.
        qt_memfill32(destPixels, color, length);
    } else {
        if (const_alpha != 255)
            color = BYTE_MUL(color, const_alpha);

        // 255 - alpha of the (possibly attenuated) color.
        const quint32 minusAlphaOfColor = qAlpha(~color);
        int x = 0;

        uint32_t *dst = (uint32_t *) destPixels;
        const uint32x4_t colorVector = vdupq_n_u32(color);
        uint16x8_t half = vdupq_n_u16(0x80);    // rounding term for /255
        const uint16x8_t minusAlphaOfColorVector = vdupq_n_u16(minusAlphaOfColor);

        for (; x < length-3; x += 4) {
            uint32x4_t dstVector = vld1q_u32(&dst[x]);

            // Widen the destination channels to u16, scale by
            // (255 - alpha) / 255, narrow back, then add the color.
            const uint8x16_t dst8 = vreinterpretq_u8_u32(dstVector);

            const uint8x8_t dst8_low = vget_low_u8(dst8);
            const uint8x8_t dst8_high = vget_high_u8(dst8);

            const uint16x8_t dst16_low = vmovl_u8(dst8_low);
            const uint16x8_t dst16_high = vmovl_u8(dst8_high);

            const uint16x8_t result16_low = qvbyte_mul_u16(dst16_low, minusAlphaOfColorVector, half);
            const uint16x8_t result16_high = qvbyte_mul_u16(dst16_high, minusAlphaOfColorVector, half);

            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

            uint32x4_t blendedPixels = vcombine_u32(result32_low, result32_high);
            uint32x4_t colorPlusBlendedPixels = vaddq_u32(colorVector, blendedPixels);
            vst1q_u32(&dst[x], colorPlusBlendedPixels);
        }

        // Scalar epilogue for the last length % 4 pixels.
        SIMD_EPILOGUE(x, length, 3)
            destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
    }
}
810
// Plus (saturating additive) composition of 'src' onto 'dst' over
// 'length' ARGB32 pixels. Full const_alpha uses a pure saturating byte
// add; partial alpha interpolates the saturated sum with the original
// destination. 16 bytes (4 pixels) per NEON iteration.
void QT_FASTCALL comp_func_Plus_neon(uint *dst, const uint *src, int length, uint const_alpha)
{
    if (const_alpha == 255) {
        uint *const end = dst + length;
        uint *const neonEnd = end - 3;

        while (dst < neonEnd) {
            // Saturating per-byte add of 4 pixels.
            uint8x16_t vs = vld1q_u8((const uint8_t*)src);
            const uint8x16_t vd = vld1q_u8((uint8_t*)dst);
            vs = vqaddq_u8(vs, vd);
            vst1q_u8((uint8_t*)dst, vs);
            src += 4;
            dst += 4;
        };

        // Scalar tail.
        while (dst != end) {
            *dst = comp_func_Plus_one_pixel(*dst, *src);
            ++dst;
            ++src;
        }
    } else {
        int x = 0;
        const int one_minus_const_alpha = 255 - const_alpha;
        const uint16x8_t constAlphaVector = vdupq_n_u16(const_alpha);
        const uint16x8_t oneMinusconstAlphaVector = vdupq_n_u16(one_minus_const_alpha);

        const uint16x8_t half = vdupq_n_u16(0x80);  // rounding term for /255
        for (; x < length - 3; x += 4) {
            const uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
            const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
            uint8x16_t dst8 = vld1q_u8((uint8_t *)&dst[x]);
            // Saturated sum, then blend with dst by const_alpha:
            // result = sum * a / 255 + dst * (255 - a) / 255.
            uint8x16_t result = vqaddq_u8(dst8, src8);

            uint16x8_t result_low = vmovl_u8(vget_low_u8(result));
            uint16x8_t result_high = vmovl_u8(vget_high_u8(result));

            uint16x8_t dst_low = vmovl_u8(vget_low_u8(dst8));
            uint16x8_t dst_high = vmovl_u8(vget_high_u8(dst8));

            result_low = qvinterpolate_pixel_255(result_low, constAlphaVector, dst_low, oneMinusconstAlphaVector, half);
            result_high = qvinterpolate_pixel_255(result_high, constAlphaVector, dst_high, oneMinusconstAlphaVector, half);

            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result_high));
            vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
        }

        // Scalar epilogue for the last length % 4 pixels.
        SIMD_EPILOGUE(x, length, 3)
            dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
    }
}
862
863#if defined(ENABLE_PIXMAN_DRAWHELPERS)
864static const int tileSize = 32;
865
866extern "C" void qt_rotate90_16_neon(quint16 *dst, const quint16 *src, int sstride, int dstride, int count);
867
// Rotates a w x h quint16 image 90 degrees into 'destPixels'. Works in
// cache-friendly tiles; columns are emitted 8 at a time through the
// qt_rotate90_16_neon asm routine, with leading rows handled separately
// until the destination is 4-byte aligned and a scalar path packing two
// pixels per quint32 for the remainder.
void qt_memrotate90_16_neon(const uchar *srcPixels, int w, int h, int sstride, uchar *destPixels, int dstride)
{
    const ushort *src = (const ushort *)srcPixels;
    ushort *dest = (ushort *)destPixels;

    sstride /= sizeof(ushort);   // bytes -> pixels
    dstride /= sizeof(ushort);

    // Pixels per quint32 store (2).
    const int pack = sizeof(quint32) / sizeof(ushort);
    // Rows copied scalar up-front so 'dest' becomes quint32-aligned.
    const int unaligned =
        qMin(uint((quintptr(dest) & (sizeof(quint32)-1)) / sizeof(ushort)), uint(h));
    const int restX = w % tileSize;
    const int restY = (h - unaligned) % tileSize;
    const int unoptimizedY = restY % pack;       // rows left for the scalar tail
    const int numTilesX = w / tileSize + (restX > 0);
    const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);

    for (int tx = 0; tx < numTilesX; ++tx) {
        const int startx = w - tx * tileSize - 1;
        const int stopx = qMax(startx - tileSize, 0);

        if (unaligned) {
            // Scalar copy of the alignment rows for this tile column.
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride;
                for (int y = 0; y < unaligned; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }

        for (int ty = 0; ty < numTilesY; ++ty) {
            const int starty = ty * tileSize + unaligned;
            const int stopy = qMin(starty + tileSize, h - unoptimizedY);

            int x = startx;
            // qt_rotate90_16_neon writes to eight rows, four pixels at a time
            for (; x >= stopx + 7; x -= 8) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                const ushort *s = &src[starty * sstride + x - 7];
                qt_rotate90_16_neon(d, s, sstride * 2, dstride * 2, stopy - starty);
            }

            // Remaining columns: pack 'pack' pixels into one quint32 store.
            for (; x >= stopx; --x) {
                quint32 *d = reinterpret_cast<quint32*>(dest + (w - x - 1) * dstride + starty);
                for (int y = starty; y < stopy; y += pack) {
                    quint32 c = src[y * sstride + x];
                    for (int i = 1; i < pack; ++i) {
                        const int shift = (sizeof(int) * 8 / pack * i);
                        const ushort color = src[(y + i) * sstride + x];
                        c |= color << shift;
                    }
                    *d++ = c;
                }
            }
        }

        if (unoptimizedY) {
            // Scalar copy of the final rows that do not fill a full pack.
            const int starty = h - unoptimizedY;
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                for (int y = starty; y < h; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }
    }
}
935
936extern "C" void qt_rotate270_16_neon(quint16 *dst, const quint16 *src, int sstride, int dstride, int count);
937
// Rotates a w x h 16bpp image by 270 degrees into destPixels, using a tiled
// traversal and the external NEON helper qt_rotate90_16_neon (driven with
// negated strides) for the bulk of the pixels.
// NOTE(review): sstride/dstride appear to arrive in bytes — they are divided
// by sizeof(ushort) below; confirm against the callers.
void qt_memrotate270_16_neon(const uchar *srcPixels, int w, int h,
                             int sstride,
                             uchar *destPixels, int dstride)
{
    const ushort *src = (const ushort *)srcPixels;
    ushort *dest = (ushort *)destPixels;

    // Convert byte strides to ushort (pixel) units.
    sstride /= sizeof(ushort);
    dstride /= sizeof(ushort);

    // How many 16-bit pixels fit in one 32-bit store.
    const int pack = sizeof(quint32) / sizeof(ushort);
    // Number of leading output pixels per column written one at a time so
    // that the quint32 stores in the packing loop below are 4-byte aligned
    // (assuming an even dstride keeps every column's alignment equal).
    const int unaligned =
        qMin(uint((long(dest) & (sizeof(quint32)-1)) / sizeof(ushort)), uint(h));
    // tileSize is defined earlier in this file (outside this chunk).
    const int restX = w % tileSize;
    const int restY = (h - unaligned) % tileSize;
    // Rows too few to fill a whole quint32 pack; handled by the scalar
    // epilogue at the bottom.
    const int unoptimizedY = restY % pack;
    const int numTilesX = w / tileSize + (restX > 0);
    const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);

    for (int tx = 0; tx < numTilesX; ++tx) {
        const int startx = tx * tileSize;
        const int stopx = qMin(startx + tileSize, w);

        if (unaligned) {
            // Scalar pre-loop: copy the first 'unaligned' output pixels of
            // each column (source rows h-1 .. h-unaligned).
            for (int x = startx; x < stopx; ++x) {
                ushort *d = dest + x * dstride;
                for (int y = h - 1; y >= h - unaligned; --y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }

        for (int ty = 0; ty < numTilesY; ++ty) {
            const int starty = h - 1 - unaligned - ty * tileSize;
            const int stopy = qMax(starty - tileSize, unoptimizedY);

            int x = startx;
            // qt_rotate90_16_neon writes to eight rows, four pixels at a time
            // (negated strides make the 90-degree helper perform the
            // 270-degree rotation; the *2 presumably converts the ushort
            // strides to bytes — confirm against the asm implementation).
            for (; x < stopx - 7; x += 8) {
                ushort *d = dest + x * dstride + h - 1 - starty;
                const ushort *s = &src[starty * sstride + x];
                qt_rotate90_16_neon(d + 7 * dstride, s, -sstride * 2, -dstride * 2, starty - stopy);
            }

            // Remaining (< 8) columns: pack 'pack' pixels from consecutive
            // source rows (y, y-1, ...) into one aligned 32-bit store.
            for (; x < stopx; ++x) {
                quint32 *d = reinterpret_cast<quint32*>(dest + x * dstride
                                                        + h - 1 - starty);
                for (int y = starty; y > stopy; y -= pack) {
                    quint32 c = src[y * sstride + x];
                    for (int i = 1; i < pack; ++i) {
                        // 16-bit sub-words within the 32-bit output word.
                        const int shift = (sizeof(int) * 8 / pack * i);
                        const ushort color = src[(y - i) * sstride + x];
                        c |= color << shift;
                    }
                    *d++ = c;
                }
            }
        }
        if (unoptimizedY) {
            // Scalar epilogue: final rows that did not fill a whole pack.
            const int starty = unoptimizedY - 1;
            for (int x = startx; x < stopx; ++x) {
                ushort *d = dest + x * dstride + h - 1 - starty;
                for (int y = starty; y >= 0; --y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }
    }
}
1007#endif
1008
// Thin NEON wrappers used as the vector backend for the shared radial
// gradient fetch template (see qt_fetch_radial_gradient_neon below, which
// instantiates QRadialFetchSimd<QSimdNeon>). Int32x4/Float32x4 wrap the raw
// NEON vector types so they are default-constructible and implicitly
// convertible back to the intrinsic types.
class QSimdNeon
{
public:
    struct Int32x4 {
        Int32x4() = default;
        Int32x4(int32x4_t v) : v(v) {}
        int32x4_t v;
        operator int32x4_t() const { return v; }
    };
    struct Float32x4 {
        Float32x4() = default;
        Float32x4(float32x4_t v) : v(v) {}
        float32x4_t v;
        operator float32x4_t() const { return v; }
    };

    // Unions for lane-wise scalar access to a whole vector.
    union Vect_buffer_i { Int32x4 v; int i[4]; };
    union Vect_buffer_f { Float32x4 v; float f[4]; };

    // Broadcast a scalar to all four lanes. The double overload narrows to
    // float; the uint overload is implicitly converted to signed lanes.
    static inline Float32x4 v_dup(double x) { return vdupq_n_f32(float(x)); }
    static inline Float32x4 v_dup(float x) { return vdupq_n_f32(x); }
    static inline Int32x4 v_dup(int x) { return vdupq_n_s32(x); }
    static inline Int32x4 v_dup(uint x) { return vdupq_n_s32(x); }

    static inline Float32x4 v_add(Float32x4 a, Float32x4 b) { return vaddq_f32(a, b); }
    static inline Int32x4 v_add(Int32x4 a, Int32x4 b) { return vaddq_s32(a, b); }

    static inline Float32x4 v_max(Float32x4 a, Float32x4 b) { return vmaxq_f32(a, b); }
    static inline Float32x4 v_min(Float32x4 a, Float32x4 b) { return vminq_f32(a, b); }
    static inline Int32x4 v_min_16(Int32x4 a, Int32x4 b) { return vminq_s32(a, b); }

    static inline Int32x4 v_and(Int32x4 a, Int32x4 b) { return vandq_s32(a, b); }

    static inline Float32x4 v_sub(Float32x4 a, Float32x4 b) { return vsubq_f32(a, b); }
    static inline Int32x4 v_sub(Int32x4 a, Int32x4 b) { return vsubq_s32(a, b); }

    static inline Float32x4 v_mul(Float32x4 a, Float32x4 b) { return vmulq_f32(a, b); }

    // sqrt(x) computed as x * rsqrt(x): reciprocal-square-root estimate
    // refined by one Newton-Raphson step via vrsqrtsq_f32.
    static inline Float32x4 v_sqrt(Float32x4 x) { Float32x4 y = vrsqrteq_f32(x); y = vmulq_f32(y, vrsqrtsq_f32(x, vmulq_f32(y, y))); return vmulq_f32(x, y); }

    // Truncating float -> int conversion per lane.
    static inline Int32x4 v_toInt(Float32x4 x) { return vcvtq_s32_f32(x); }

    // Per-lane mask: all-ones where a >= b, zero otherwise.
    static inline Int32x4 v_greaterOrEqual(Float32x4 a, Float32x4 b) { return vreinterpretq_s32_u32(vcgeq_f32(a, b)); }
};
1053
// NEON implementation of the radial gradient span fetcher: delegates to the
// shared template, parameterized with the QSimdNeon vector backend.
const uint * QT_FASTCALL qt_fetch_radial_gradient_neon(uint *buffer, const Operator *op, const QSpanData *data,
                                                       int y, int x, int length)
{
    return qt_fetch_radial_gradient_template<QRadialFetchSimd<QSimdNeon>,uint>(buffer, op, data, y, x, length);
}
1059
1060extern void QT_FASTCALL qt_convert_rgb888_to_rgb32_neon(quint32 *dst, const uchar *src, int len);
1061
1062const uint * QT_FASTCALL qt_fetchUntransformed_888_neon(uint *buffer, const Operator *, const QSpanData *data,
1063 int y, int x, int length)
1064{
1065 const uchar *line = data->texture.scanLine(y) + x * 3;
1066 qt_convert_rgb888_to_rgb32_neon(buffer, line, length);
1067 return buffer;
1068}
1069
1070#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
// Swaps the R and B bytes of each of the four 32-bit pixels, converting
// between the RGBA8888 and ARGB32 in-memory byte orders. The permutation
// (2,1,0,3) is its own inverse, so the same function converts either way.
static inline uint32x4_t vrgba2argb(uint32x4_t srcVector)
{
#if defined(Q_PROCESSOR_ARM_64)
    const uint8x16_t rgbaMask = qvsetq_n_u8(2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15);
#else
    const uint8x8_t rgbaMask = qvset_n_u8(2, 1, 0, 3, 6, 5, 4, 7);
#endif
#if defined(Q_PROCESSOR_ARM_64)
    // AArch64 shuffles all 16 bytes in a single table lookup.
    srcVector = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(srcVector), rgbaMask));
#else
    // no vqtbl1q_u8, so use two vtbl1_u8
    const uint8x8_t low = vtbl1_u8(vreinterpret_u8_u32(vget_low_u32(srcVector)), rgbaMask);
    const uint8x8_t high = vtbl1_u8(vreinterpret_u8_u32(vget_high_u32(srcVector)), rgbaMask);
    srcVector = vcombine_u32(vreinterpret_u32_u8(low), vreinterpret_u32_u8(high));
#endif
    return srcVector;
}
1088
// Premultiplies 'count' pixels from src into buffer, four at a time with
// NEON. When RGBA is true the input is RGBA8888 and is also swizzled to
// ARGB32 byte order. src and buffer may alias (in-place conversion); the
// scalar epilogue handles the last count % 4 pixels.
template<bool RGBA>
static inline void convertARGBToARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    // Table that broadcasts each pixel's alpha byte (index 3 resp. 7 of an
    // 8-byte pair of pixels) across all four channels of that pixel.
    const uint8x8_t shuffleMask = qvset_n_u8(3, 3, 3, 3, 7, 7, 7, 7);
    // Selects the alpha byte of each little-endian ARGB32 pixel.
    const uint32x4_t blendMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        // Sum the four alphas to detect the all-transparent (0) and
        // all-opaque (255*4) fast paths with one comparison each.
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                // Mixed alpha: multiply every channel by its pixel's alpha.
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(srcVector));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(srcVector));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);  // 16-bit channel*alpha products
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // Divide by 255: (x + (x >> 8) + 0x80) >> 8, realized as a
                // shift-accumulate followed by a rounding narrowing shift.
                src1 = vsraq_n_u16(src1, src1, 8);
                src2 = vsraq_n_u16(src2, src2, 8);
                const uint8x8_t d1 = vrshrn_n_u16(src1, 8);
                const uint8x8_t d2 = vrshrn_n_u16(src2, 8);
                // Recombine, keeping each pixel's original alpha byte.
                const uint32x4_t d = vbslq_u32(blendMask, srcVector, vreinterpretq_u32_u8(vcombine_u8(d1, d2)));
                vst1q_u32(buffer + i, d);
            } else {
                // All four pixels opaque: premultiplication is the identity.
                if (RGBA)
                    vst1q_u32(buffer + i, vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(buffer + i, srcVector);
            }
        } else {
            // All four pixels fully transparent: premultiplied form is zero.
            vst1q_u32(buffer + i, vdupq_n_u32(0));
        }
    }

    // Scalar tail for the remaining count % 4 pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint v = qPremultiply(src[i]);
        buffer[i] = RGBA ? RGBA2ARGB(v) : v;
    }
}
1138
// Converts 'count' ARGB32 (or RGBA8888 when RGBA is true) pixels into
// premultiplied RGBA64. Four input pixels are processed per iteration; each
// 128-bit store emits two 64-bit output pixels (hence buffer += 2 twice).
template<bool RGBA>
static inline void convertARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count)
{
    if (count <= 0)
        return;

    // Broadcasts each pixel's alpha byte over its four channels.
    const uint8x8_t shuffleMask = qvset_n_u8(3, 3, 3, 3, 7, 7, 7, 7);
    // Selects the 16-bit alpha component of each 64-bit output pixel.
    const uint64x2_t blendMask = vdupq_n_u64(Q_UINT64_C(0xffff000000000000));

    int i = 0;
    for (; i < count-3; i += 4) {
        uint32x4_t vs32 = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(vs32, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            // The output is in RGBA memory order, so ARGB input (RGBA=false)
            // must be swizzled; the byte swap is its own inverse.
            if (!RGBA)
                vs32 = vrgba2argb(vs32);
            const uint8x16_t vs8 = vreinterpretq_u8_u32(vs32);
            // Zipping a vector with itself duplicates every byte, i.e. maps
            // each 8-bit value v to the 16-bit value v*257 (255 -> 65535).
            const uint8x16x2_t v = vzipq_u8(vs8, vs8);
            if (alphaSum != 255 * 4) {
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(vs32));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(vs32));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // convert from 0->(255x255) to 0->(255x257)
                src1 = vsraq_n_u16(src1, src1, 7);
                src2 = vsraq_n_u16(src2, src2, 7);

                // now restore alpha from the trivial conversion
                const uint64x2_t d1 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[0]), vreinterpretq_u64_u16(src1));
                const uint64x2_t d2 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[1]), vreinterpretq_u64_u16(src2));

                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d1));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d2));
                buffer += 2;
            } else {
                // All four pixels opaque: the v*257 expansion is already the
                // premultiplied result.
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[0]));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[1]));
                buffer += 2;
            }
        } else {
            // All four pixels fully transparent: output zeros.
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
        }
    }

    // Scalar tail for the remaining count % 4 pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint s = src[i];
        if (RGBA)
            s = RGBA2ARGB(s);
        *buffer++ = QRgba64::fromArgb32(s).premultiplied();
    }
}
1204
// Computes an approximation of (mul / a) per float lane: NEON's reciprocal
// estimate refined with one Newton-Raphson step (vrecpsq_f32), scaled by mul.
static inline float32x4_t reciprocal_mul_ps(float32x4_t a, float mul)
{
    const float32x4_t estimate = vrecpeq_f32(a);            // rough 1/a
    const float32x4_t scaled = vmulq_n_f32(estimate, mul);  // (1/a) * mul
    return vmulq_f32(vrecpsq_f32(a, estimate), scaled);     // refinement step * scaled
}
1211
// Unpremultiplies 'count' ARGB32PM pixels from src into buffer.
// RGBA: emit RGBA8888 byte order instead of ARGB32.
// RGBx: force the alpha byte to 0xff (opaque output format).
// Four pixels per NEON iteration; scalar tail handles count % 4.
template<bool RGBA, bool RGBx>
static inline void convertARGBFromARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    const uint32x4_t alphaMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        // Sum of the four alphas detects the fast paths below.
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                // Per-pixel scale factor: ia ≈ 255 / alpha in each lane.
                const float32x4_t a = vcvtq_f32_u32(alphaVector);
                const float32x4_t ia = reciprocal_mul_ps(a, 255.0f);
                // Convert 4x(4xU8) to 4x(4xF32)
                uint16x8_t tmp1 = vmovl_u8(vget_low_u8(vreinterpretq_u8_u32(srcVector)));
                uint16x8_t tmp3 = vmovl_u8(vget_high_u8(vreinterpretq_u8_u32(srcVector)));
                float32x4_t src1 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
                float32x4_t src2 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));
                float32x4_t src3 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp3)));
                float32x4_t src4 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp3)));
                // Apply each pixel's reciprocal-alpha lane to its 4 channels.
                src1 = vmulq_lane_f32(src1, vget_low_f32(ia), 0);
                src2 = vmulq_lane_f32(src2, vget_low_f32(ia), 1);
                src3 = vmulq_lane_f32(src3, vget_high_f32(ia), 0);
                src4 = vmulq_lane_f32(src4, vget_high_f32(ia), 1);
                // Convert 4x(4xF32) back to 4x(4xU8) (over a 8.1 fixed point format to get rounding)
                tmp1 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src1, 1), 1),
                                    vrshrn_n_u32(vcvtq_n_u32_f32(src2, 1), 1));
                tmp3 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src3, 1), 1),
                                    vrshrn_n_u32(vcvtq_n_u32_f32(src4, 1), 1));
                uint32x4_t dstVector = vreinterpretq_u32_u8(vcombine_u8(vmovn_u16(tmp1), vmovn_u16(tmp3)));
                // Overwrite any undefined results from alpha==0 with zeros:
                // (the reciprocal estimate of 0 yields unusable values)
#if defined(Q_PROCESSOR_ARM_64)
                uint32x4_t srcVectorAlphaMask = vceqzq_u32(alphaVector);
#else
                uint32x4_t srcVectorAlphaMask = vceqq_u32(alphaVector, vdupq_n_u32(0));
#endif
                dstVector = vbicq_u32(dstVector, srcVectorAlphaMask);
                // Restore or mask alpha values:
                if (RGBx)
                    dstVector = vorrq_u32(alphaMask, dstVector);
                else
                    dstVector = vbslq_u32(alphaMask, srcVector, dstVector);
                vst1q_u32(&buffer[i], dstVector);
            } else {
                // 4xAlpha==255, no change except if we are doing RGBA->ARGB:
                if (RGBA)
                    vst1q_u32(&buffer[i], vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(&buffer[i], srcVector);
            }
        } else {
            // 4xAlpha==0, always zero, except if output is RGBx:
            if (RGBx)
                vst1q_u32(&buffer[i], alphaMask);
            else
                vst1q_u32(&buffer[i], vdupq_n_u32(0));
        }
    }

    // Scalar tail for the remaining count % 4 pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint v = qUnpremultiply(src[i]);
        if (RGBx)
            v = 0xff000000 | v;
        if (RGBA)
            v = ARGB2RGBA(v);
        buffer[i] = v;
    }
}
1289
// In-place premultiplication of 'count' ARGB32 pixels; the palette argument
// is unused for this direct format.
void QT_FASTCALL convertARGB32ToARGB32PM_neon(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_neon<false>(buffer, buffer, count);
}
1294
// In-place conversion of 'count' RGBA8888 pixels to premultiplied ARGB32
// (swizzle + premultiply); the palette argument is unused.
void QT_FASTCALL convertRGBA8888ToARGB32PM_neon(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_neon<true>(buffer, buffer, count);
}
1299
1300const uint *QT_FASTCALL fetchARGB32ToARGB32PM_neon(uint *buffer, const uchar *src, int index, int count,
1301 const QList<QRgb> *, QDitherInfo *)
1302{
1303 convertARGBToARGB32PM_neon<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1304 return buffer;
1305}
1306
1307const uint *QT_FASTCALL fetchRGBA8888ToARGB32PM_neon(uint *buffer, const uchar *src, int index, int count,
1308 const QList<QRgb> *, QDitherInfo *)
1309{
1310 convertARGBToARGB32PM_neon<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1311 return buffer;
1312}
1313
// Converts 'count' ARGB32 pixels from src into premultiplied RGBA64 in
// buffer; palette and dither information are unused for this format pair.
const QRgba64 * QT_FASTCALL convertARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count,
                                                         const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<false>(buffer, src, count);
    return buffer;
}
1320
// Converts 'count' RGBA8888 pixels from src into premultiplied RGBA64 in
// buffer; palette and dither information are unused for this format pair.
const QRgba64 * QT_FASTCALL convertRGBA8888ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count,
                                                           const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<true>(buffer, src, count);
    return buffer;
}
1327
1328const QRgba64 *QT_FASTCALL fetchARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uchar *src, int index, int count,
1329 const QList<QRgb> *, QDitherInfo *)
1330{
1331 convertARGB32ToRGBA64PM_neon<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1332 return buffer;
1333}
1334
1335const QRgba64 *QT_FASTCALL fetchRGBA8888ToRGBA64PM_neon(QRgba64 *buffer, const uchar *src, int index, int count,
1336 const QList<QRgb> *, QDitherInfo *)
1337{
1338 convertARGB32ToRGBA64PM_neon<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1339 return buffer;
1340}
1341
1342void QT_FASTCALL storeRGB32FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1343 const QList<QRgb> *, QDitherInfo *)
1344{
1345 uint *d = reinterpret_cast<uint *>(dest) + index;
1346 convertARGBFromARGB32PM_neon<false,true>(d, src, count);
1347}
1348
1349void QT_FASTCALL storeARGB32FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1350 const QList<QRgb> *, QDitherInfo *)
1351{
1352 uint *d = reinterpret_cast<uint *>(dest) + index;
1353 convertARGBFromARGB32PM_neon<false,false>(d, src, count);
1354}
1355
1356void QT_FASTCALL storeRGBA8888FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1357 const QList<QRgb> *, QDitherInfo *)
1358{
1359 uint *d = reinterpret_cast<uint *>(dest) + index;
1360 convertARGBFromARGB32PM_neon<true,false>(d, src, count);
1361}
1362
1363void QT_FASTCALL storeRGBXFromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1364 const QList<QRgb> *, QDitherInfo *)
1365{
1366 uint *d = reinterpret_cast<uint *>(dest) + index;
1367 convertARGBFromARGB32PM_neon<true,true>(d, src, count);
1368}
1369
1370#endif // Q_BYTE_ORDER == Q_LITTLE_ENDIAN
1371
1372QT_END_NAMESPACE
1373
1374#endif // __ARM_NEON__