Qt — internal/contributor documentation for the Qt SDK: qdrawhelper_neon.cpp.
Note: these are NOT the official API docs; those are found at https://doc.qt.io/
Go to the documentation of this file.
1// Copyright (C) 2016 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3// Qt-Security score:significant reason:default
4
5#include <private/qdrawhelper_neon_p.h>
6#include <private/qblendfunctions_p.h>
7#include <private/qmath_p.h>
8#include <private/qpixellayout_p.h>
9
10#ifdef __ARM_NEON__
11
12#include <private/qpaintengine_raster_p.h>
13
14QT_BEGIN_NAMESPACE
15
// Fills count 32-bit pixels with value. Bulk work is done 16 pixels
// (64 bytes) at a time with interleaved NEON stores; the remainder is
// written out with a plain scalar loop.
void qt_memfill32(quint32 *dest, quint32 value, qsizetype count)
{
    // Trailing pixels that do not fill a whole 16-pixel NEON batch.
    const int tail = count % 16;
    if (count >= 16) {
        quint32 *const vectorEnd = dest + count - tail;
        const uint32x4_t v4 = vdupq_n_u32(value);
        const uint32x4x4_t v16 = { v4, v4, v4, v4 };
        do {
            vst4q_u32(dest, v16);
            dest += 16;
        } while (dest != vectorEnd);
    }

    // Scalar epilogue for the remaining 0..15 pixels.
    for (int i = 0; i < tail; ++i)
        *dest++ = value;
}
48
// Integer approximation of x / 255 on eight 16-bit lanes:
//   (x + (x >> 8) + 0x80) >> 8
// `half` must be a vector of 0x80 (the rounding term).
static inline uint16x8_t qvdiv_255_u16(uint16x8_t x, uint16x8_t half)
{
    return vshrq_n_u16(vaddq_u16(vshrq_n_u16(x, 8), vaddq_u16(x, half)), 8);
}
59
// Lane-wise qRound(x * alpha / 255.0) using the /255 approximation above.
static inline uint16x8_t qvbyte_mul_u16(uint16x8_t x, uint16x8_t alpha, uint16x8_t half)
{
    return qvdiv_255_u16(vmulq_u16(x, alpha), half);
}
67
// Lane-wise (x * a + y * b) / 255 — linear interpolation of two pixel
// vectors with per-lane weights a and b.
static inline uint16x8_t qvinterpolate_pixel_255(uint16x8_t x, uint16x8_t a, uint16x8_t y, uint16x8_t b, uint16x8_t half)
{
    const uint16x8_t weighted_sum = vaddq_u16(vmulq_u16(x, a), vmulq_u16(y, b));
    return qvdiv_255_u16(weighted_sum, half);
}
77
// Source-over for two widened 32-bit pixels held as 16-bit channels:
//   result = src + dst * (255 - src.alpha) / 255
// Lane 3 of each 4-lane half is that pixel's alpha channel; it is
// broadcast across the half, then inverted against `full` (0xff).
static inline uint16x8_t qvsource_over_u16(uint16x8_t src16, uint16x8_t dst16, uint16x8_t half, uint16x8_t full)
{
    const uint16x4_t a_lo = vdup_lane_u16(vget_low_u16(src16), 3);
    const uint16x4_t a_hi = vdup_lane_u16(vget_high_u16(src16), 3);
    const uint16x8_t inv_alpha = vsubq_u16(full, vcombine_u16(a_lo, a_hi));

    return vaddq_u16(src16, qvbyte_mul_u16(dst16, inv_alpha, half));
}
87
88#if defined(ENABLE_PIXMAN_DRAWHELPERS)
89extern "C" void
90pixman_composite_over_8888_0565_asm_neon (int32_t w,
91 int32_t h,
92 uint16_t *dst,
93 int32_t dst_stride,
94 uint32_t *src,
95 int32_t src_stride);
96
97extern "C" void
98pixman_composite_over_8888_8888_asm_neon (int32_t w,
99 int32_t h,
100 uint32_t *dst,
101 int32_t dst_stride,
102 uint32_t *src,
103 int32_t src_stride);
104
105extern "C" void
106pixman_composite_src_0565_8888_asm_neon (int32_t w,
107 int32_t h,
108 uint32_t *dst,
109 int32_t dst_stride,
110 uint16_t *src,
111 int32_t src_stride);
112
113extern "C" void
114pixman_composite_over_n_8_0565_asm_neon (int32_t w,
115 int32_t h,
116 uint16_t *dst,
117 int32_t dst_stride,
118 uint32_t src,
119 int32_t unused,
120 uint8_t *mask,
121 int32_t mask_stride);
122
123extern "C" void
124pixman_composite_scanline_over_asm_neon (int32_t w,
125 const uint32_t *dst,
126 const uint32_t *src);
127
128extern "C" void
129pixman_composite_src_0565_0565_asm_neon (int32_t w,
130 int32_t h,
131 uint16_t *dst,
132 int32_t dst_stride,
133 uint16_t *src,
134 int32_t src_stride);
135// qblendfunctions.cpp
136void qt_blend_argb32_on_rgb16_const_alpha(uchar *destPixels, int dbpl,
137 const uchar *srcPixels, int sbpl,
138 int w, int h,
139 int const_alpha);
140
// Blends an RGB16 source rectangle onto an ARGB32 destination.
// Fully opaque blits go through the pixman assembly converter; partially
// transparent ones are interpolated pixel by pixel in scalar code.
void qt_blend_rgb16_on_argb32_neon(uchar *destPixels, int dbpl,
                                   const uchar *srcPixels, int sbpl,
                                   int w, int h,
                                   int const_alpha)
{
    const int dstStride = dbpl / 4; // quint32 pixels per destination line
    const int srcStride = sbpl / 2; // quint16 pixels per source line

    quint32 *dst = (quint32 *) destPixels;
    quint16 *src = (quint16 *) srcPixels;

    if (const_alpha == 256) {
        pixman_composite_src_0565_8888_asm_neon(w, h, dst, dstStride, src, srcStride);
        return;
    }

    // Map const_alpha (0..256) into the 0..255 weight range.
    const quint8 a = (255 * const_alpha) >> 8;
    const quint8 ia = 255 - a;

    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x)
            dst[x] = INTERPOLATE_PIXEL_255(qConvertRgb16To32(src[x]), a, dst[x], ia);
        dst += dstStride;
        src += srcStride;
    }
}
167
168// qblendfunctions.cpp
169void qt_blend_rgb16_on_rgb16(uchar *dst, int dbpl,
170 const uchar *src, int sbpl,
171 int w, int h,
172 int const_alpha);
173
174
// Copies one scanline of N quint16 pixels, two pixels (one quint32) at a
// time. The caller (blockBlit16) guarantees dst is 4-byte aligned here.
// NOTE(review): the quint32 view of quint16 buffers is type punning;
// tolerated by the compilers Qt targets, but not strictly standard C++.
template <int N>
static inline void scanLineBlit16(quint16 *dst, quint16 *src, int dstride)
{
    if (N >= 2) {
        ((quint32 *)dst)[0] = ((quint32 *)src)[0];
        // Prefetch the next destination scanline for writing, with low
        // temporal locality (we touch each line once).
        __builtin_prefetch(dst + dstride, 1, 0);
    }
    for (int i = 1; i < N/2; ++i)
        ((quint32 *)dst)[i] = ((quint32 *)src)[i];
    // Odd width: the final pixel is copied on its own.
    if (N & 1)
        dst[N-1] = src[N-1];
}
187
// Blits h scanlines of Width quint16 pixels. If the destination starts on
// an odd 16-bit boundary, one leading pixel per line is copied separately
// so the remaining Width-1 pixels can use 32-bit aligned stores.
template <int Width>
static inline void blockBlit16(quint16 *dst, quint16 *src, int dstride, int sstride, int h)
{
    // Union lets us inspect the pointer's low alignment bits without a cast.
    union {
        quintptr address;
        quint16 *pointer;
    } u;

    u.pointer = dst;

    if (u.address & 2) {
        while (--h >= 0) {
            // align dst
            dst[0] = src[0];
            if (Width > 1)
                scanLineBlit16<Width-1>(dst + 1, src + 1, dstride);
            dst += dstride;
            src += sstride;
        }
    } else {
        while (--h >= 0) {
            scanLineBlit16<Width>(dst, src, dstride);

            dst += dstride;
            src += sstride;
        }
    }
}
216
// RGB16-on-RGB16 blit. Opaque narrow blits (w < 150) use fully unrolled,
// alignment-aware template blitters; wider ones use pixman assembly, and
// everything else falls back to the generic C++ implementation.
void qt_blend_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    // testing show that the default memcpy is faster for widths 150 and up
    if (const_alpha != 256 || w >= 150) {
        qt_blend_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
        return;
    }

    // Strides in quint16 pixels.
    int dstride = dbpl / 2;
    int sstride = sbpl / 2;

    quint16 *dst = (quint16 *) destPixels;
    quint16 *src = (quint16 *) srcPixels;

    // Widths 1..15: dispatch to a width-specialized template blit.
    switch (w) {
#define BLOCKBLIT(n) case n: blockBlit16<n>(dst, src, dstride, sstride, h); return;
    BLOCKBLIT(1);
    BLOCKBLIT(2);
    BLOCKBLIT(3);
    BLOCKBLIT(4);
    BLOCKBLIT(5);
    BLOCKBLIT(6);
    BLOCKBLIT(7);
    BLOCKBLIT(8);
    BLOCKBLIT(9);
    BLOCKBLIT(10);
    BLOCKBLIT(11);
    BLOCKBLIT(12);
    BLOCKBLIT(13);
    BLOCKBLIT(14);
    BLOCKBLIT(15);
#undef BLOCKBLIT
    default:
        break;
    }

    // Widths 16..149: pixman assembly copy (strides are in pixels here).
    pixman_composite_src_0565_0565_asm_neon (w, h, dst, dstride, src, sstride);
}
258
259extern "C" void blend_8_pixels_argb32_on_rgb16_neon(quint16 *dst, const quint32 *src, int const_alpha);
260
// Blends an ARGB32 source rectangle over an RGB16 destination. With a
// constant alpha the work is done eight pixels at a time by the assembly
// blender (staging the <8-pixel tail in stack buffers); fully opaque
// rectangles go straight to the pixman over-compositing assembly.
void qt_blend_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                   const uchar *srcPixels, int sbpl,
                                   int w, int h,
                                   int const_alpha)
{
    quint16 *dst = (quint16 *) destPixels;
    quint32 *src = (quint32 *) srcPixels;

    if (const_alpha != 256) {
        for (int y=0; y<h; ++y) {
            int i = 0;
            // Whole groups of eight pixels, blended in place.
            for (; i < w-7; i += 8)
                blend_8_pixels_argb32_on_rgb16_neon(&dst[i], &src[i], const_alpha);

            if (i < w) {
                int tail = w - i;

                // Stage the trailing <8 pixels through scratch buffers so
                // the eight-pixel blender never reads or writes past the
                // end of the scanline.
                quint16 dstBuffer[8];
                quint32 srcBuffer[8];

                for (int j = 0; j < tail; ++j) {
                    dstBuffer[j] = dst[i + j];
                    srcBuffer[j] = src[i + j];
                }

                blend_8_pixels_argb32_on_rgb16_neon(dstBuffer, srcBuffer, const_alpha);

                for (int j = 0; j < tail; ++j)
                    dst[i + j] = dstBuffer[j];
            }

            // Advance one scanline using the byte strides.
            dst = (quint16 *)(((uchar *) dst) + dbpl);
            src = (quint32 *)(((uchar *) src) + sbpl);
        }
        return;
    }

    // Fully opaque: pixman takes strides in pixels, not bytes.
    pixman_composite_over_8888_0565_asm_neon(w, h, dst, dbpl / 2, src, sbpl / 4);
}
300#endif
301
// Source-over blends a single ARGB32 scanline. const_alpha is 0..255 here;
// the blend entry point below expects 0..256, hence the rescale.
void qt_blend_argb32_on_argb32_scanline_neon(uint *dest, const uint *src, int length, uint const_alpha)
{
    if (const_alpha != 255) {
        qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, (const_alpha * 256) / 255);
        return;
    }
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
    pixman_composite_scanline_over_asm_neon(length, dest, src);
#else
    qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, 256);
#endif
}
314
// Source-over blends an ARGB32 rectangle onto an ARGB32 destination.
// const_alpha is in the 0..256 range (256 == fully opaque).
void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
                                    const uchar *srcPixels, int sbpl,
                                    int w, int h,
                                    int const_alpha)
{
    const uint *src = (const uint *) srcPixels;
    uint *dst = (uint *) destPixels;
    uint16x8_t half = vdupq_n_u16(0x80); // rounding term for the /255 division
    uint16x8_t full = vdupq_n_u16(0xff);
    if (const_alpha == 256) {
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
        pixman_composite_over_8888_8888_asm_neon(w, h, (uint32_t *)destPixels, dbpl / 4, (uint32_t *)srcPixels, sbpl / 4);
#else
        for (int y=0; y<h; ++y) {
            int x = 0;
            // Four pixels per iteration; an all-transparent group is skipped.
            for (; x < w-3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    // Widen the 8-bit channels to 16 bits for the math.
                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    // src + dst * (255 - src.alpha) / 255, per pixel pair.
                    const uint16x8_t result16_low = qvsource_over_u16(src16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(src16_high, dst16_high, half, full);

                    // Narrow back to 8-bit channels and store.
                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            // Scalar tail: opaque pixels overwrite, transparent are skipped,
            // the rest get the scalar source-over formula.
            for (; x<w; ++x) {
                uint s = src[x];
                if (s >= 0xff000000)
                    dst[x] = s;
                else if (s != 0)
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
#endif
    } else if (const_alpha != 0) {
        // Pre-scale the source by const_alpha, then source-over as above.
        const_alpha = (const_alpha * 255) >> 8;
        uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w-3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    // src' = src * const_alpha / 255 (all channels, incl. alpha).
                    const uint16x8_t srcalpha16_low = qvbyte_mul_u16(src16_low, const_alpha16, half);
                    const uint16x8_t srcalpha16_high = qvbyte_mul_u16(src16_high, const_alpha16, half);

                    const uint16x8_t result16_low = qvsource_over_u16(srcalpha16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(srcalpha16_high, dst16_high, half, full);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            // Scalar tail with the same pre-scaled source-over.
            for (; x<w; ++x) {
                uint s = src[x];
                if (s != 0) {
                    s = BYTE_MUL(s, const_alpha);
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
                }
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    }
}
419
420// qblendfunctions.cpp
421void qt_blend_rgb32_on_rgb32(uchar *destPixels, int dbpl,
422 const uchar *srcPixels, int sbpl,
423 int w, int h,
424 int const_alpha);
425
// Blends an opaque RGB32 rectangle onto RGB32 with a constant alpha:
// dst = src * a + dst * (255 - a). Fully opaque falls back to the generic
// copy; const_alpha == 0 is a no-op by design.
void qt_blend_rgb32_on_rgb32_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    if (const_alpha != 256) {
        if (const_alpha != 0) {
            const uint *src = (const uint *) srcPixels;
            uint *dst = (uint *) destPixels;
            uint16x8_t half = vdupq_n_u16(0x80); // rounding term for /255
            // Map const_alpha (0..256) into the 0..255 weight range.
            const_alpha = (const_alpha * 255) >> 8;
            int one_minus_const_alpha = 255 - const_alpha;
            uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
            uint16x8_t one_minus_const_alpha16 = vdupq_n_u16(255 - const_alpha);
            for (int y = 0; y < h; ++y) {
                int x = 0;
                // Interpolate four pixels per iteration.
                for (; x < w-3; x += 4) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    // Widen the 8-bit channels to 16 bits for the math.
                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t result16_low = qvinterpolate_pixel_255(src16_low, const_alpha16, dst16_low, one_minus_const_alpha16, half);
                    const uint16x8_t result16_high = qvinterpolate_pixel_255(src16_high, const_alpha16, dst16_high, one_minus_const_alpha16, half);

                    // Narrow back to 8-bit channels and store.
                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
                // Scalar tail for the last 0..3 pixels.
                for (; x<w; ++x) {
                    dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
                }
                dst = (quint32 *)(((uchar *) dst) + dbpl);
                src = (const quint32 *)(((const uchar *) src) + sbpl);
            }
        }
    } else {
        qt_blend_rgb32_on_rgb32(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
    }
}
480
481#if defined(ENABLE_PIXMAN_DRAWHELPERS)
482extern void qt_alphamapblit_quint16(QRasterBuffer *rasterBuffer,
483 int x, int y, const QRgba64 &color,
484 const uchar *map,
485 int mapWidth, int mapHeight, int mapStride,
486 const QClipData *clip, bool useGammaCorrection);
487
// Blends a solid color through an 8-bit alpha map onto an RGB16 raster
// buffer. Clipped or gamma-corrected blits fall back to the generic
// implementation; the plain case goes straight to pixman assembly.
void qt_alphamapblit_quint16_neon(QRasterBuffer *rasterBuffer,
                                  int x, int y, const QRgba64 &color,
                                  const uchar *bitmap,
                                  int mapWidth, int mapHeight, int mapStride,
                                  const QClipData *clip, bool useGammaCorrection)
{
    if (clip || useGammaCorrection) {
        qt_alphamapblit_quint16(rasterBuffer, x, y, color, bitmap, mapWidth, mapHeight, mapStride, clip, useGammaCorrection);
        return;
    }

    quint16 *dest = reinterpret_cast<quint16*>(rasterBuffer->scanLine(y)) + x;
    const int destStride = rasterBuffer->bytesPerLine() / sizeof(quint16);

    // The pixman entry point takes a non-const mask pointer; presumably it
    // only reads from it — the const_cast is just to match the signature.
    uchar *mask = const_cast<uchar *>(bitmap);
    const uint c = color.toArgb32();

    pixman_composite_over_n_8_0565_asm_neon(mapWidth, mapHeight, dest, destStride, c, 0, mask, mapStride);
}
507
508extern "C" void blend_8_pixels_rgb16_on_rgb16_neon(quint16 *dst, const quint16 *src, int const_alpha);
509
// Pixel writer used by the generic scale/transform templates: queues
// source pixels into an 8-entry buffer and flushes each full group
// through an eight-pixel NEON blend function.
template <typename SRC, typename BlendFunc>
struct Blend_on_RGB16_SourceAndConstAlpha_Neon {
    Blend_on_RGB16_SourceAndConstAlpha_Neon(BlendFunc blender, int const_alpha)
        : m_index(0)
        , m_blender(blender)
        , m_const_alpha(const_alpha)
    {
    }

    // Queues one source pixel; on the eighth, blends the whole group into
    // the destination (rewinding dst by 7 to the group's first pixel).
    inline void write(quint16 *dst, quint32 src)
    {
        srcBuffer[m_index++] = src;

        if (m_index == 8) {
            m_blender(dst - 7, srcBuffer, m_const_alpha);
            m_index = 0;
        }
    }

    // Blends any pending (<8) pixels via a stack copy so the eight-pixel
    // blender never writes beyond the real destination.
    inline void flush(quint16 *dst)
    {
        if (m_index > 0) {
            quint16 dstBuffer[8];
            for (int i = 0; i < m_index; ++i)
                dstBuffer[i] = dst[i - m_index];

            m_blender(dstBuffer, srcBuffer, m_const_alpha);

            for (int i = 0; i < m_index; ++i)
                dst[i - m_index] = dstBuffer[i];

            m_index = 0;
        }
    }

    SRC srcBuffer[8];

    int m_index;         // number of queued pixels (0..7)
    BlendFunc m_blender;
    int m_const_alpha;
};
551
// Factory that deduces BlendFunc, so callers only spell out the source
// pixel type (quint16 or quint32).
template <typename SRC, typename BlendFunc>
Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>
Blend_on_RGB16_SourceAndConstAlpha_Neon_create(BlendFunc blender, int const_alpha)
{
    return Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>(blender, const_alpha);
}
558
// Scales an ARGB32 image onto an RGB16 destination, blending eight pixels
// at a time through the NEON assembly blender. const_alpha == 0 is a no-op.
void qt_scale_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                         const uchar *srcPixels, int sbpl, int srch,
                                         const QRectF &targetRect,
                                         const QRectF &sourceRect,
                                         const QRect &clip,
                                         int const_alpha)
{
    if (const_alpha == 0)
        return;

    qt_scale_image_16bit<quint32>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
                                  Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
}
572
573void qt_scale_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
574 const uchar *srcPixels, int sbpl, int srch,
575 const QRectF &targetRect,
576 const QRectF &sourceRect,
577 const QRect &clip,
578 int const_alpha);
579
// Scales an RGB16 image onto an RGB16 destination. Fully opaque scaling is
// delegated to the generic implementation; partial alpha uses the NEON
// eight-pixel blender. const_alpha == 0 is a no-op.
void qt_scale_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                        const uchar *srcPixels, int sbpl, int srch,
                                        const QRectF &targetRect,
                                        const QRectF &sourceRect,
                                        const QRect &clip,
                                        int const_alpha)
{
    if (const_alpha == 0)
        return;

    if (const_alpha == 256) {
        qt_scale_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip, const_alpha);
        return;
    }

    qt_scale_image_16bit<quint16>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
                                  Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
}
598
599extern void qt_transform_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
600 const uchar *srcPixels, int sbpl,
601 const QRectF &targetRect,
602 const QRectF &sourceRect,
603 const QRect &clip,
604 const QTransform &targetRectTransform,
605 int const_alpha);
606
// Transforms (rotate/shear/scale) an RGB16 image onto RGB16. Fully opaque
// work is delegated to the generic path; partial alpha uses the NEON
// eight-pixel blender. const_alpha == 0 is a no-op.
void qt_transform_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                            const uchar *srcPixels, int sbpl,
                                            const QRectF &targetRect,
                                            const QRectF &sourceRect,
                                            const QRect &clip,
                                            const QTransform &targetRectTransform,
                                            int const_alpha)
{
    if (const_alpha == 0)
        return;

    if (const_alpha == 256) {
        qt_transform_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, targetRect, sourceRect, clip, targetRectTransform, const_alpha);
        return;
    }

    qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
                       reinterpret_cast<const quint16 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
                       Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
}
627
// Transforms an ARGB32 image onto an RGB16 destination using the NEON
// eight-pixel blender for every alpha level. const_alpha == 0 is a no-op.
void qt_transform_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                             const uchar *srcPixels, int sbpl,
                                             const QRectF &targetRect,
                                             const QRectF &sourceRect,
                                             const QRect &clip,
                                             const QTransform &targetRectTransform,
                                             int const_alpha)
{
    if (const_alpha == 0)
        return;

    qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
                       reinterpret_cast<const quint32 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
                       Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
}
643
// Expands exactly 8 RGB16 (r5g6b5) pixels to ARGB32 with alpha forced to
// 0xff. Both buffers must hold at least 8 pixels. Per the asm comments,
// the channels are deinterleaved into planar bytes, the high channel bits
// are replicated into the low bits, then the planes are stored interleaved.
static inline void convert_8_pixels_rgb16_to_argb32(quint32 *dst, const quint16 *src)
{
    asm volatile (
        "vld1.16 { d0, d1 }, [%[SRC]]\n\t"

        /* convert 8 r5g6b5 pixel data from {d0, d1} to planar 8-bit format
           and put data into d4 - red, d3 - green, d2 - blue */
        "vshrn.u16 d4, q0, #8\n\t"
        "vshrn.u16 d3, q0, #3\n\t"
        "vsli.u16 q0, q0, #5\n\t"
        "vsri.u8 d4, d4, #5\n\t"
        "vsri.u8 d3, d3, #6\n\t"
        "vshrn.u16 d2, q0, #2\n\t"

        /* fill d5 - alpha with 0xff */
        "mov r2, #255\n\t"
        "vdup.8 d5, r2\n\t"

        "vst4.8 { d2, d3, d4, d5 }, [%[DST]]"
        : : [DST]"r" (dst), [SRC]"r" (src)
        : "memory", "r2", "d0", "d1", "d2", "d3", "d4", "d5"
    );
}
667
// Fetches `length` RGB16 destination pixels starting at (x, y), expanded
// to ARGB32 into the caller-supplied buffer, and returns that buffer.
uint * QT_FASTCALL qt_destFetchRGB16_neon(uint *buffer, QRasterBuffer *rasterBuffer, int x, int y, int length)
{
    const ushort *line = (const ushort *)rasterBuffer->scanLine(y) + x;

    // Convert whole groups of eight pixels straight into the output.
    int done = 0;
    while (done + 8 <= length) {
        convert_8_pixels_rgb16_to_argb32(&buffer[done], &line[done]);
        done += 8;
    }

    // Fewer than eight pixels remain: stage them through fixed-size
    // scratch buffers so the eight-pixel converter can still be used.
    const int remaining = length - done;
    if (remaining > 0) {
        quint16 in[8];
        quint32 out[8];

        for (int j = 0; j < remaining; ++j)
            in[j] = line[done + j];

        convert_8_pixels_rgb16_to_argb32(out, in);

        for (int j = 0; j < remaining; ++j)
            buffer[done + j] = out[j];
    }

    return buffer;
}
692
// Packs exactly 8 ARGB32 pixels down to RGB16 (r5g6b5), discarding the
// alpha channel. Both buffers must hold at least 8 pixels.
static inline void convert_8_pixels_argb32_to_rgb16(quint16 *dst, const quint32 *src)
{
    asm volatile (
        "vld4.8 { d0, d1, d2, d3 }, [%[SRC]]\n\t"

        /* convert to r5g6b5 and store it into {d28, d29} */
        "vshll.u8 q14, d2, #8\n\t"
        "vshll.u8 q8, d1, #8\n\t"
        "vshll.u8 q9, d0, #8\n\t"
        "vsri.u16 q14, q8, #5\n\t"
        "vsri.u16 q14, q9, #11\n\t"

        "vst1.16 { d28, d29 }, [%[DST]]"
        : : [DST]"r" (dst), [SRC]"r" (src)
        : "memory", "d0", "d1", "d2", "d3", "d16", "d17", "d18", "d19", "d28", "d29"
    );
}
710
// Stores `length` ARGB32 pixels from buffer into the RGB16 raster line at
// (x, y), packing them down eight at a time.
void QT_FASTCALL qt_destStoreRGB16_neon(QRasterBuffer *rasterBuffer, int x, int y, const uint *buffer, int length)
{
    quint16 *line = (quint16*)rasterBuffer->scanLine(y) + x;

    // Pack whole groups of eight pixels straight into the raster line.
    int done = 0;
    while (done + 8 <= length) {
        convert_8_pixels_argb32_to_rgb16(&line[done], &buffer[done]);
        done += 8;
    }

    // Fewer than eight pixels remain: stage them through fixed-size
    // scratch buffers so the eight-pixel converter can still be used.
    const int remaining = length - done;
    if (remaining > 0) {
        quint32 in[8];
        quint16 out[8];

        for (int j = 0; j < remaining; ++j)
            in[j] = buffer[done + j];

        convert_8_pixels_argb32_to_rgb16(out, in);

        for (int j = 0; j < remaining; ++j)
            line[done + j] = out[j];
    }
}
733#endif
734
// Solid-color source-over composition on an ARGB32 span.
// const_alpha is in the 0..255 range here.
void QT_FASTCALL comp_func_solid_SourceOver_neon(uint *destPixels, int length, uint color, uint const_alpha)
{
    // Opaque color at full constant alpha: plain fill.
    if ((const_alpha & qAlpha(color)) == 255) {
        qt_memfill32(destPixels, color, length);
    } else {
        if (const_alpha != 255)
            color = BYTE_MUL(color, const_alpha);

        // 255 - alpha(color); replicated into every 16-bit lane below.
        const quint32 minusAlphaOfColor = qAlpha(~color);
        int x = 0;

        uint32_t *dst = (uint32_t *) destPixels;
        const uint32x4_t colorVector = vdupq_n_u32(color);
        uint16x8_t half = vdupq_n_u16(0x80); // rounding term for /255
        const uint16x8_t minusAlphaOfColorVector = vdupq_n_u16(minusAlphaOfColor);

        // dst = color + dst * (255 - color.alpha) / 255, 4 pixels per pass.
        for (; x < length-3; x += 4) {
            uint32x4_t dstVector = vld1q_u32(&dst[x]);

            const uint8x16_t dst8 = vreinterpretq_u8_u32(dstVector);

            const uint8x8_t dst8_low = vget_low_u8(dst8);
            const uint8x8_t dst8_high = vget_high_u8(dst8);

            // Widen to 16-bit lanes for the multiply.
            const uint16x8_t dst16_low = vmovl_u8(dst8_low);
            const uint16x8_t dst16_high = vmovl_u8(dst8_high);

            const uint16x8_t result16_low = qvbyte_mul_u16(dst16_low, minusAlphaOfColorVector, half);
            const uint16x8_t result16_high = qvbyte_mul_u16(dst16_high, minusAlphaOfColorVector, half);

            // Narrow back to bytes and add the pre-multiplied color.
            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

            uint32x4_t blendedPixels = vcombine_u32(result32_low, result32_high);
            uint32x4_t colorPlusBlendedPixels = vaddq_u32(colorVector, blendedPixels);
            vst1q_u32(&dst[x], colorPlusBlendedPixels);
        }

        // Scalar tail for the last 0..3 pixels.
        SIMD_EPILOGUE(x, length, 3)
            destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
    }
}
777
// Plus (additive) composition: dst = saturating_add(dst, src), optionally
// interpolated back toward the original dst by const_alpha (0..255).
void QT_FASTCALL comp_func_Plus_neon(uint *dst, const uint *src, int length, uint const_alpha)
{
    if (const_alpha == 255) {
        uint *const end = dst + length;
        // NOTE(review): forming `end - 3` assumes the span allows that
        // pointer arithmetic — confirm callers never pass length < 3, or
        // accept it as benign in practice.
        uint *const neonEnd = end - 3;

        // Saturating byte-wise add, 16 bytes (4 pixels) per iteration.
        while (dst < neonEnd) {
            uint8x16_t vs = vld1q_u8((const uint8_t*)src);
            const uint8x16_t vd = vld1q_u8((uint8_t*)dst);
            vs = vqaddq_u8(vs, vd);
            vst1q_u8((uint8_t*)dst, vs);
            src += 4;
            dst += 4;
        };

        // Scalar tail for the last 0..3 pixels.
        while (dst != end) {
            *dst = comp_func_Plus_one_pixel(*dst, *src);
            ++dst;
            ++src;
        }
    } else {
        int x = 0;
        const int one_minus_const_alpha = 255 - const_alpha;
        const uint16x8_t constAlphaVector = vdupq_n_u16(const_alpha);
        const uint16x8_t oneMinusconstAlphaVector = vdupq_n_u16(one_minus_const_alpha);

        const uint16x8_t half = vdupq_n_u16(0x80); // rounding term for /255
        for (; x < length - 3; x += 4) {
            const uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
            const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
            uint8x16_t dst8 = vld1q_u8((uint8_t *)&dst[x]);
            // Full-strength Plus first, then blend with the original dst
            // by const_alpha.
            uint8x16_t result = vqaddq_u8(dst8, src8);

            uint16x8_t result_low = vmovl_u8(vget_low_u8(result));
            uint16x8_t result_high = vmovl_u8(vget_high_u8(result));

            uint16x8_t dst_low = vmovl_u8(vget_low_u8(dst8));
            uint16x8_t dst_high = vmovl_u8(vget_high_u8(dst8));

            result_low = qvinterpolate_pixel_255(result_low, constAlphaVector, dst_low, oneMinusconstAlphaVector, half);
            result_high = qvinterpolate_pixel_255(result_high, constAlphaVector, dst_high, oneMinusconstAlphaVector, half);

            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result_high));
            vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
        }

        // Scalar tail for the last 0..3 pixels.
        SIMD_EPILOGUE(x, length, 3)
            dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
    }
}
829
830#if defined(ENABLE_PIXMAN_DRAWHELPERS)
831static const int tileSize = 32;
832
833extern "C" void qt_rotate90_16_neon(quint16 *dst, const quint16 *src, int sstride, int dstride, int count);
834
// Rotates a w x h RGB16 image by 90 degrees into destPixels, processing
// in 32x32 tiles for cache friendliness. Strides arrive in bytes and are
// converted to pixels below.
void qt_memrotate90_16_neon(const uchar *srcPixels, int w, int h, int sstride, uchar *destPixels, int dstride)
{
    const ushort *src = (const ushort *)srcPixels;
    ushort *dest = (ushort *)destPixels;

    sstride /= sizeof(ushort);
    dstride /= sizeof(ushort);

    // Pixels per packed quint32 store (2 for quint16).
    const int pack = sizeof(quint32) / sizeof(ushort);
    // Leading destination rows copied pixel-by-pixel so the packed
    // quint32 stores below start 4-byte aligned.
    const int unaligned =
        qMin(uint((quintptr(dest) & (sizeof(quint32)-1)) / sizeof(ushort)), uint(h));
    const int restX = w % tileSize;
    const int restY = (h - unaligned) % tileSize;
    const int unoptimizedY = restY % pack;
    const int numTilesX = w / tileSize + (restX > 0);
    const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);

    for (int tx = 0; tx < numTilesX; ++tx) {
        const int startx = w - tx * tileSize - 1;
        const int stopx = qMax(startx - tileSize, 0);

        if (unaligned) {
            // Scalar copy of the alignment rows.
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride;
                for (int y = 0; y < unaligned; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }

        for (int ty = 0; ty < numTilesY; ++ty) {
            const int starty = ty * tileSize + unaligned;
            const int stopy = qMin(starty + tileSize, h - unoptimizedY);

            int x = startx;
            // qt_rotate90_16_neon writes to eight rows, four pixels at a time
            for (; x >= stopx + 7; x -= 8) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                const ushort *s = &src[starty * sstride + x - 7];
                qt_rotate90_16_neon(d, s, sstride * 2, dstride * 2, stopy - starty);
            }

            // Remaining columns: pack `pack` source pixels into each
            // quint32 store.
            for (; x >= stopx; --x) {
                quint32 *d = reinterpret_cast<quint32*>(dest + (w - x - 1) * dstride + starty);
                for (int y = starty; y < stopy; y += pack) {
                    quint32 c = src[y * sstride + x];
                    for (int i = 1; i < pack; ++i) {
                        const int shift = (sizeof(int) * 8 / pack * i);
                        const ushort color = src[(y + i) * sstride + x];
                        c |= color << shift;
                    }
                    *d++ = c;
                }
            }
        }

        // Trailing rows that do not fill a whole pack: scalar copy.
        if (unoptimizedY) {
            const int starty = h - unoptimizedY;
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                for (int y = starty; y < h; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }
    }
}
902
903extern "C" void qt_rotate270_16_neon(quint16 *dst, const quint16 *src, int sstride, int dstride, int count);
904
// Rotates a w x h RGB16 image by 270 degrees into destPixels, processing
// in 32x32 tiles for cache friendliness. Strides arrive in bytes and are
// converted to pixels below.
void qt_memrotate270_16_neon(const uchar *srcPixels, int w, int h,
                             int sstride,
                             uchar *destPixels, int dstride)
{
    const ushort *src = (const ushort *)srcPixels;
    ushort *dest = (ushort *)destPixels;

    sstride /= sizeof(ushort);
    dstride /= sizeof(ushort);

    // Pixels per packed quint32 store (2 for quint16).
    const int pack = sizeof(quint32) / sizeof(ushort);
    // Leading destination rows copied pixel-by-pixel so the packed
    // quint32 stores below start 4-byte aligned.
    // Fix: use quintptr instead of long for the pointer-to-integer cast —
    // long is 32-bit on LLP64 targets (64-bit Windows), which would
    // truncate the address; this also matches qt_memrotate90_16_neon.
    const int unaligned =
        qMin(uint((quintptr(dest) & (sizeof(quint32)-1)) / sizeof(ushort)), uint(h));
    const int restX = w % tileSize;
    const int restY = (h - unaligned) % tileSize;
    const int unoptimizedY = restY % pack;
    const int numTilesX = w / tileSize + (restX > 0);
    const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);

    for (int tx = 0; tx < numTilesX; ++tx) {
        const int startx = tx * tileSize;
        const int stopx = qMin(startx + tileSize, w);

        if (unaligned) {
            // Scalar copy of the alignment rows.
            for (int x = startx; x < stopx; ++x) {
                ushort *d = dest + x * dstride;
                for (int y = h - 1; y >= h - unaligned; --y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }

        for (int ty = 0; ty < numTilesY; ++ty) {
            const int starty = h - 1 - unaligned - ty * tileSize;
            const int stopy = qMax(starty - tileSize, unoptimizedY);

            int x = startx;
            // qt_rotate90_16_neon writes to eight rows, four pixels at a
            // time; the negated strides make it perform the 270° rotation.
            for (; x < stopx - 7; x += 8) {
                ushort *d = dest + x * dstride + h - 1 - starty;
                const ushort *s = &src[starty * sstride + x];
                qt_rotate90_16_neon(d + 7 * dstride, s, -sstride * 2, -dstride * 2, starty - stopy);
            }

            // Remaining columns: pack `pack` source pixels into each
            // quint32 store.
            for (; x < stopx; ++x) {
                quint32 *d = reinterpret_cast<quint32*>(dest + x * dstride
                                                        + h - 1 - starty);
                for (int y = starty; y > stopy; y -= pack) {
                    quint32 c = src[y * sstride + x];
                    for (int i = 1; i < pack; ++i) {
                        const int shift = (sizeof(int) * 8 / pack * i);
                        const ushort color = src[(y - i) * sstride + x];
                        c |= color << shift;
                    }
                    *d++ = c;
                }
            }
        }
        // Trailing rows that do not fill a whole pack: scalar copy.
        if (unoptimizedY) {
            const int starty = unoptimizedY - 1;
            for (int x = startx; x < stopx; ++x) {
                ushort *d = dest + x * dstride + h - 1 - starty;
                for (int y = starty; y >= 0; --y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }
    }
}
974#endif
975
// Thin NEON wrapper types and operations consumed by the shared SIMD
// templates (e.g. the radial gradient fetcher below).
class QSimdNeon
{
public:
    struct Int32x4 {
        Int32x4() = default;
        Int32x4(int32x4_t v) : v(v) {}
        int32x4_t v;
        operator int32x4_t() const { return v; }
    };
    struct Float32x4 {
        Float32x4() = default;
        Float32x4(float32x4_t v) : v(v) {};
        float32x4_t v;
        operator float32x4_t() const { return v; }
    };

    // Unions for moving individual lanes between vector and scalar code.
    union Vect_buffer_i { Int32x4 v; int i[4]; };
    union Vect_buffer_f { Float32x4 v; float f[4]; };

    static inline Float32x4 v_dup(double x) { return vdupq_n_f32(float(x)); }
    static inline Float32x4 v_dup(float x) { return vdupq_n_f32(x); }
    static inline Int32x4 v_dup(int x) { return vdupq_n_s32(x); }
    static inline Int32x4 v_dup(uint x) { return vdupq_n_s32(x); } // uint value reinterpreted into signed lanes

    static inline Float32x4 v_add(Float32x4 a, Float32x4 b) { return vaddq_f32(a, b); }
    static inline Int32x4 v_add(Int32x4 a, Int32x4 b) { return vaddq_s32(a, b); }

    static inline Float32x4 v_max(Float32x4 a, Float32x4 b) { return vmaxq_f32(a, b); }
    static inline Float32x4 v_min(Float32x4 a, Float32x4 b) { return vminq_f32(a, b); }
    static inline Int32x4 v_min_16(Int32x4 a, Int32x4 b) { return vminq_s32(a, b); }

    static inline Int32x4 v_and(Int32x4 a, Int32x4 b) { return vandq_s32(a, b); }

    static inline Float32x4 v_sub(Float32x4 a, Float32x4 b) { return vsubq_f32(a, b); }
    static inline Int32x4 v_sub(Int32x4 a, Int32x4 b) { return vsubq_s32(a, b); }

    static inline Float32x4 v_mul(Float32x4 a, Float32x4 b) { return vmulq_f32(a, b); }

    // sqrt(x) via a reciprocal-sqrt estimate refined by one
    // Newton-Raphson step, then multiplied back by x.
    static inline Float32x4 v_sqrt(Float32x4 x) { Float32x4 y = vrsqrteq_f32(x); y = vmulq_f32(y, vrsqrtsq_f32(x, vmulq_f32(y, y))); return vmulq_f32(x, y); }

    static inline Int32x4 v_toInt(Float32x4 x) { return vcvtq_s32_f32(x); }

    // Lane-wise a >= b, returned as an all-ones/all-zeros integer mask.
    static inline Int32x4 v_greaterOrEqual(Float32x4 a, Float32x4 b) { return vreinterpretq_s32_u32(vcgeq_f32(a, b)); }
};
1020
// NEON entry point for radial gradient fetching: delegates to the generic
// SIMD template, instantiated with the QSimdNeon vector abstraction above.
const uint * QT_FASTCALL qt_fetch_radial_gradient_neon(uint *buffer, const Operator *op, const QSpanData *data,
                                                       int y, int x, int length)
{
    return qt_fetch_radial_gradient_template<QRadialFetchSimd<QSimdNeon>,uint>(buffer, op, data, y, x, length);
}
1026
1027extern void QT_FASTCALL qt_convert_rgb888_to_rgb32_neon(quint32 *dst, const uchar *src, int len);
1028
1029const uint * QT_FASTCALL qt_fetchUntransformed_888_neon(uint *buffer, const Operator *, const QSpanData *data,
1030 int y, int x, int length)
1031{
1032 const uchar *line = data->texture.scanLine(y) + x * 3;
1033 qt_convert_rgb888_to_rgb32_neon(buffer, line, length);
1034 return buffer;
1035}
1036
1037#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
1038static inline uint32x4_t vrgba2argb(uint32x4_t srcVector)
1039{
1040#if defined(Q_PROCESSOR_ARM_64)
1041 const uint8x16_t rgbaMask = qvsetq_n_u8(2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15);
1042#else
1043 const uint8x8_t rgbaMask = qvset_n_u8(2, 1, 0, 3, 6, 5, 4, 7);
1044#endif
1045#if defined(Q_PROCESSOR_ARM_64)
1046 srcVector = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(srcVector), rgbaMask));
1047#else
1048 // no vqtbl1q_u8, so use two vtbl1_u8
1049 const uint8x8_t low = vtbl1_u8(vreinterpret_u8_u32(vget_low_u32(srcVector)), rgbaMask);
1050 const uint8x8_t high = vtbl1_u8(vreinterpret_u8_u32(vget_high_u32(srcVector)), rgbaMask);
1051 srcVector = vcombine_u32(vreinterpret_u32_u8(low), vreinterpret_u32_u8(high));
1052#endif
1053 return srcVector;
1054}
1055
// Premultiply a span of 32-bit pixels, four at a time: R, G and B are
// multiplied by alpha/255 while the alpha byte itself is preserved.
// When RGBA is true the source bytes are additionally swizzled from
// RGBA order to ARGB before premultiplying. `buffer` may alias `src`.
template<bool RGBA>
static inline void convertARGBToARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    // Replicates each pixel's alpha byte (index 3 / 7) across its four lanes.
    const uint8x8_t shuffleMask = qvset_n_u8(3, 3, 3, 3, 7, 7, 7, 7);
    const uint32x4_t blendMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                // Mixed alpha: do the full 8x8 -> 16-bit multiply.
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(srcVector));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(srcVector));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // Divide by 255 with rounding: x/255 = (x + (x >> 8)) >> 8,
                // the rounding half folded into the vrshrn below.
                src1 = vsraq_n_u16(src1, src1, 8);
                src2 = vsraq_n_u16(src2, src2, 8);
                const uint8x8_t d1 = vrshrn_n_u16(src1, 8);
                const uint8x8_t d2 = vrshrn_n_u16(src2, 8);
                // Re-insert the original, unmultiplied alpha bytes.
                const uint32x4_t d = vbslq_u32(blendMask, srcVector, vreinterpretq_u32_u8(vcombine_u8(d1, d2)));
                vst1q_u32(buffer + i, d);
            } else {
                // All four pixels opaque: premultiplication is a no-op.
                if (RGBA)
                    vst1q_u32(buffer + i, vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(buffer + i, srcVector);
            }
        } else {
            // All four pixels fully transparent: premultiplied value is 0.
            vst1q_u32(buffer + i, vdupq_n_u32(0));
        }
    }

    // Scalar tail for the last (count % 4) pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint v = qPremultiply(src[i]);
        buffer[i] = RGBA ? RGBA2ARGB(v) : v;
    }
}
1105
// Convert a span of 32-bit pixels to premultiplied 64-bit RGBA64, four at
// a time. When RGBA is false the input is ARGB32 and gets its R/B bytes
// swapped to match the RGBA channel order of the 16-bit-per-channel output.
template<bool RGBA>
static inline void convertARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count)
{
    if (count <= 0)
        return;

    // Replicates each pixel's alpha byte (index 3 / 7) across its four lanes.
    const uint8x8_t shuffleMask = qvset_n_u8(3, 3, 3, 3, 7, 7, 7, 7);
    // Selects the 16-bit alpha channel of each 64-bit output pixel.
    const uint64x2_t blendMask = vdupq_n_u64(Q_UINT64_C(0xffff000000000000));

    int i = 0;
    for (; i < count-3; i += 4) {
        uint32x4_t vs32 = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(vs32, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (!RGBA)
                vs32 = vrgba2argb(vs32);
            // Zipping each byte with itself widens 8-bit x to 16-bit x*257,
            // the exact 8 -> 16 bit channel expansion.
            const uint8x16_t vs8 = vreinterpretq_u8_u32(vs32);
            const uint8x16x2_t v = vzipq_u8(vs8, vs8);
            if (alphaSum != 255 * 4) {
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(vs32));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(vs32));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // convert from 0->(255x255) to 0->(255x257)
                src1 = vsraq_n_u16(src1, src1, 7);
                src2 = vsraq_n_u16(src2, src2, 7);

                // now restore alpha from the trivial conversion
                const uint64x2_t d1 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[0]), vreinterpretq_u64_u16(src1));
                const uint64x2_t d2 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[1]), vreinterpretq_u64_u16(src2));

                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d1));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d2));
                buffer += 2;
            } else {
                // All four pixels opaque: the widened values are already
                // the premultiplied result.
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[0]));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[1]));
                buffer += 2;
            }
        } else {
            // All four pixels fully transparent: premultiplied value is 0.
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
        }
    }

    // Scalar tail for the last (count % 4) pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint s = src[i];
        if (RGBA)
            s = RGBA2ARGB(s);
        *buffer++ = QRgba64::fromArgb32(s).premultiplied();
    }
}
1171
1172static inline float32x4_t reciprocal_mul_ps(float32x4_t a, float mul)
1173{
1174 float32x4_t ia = vrecpeq_f32(a); // estimate 1/a
1175 ia = vmulq_f32(vrecpsq_f32(a, ia), vmulq_n_f32(ia, mul)); // estimate improvement step * mul
1176 return ia;
1177}
1178
// Unpremultiply a span of premultiplied 32-bit pixels, four at a time,
// using an approximate float reciprocal of alpha. Template switches:
// RGBA swizzles the output from ARGB to RGBA byte order; RGBx forces the
// output alpha byte to 0xff (opaque) instead of restoring it.
template<bool RGBA, bool RGBx>
static inline void convertARGBFromARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    const uint32x4_t alphaMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                // Per-pixel scale factor 255/alpha, computed in float.
                const float32x4_t a = vcvtq_f32_u32(alphaVector);
                const float32x4_t ia = reciprocal_mul_ps(a, 255.0f);
                // Convert 4x(4xU8) to 4x(4xF32)
                uint16x8_t tmp1 = vmovl_u8(vget_low_u8(vreinterpretq_u8_u32(srcVector)));
                uint16x8_t tmp3 = vmovl_u8(vget_high_u8(vreinterpretq_u8_u32(srcVector)));
                float32x4_t src1 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
                float32x4_t src2 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));
                float32x4_t src3 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp3)));
                float32x4_t src4 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp3)));
                // Each pixel's four channels are scaled by that pixel's 255/alpha.
                src1 = vmulq_lane_f32(src1, vget_low_f32(ia), 0);
                src2 = vmulq_lane_f32(src2, vget_low_f32(ia), 1);
                src3 = vmulq_lane_f32(src3, vget_high_f32(ia), 0);
                src4 = vmulq_lane_f32(src4, vget_high_f32(ia), 1);
                // Convert 4x(4xF32) back to 4x(4xU8) (over a 8.1 fixed point format to get rounding)
                tmp1 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src1, 1), 1),
                                    vrshrn_n_u32(vcvtq_n_u32_f32(src2, 1), 1));
                tmp3 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src3, 1), 1),
                                    vrshrn_n_u32(vcvtq_n_u32_f32(src4, 1), 1));
                uint32x4_t dstVector = vreinterpretq_u32_u8(vcombine_u8(vmovn_u16(tmp1), vmovn_u16(tmp3)));
                // Overwrite any undefined results from alpha==0 with zeros:
#if defined(Q_PROCESSOR_ARM_64)
                uint32x4_t srcVectorAlphaMask = vceqzq_u32(alphaVector);
#else
                uint32x4_t srcVectorAlphaMask = vceqq_u32(alphaVector, vdupq_n_u32(0));
#endif
                dstVector = vbicq_u32(dstVector, srcVectorAlphaMask);
                // Restore or mask alpha values:
                if (RGBx)
                    dstVector = vorrq_u32(alphaMask, dstVector);
                else
                    dstVector = vbslq_u32(alphaMask, srcVector, dstVector);
                vst1q_u32(&buffer[i], dstVector);
            } else {
                // 4xAlpha==255, no change except if we are doing RGBA->ARGB:
                if (RGBA)
                    vst1q_u32(&buffer[i], vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(&buffer[i], srcVector);
            }
        } else {
            // 4xAlpha==0, always zero, except if output is RGBx:
            if (RGBx)
                vst1q_u32(&buffer[i], alphaMask);
            else
                vst1q_u32(&buffer[i], vdupq_n_u32(0));
        }
    }

    // Scalar tail for the last (count % 4) pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint v = qUnpremultiply(src[i]);
        if (RGBx)
            v = 0xff000000 | v;
        if (RGBA)
            v = ARGB2RGBA(v);
        buffer[i] = v;
    }
}
1256
// In-place premultiplication of an ARGB32 buffer (RGBA = false: no swizzle).
void QT_FASTCALL convertARGB32ToARGB32PM_neon(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_neon<false>(buffer, buffer, count);
}
1261
// In-place RGBA8888 -> premultiplied ARGB32 (RGBA = true: swizzle + premultiply).
void QT_FASTCALL convertRGBA8888ToARGB32PM_neon(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_neon<true>(buffer, buffer, count);
}
1266
// Fetch `count` ARGB32 pixels starting at `index` and premultiply into `buffer`.
const uint *QT_FASTCALL fetchARGB32ToARGB32PM_neon(uint *buffer, const uchar *src, int index, int count,
                                                   const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToARGB32PM_neon<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}
1273
// Fetch `count` RGBA8888 pixels starting at `index`, swizzle to ARGB and
// premultiply into `buffer`.
const uint *QT_FASTCALL fetchRGBA8888ToARGB32PM_neon(uint *buffer, const uchar *src, int index, int count,
                                                     const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToARGB32PM_neon<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}
1280
// ARGB32 -> premultiplied RGBA64 (RGBA = false: input needs the R/B swap).
const QRgba64 * QT_FASTCALL convertARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count,
                                                         const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<false>(buffer, src, count);
    return buffer;
}
1287
// RGBA8888 -> premultiplied RGBA64 (RGBA = true: byte order already matches).
const QRgba64 * QT_FASTCALL convertRGBA8888ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count,
                                                           const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<true>(buffer, src, count);
    return buffer;
}
1294
// Fetch `count` ARGB32 pixels starting at `index` into premultiplied RGBA64.
const QRgba64 *QT_FASTCALL fetchARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uchar *src, int index, int count,
                                                      const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}
1301
// Fetch `count` RGBA8888 pixels starting at `index` into premultiplied RGBA64.
const QRgba64 *QT_FASTCALL fetchRGBA8888ToRGBA64PM_neon(QRgba64 *buffer, const uchar *src, int index, int count,
                                                        const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}
1308
// Store premultiplied ARGB32 as RGB32: unpremultiply, keep ARGB byte order
// (RGBA = false) and force the alpha byte to 0xff (RGBx = true).
void QT_FASTCALL storeRGB32FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
                                             const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_neon<false,true>(d, src, count);
}
1315
// Store premultiplied ARGB32 as plain ARGB32: unpremultiply only, alpha kept.
void QT_FASTCALL storeARGB32FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
                                              const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_neon<false,false>(d, src, count);
}
1322
// Store premultiplied ARGB32 as RGBA8888: unpremultiply and swizzle to RGBA
// byte order (RGBA = true), alpha kept.
void QT_FASTCALL storeRGBA8888FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
                                                const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_neon<true,false>(d, src, count);
}
1329
// Store premultiplied ARGB32 as RGBX8888: unpremultiply, swizzle to RGBA
// byte order (RGBA = true) and force the alpha byte to 0xff (RGBx = true).
void QT_FASTCALL storeRGBXFromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
                                            const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_neon<true,true>(d, src, count);
}
1336
1337#endif // Q_BYTE_ORDER == Q_LITTLE_ENDIAN
1338
1339QT_END_NAMESPACE
1340
1341#endif // __ARM_NEON__