5#include <private/qdrawhelper_neon_p.h>
6#include <private/qblendfunctions_p.h>
7#include <private/qmath_p.h>
8#include <private/qpixellayout_p.h>
12#include <private/qpaintengine_raster_p.h>
// Fills 'count' 32-bit words at 'dest' with 'value'.
// Bulk work is done 16 pixels at a time with a 4-way interleaved NEON
// store; the remaining count % 16 pixels fall through a Duff's-device
// style switch.
void qt_memfill32(quint32 *dest, quint32 value, qsizetype count)
{
    const int epilogueSize = count % 16;
    if (count >= 16) {
        quint32 *const neonEnd = dest + count - epilogueSize;
        const uint32x4_t valueVector1 = vdupq_n_u32(value);
        const uint32x4x4_t valueVector4 = { valueVector1, valueVector1, valueVector1, valueVector1 };
        do {
            vst4q_u32(dest, valueVector4);
            dest += 16;
        } while (dest != neonEnd);
    }

    switch (epilogueSize) {
    case 15: *dest++ = value; Q_FALLTHROUGH();
    case 14: *dest++ = value; Q_FALLTHROUGH();
    case 13: *dest++ = value; Q_FALLTHROUGH();
    case 12: *dest++ = value; Q_FALLTHROUGH();
    case 11: *dest++ = value; Q_FALLTHROUGH();
    case 10: *dest++ = value; Q_FALLTHROUGH();
    case 9: *dest++ = value; Q_FALLTHROUGH();
    case 8: *dest++ = value; Q_FALLTHROUGH();
    case 7: *dest++ = value; Q_FALLTHROUGH();
    case 6: *dest++ = value; Q_FALLTHROUGH();
    case 5: *dest++ = value; Q_FALLTHROUGH();
    case 4: *dest++ = value; Q_FALLTHROUGH();
    case 3: *dest++ = value; Q_FALLTHROUGH();
    case 2: *dest++ = value; Q_FALLTHROUGH();
    case 1: *dest++ = value;
    }
}
// Vectorized approximate division by 255 of eight 16-bit lanes:
// (x + (x >> 8) + 0x80) >> 8 — exact for all products of two bytes.
// 'half' must be vdupq_n_u16(0x80).
static inline uint16x8_t qvdiv_255_u16(uint16x8_t x, uint16x8_t half)
{
    const uint16x8_t temp = vshrq_n_u16(x, 8);
    const uint16x8_t sum_part = vaddq_u16(x, half);
    const uint16x8_t sum = vaddq_u16(temp, sum_part);

    return vshrq_n_u16(sum, 8);
}
// Per-lane byte multiply: (x * alpha) / 255 over eight 16-bit lanes.
// 'half' must be vdupq_n_u16(0x80) (rounding bias for qvdiv_255_u16).
static inline uint16x8_t qvbyte_mul_u16(uint16x8_t x, uint16x8_t alpha, uint16x8_t half)
{
    const uint16x8_t t = vmulq_u16(x, alpha);
    return qvdiv_255_u16(t, half);
}
// Per-lane interpolation: (x * a + y * b) / 255 over eight 16-bit lanes;
// the NEON counterpart of INTERPOLATE_PIXEL_255.
// 'half' must be vdupq_n_u16(0x80).
static inline uint16x8_t qvinterpolate_pixel_255(uint16x8_t x, uint16x8_t a, uint16x8_t y, uint16x8_t b, uint16x8_t half)
{
    const uint16x8_t ta = vmulq_u16(x, a);
    const uint16x8_t tb = vmulq_u16(y, b);

    return qvdiv_255_u16(vaddq_u16(ta, tb), half);
}
// Premultiplied SourceOver for two ARGB32 pixels widened to 16 bits per
// channel: src + dst * (255 - src.alpha). Lane 3 of each 4-lane half is
// the alpha channel, broadcast with vdup_lane. 'half' = 0x80 per lane,
// 'full' = 0xff per lane.
static inline uint16x8_t qvsource_over_u16(uint16x8_t src16, uint16x8_t dst16, uint16x8_t half, uint16x8_t full)
{
    const uint16x4_t alpha16_high = vdup_lane_u16(vget_high_u16(src16), 3);
    const uint16x4_t alpha16_low = vdup_lane_u16(vget_low_u16(src16), 3);

    const uint16x8_t alpha16 = vsubq_u16(full, vcombine_u16(alpha16_low, alpha16_high));

    return vaddq_u16(src16, qvbyte_mul_u16(dst16, alpha16, half));
}
88#if defined(ENABLE_PIXMAN_DRAWHELPERS)
90pixman_composite_over_8888_0565_asm_neon (int32_t w,
98pixman_composite_over_8888_8888_asm_neon (int32_t w,
106pixman_composite_src_0565_8888_asm_neon (int32_t w,
114pixman_composite_over_n_8_0565_asm_neon (int32_t w,
121 int32_t mask_stride);
124pixman_composite_scanline_over_asm_neon (int32_t w,
126 const uint32_t *src);
129pixman_composite_src_0565_0565_asm_neon (int32_t w,
136void qt_blend_argb32_on_rgb16_const_alpha(uchar *destPixels,
int dbpl,
137 const uchar *srcPixels,
int sbpl,
141void qt_blend_rgb16_on_argb32_neon(uchar *destPixels,
int dbpl,
142 const uchar *srcPixels,
int sbpl,
149 quint32 *dst = (quint32 *) destPixels;
150 quint16 *src = (quint16 *) srcPixels;
152 if (const_alpha != 256) {
153 quint8 a = (255 * const_alpha) >> 8;
157 for (
int x=0; x<w; ++x)
158 dst[x] = INTERPOLATE_PIXEL_255(qConvertRgb16To32(src[x]), a, dst[x], ia);
165 pixman_composite_src_0565_8888_asm_neon(w, h, dst, dbpl, src, sbpl);
169void qt_blend_rgb16_on_rgb16(uchar *dst,
int dbpl,
170 const uchar *src,
int sbpl,
176static inline void scanLineBlit16(quint16 *dst, quint16 *src,
int dstride)
179 ((quint32 *)dst)[0] = ((quint32 *)src)[0];
180 __builtin_prefetch(dst + dstride, 1, 0);
182 for (
int i = 1; i < N/2; ++i)
183 ((quint32 *)dst)[i] = ((quint32 *)src)[i];
189static inline void blockBlit16(quint16 *dst, quint16 *src,
int dstride,
int sstride,
int h)
203 scanLineBlit16<Width-1>(dst + 1, src + 1, dstride);
209 scanLineBlit16<Width>(dst, src, dstride);
217void qt_blend_rgb16_on_rgb16_neon(uchar *destPixels,
int dbpl,
218 const uchar *srcPixels,
int sbpl,
223 if (const_alpha != 256 || w >= 150) {
224 qt_blend_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
228 int dstride = dbpl / 2;
229 int sstride = sbpl / 2;
231 quint16 *dst = (quint16 *) destPixels;
232 quint16 *src = (quint16 *) srcPixels;
235#define BLOCKBLIT(n) case n: blockBlit16<n>(dst, src, dstride, sstride, h); return;
256 pixman_composite_src_0565_0565_asm_neon (w, h, dst, dstride, src, sstride);
259extern "C" void blend_8_pixels_argb32_on_rgb16_neon(quint16 *dst,
const quint32 *src,
int const_alpha);
261void qt_blend_argb32_on_rgb16_neon(uchar *destPixels,
int dbpl,
262 const uchar *srcPixels,
int sbpl,
266 quint16 *dst = (quint16 *) destPixels;
267 quint32 *src = (quint32 *) srcPixels;
269 if (const_alpha != 256) {
270 for (
int y=0; y<h; ++y) {
272 for (; i < w-7; i += 8)
273 blend_8_pixels_argb32_on_rgb16_neon(&dst[i], &src[i], const_alpha);
278 quint16 dstBuffer[8];
279 quint32 srcBuffer[8];
281 for (
int j = 0; j < tail; ++j) {
282 dstBuffer[j] = dst[i + j];
283 srcBuffer[j] = src[i + j];
286 blend_8_pixels_argb32_on_rgb16_neon(dstBuffer, srcBuffer, const_alpha);
288 for (
int j = 0; j < tail; ++j)
289 dst[i + j] = dstBuffer[j];
292 dst = (quint16 *)(((uchar *) dst) + dbpl);
293 src = (quint32 *)(((uchar *) src) + sbpl);
298 pixman_composite_over_8888_0565_asm_neon(w, h, dst, dbpl / 2, src, sbpl / 4);
302void qt_blend_argb32_on_argb32_scanline_neon(uint *dest,
const uint *src,
int length, uint const_alpha)
304 if (const_alpha == 255) {
305#if defined(ENABLE_PIXMAN_DRAWHELPERS)
306 pixman_composite_scanline_over_asm_neon(length, dest, src);
308 qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, 256);
311 qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, (const_alpha * 256) / 255);
// SourceOver-composites a premultiplied ARGB32 image onto another,
// four pixels per NEON iteration with a scalar per-scanline epilogue.
// const_alpha is 0..256; 256 means the source is used at full opacity.
void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
                                    const uchar *srcPixels, int sbpl,
                                    int w, int h,
                                    int const_alpha)
{
    const uint *src = (const uint *) srcPixels;
    uint *dst = (uint *) destPixels;
    uint16x8_t half = vdupq_n_u16(0x80);
    uint16x8_t full = vdupq_n_u16(0xff);
    if (const_alpha == 256) {
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
        pixman_composite_over_8888_8888_asm_neon(w, h, (uint32_t *)destPixels, dbpl / 4, (uint32_t *)srcPixels, sbpl / 4);
#else
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w - 3; x += 4) {
                // Skip fully transparent quads entirely.
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    // Widen both quads to 16 bits per channel.
                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t result16_low = qvsource_over_u16(src16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(src16_high, dst16_high, half, full);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            // Scalar tail for the last w % 4 pixels.
            for (; x < w; ++x) {
                uint s = src[x];
                if (s >= 0xff000000)
                    dst[x] = s;
                else if (s != 0)
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
#endif
    } else if (const_alpha != 0) {
        // Pre-scale the source by const_alpha, then source-over as usual.
        const_alpha = (const_alpha * 255) >> 8;
        uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w - 3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t srcalpha16_low = qvbyte_mul_u16(src16_low, const_alpha16, half);
                    const uint16x8_t srcalpha16_high = qvbyte_mul_u16(src16_high, const_alpha16, half);

                    const uint16x8_t result16_low = qvsource_over_u16(srcalpha16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(srcalpha16_high, dst16_high, half, full);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            for (; x < w; ++x) {
                uint s = src[x];
                if (s != 0) {
                    s = BYTE_MUL(s, const_alpha);
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
                }
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    }
}
421void qt_blend_rgb32_on_rgb32(uchar *destPixels,
int dbpl,
422 const uchar *srcPixels,
int sbpl,
// Blends an opaque RGB32 source onto an RGB32 destination with constant
// alpha: dst = src * a + dst * (255 - a), four pixels per NEON iteration.
// Full opacity (256) is a straight copy handled by the generic routine.
void qt_blend_rgb32_on_rgb32_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    if (const_alpha != 256) {
        if (const_alpha != 0) {
            const uint *src = (const uint *) srcPixels;
            uint *dst = (uint *) destPixels;
            uint16x8_t half = vdupq_n_u16(0x80);
            // Map 0..256 alpha onto 0..255.
            const_alpha = (const_alpha * 255) >> 8;
            int one_minus_const_alpha = 255 - const_alpha;
            uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
            uint16x8_t one_minus_const_alpha16 = vdupq_n_u16(255 - const_alpha);
            for (int y = 0; y < h; ++y) {
                int x = 0;
                for (; x < w - 3; x += 4) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    // Widen both quads to 16 bits per channel.
                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t result16_low = qvinterpolate_pixel_255(src16_low, const_alpha16, dst16_low, one_minus_const_alpha16, half);
                    const uint16x8_t result16_high = qvinterpolate_pixel_255(src16_high, const_alpha16, dst16_high, one_minus_const_alpha16, half);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
                // Scalar tail for the last w % 4 pixels.
                for (; x < w; ++x) {
                    dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
                }
                dst = (quint32 *)(((uchar *) dst) + dbpl);
                src = (const quint32 *)(((const uchar *) src) + sbpl);
            }
        }
    } else {
        qt_blend_rgb32_on_rgb32(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
    }
}
481#if defined(ENABLE_PIXMAN_DRAWHELPERS)
482extern void qt_alphamapblit_quint16(QRasterBuffer *rasterBuffer,
483 int x,
int y,
const QRgba64 &color,
485 int mapWidth,
int mapHeight,
int mapStride,
486 const QClipData *clip,
bool useGammaCorrection);
488void qt_alphamapblit_quint16_neon(QRasterBuffer *rasterBuffer,
489 int x,
int y,
const QRgba64 &color,
491 int mapWidth,
int mapHeight,
int mapStride,
492 const QClipData *clip,
bool useGammaCorrection)
494 if (clip || useGammaCorrection) {
495 qt_alphamapblit_quint16(rasterBuffer, x, y, color, bitmap, mapWidth, mapHeight, mapStride, clip, useGammaCorrection);
499 quint16 *dest =
reinterpret_cast<quint16*>(rasterBuffer->scanLine(y)) + x;
500 const int destStride = rasterBuffer->bytesPerLine() /
sizeof(quint16);
502 uchar *mask =
const_cast<uchar *>(bitmap);
503 const uint c = color.toArgb32();
505 pixman_composite_over_n_8_0565_asm_neon(mapWidth, mapHeight, dest, destStride, c, 0, mask, mapStride);
508extern "C" void blend_8_pixels_rgb16_on_rgb16_neon(quint16 *dst,
const quint16 *src,
int const_alpha);
510template <
typename SRC,
typename BlendFunc>
511struct Blend_on_RGB16_SourceAndConstAlpha_Neon {
512 Blend_on_RGB16_SourceAndConstAlpha_Neon(BlendFunc blender,
int const_alpha)
515 , m_const_alpha(const_alpha)
519 inline void write(quint16 *dst, quint32 src)
521 srcBuffer[m_index++] = src;
524 m_blender(dst - 7, srcBuffer, m_const_alpha);
529 inline void flush(quint16 *dst)
532 quint16 dstBuffer[8];
533 for (
int i = 0; i < m_index; ++i)
534 dstBuffer[i] = dst[i - m_index];
536 m_blender(dstBuffer, srcBuffer, m_const_alpha);
538 for (
int i = 0; i < m_index; ++i)
539 dst[i - m_index] = dstBuffer[i];
552template <
typename SRC,
typename BlendFunc>
553Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>
554Blend_on_RGB16_SourceAndConstAlpha_Neon_create(BlendFunc blender,
int const_alpha)
556 return Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>(blender, const_alpha);
559void qt_scale_image_argb32_on_rgb16_neon(uchar *destPixels,
int dbpl,
560 const uchar *srcPixels,
int sbpl,
int srch,
561 const QRectF &targetRect,
562 const QRectF &sourceRect,
566 if (const_alpha == 0)
569 qt_scale_image_16bit<quint32>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
570 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
573void qt_scale_image_rgb16_on_rgb16(uchar *destPixels,
int dbpl,
574 const uchar *srcPixels,
int sbpl,
int srch,
575 const QRectF &targetRect,
576 const QRectF &sourceRect,
580void qt_scale_image_rgb16_on_rgb16_neon(uchar *destPixels,
int dbpl,
581 const uchar *srcPixels,
int sbpl,
int srch,
582 const QRectF &targetRect,
583 const QRectF &sourceRect,
587 if (const_alpha == 0)
590 if (const_alpha == 256) {
591 qt_scale_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip, const_alpha);
595 qt_scale_image_16bit<quint16>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
596 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
599extern void qt_transform_image_rgb16_on_rgb16(uchar *destPixels,
int dbpl,
600 const uchar *srcPixels,
int sbpl,
601 const QRectF &targetRect,
602 const QRectF &sourceRect,
604 const QTransform &targetRectTransform,
607void qt_transform_image_rgb16_on_rgb16_neon(uchar *destPixels,
int dbpl,
608 const uchar *srcPixels,
int sbpl,
609 const QRectF &targetRect,
610 const QRectF &sourceRect,
612 const QTransform &targetRectTransform,
615 if (const_alpha == 0)
618 if (const_alpha == 256) {
619 qt_transform_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, targetRect, sourceRect, clip, targetRectTransform, const_alpha);
623 qt_transform_image(
reinterpret_cast<quint16 *>(destPixels), dbpl,
624 reinterpret_cast<
const quint16 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
625 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
628void qt_transform_image_argb32_on_rgb16_neon(uchar *destPixels,
int dbpl,
629 const uchar *srcPixels,
int sbpl,
630 const QRectF &targetRect,
631 const QRectF &sourceRect,
633 const QTransform &targetRectTransform,
636 if (const_alpha == 0)
639 qt_transform_image(
reinterpret_cast<quint16 *>(destPixels), dbpl,
640 reinterpret_cast<
const quint32 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
641 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
644static inline void convert_8_pixels_rgb16_to_argb32(quint32 *dst,
const quint16 *src)
647 "vld1.16 { d0, d1 }, [%[SRC]]\n\t"
650
651 "vshrn.u16 d4, q0, #8\n\t"
652 "vshrn.u16 d3, q0, #3\n\t"
653 "vsli.u16 q0, q0, #5\n\t"
654 "vsri.u8 d4, d4, #5\n\t"
655 "vsri.u8 d3, d3, #6\n\t"
656 "vshrn.u16 d2, q0, #2\n\t"
662 "vst4.8 { d2, d3, d4, d5 }, [%[DST]]"
663 : : [DST]
"r" (dst), [SRC]
"r" (src)
664 :
"memory",
"r2",
"d0",
"d1",
"d2",
"d3",
"d4",
"d5"
668uint * QT_FASTCALL qt_destFetchRGB16_neon(uint *buffer, QRasterBuffer *rasterBuffer,
int x,
int y,
int length)
670 const ushort *data = (
const ushort *)rasterBuffer->scanLine(y) + x;
673 for (; i < length - 7; i += 8)
674 convert_8_pixels_rgb16_to_argb32(&buffer[i], &data[i]);
677 quint16 srcBuffer[8];
678 quint32 dstBuffer[8];
680 int tail = length - i;
681 for (
int j = 0; j < tail; ++j)
682 srcBuffer[j] = data[i + j];
684 convert_8_pixels_rgb16_to_argb32(dstBuffer, srcBuffer);
686 for (
int j = 0; j < tail; ++j)
687 buffer[i + j] = dstBuffer[j];
693static inline void convert_8_pixels_argb32_to_rgb16(quint16 *dst,
const quint32 *src)
696 "vld4.8 { d0, d1, d2, d3 }, [%[SRC]]\n\t"
699 "vshll.u8 q14, d2, #8\n\t"
700 "vshll.u8 q8, d1, #8\n\t"
701 "vshll.u8 q9, d0, #8\n\t"
702 "vsri.u16 q14, q8, #5\n\t"
703 "vsri.u16 q14, q9, #11\n\t"
705 "vst1.16 { d28, d29 }, [%[DST]]"
706 : : [DST]
"r" (dst), [SRC]
"r" (src)
707 :
"memory",
"d0",
"d1",
"d2",
"d3",
"d16",
"d17",
"d18",
"d19",
"d28",
"d29"
711void QT_FASTCALL qt_destStoreRGB16_neon(QRasterBuffer *rasterBuffer,
int x,
int y,
const uint *buffer,
int length)
713 quint16 *data = (quint16*)rasterBuffer->scanLine(y) + x;
716 for (; i < length - 7; i += 8)
717 convert_8_pixels_argb32_to_rgb16(&data[i], &buffer[i]);
720 quint32 srcBuffer[8];
721 quint16 dstBuffer[8];
723 int tail = length - i;
724 for (
int j = 0; j < tail; ++j)
725 srcBuffer[j] = buffer[i + j];
727 convert_8_pixels_argb32_to_rgb16(dstBuffer, srcBuffer);
729 for (
int j = 0; j < tail; ++j)
730 data[i + j] = dstBuffer[j];
// Solid-color SourceOver: dst = color + dst * (255 - color.alpha),
// four pixels per NEON iteration. A fully opaque fill short-circuits to
// qt_memfill32. const_alpha is 0..255 here.
void QT_FASTCALL comp_func_solid_SourceOver_neon(uint *destPixels, int length, uint color, uint const_alpha)
{
    if ((const_alpha & qAlpha(color)) == 255) {
        qt_memfill32(destPixels, color, length);
    } else {
        if (const_alpha != 255)
            color = BYTE_MUL(color, const_alpha);

        const quint32 minusAlphaOfColor = qAlpha(~color);
        int x = 0;

        uint32_t *dst = (uint32_t *) destPixels;
        const uint32x4_t colorVector = vdupq_n_u32(color);
        uint16x8_t half = vdupq_n_u16(0x80);
        const uint16x8_t minusAlphaOfColorVector = vdupq_n_u16(minusAlphaOfColor);

        for (; x < length - 3; x += 4) {
            uint32x4_t dstVector = vld1q_u32(&dst[x]);

            const uint8x16_t dst8 = vreinterpretq_u8_u32(dstVector);

            const uint8x8_t dst8_low = vget_low_u8(dst8);
            const uint8x8_t dst8_high = vget_high_u8(dst8);

            const uint16x8_t dst16_low = vmovl_u8(dst8_low);
            const uint16x8_t dst16_high = vmovl_u8(dst8_high);

            const uint16x8_t result16_low = qvbyte_mul_u16(dst16_low, minusAlphaOfColorVector, half);
            const uint16x8_t result16_high = qvbyte_mul_u16(dst16_high, minusAlphaOfColorVector, half);

            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

            uint32x4_t blendedPixels = vcombine_u32(result32_low, result32_high);
            uint32x4_t colorPlusBlendedPixels = vaddq_u32(colorVector, blendedPixels);
            vst1q_u32(&dst[x], colorPlusBlendedPixels);
        }

        SIMD_EPILOGUE(x, length, 3)
            destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
    }
}
// Plus composition: per-channel saturating add of src onto dst. With
// partial const_alpha the saturated sum is interpolated back toward the
// original destination. const_alpha is 0..255.
void QT_FASTCALL comp_func_Plus_neon(uint *dst, const uint *src, int length, uint const_alpha)
{
    if (const_alpha == 255) {
        uint *const end = dst + length;
        uint *const neonEnd = end - 3;

        // 16 bytes (4 pixels) per iteration of saturating byte adds.
        while (dst < neonEnd) {
            uint8x16_t vs = vld1q_u8((const uint8_t*)src);
            const uint8x16_t vd = vld1q_u8((uint8_t*)dst);
            vs = vqaddq_u8(vs, vd);
            vst1q_u8((uint8_t*)dst, vs);
            src += 4;
            dst += 4;
        }

        while (dst != end) {
            *dst = comp_func_Plus_one_pixel(*dst, *src);
            ++dst;
            ++src;
        }
    } else {
        int x = 0;
        const int one_minus_const_alpha = 255 - const_alpha;
        const uint16x8_t constAlphaVector = vdupq_n_u16(const_alpha);
        const uint16x8_t oneMinusconstAlphaVector = vdupq_n_u16(one_minus_const_alpha);

        const uint16x8_t half = vdupq_n_u16(0x80);
        for (; x < length - 3; x += 4) {
            const uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
            const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
            uint8x16_t dst8 = vld1q_u8((uint8_t *)&dst[x]);
            uint8x16_t result = vqaddq_u8(dst8, src8);

            uint16x8_t result_low = vmovl_u8(vget_low_u8(result));
            uint16x8_t result_high = vmovl_u8(vget_high_u8(result));

            uint16x8_t dst_low = vmovl_u8(vget_low_u8(dst8));
            uint16x8_t dst_high = vmovl_u8(vget_high_u8(dst8));

            // Blend the saturated sum with the original destination.
            result_low = qvinterpolate_pixel_255(result_low, constAlphaVector, dst_low, oneMinusconstAlphaVector, half);
            result_high = qvinterpolate_pixel_255(result_high, constAlphaVector, dst_high, oneMinusconstAlphaVector, half);

            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result_high));
            vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
        }

        SIMD_EPILOGUE(x, length, 3)
            dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
    }
}
830#if defined(ENABLE_PIXMAN_DRAWHELPERS)
831static const int tileSize = 32;
833extern "C" void qt_rotate90_16_neon(quint16 *dst,
const quint16 *src,
int sstride,
int dstride,
int count);
835void qt_memrotate90_16_neon(
const uchar *srcPixels,
int w,
int h,
int sstride, uchar *destPixels,
int dstride)
837 const ushort *src = (
const ushort *)srcPixels;
838 ushort *dest = (ushort *)destPixels;
840 sstride /=
sizeof(ushort);
841 dstride /=
sizeof(ushort);
843 const int pack =
sizeof(quint32) /
sizeof(ushort);
844 const int unaligned =
845 qMin(uint((quintptr(dest) & (
sizeof(quint32)-1)) /
sizeof(ushort)), uint(h));
846 const int restX = w % tileSize;
847 const int restY = (h - unaligned) % tileSize;
848 const int unoptimizedY = restY % pack;
849 const int numTilesX = w / tileSize + (restX > 0);
850 const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);
852 for (
int tx = 0; tx < numTilesX; ++tx) {
853 const int startx = w - tx * tileSize - 1;
854 const int stopx = qMax(startx - tileSize, 0);
857 for (
int x = startx; x >= stopx; --x) {
858 ushort *d = dest + (w - x - 1) * dstride;
859 for (
int y = 0; y < unaligned; ++y) {
860 *d++ = src[y * sstride + x];
865 for (
int ty = 0; ty < numTilesY; ++ty) {
866 const int starty = ty * tileSize + unaligned;
867 const int stopy = qMin(starty + tileSize, h - unoptimizedY);
871 for (; x >= stopx + 7; x -= 8) {
872 ushort *d = dest + (w - x - 1) * dstride + starty;
873 const ushort *s = &src[starty * sstride + x - 7];
874 qt_rotate90_16_neon(d, s, sstride * 2, dstride * 2, stopy - starty);
877 for (; x >= stopx; --x) {
878 quint32 *d =
reinterpret_cast<quint32*>(dest + (w - x - 1) * dstride + starty);
879 for (
int y = starty; y < stopy; y += pack) {
880 quint32 c = src[y * sstride + x];
881 for (
int i = 1; i < pack; ++i) {
882 const int shift = (
sizeof(
int) * 8 / pack * i);
883 const ushort color = src[(y + i) * sstride + x];
892 const int starty = h - unoptimizedY;
893 for (
int x = startx; x >= stopx; --x) {
894 ushort *d = dest + (w - x - 1) * dstride + starty;
895 for (
int y = starty; y < h; ++y) {
896 *d++ = src[y * sstride + x];
903extern "C" void qt_rotate270_16_neon(quint16 *dst,
const quint16 *src,
int sstride,
int dstride,
int count);
905void qt_memrotate270_16_neon(
const uchar *srcPixels,
int w,
int h,
907 uchar *destPixels,
int dstride)
909 const ushort *src = (
const ushort *)srcPixels;
910 ushort *dest = (ushort *)destPixels;
912 sstride /=
sizeof(ushort);
913 dstride /=
sizeof(ushort);
915 const int pack =
sizeof(quint32) /
sizeof(ushort);
916 const int unaligned =
917 qMin(uint((
long(dest) & (
sizeof(quint32)-1)) /
sizeof(ushort)), uint(h));
918 const int restX = w % tileSize;
919 const int restY = (h - unaligned) % tileSize;
920 const int unoptimizedY = restY % pack;
921 const int numTilesX = w / tileSize + (restX > 0);
922 const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);
924 for (
int tx = 0; tx < numTilesX; ++tx) {
925 const int startx = tx * tileSize;
926 const int stopx = qMin(startx + tileSize, w);
929 for (
int x = startx; x < stopx; ++x) {
930 ushort *d = dest + x * dstride;
931 for (
int y = h - 1; y >= h - unaligned; --y) {
932 *d++ = src[y * sstride + x];
937 for (
int ty = 0; ty < numTilesY; ++ty) {
938 const int starty = h - 1 - unaligned - ty * tileSize;
939 const int stopy = qMax(starty - tileSize, unoptimizedY);
943 for (; x < stopx - 7; x += 8) {
944 ushort *d = dest + x * dstride + h - 1 - starty;
945 const ushort *s = &src[starty * sstride + x];
946 qt_rotate90_16_neon(d + 7 * dstride, s, -sstride * 2, -dstride * 2, starty - stopy);
949 for (; x < stopx; ++x) {
950 quint32 *d =
reinterpret_cast<quint32*>(dest + x * dstride
952 for (
int y = starty; y > stopy; y -= pack) {
953 quint32 c = src[y * sstride + x];
954 for (
int i = 1; i < pack; ++i) {
955 const int shift = (
sizeof(
int) * 8 / pack * i);
956 const ushort color = src[(y - i) * sstride + x];
964 const int starty = unoptimizedY - 1;
965 for (
int x = startx; x < stopx; ++x) {
966 ushort *d = dest + x * dstride + h - 1 - starty;
967 for (
int y = starty; y >= 0; --y) {
968 *d++ = src[y * sstride + x];
981 Int32x4(int32x4_t v) : v(v) {}
983 operator int32x4_t()
const {
return v; }
986 Float32x4() =
default;
987 Float32x4(float32x4_t v) : v(v) {};
989 operator float32x4_t()
const {
return v; }
992 union Vect_buffer_i { Int32x4 v;
int i[4]; };
993 union Vect_buffer_f { Float32x4 v;
float f[4]; };
995 static inline Float32x4 v_dup(
double x) {
return vdupq_n_f32(
float(x)); }
996 static inline Float32x4 v_dup(
float x) {
return vdupq_n_f32(x); }
997 static inline Int32x4 v_dup(
int x) {
return vdupq_n_s32(x); }
998 static inline Int32x4 v_dup(uint x) {
return vdupq_n_s32(x); }
1000 static inline Float32x4 v_add(Float32x4 a, Float32x4 b) {
return vaddq_f32(a, b); }
1001 static inline Int32x4 v_add(Int32x4 a, Int32x4 b) {
return vaddq_s32(a, b); }
1003 static inline Float32x4 v_max(Float32x4 a, Float32x4 b) {
return vmaxq_f32(a, b); }
1004 static inline Float32x4 v_min(Float32x4 a, Float32x4 b) {
return vminq_f32(a, b); }
1005 static inline Int32x4 v_min_16(Int32x4 a, Int32x4 b) {
return vminq_s32(a, b); }
1007 static inline Int32x4 v_and(Int32x4 a, Int32x4 b) {
return vandq_s32(a, b); }
1009 static inline Float32x4 v_sub(Float32x4 a, Float32x4 b) {
return vsubq_f32(a, b); }
1010 static inline Int32x4 v_sub(Int32x4 a, Int32x4 b) {
return vsubq_s32(a, b); }
1012 static inline Float32x4 v_mul(Float32x4 a, Float32x4 b) {
return vmulq_f32(a, b); }
1014 static inline Float32x4 v_sqrt(Float32x4 x) { Float32x4 y = vrsqrteq_f32(x); y = vmulq_f32(y, vrsqrtsq_f32(x, vmulq_f32(y, y)));
return vmulq_f32(x, y); }
1016 static inline Int32x4 v_toInt(Float32x4 x) {
return vcvtq_s32_f32(x); }
1018 static inline Int32x4 v_greaterOrEqual(Float32x4 a, Float32x4 b) {
return vreinterpretq_s32_u32(vcgeq_f32(a, b)); }
1021const uint * QT_FASTCALL qt_fetch_radial_gradient_neon(uint *buffer,
const Operator *op,
const QSpanData *data,
1022 int y,
int x,
int length)
1024 return qt_fetch_radial_gradient_template<QRadialFetchSimd<QSimdNeon>,uint>(buffer, op, data, y, x, length);
1027extern void QT_FASTCALL qt_convert_rgb888_to_rgb32_neon(quint32 *dst,
const uchar *src,
int len);
1029const uint * QT_FASTCALL qt_fetchUntransformed_888_neon(uint *buffer,
const Operator *,
const QSpanData *data,
1030 int y,
int x,
int length)
1032 const uchar *line = data->texture.scanLine(y) + x * 3;
1033 qt_convert_rgb888_to_rgb32_neon(buffer, line, length);
1037#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
// Swaps the R and B bytes of four packed pixels (RGBA <-> ARGB byte
// order) with a table-lookup permute.
static inline uint32x4_t vrgba2argb(uint32x4_t srcVector)
{
#if defined(Q_PROCESSOR_ARM_64)
    const uint8x16_t rgbaMask = qvsetq_n_u8(2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15);
#else
    const uint8x8_t rgbaMask = qvset_n_u8(2, 1, 0, 3, 6, 5, 4, 7);
#endif
#if defined(Q_PROCESSOR_ARM_64)
    srcVector = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(srcVector), rgbaMask));
#else
    // AArch32 lacks vqtbl1q_u8; permute each 64-bit half separately.
    const uint8x8_t low = vtbl1_u8(vreinterpret_u8_u32(vget_low_u32(srcVector)), rgbaMask);
    const uint8x8_t high = vtbl1_u8(vreinterpret_u8_u32(vget_high_u32(srcVector)), rgbaMask);
    srcVector = vcombine_u32(vreinterpret_u32_u8(low), vreinterpret_u32_u8(high));
#endif
    return srcVector;
}
// Premultiplies 'count' (A)RGB32 pixels from src into buffer, four at a
// time. Quads that are fully opaque are passed through (optionally
// R/B-swapped when RGBA); fully transparent quads become zero; mixed
// quads are premultiplied channel-wise with rounding.
template<bool RGBA>
static inline void convertARGBToARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    const uint8x8_t shuffleMask = qvset_n_u8(3, 3, 3, 3, 7, 7, 7, 7);
    const uint32x4_t blendMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // No horizontal add on AArch32: pairwise-add twice.
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(srcVector));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(srcVector));
                // Broadcast each pixel's alpha over its four channels.
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // x * a -> x * a + (x * a >> 8), then round-shift by 8:
                // the standard exact /255 with rounding.
                src1 = vsraq_n_u16(src1, src1, 8);
                src2 = vsraq_n_u16(src2, src2, 8);
                const uint8x8_t d1 = vrshrn_n_u16(src1, 8);
                const uint8x8_t d2 = vrshrn_n_u16(src2, 8);
                // Keep the original alpha byte, take premultiplied RGB.
                const uint32x4_t d = vbslq_u32(blendMask, srcVector, vreinterpretq_u32_u8(vcombine_u8(d1, d2)));
                vst1q_u32(buffer + i, d);
            } else {
                // Fully opaque: pass through (swapped if RGBA).
                if (RGBA)
                    vst1q_u32(buffer + i, vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(buffer + i, srcVector);
            }
        } else {
            vst1q_u32(buffer + i, vdupq_n_u32(0));
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        uint v = qPremultiply(src[i]);
        buffer[i] = RGBA ? RGBA2ARGB(v) : v;
    }
}
// Converts 'count' (A)RGB32 pixels to premultiplied 16-bit-per-channel
// RGBA64, four at a time. Each 8-bit channel is widened by replication
// (zip with itself); non-opaque quads are additionally multiplied by
// their alpha, keeping the replicated 16-bit alpha via a bit-select.
// NOTE(review): the buffer advancement between the paired stores was
// elided in this extraction; reconstructed as 2 QRgba64 per store.
template<bool RGBA>
static inline void convertARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count)
{
    int i = 0;
    const uint8x8_t shuffleMask = qvset_n_u8(3, 3, 3, 3, 7, 7, 7, 7);
    const uint64x2_t blendMask = vdupq_n_u64(Q_UINT64_C(0xffff000000000000));

    for (; i < count - 3; i += 4) {
        uint32x4_t vs32 = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(vs32, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // No horizontal add on AArch32: pairwise-add twice.
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (RGBA)
                vs32 = vrgba2argb(vs32);
            const uint8x16_t vs8 = vreinterpretq_u8_u32(vs32);
            // Zip each byte with itself: 0xAB -> 0xABAB (8->16 bit widen).
            const uint8x16x2_t v = vzipq_u8(vs8, vs8);
            if (alphaSum != 255 * 4) {
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(vs32));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(vs32));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // src = src * alpha scaled to the 16-bit range.
                src1 = vsraq_n_u16(src1, src1, 7);
                src2 = vsraq_n_u16(src2, src2, 7);
                // Restore the replicated (unmultiplied) alpha channel.
                const uint64x2_t d1 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[0]), vreinterpretq_u64_u16(src1));
                const uint64x2_t d2 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[1]), vreinterpretq_u64_u16(src2));

                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d1));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d2));
                buffer += 2;
            } else {
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[0]));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[1]));
                buffer += 2;
            }
        } else {
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        uint s = src[i];
        if (RGBA)
            s = RGBA2ARGB(s);
        *buffer++ = QRgba64::fromArgb32(s).premultiplied();
    }
}
// Computes approximately mul / a per lane: one Newton-Raphson refinement
// of the hardware reciprocal estimate, with the scale by 'mul' folded in.
static inline float32x4_t reciprocal_mul_ps(float32x4_t a, float mul)
{
    float32x4_t ia = vrecpeq_f32(a); // estimate 1/a
    ia = vmulq_f32(vrecpsq_f32(a, ia), vmulq_n_f32(ia, mul));
    return ia;
}
// Converts `count` premultiplied ARGB32 pixels back to unpremultiplied form.
// Template flags: RGBA selects RGBA8888 output byte order; RGBx additionally
// forces the alpha byte to 0xff (opaque).
// NOTE(review): this extract is garbled — the `int i = 0;` initialiser,
// several #else/#endif markers, the `if (RGBA)`/`if (RGBx)` guards, the
// all-opaque / all-transparent branch heads, closing braces, and the tail of
// the scalar epilogue were dropped. Compare with upstream
// qdrawhelper_neon.cpp before trusting the exact control flow.
1179template<
 bool RGBA,
 bool RGBx>
1180static inline void convertARGBFromARGB32PM_neon(uint *buffer,
 const uint *src,
 int count)
// Mask for the alpha byte of an ARGB32 pixel; also doubles as the opaque
// value OR-ed in for RGBx output.
1183 const uint32x4_t alphaMask = vdupq_n_u32(0xff000000);
// Main vector loop: four pixels per iteration.
1185 for (; i < count - 3; i += 4) {
1186 uint32x4_t srcVector = vld1q_u32(src + i);
// Extract the four 8-bit alpha values (top byte of each pixel).
1187 uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
1188#if defined(Q_PROCESSOR_ARM_64)
// AArch64 horizontal add; the 32-bit path below pairwise-adds instead.
1189 uint32_t alphaSum = vaddvq_u32(alphaVector);
1192 uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
1193 uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
// alphaSum == 255*4: all four pixels opaque, unpremultiply is a no-op
// (fast path further down).
1196 if (alphaSum != 255 * 4) {
// RGBA output is produced by byte-swapping first (upstream guards this
// with `if (RGBA)`; the guard was lost in extraction).
1198 srcVector = vrgba2argb(srcVector);
// Per-pixel scale factor 255/alpha via reciprocal estimate plus one
// Newton-Raphson step.
1199 const float32x4_t a = vcvtq_f32_u32(alphaVector);
1200 const float32x4_t ia = reciprocal_mul_ps(a, 255.0f);
// Widen the 16 channel bytes to four float vectors (one per pixel).
1202 uint16x8_t tmp1 = vmovl_u8(vget_low_u8(vreinterpretq_u8_u32(srcVector)));
1203 uint16x8_t tmp3 = vmovl_u8(vget_high_u8(vreinterpretq_u8_u32(srcVector)));
1204 float32x4_t src1 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
1205 float32x4_t src2 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));
1206 float32x4_t src3 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp3)));
1207 float32x4_t src4 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp3)));
// Multiply each pixel's four channels by that pixel's 255/alpha lane.
1208 src1 = vmulq_lane_f32(src1, vget_low_f32(ia), 0);
1209 src2 = vmulq_lane_f32(src2, vget_low_f32(ia), 1);
1210 src3 = vmulq_lane_f32(src3, vget_high_f32(ia), 0);
1211 src4 = vmulq_lane_f32(src4, vget_high_f32(ia), 1);
// Convert back to integers with round-to-nearest: one fractional bit
// (vcvtq_n_u32_f32(..., 1)) then a rounding narrowing shift by 1.
1213 tmp1 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src1, 1), 1),
1214 vrshrn_n_u32(vcvtq_n_u32_f32(src2, 1), 1));
1215 tmp3 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src3, 1), 1),
1216 vrshrn_n_u32(vcvtq_n_u32_f32(src4, 1), 1));
// Narrow the 16-bit channels back to bytes and repack as 32-bit pixels.
1217 uint32x4_t dstVector = vreinterpretq_u32_u8(vcombine_u8(vmovn_u16(tmp1), vmovn_u16(tmp3)));
// Zero-alpha pixels: 255/0 yields garbage, so mask those pixels to 0.
1219#if defined(Q_PROCESSOR_ARM_64)
1220 uint32x4_t srcVectorAlphaMask = vceqzq_u32(alphaVector);
1222 uint32x4_t srcVectorAlphaMask = vceqq_u32(alphaVector, vdupq_n_u32(0));
1224 dstVector = vbicq_u32(dstVector, srcVectorAlphaMask);
// RGBx: force the alpha byte fully opaque; otherwise re-insert the
// original alpha bytes over the recomputed colour channels.
1227 dstVector = vorrq_u32(alphaMask, dstVector);
1229 dstVector = vbslq_u32(alphaMask, srcVector, dstVector);
1230 vst1q_u32(&buffer[i], dstVector);
// All-opaque fast path: only the byte order (if RGBA) changes; when
// converting in place with no swizzle the store can be skipped.
1234 vst1q_u32(&buffer[i], vrgba2argb(srcVector));
1235 else if (buffer != src)
1236 vst1q_u32(&buffer[i], srcVector);
// Presumably the all-transparent branch: RGBx emits opaque black
// (alphaMask), other formats emit 0 — TODO confirm against upstream.
1241 vst1q_u32(&buffer[i], alphaMask);
1243 vst1q_u32(&buffer[i], vdupq_n_u32(0));
// Scalar tail for the remaining pixels (continues past this extract).
1247 SIMD_EPILOGUE(i, count, 3) {
1248 uint v = qUnpremultiply(src[i]);
1257void QT_FASTCALL convertARGB32ToARGB32PM_neon(uint *buffer,
int count,
const QList<QRgb> *)
1259 convertARGBToARGB32PM_neon<
false>(buffer, buffer, count);
1262void QT_FASTCALL convertRGBA8888ToARGB32PM_neon(uint *buffer,
int count,
const QList<QRgb> *)
1264 convertARGBToARGB32PM_neon<
true>(buffer, buffer, count);
1267const uint *QT_FASTCALL fetchARGB32ToARGB32PM_neon(uint *buffer,
const uchar *src,
int index,
int count,
1268 const QList<QRgb> *, QDitherInfo *)
1270 convertARGBToARGB32PM_neon<
false>(buffer,
reinterpret_cast<
const uint *>(src) + index, count);
1274const uint *QT_FASTCALL fetchRGBA8888ToARGB32PM_neon(uint *buffer,
const uchar *src,
int index,
int count,
1275 const QList<QRgb> *, QDitherInfo *)
1277 convertARGBToARGB32PM_neon<
true>(buffer,
reinterpret_cast<
const uint *>(src) + index, count);
1281const QRgba64 * QT_FASTCALL convertARGB32ToRGBA64PM_neon(QRgba64 *buffer,
const uint *src,
int count,
1282 const QList<QRgb> *, QDitherInfo *)
1284 convertARGB32ToRGBA64PM_neon<
false>(buffer, src, count);
1288const QRgba64 * QT_FASTCALL convertRGBA8888ToRGBA64PM_neon(QRgba64 *buffer,
const uint *src,
int count,
1289 const QList<QRgb> *, QDitherInfo *)
1291 convertARGB32ToRGBA64PM_neon<
true>(buffer, src, count);
1295const QRgba64 *QT_FASTCALL fetchARGB32ToRGBA64PM_neon(QRgba64 *buffer,
const uchar *src,
int index,
int count,
1296 const QList<QRgb> *, QDitherInfo *)
1298 convertARGB32ToRGBA64PM_neon<
false>(buffer,
reinterpret_cast<
const uint *>(src) + index, count);
1302const QRgba64 *QT_FASTCALL fetchRGBA8888ToRGBA64PM_neon(QRgba64 *buffer,
const uchar *src,
int index,
int count,
1303 const QList<QRgb> *, QDitherInfo *)
1305 convertARGB32ToRGBA64PM_neon<
true>(buffer,
reinterpret_cast<
const uint *>(src) + index, count);
1309void QT_FASTCALL storeRGB32FromARGB32PM_neon(uchar *dest,
const uint *src,
int index,
int count,
1310 const QList<QRgb> *, QDitherInfo *)
1312 uint *d =
reinterpret_cast<uint *>(dest) + index;
1313 convertARGBFromARGB32PM_neon<
false,
true>(d, src, count);
1316void QT_FASTCALL storeARGB32FromARGB32PM_neon(uchar *dest,
const uint *src,
int index,
int count,
1317 const QList<QRgb> *, QDitherInfo *)
1319 uint *d =
reinterpret_cast<uint *>(dest) + index;
1320 convertARGBFromARGB32PM_neon<
false,
false>(d, src, count);
1323void QT_FASTCALL storeRGBA8888FromARGB32PM_neon(uchar *dest,
const uint *src,
int index,
int count,
1324 const QList<QRgb> *, QDitherInfo *)
1326 uint *d =
reinterpret_cast<uint *>(dest) + index;
1327 convertARGBFromARGB32PM_neon<
true,
false>(d, src, count);
1330void QT_FASTCALL storeRGBXFromARGB32PM_neon(uchar *dest,
const uint *src,
int index,
int count,
1331 const QList<QRgb> *, QDitherInfo *)
1333 uint *d =
reinterpret_cast<uint *>(dest) + index;
1334 convertARGBFromARGB32PM_neon<
true,
true>(d, src, count);