#include <QtCore/private/qsimd_p.h>
#include <QtGui/private/qtguiglobal_p.h>

QT_BEGIN_NAMESPACE

inline QRgba64 combineAlpha256(QRgba64 rgba64, uint alpha256)
{
    return QRgba64::fromRgba64(rgba64.red(), rgba64.green(), rgba64.blue(), (rgba64.alpha() * alpha256) >> 8);
}

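// multiplyAlpha65535(): multiplies each 16-bit channel by a 16-bit alpha and
// divides by 65535. The SIMD paths below use the usual shift trick,
// (x + (x >> 16) + 0x8000) >> 16, which is a close rounding approximation of
// x / 65535 for products of two 16-bit values.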
#if defined(__SSE2__)
static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, __m128i va)
{
    __m128i vs = rgba64;
    vs = _mm_unpacklo_epi16(_mm_mullo_epi16(vs, va), _mm_mulhi_epu16(vs, va)); // full 32-bit products
    vs = _mm_add_epi32(vs, _mm_srli_epi32(vs, 16));                            // vs += vs >> 16
    vs = _mm_add_epi32(vs, _mm_set1_epi32(0x8000));                            // round
    vs = _mm_srai_epi32(vs, 16);
    vs = _mm_packs_epi32(vs, vs);
    return vs;
}
static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, uint alpha65535)
{
    const __m128i va = _mm_shufflelo_epi16(_mm_cvtsi32_si128(alpha65535), _MM_SHUFFLE(0, 0, 0, 0));
    return multiplyAlpha65535(rgba64, va);
}
#elif defined(__ARM_NEON__)
static inline uint16x4_t multiplyAlpha65535(uint16x4_t rgba64, uint16x4_t alpha65535)
{
    uint32x4_t vs32 = vmull_u16(rgba64, alpha65535); // vs = rgba64 * alpha
    vs32 = vsraq_n_u32(vs32, vs32, 16);              // vs += vs >> 16
    return vrshrn_n_u32(vs32, 16);                   // vs = (vs + 0x8000) >> 16
}
static inline uint16x4_t multiplyAlpha65535(uint16x4_t rgba64, uint alpha65535)
{
    uint32x4_t vs32 = vmull_n_u16(rgba64, alpha65535); // vs = rgba64 * alpha
    vs32 = vsraq_n_u32(vs32, vs32, 16);                // vs += vs >> 16
    return vrshrn_n_u32(vs32, 16);                     // vs = (vs + 0x8000) >> 16
}
#elif defined(__loongarch_sx)
static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, __m128i va)
{
    __m128i vs = rgba64;
    vs = __lsx_vilvl_h(__lsx_vmuh_hu(vs, va), __lsx_vmul_h(vs, va));
    vs = __lsx_vadd_w(vs, __lsx_vsrli_w(vs, 16));
    vs = __lsx_vadd_w(vs, __lsx_vreplgr2vr_w(0x8000));
    vs = __lsx_vsrai_w(vs, 16);
    vs = __lsx_vpickev_h(__lsx_vsat_w(vs, 15), __lsx_vsat_w(vs, 15));
    return vs;
}
static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, uint alpha65535)
{
    const __m128i shuffleMask = (__m128i)(v8i16){0, 0, 0, 0, 4, 5, 6, 7};
    const __m128i va = __lsx_vshuf_h(shuffleMask, __lsx_vldi(0),
                                     __lsx_vinsgr2vr_w(__lsx_vldi(0), alpha65535, 0));
    return multiplyAlpha65535(rgba64, va);
}
#endif

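// QRgba64-level overload: load the pixel into a vector register where a SIMD
// path is available, otherwise fall back to per-channel qt_div_65535().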
static inline QRgba64 multiplyAlpha65535(QRgba64 rgba64, uint alpha65535)
{
#if defined(__SSE2__)
    const __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&rgba64));
    const __m128i vr = multiplyAlpha65535(v, alpha65535);
    QRgba64 r;
    _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
    return r;
#elif defined(__ARM_NEON__)
    const uint16x4_t v = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&rgba64)));
    const uint16x4_t vr = multiplyAlpha65535(v, alpha65535);
    QRgba64 r;
    vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vr));
    return r;
#elif defined(__loongarch_sx)
    const __m128i v = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&rgba64), 0);
    const __m128i vr = multiplyAlpha65535(v, alpha65535);
    QRgba64 r;
    __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
    return r;
#else
    return QRgba64::fromRgba64(qt_div_65535(rgba64.red() * alpha65535),
                               qt_div_65535(rgba64.green() * alpha65535),
                               qt_div_65535(rgba64.blue() * alpha65535),
                               qt_div_65535(rgba64.alpha() * alpha65535));
#endif
}

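// multiplyAlpha255(): 8-bit alpha variant. Since 255 * 257 == 65535, scaling
// the alpha by 257 lets the SIMD builds reuse the 16-bit path exactly
// (alpha 255 maps to fully opaque).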
#if defined(__SSE2__) || defined(__ARM_NEON__) || defined(__loongarch_sx)
template<typename T>
static inline T multiplyAlpha255(T rgba64, uint alpha255)
{
    return multiplyAlpha65535(rgba64, alpha255 * 257);
}
#else
template<typename T>
static inline T multiplyAlpha255(T rgba64, uint alpha255)
{
    return QRgba64::fromRgba64(qt_div_255(rgba64.red() * alpha255),
                               qt_div_255(rgba64.green() * alpha255),
                               qt_div_255(rgba64.blue() * alpha255),
                               qt_div_255(rgba64.alpha() * alpha255));
}
#endif

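// interpolate255(x, a1, y, a2) computes x * a1 / 255 + y * a2 / 255 per
// channel. The vector adds do not saturate, so callers presumably keep
// a1 + a2 <= 255 (typically a2 == 255 - a1).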
#if defined(__SSE2__)
static inline __m128i Q_DECL_VECTORCALL interpolate255(__m128i x, uint alpha1, __m128i y, uint alpha2)
{
    return _mm_add_epi16(multiplyAlpha255(x, alpha1), multiplyAlpha255(y, alpha2));
}
#endif

#if defined __ARM_NEON__
inline uint16x4_t interpolate255(uint16x4_t x, uint alpha1, uint16x4_t y, uint alpha2)
{
    return vadd_u16(multiplyAlpha255(x, alpha1), multiplyAlpha255(y, alpha2));
}
#endif

#if defined __loongarch_sx
static inline __m128i Q_DECL_VECTORCALL interpolate255(__m128i x, uint alpha1, __m128i y, uint alpha2)
{
    return __lsx_vadd_h(multiplyAlpha255(x, alpha1), multiplyAlpha255(y, alpha2));
}
#endif

static inline QRgba64 interpolate255(QRgba64 x, uint alpha1, QRgba64 y, uint alpha2)
{
#if defined(__SSE2__)
    const __m128i vx = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&x));
    const __m128i vy = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&y));
    const __m128i vr = interpolate255(vx, alpha1, vy, alpha2);
    QRgba64 r;
    _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
    return r;
#elif defined(__ARM_NEON__)
    const uint16x4_t vx = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&x)));
    const uint16x4_t vy = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&y)));
    const uint16x4_t vr = interpolate255(vx, alpha1, vy, alpha2);
    QRgba64 r;
    vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vr));
    return r;
#elif defined(__loongarch_sx)
    const __m128i vx = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&x), 0);
    const __m128i vy = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&y), 0);
    const __m128i vr = interpolate255(vx, alpha1, vy, alpha2);
    QRgba64 r;
    __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
    return r;
#else
    return QRgba64::fromRgba64(multiplyAlpha255(x, alpha1) + multiplyAlpha255(y, alpha2));
#endif
}

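// Same interpolation with 16-bit weights: x * a1 / 65535 + y * a2 / 65535,
// again assuming the two weights sum to at most 65535.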
#if defined(__SSE2__)
static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, uint alpha1, __m128i y, uint alpha2)
{
    return _mm_add_epi16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
}

static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, __m128i alpha1, __m128i y, __m128i alpha2)
{
    return _mm_add_epi16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
}
#endif

#if defined __ARM_NEON__
inline uint16x4_t interpolate65535(uint16x4_t x, uint alpha1, uint16x4_t y, uint alpha2)
{
    return vadd_u16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
}
inline uint16x4_t interpolate65535(uint16x4_t x, uint16x4_t alpha1, uint16x4_t y, uint16x4_t alpha2)
{
    return vadd_u16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
}
#endif

#if defined __loongarch_sx
static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, uint alpha1, __m128i y, uint alpha2)
{
    return __lsx_vadd_h(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
}

static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, __m128i alpha1, __m128i y, __m128i alpha2)
{
    return __lsx_vadd_h(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
}
#endif

static inline QRgba64 interpolate65535(QRgba64 x, uint alpha1, QRgba64 y, uint alpha2)
{
#if defined(__SSE2__)
    const __m128i vx = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&x));
    const __m128i vy = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&y));
    const __m128i vr = interpolate65535(vx, alpha1, vy, alpha2);
    QRgba64 r;
    _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
    return r;
#elif defined(__ARM_NEON__)
    const uint16x4_t vx = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&x)));
    const uint16x4_t vy = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&y)));
    const uint16x4_t vr = interpolate65535(vx, alpha1, vy, alpha2);
    QRgba64 r;
    vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vr));
    return r;
#elif defined(__loongarch_sx)
    const __m128i vx = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&x), 0);
    const __m128i vy = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&y), 0);
    const __m128i vr = interpolate65535(vx, alpha1, vy, alpha2);
    QRgba64 r;
    __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
    return r;
#else
    return QRgba64::fromRgba64(multiplyAlpha65535(x, alpha1) + multiplyAlpha65535(y, alpha2));
#endif
}

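// addWithSaturation(): per-channel saturating add; each 16-bit channel clamps
// at 65535 instead of wrapping.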
static inline QRgba64 addWithSaturation(QRgba64 a, QRgba64 b)
{
#if defined(__SSE2__)
    const __m128i va = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&a));
    const __m128i vb = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&b));
    const __m128i vr = _mm_adds_epu16(va, vb);
    QRgba64 r;
    _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
    return r;
#elif defined(__ARM_NEON__)
    const uint16x4_t va = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&a)));
    const uint16x4_t vb = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&b)));
    QRgba64 r;
    vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vqadd_u16(va, vb)));
    return r;
#elif defined(__loongarch_sx)
    const __m128i va = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&a), 0);
    const __m128i vb = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&b), 0);
    const __m128i vr = __lsx_vsadd_hu(va, vb);
    QRgba64 r;
    __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
    return r;
#else
    return QRgba64::fromRgba64(qMin(a.red() + b.red(), 65535),
                               qMin(a.green() + b.green(), 65535),
                               qMin(a.blue() + b.blue(), 65535),
                               qMin(a.alpha() + b.alpha(), 65535));
#endif
}

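// toArgb32() narrows each 16-bit channel to 8 bits. The add-128 /
// subtract-high-byte / shift sequence approximates a rounded division by 257
// (65535 / 255), mapping 0 -> 0 and 65535 -> 255.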
#if QT_COMPILER_SUPPORTS_HERE(SSE2)
QT_FUNCTION_TARGET(SSE2)
static inline uint Q_DECL_VECTORCALL toArgb32(__m128i v)
{
    v = _mm_unpacklo_epi16(v, _mm_setzero_si128());
    v = _mm_add_epi32(v, _mm_set1_epi32(128));
    v = _mm_sub_epi32(v, _mm_srli_epi32(v, 8));
    v = _mm_srli_epi32(v, 8);
    v = _mm_packs_epi32(v, v);
    v = _mm_packus_epi16(v, v);
    return _mm_cvtsi128_si32(v);
}
#elif defined __ARM_NEON__
static inline uint toArgb32(uint16x4_t v)
{
    v = vsub_u16(v, vrshr_n_u16(v, 8));
    v = vrshr_n_u16(v, 8);
    uint8x8_t v8 = vmovn_u16(vcombine_u16(v, v));
    return vget_lane_u32(vreinterpret_u32_u8(v8), 0);
}
#elif defined __loongarch_sx
static inline uint Q_DECL_VECTORCALL toArgb32(__m128i v)
{
    v = __lsx_vilvl_h(__lsx_vldi(0), v);
    v = __lsx_vadd_w(v, __lsx_vreplgr2vr_w(128));
    v = __lsx_vsub_w(v, __lsx_vsrli_w(v, 8));
    v = __lsx_vsrli_w(v, 8);
    v = __lsx_vpickev_h(__lsx_vsat_w(v, 15), __lsx_vsat_w(v, 15));
    __m128i tmp = __lsx_vmaxi_h(v, 0);
    v = __lsx_vpickev_b(__lsx_vsat_hu(tmp, 7), __lsx_vsat_hu(tmp, 7));
    return __lsx_vpickve2gr_w(v, 0);
}
#endif

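// QRgba64 keeps its channels in R, G, B, A order; on little-endian the
// shuffles below swap R and B so the packed 32-bit result reads as
// 0xAARRGGBB (ARGB32).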
static inline uint toArgb32(QRgba64 rgba64)
{
#if defined __SSE2__
    __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&rgba64));
    v = _mm_shufflelo_epi16(v, _MM_SHUFFLE(3, 0, 1, 2));
    return toArgb32(v);
#elif defined __ARM_NEON__
    uint16x4_t v = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&rgba64)));
#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
    const uint8x8_t shuffleMask = qvset_n_u8(4, 5, 2, 3, 0, 1, 6, 7);
    v = vreinterpret_u16_u8(vtbl1_u8(vreinterpret_u8_u16(v), shuffleMask));
#else
    v = vext_u16(v, v, 3);
#endif
    return toArgb32(v);
#elif defined __loongarch_sx
    __m128i v = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&rgba64), 0);
    const __m128i shuffleMask = (__m128i)(v8i16){2, 1, 0, 3, 4, 5, 6, 7};
    v = __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), v);
    return toArgb32(v);
#else
    return rgba64.toArgb32();
#endif
}

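// toRgba8888() keeps the native R, G, B, A channel order, so no swap is
// needed before narrowing; the scalar fallback converts via ARGB32 and
// ARGB2RGBA().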
static inline uint toRgba8888(QRgba64 rgba64)
{
#if defined __SSE2__
    __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&rgba64));
    return toArgb32(v);
#elif defined __ARM_NEON__
    uint16x4_t v = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&rgba64)));
    return toArgb32(v);
#elif defined __loongarch_sx
    __m128i v = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&rgba64), 0);
    return toArgb32(v);
#else
    return ARGB2RGBA(toArgb32(rgba64));
#endif
}

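// rgbBlend() blends s over d with a separate 8-bit weight per colour channel
// taken from rgbAlpha (e.g. for subpixel text rendering), using the same
// rounded division trick as multiplyAlpha65535().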
static inline QRgba64 rgbBlend(QRgba64 d, QRgba64 s, uint rgbAlpha)
{
    QRgba64 blend;
#if defined(__SSE2__)
    __m128i vd = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&d));
    __m128i vs = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&s));
    __m128i va = _mm_cvtsi32_si128(rgbAlpha);
    va = _mm_unpacklo_epi8(va, va);
    va = _mm_shufflelo_epi16(va, _MM_SHUFFLE(3, 0, 1, 2));
    __m128i vb = _mm_xor_si128(_mm_set1_epi16(-1), va);

    vs = _mm_unpacklo_epi16(_mm_mullo_epi16(vs, va), _mm_mulhi_epu16(vs, va));
    vd = _mm_unpacklo_epi16(_mm_mullo_epi16(vd, vb), _mm_mulhi_epu16(vd, vb));
    vd = _mm_add_epi32(vd, vs);
    vd = _mm_add_epi32(vd, _mm_srli_epi32(vd, 16));
    vd = _mm_add_epi32(vd, _mm_set1_epi32(0x8000));
    vd = _mm_srai_epi32(vd, 16);
    vd = _mm_packs_epi32(vd, vd);

    _mm_storel_epi64(reinterpret_cast<__m128i *>(&blend), vd);
#elif defined(__ARM_NEON__)
    uint16x4_t vd = vreinterpret_u16_u64(vmov_n_u64(d));
    uint16x4_t vs = vreinterpret_u16_u64(vmov_n_u64(s));
    uint8x8_t va8 = vreinterpret_u8_u32(vmov_n_u32(ARGB2RGBA(rgbAlpha)));
    uint16x4_t va = vreinterpret_u16_u8(vzip_u8(va8, va8).val[0]);
    uint16x4_t vb = veor_u16(vdup_n_u16(0xffff), va);

    uint32x4_t vs32 = vmull_u16(vs, va);
    uint32x4_t vd32 = vmull_u16(vd, vb);
    vd32 = vaddq_u32(vd32, vs32);
    vd32 = vsraq_n_u32(vd32, vd32, 16);
    vd = vrshrn_n_u32(vd32, 16);
    vst1_u64(reinterpret_cast<uint64_t *>(&blend), vreinterpret_u64_u16(vd));
#elif defined(__loongarch_sx)
    __m128i vd = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&d), 0);
    __m128i vs = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&s), 0);
    __m128i va = __lsx_vinsgr2vr_w(__lsx_vldi(0), rgbAlpha, 0);
    va = __lsx_vilvl_b(va, va);
    const __m128i shuffleMask = (__m128i)(v8i16){2, 1, 0, 3, 4, 5, 6, 7};
    va = __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), va);
    __m128i vb = __lsx_vxor_v(__lsx_vreplgr2vr_h(-1), va);

    vs = __lsx_vilvl_h(__lsx_vmuh_hu(vs, va), __lsx_vmul_h(vs, va));
    vd = __lsx_vilvl_h(__lsx_vmuh_hu(vd, vb), __lsx_vmul_h(vd, vb));
    vd = __lsx_vadd_w(vd, vs);
    vd = __lsx_vadd_w(vd, __lsx_vsrli_w(vd, 16));
    vd = __lsx_vadd_w(vd, __lsx_vreplgr2vr_w(0x8000));
    vd = __lsx_vsrai_w(vd, 16);
    vd = __lsx_vpickev_h(__lsx_vsat_w(vd, 15), __lsx_vsat_w(vd, 15));
    __lsx_vstelm_d(vd, reinterpret_cast<__m128i *>(&blend), 0, 0);
#else
    const int mr = qRed(rgbAlpha);
    const int mg = qGreen(rgbAlpha);
    const int mb = qBlue(rgbAlpha);
    blend = qRgba64(qt_div_255(s.red()   * mr + d.red()   * (255 - mr)),
                    qt_div_255(s.green() * mg + d.green() * (255 - mg)),
                    qt_div_255(s.blue()  * mb + d.blue()  * (255 - mb)),
                    s.alpha());
#endif
    return blend;
}

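// blend_pixel(): SourceOver composition,
// dst = src + dst * (65535 - src.alpha) / 65535, with shortcuts for fully
// opaque and fully transparent sources.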
static inline void blend_pixel(QRgba64 &dst, QRgba64 src)
{
    if (src.isOpaque())
        dst = src;
    else if (!src.isTransparent()) {
#if defined(__SSE2__)
        const __m128i vd = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&dst));
        const __m128i vs = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&src));
        const __m128i via = _mm_xor_si128(_mm_set1_epi16(-1), _mm_shufflelo_epi16(vs, _MM_SHUFFLE(3, 3, 3, 3)));
        const __m128i vr = _mm_add_epi16(vs, multiplyAlpha65535(vd, via));
        _mm_storel_epi64(reinterpret_cast<__m128i *>(&dst), vr);
#elif defined(__ARM_NEON__)
        const uint16x4_t vd = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&dst)));
        const uint16x4_t vs = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&src)));
        const uint16x4_t via = veor_u16(vdup_n_u16(0xffff), vdup_lane_u16(vs, 3));
        const uint16x4_t vr = vadd_u16(vs, multiplyAlpha65535(vd, via));
        vst1_u64(reinterpret_cast<uint64_t *>(&dst), vreinterpret_u64_u16(vr));
#elif defined(__loongarch_sx)
        const __m128i vd = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&dst), 0);
        const __m128i vs = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&src), 0);
        const __m128i shuffleMask = (__m128i)(v8i16){3, 3, 3, 3, 4, 5, 6, 7};
        const __m128i via = __lsx_vxor_v(__lsx_vreplgr2vr_h(-1), __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), vs));
        const __m128i vr = __lsx_vadd_h(vs, multiplyAlpha65535(vd, via));
        __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&dst), 0, 0);
#else
        dst = src + multiplyAlpha65535(dst, 65535 - src.alpha());
#endif
    }
}

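// Overload with an extra 8-bit coverage value: the source is first scaled by
// const_alpha (255 meaning full coverage, which delegates to the overload
// above), then composited SourceOver as before.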
static inline void blend_pixel(QRgba64 &dst, QRgba64 src, const int const_alpha)
{
    if (const_alpha == 255)
        return blend_pixel(dst, src);
    if (!src.isTransparent()) {
#if defined(__SSE2__)
        const __m128i vd = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&dst));
        __m128i vs = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&src));
        vs = multiplyAlpha255(vs, const_alpha);
        const __m128i via = _mm_xor_si128(_mm_set1_epi16(-1), _mm_shufflelo_epi16(vs, _MM_SHUFFLE(3, 3, 3, 3)));
        const __m128i vr = _mm_add_epi16(vs, multiplyAlpha65535(vd, via));
        _mm_storel_epi64(reinterpret_cast<__m128i *>(&dst), vr);
#elif defined(__ARM_NEON__)
        const uint16x4_t vd = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&dst)));
        uint16x4_t vs = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&src)));
        vs = multiplyAlpha255(vs, const_alpha);
        const uint16x4_t via = veor_u16(vdup_n_u16(0xffff), vdup_lane_u16(vs, 3));
        const uint16x4_t vr = vadd_u16(vs, multiplyAlpha65535(vd, via));
        vst1_u64(reinterpret_cast<uint64_t *>(&dst), vreinterpret_u64_u16(vr));
#elif defined(__loongarch_sx)
        const __m128i vd = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&dst), 0);
        __m128i vs = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&src), 0);
        vs = multiplyAlpha255(vs, const_alpha);
        const __m128i shuffleMask = (__m128i)(v8i16){3, 3, 3, 3, 4, 5, 6, 7};
        const __m128i via = __lsx_vxor_v(__lsx_vreplgr2vr_h(-1), __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), vs));
        const __m128i vr = __lsx_vadd_h(vs, multiplyAlpha65535(vd, via));
        __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&dst), 0, 0);
#else
        src = multiplyAlpha255(src, const_alpha);
        dst = src + multiplyAlpha65535(dst, 65535 - src.alpha());
#endif
    }
}