Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
qrgba64_p.h
1// Copyright (C) 2020 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3
4#ifndef QRGBA64_P_H
5#define QRGBA64_P_H
6
7//
8// W A R N I N G
9// -------------
10//
11// This file is not part of the Qt API. It exists purely as an
12// implementation detail. This header file may change from version to
13// version without notice, or even be removed.
14//
15// We mean it.
16//
17
18#include "qrgba64.h"
19#include "qdrawhelper_p.h"
20
21#include <QtCore/private/qsimd_p.h>
22#include <QtGui/private/qtguiglobal_p.h>
23
24QT_BEGIN_NAMESPACE
25
26inline QRgba64 combineAlpha256(QRgba64 rgba64, uint alpha256)
27{
28 return QRgba64::fromRgba64(rgba64.red(), rgba64.green(), rgba64.blue(), (rgba64.alpha() * alpha256) >> 8);
29}
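// combineAlpha256() appears to take a 0..256 scale factor (256 meaning fully
// opaque), so the >> 8 leaves the 16-bit alpha unchanged for alpha256 == 256 and
// zeroes it for alpha256 == 0. For example, alpha == 65535 with alpha256 == 128
// gives (65535 * 128) >> 8 == 32767, i.e. roughly half coverage.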
30
31#if defined(__SSE2__)
32static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, __m128i va)
33{
34 __m128i vs = rgba64;
35 vs = _mm_unpacklo_epi16(_mm_mullo_epi16(vs, va), _mm_mulhi_epu16(vs, va));
36 vs = _mm_add_epi32(vs, _mm_srli_epi32(vs, 16));
37 vs = _mm_add_epi32(vs, _mm_set1_epi32(0x8000));
38 vs = _mm_srai_epi32(vs, 16);
39 vs = _mm_packs_epi32(vs, vs);
40 return vs;
41}
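// The add/add/shift sequence above is a rounded division by 65535: with
// p = x * a (32-bit per lane), (p + (p >> 16) + 0x8000) >> 16 matches the scalar
// qt_div_65535() used in the non-SIMD fallback below. E.g. x = 1000, a = 65535
// gives p = 65535000 and a result of 1000, so multiplying by full alpha is a no-op.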
42static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, uint alpha65535)
43{
44 const __m128i va = _mm_shufflelo_epi16(_mm_cvtsi32_si128(alpha65535), _MM_SHUFFLE(0, 0, 0, 0));
45 return multiplyAlpha65535(rgba64, va);
46}
47#elif defined(__ARM_NEON__)
48static inline uint16x4_t multiplyAlpha65535(uint16x4_t rgba64, uint16x4_t alpha65535)
49{
50 uint32x4_t vs32 = vmull_u16(rgba64, alpha65535); // vs = vs * alpha
51 vs32 = vsraq_n_u32(vs32, vs32, 16); // vs = vs + (vs >> 16)
52 return vrshrn_n_u32(vs32, 16); // vs = (vs + 0x8000) >> 16
53}
54static inline uint16x4_t multiplyAlpha65535(uint16x4_t rgba64, uint alpha65535)
55{
56 uint32x4_t vs32 = vmull_n_u16(rgba64, alpha65535); // vs = vs * alpha
57 vs32 = vsraq_n_u32(vs32, vs32, 16); // vs = vs + (vs >> 16)
58 return vrshrn_n_u32(vs32, 16); // vs = (vs + 0x8000) >> 16
59}
60#elif defined(__loongarch_sx)
61static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, __m128i va)
62{
63 __m128i vs = rgba64;
64 vs = __lsx_vilvl_h(__lsx_vmuh_hu(vs, va), __lsx_vmul_h(vs, va));
65 vs = __lsx_vadd_w(vs, __lsx_vsrli_w(vs, 16));
66 vs = __lsx_vadd_w(vs, __lsx_vreplgr2vr_w(0x8000));
67 vs = __lsx_vsrai_w(vs, 16);
68 vs = __lsx_vpickev_h(__lsx_vsat_w(vs, 15), __lsx_vsat_w(vs, 15));
69 return vs;
70}
71static inline __m128i Q_DECL_VECTORCALL multiplyAlpha65535(__m128i rgba64, uint alpha65535)
72{
73 const __m128i shuffleMask = (__m128i)(v8i16){0, 0, 0, 0, 4, 5, 6, 7};
74 const __m128i va = __lsx_vshuf_h(shuffleMask, __lsx_vldi(0),
75 __lsx_vinsgr2vr_w(__lsx_vldi(0), alpha65535, 0));
76 return multiplyAlpha65535(rgba64, va);
77}
78#endif
79
80static inline QRgba64 multiplyAlpha65535(QRgba64 rgba64, uint alpha65535)
81{
82#if defined(__SSE2__)
83 const __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&rgba64));
84 const __m128i vr = multiplyAlpha65535(v, alpha65535);
85 QRgba64 r;
86 _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
87 return r;
88#elif defined(__ARM_NEON__)
89 const uint16x4_t v = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&rgba64)));
90 const uint16x4_t vr = multiplyAlpha65535(v, alpha65535);
91 QRgba64 r;
92 vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vr));
93 return r;
94#elif defined(__loongarch_sx)
95 const __m128i v = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&rgba64), 0);
96 const __m128i vr = multiplyAlpha65535(v, alpha65535);
97 QRgba64 r;
98 __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
99 return r;
100#else
101 return QRgba64::fromRgba64(qt_div_65535(rgba64.red() * alpha65535),
102 qt_div_65535(rgba64.green() * alpha65535),
103 qt_div_65535(rgba64.blue() * alpha65535),
104 qt_div_65535(rgba64.alpha() * alpha65535));
105#endif
106}
107
108#if defined(__SSE2__) || defined(__ARM_NEON__) || defined(__loongarch_sx)
109template<typename T>
110static inline T Q_DECL_VECTORCALL multiplyAlpha255(T rgba64, uint alpha255)
111{
112 return multiplyAlpha65535(rgba64, alpha255 * 257);
113}
114#else
115template<typename T>
116static inline T multiplyAlpha255(T rgba64, uint alpha255)
117{
118 return QRgba64::fromRgba64(qt_div_255(rgba64.red() * alpha255),
119 qt_div_255(rgba64.green() * alpha255),
120 qt_div_255(rgba64.blue() * alpha255),
121 qt_div_255(rgba64.alpha() * alpha255));
122}
123#endif
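// multiplyAlpha255() widens the 8-bit alpha onto the 0..65535 scale via
// alpha255 * 257 (255 * 257 == 65535); multiplying by 257 replicates the byte
// into both halves of the 16-bit word, e.g. 0x80 * 257 == 0x8080.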
124
125#if defined __SSE2__
126static inline __m128i Q_DECL_VECTORCALL interpolate255(__m128i x, uint alpha1, __m128i y, uint alpha2)
127{
128 return _mm_add_epi16(multiplyAlpha255(x, alpha1), multiplyAlpha255(y, alpha2));
129}
130#endif
131
132#if defined __ARM_NEON__
133inline uint16x4_t interpolate255(uint16x4_t x, uint alpha1, uint16x4_t y, uint alpha2)
134{
135 return vadd_u16(multiplyAlpha255(x, alpha1), multiplyAlpha255(y, alpha2));
136}
137#endif
138
139#if defined __loongarch_sx
140static inline __m128i Q_DECL_VECTORCALL
141interpolate255(__m128i x, uint alpha1, __m128i y, uint alpha2)
142{
143 return __lsx_vadd_h(multiplyAlpha255(x, alpha1), multiplyAlpha255(y, alpha2));
144}
145#endif
146
147static inline QRgba64 interpolate255(QRgba64 x, uint alpha1, QRgba64 y, uint alpha2)
148{
149#if defined(__SSE2__)
150 const __m128i vx = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&x));
151 const __m128i vy = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&y));
152 const __m128i vr = interpolate255(vx, alpha1, vy, alpha2);
153 QRgba64 r;
154 _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
155 return r;
156#elif defined(__ARM_NEON__)
157 const uint16x4_t vx = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&x)));
158 const uint16x4_t vy = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&y)));
159 const uint16x4_t vr = interpolate255(vx, alpha1, vy, alpha2);
160 QRgba64 r;
161 vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vr));
162 return r;
163#elif defined(__loongarch_sx)
164 const __m128i vx = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&x), 0);
165 const __m128i vy = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&y), 0);
166 const __m128i vr = interpolate255(vx, alpha1, vy, alpha2);
167 QRgba64 r;
168 __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
169 return r;
170#else
171 return QRgba64::fromRgba64(multiplyAlpha255(x, alpha1) + multiplyAlpha255(y, alpha2));
172#endif
173}
174
175#if defined __SSE2__
176static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, uint alpha1, __m128i y, uint alpha2)
177{
178 return _mm_add_epi16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
179}
180
181static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, __m128i alpha1, __m128i y, __m128i alpha2)
182{
183 return _mm_add_epi16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
184}
185#endif
186
187#if defined __ARM_NEON__
188inline uint16x4_t interpolate65535(uint16x4_t x, uint alpha1, uint16x4_t y, uint alpha2)
189{
190 return vadd_u16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
191}
192inline uint16x4_t interpolate65535(uint16x4_t x, uint16x4_t alpha1, uint16x4_t y, uint16x4_t alpha2)
193{
194 return vadd_u16(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
195}
196#endif
197
198#if defined __loongarch_sx
199static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, uint alpha1, __m128i y, uint alpha2)
200{
201 return __lsx_vadd_h(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
202}
203
204static inline __m128i Q_DECL_VECTORCALL interpolate65535(__m128i x, __m128i alpha1, __m128i y, __m128i alpha2)
205{
206 return __lsx_vadd_h(multiplyAlpha65535(x, alpha1), multiplyAlpha65535(y, alpha2));
207}
208#endif
209
210static inline QRgba64 interpolate65535(QRgba64 x, uint alpha1, QRgba64 y, uint alpha2)
211{
212#if defined(__SSE2__)
213 const __m128i vx = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&x));
214 const __m128i vy = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&y));
215 const __m128i vr = interpolate65535(vx, alpha1, vy, alpha2);
216 QRgba64 r;
217 _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
218 return r;
219#elif defined(__ARM_NEON__)
220 const uint16x4_t vx = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&x)));
221 const uint16x4_t vy = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&y)));
222 const uint16x4_t vr = interpolate65535(vx, alpha1, vy, alpha2);
223 QRgba64 r;
224 vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vr));
225 return r;
226#elif defined(__loongarch_sx)
227 const __m128i vx = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&x), 0);
228 const __m128i vy = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&y), 0);
229 const __m128i vr = interpolate65535(vx, alpha1, vy, alpha2);
230 QRgba64 r;
231 __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
232 return r;
233#else
234 return QRgba64::fromRgba64(multiplyAlpha65535(x, alpha1) + multiplyAlpha65535(y, alpha2));
235#endif
236}
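// interpolate65535() adds the two weighted terms without saturation, so callers
// are presumably expected to pass weights that sum to at most 65535 (typically
// alpha and 65535 - alpha), keeping each 16-bit channel in range.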
237
238static inline QRgba64 addWithSaturation(QRgba64 a, QRgba64 b)
239{
240#if defined(__SSE2__)
241 const __m128i va = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&a));
242 const __m128i vb = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&b));
243 const __m128i vr = _mm_adds_epu16(va, vb);
244 QRgba64 r;
245 _mm_storel_epi64(reinterpret_cast<__m128i *>(&r), vr);
246 return r;
247#elif defined(__ARM_NEON__)
248 const uint16x4_t va = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&a)));
249 const uint16x4_t vb = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&b)));
250 QRgba64 r;
251 vst1_u64(reinterpret_cast<uint64_t *>(&r), vreinterpret_u64_u16(vqadd_u16(va, vb)));
252 return r;
253#elif defined(__loongarch_sx)
254 const __m128i va = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&a), 0);
255 const __m128i vb = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&b), 0);
256 const __m128i vr = __lsx_vsadd_hu(va, vb);
257 QRgba64 r;
258 __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&r), 0, 0);
259 return r;
260#else
261
262 return QRgba64::fromRgba64(qMin(a.red() + b.red(), 65535),
263 qMin(a.green() + b.green(), 65535),
264 qMin(a.blue() + b.blue(), 65535),
265 qMin(a.alpha() + b.alpha(), 65535));
266#endif
267}
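// addWithSaturation() clamps each channel at 65535: the SSE2/NEON/LSX paths use
// unsigned saturating adds (_mm_adds_epu16 / vqadd_u16 / __lsx_vsadd_hu), while
// the portable fallback clamps with qMin per channel.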
268
269#if QT_COMPILER_SUPPORTS_HERE(SSE2)
270QT_FUNCTION_TARGET(SSE2)
271static inline uint Q_DECL_VECTORCALL toArgb32(__m128i v)
272{
273 v = _mm_unpacklo_epi16(v, _mm_setzero_si128());
274 v = _mm_add_epi32(v, _mm_set1_epi32(128));
275 v = _mm_sub_epi32(v, _mm_srli_epi32(v, 8));
276 v = _mm_srli_epi32(v, 8);
277 v = _mm_packs_epi32(v, v);
278 v = _mm_packus_epi16(v, v);
279 return _mm_cvtsi128_si32(v);
280}
281#elif defined __ARM_NEON__
282static inline uint toArgb32(uint16x4_t v)
283{
284 v = vsub_u16(v, vrshr_n_u16(v, 8));
285 v = vrshr_n_u16(v, 8);
286 uint8x8_t v8 = vmovn_u16(vcombine_u16(v, v));
287 return vget_lane_u32(vreinterpret_u32_u8(v8), 0);
288}
289#elif defined __loongarch_sx
290static inline uint Q_DECL_VECTORCALL toArgb32(__m128i v)
291{
292 v = __lsx_vilvl_h(__lsx_vldi(0), v);
293 v = __lsx_vadd_w(v, __lsx_vreplgr2vr_w(128));
294 v = __lsx_vsub_w(v, __lsx_vsrli_w(v, 8));
295 v = __lsx_vsrli_w(v, 8);
296 v = __lsx_vpickev_h(__lsx_vsat_w(v, 15), __lsx_vsat_w(v, 15));
297 __m128i tmp = __lsx_vmaxi_h(v, 0);
298 v = __lsx_vpickev_b(__lsx_vsat_hu(tmp, 7), __lsx_vsat_hu(tmp, 7));
299 return __lsx_vpickve2gr_w(v, 0);
300}
301#endif
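// Each SIMD variant above narrows a 16-bit channel to 8 bits with rounding:
// ((v + 128) - ((v + 128) >> 8)) >> 8 is a rounded v / 257 (65535 -> 255,
// 128 -> 0, 129 -> 1), i.e. the same result the scalar QRgba64::toArgb32()
// fallback produces.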
302
303static inline uint toArgb32(QRgba64 rgba64)
304{
305#if defined __SSE2__
306 __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&rgba64));
307 v = _mm_shufflelo_epi16(v, _MM_SHUFFLE(3, 0, 1, 2));
308 return toArgb32(v);
309#elif defined __ARM_NEON__
310 uint16x4_t v = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&rgba64)));
311#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
312 const uint8x8_t shuffleMask = qvset_n_u8(4, 5, 2, 3, 0, 1, 6, 7);
313 v = vreinterpret_u16_u8(vtbl1_u8(vreinterpret_u8_u16(v), shuffleMask));
314#else
315 v = vext_u16(v, v, 3);
316#endif
317 return toArgb32(v);
318#elif defined __loongarch_sx
319 __m128i v = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&rgba64), 0);
320 const __m128i shuffleMask = (__m128i)(v8i16){2, 1, 0, 3, 4, 5, 6, 7};
321 v = __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), v);
322 return toArgb32(v);
323#else
324 return rgba64.toArgb32();
325#endif
326}
327
328static inline uint toRgba8888(QRgba64 rgba64)
329{
330#if defined __SSE2__
331 __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&rgba64));
332 return toArgb32(v);
333#elif defined __ARM_NEON__
334 uint16x4_t v = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&rgba64)));
335 return toArgb32(v);
336#elif defined __loongarch_sx
337 __m128i v = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&rgba64), 0);
338 return toArgb32(v);
339#else
340 return ARGB2RGBA(toArgb32(rgba64));
341#endif
342}
343
344static inline QRgba64 rgbBlend(QRgba64 d, QRgba64 s, uint rgbAlpha)
345{
346 QRgba64 blend;
347#if defined(__SSE2__)
348 __m128i vd = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&d));
349 __m128i vs = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&s));
350 __m128i va = _mm_cvtsi32_si128(rgbAlpha);
351 va = _mm_unpacklo_epi8(va, va);
352 va = _mm_shufflelo_epi16(va, _MM_SHUFFLE(3, 0, 1, 2));
353 __m128i vb = _mm_xor_si128(_mm_set1_epi16(-1), va);
354
355 vs = _mm_unpacklo_epi16(_mm_mullo_epi16(vs, va), _mm_mulhi_epu16(vs, va));
356 vd = _mm_unpacklo_epi16(_mm_mullo_epi16(vd, vb), _mm_mulhi_epu16(vd, vb));
357 vd = _mm_add_epi32(vd, vs);
358 vd = _mm_add_epi32(vd, _mm_srli_epi32(vd, 16));
359 vd = _mm_add_epi32(vd, _mm_set1_epi32(0x8000));
360 vd = _mm_srai_epi32(vd, 16);
361 vd = _mm_packs_epi32(vd, vd);
362
363 _mm_storel_epi64(reinterpret_cast<__m128i *>(&blend), vd);
364#elif defined(__ARM_NEON__)
365 uint16x4_t vd = vreinterpret_u16_u64(vmov_n_u64(d));
366 uint16x4_t vs = vreinterpret_u16_u64(vmov_n_u64(s));
367 uint8x8_t va8 = vreinterpret_u8_u32(vmov_n_u32(ARGB2RGBA(rgbAlpha)));
368 uint16x4_t va = vreinterpret_u16_u8(vzip_u8(va8, va8).val[0]);
369 uint16x4_t vb = veor_u16(vdup_n_u16(0xffff), va);
370
371 uint32x4_t vs32 = vmull_u16(vs, va);
372 uint32x4_t vd32 = vmull_u16(vd, vb);
373 vd32 = vaddq_u32(vd32, vs32);
374 vd32 = vsraq_n_u32(vd32, vd32, 16);
375 vd = vrshrn_n_u32(vd32, 16);
376 vst1_u64(reinterpret_cast<uint64_t *>(&blend), vreinterpret_u64_u16(vd));
377#elif defined(__loongarch_sx)
378 __m128i vd = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&d), 0);
379 __m128i vs = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&s), 0);
380 __m128i va = __lsx_vinsgr2vr_w(__lsx_vldi(0), rgbAlpha, 0);
381 va = __lsx_vilvl_b(va, va);
382 const __m128i shuffleMask = (__m128i)(v8i16){2, 1, 0, 3, 4, 5, 6, 7};
383 va = __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), va);
384 __m128i vb = __lsx_vxor_v(__lsx_vreplgr2vr_h(-1), va);
385
386 vs = __lsx_vilvl_h(__lsx_vmuh_hu(vs, va), __lsx_vmul_h(vs, va));
387 vd = __lsx_vilvl_h(__lsx_vmuh_hu(vd, vb), __lsx_vmul_h(vd, vb));
388 vd = __lsx_vadd_w(vd, vs);
389 vd = __lsx_vadd_w(vd, __lsx_vsrli_w(vd, 16));
390 vd = __lsx_vadd_w(vd, __lsx_vreplgr2vr_w(0x8000));
391 vd = __lsx_vsrai_w(vd, 16);
392 vd = __lsx_vpickev_h(__lsx_vsat_w(vd, 15), __lsx_vsat_w(vd, 15));
393 __lsx_vstelm_d(vd, reinterpret_cast<__m128i *>(&blend), 0, 0);
394#else
395 const int mr = qRed(rgbAlpha);
396 const int mg = qGreen(rgbAlpha);
397 const int mb = qBlue(rgbAlpha);
398 blend = qRgba64(qt_div_255(s.red() * mr + d.red() * (255 - mr)),
399 qt_div_255(s.green() * mg + d.green() * (255 - mg)),
400 qt_div_255(s.blue() * mb + d.blue() * (255 - mb)),
401 s.alpha());
402#endif
403 return blend;
404}
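// rgbBlend() blends with a separate 8-bit weight per colour channel: rgbAlpha is
// an ARGB32 value whose R/G/B bytes weight the source while 255 - weight goes to
// the destination, as the scalar fallback above spells out; the result keeps the
// source alpha. This per-channel weighting is what subpixel-antialiased (LCD)
// text rendering typically needs.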
405
406static inline void blend_pixel(QRgba64 &dst, QRgba64 src)
407{
408 if (src.isOpaque())
409 dst = src;
410 else if (!src.isTransparent()) {
411#if defined(__SSE2__)
412 const __m128i vd = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&dst));
413 const __m128i vs = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&src));
414 const __m128i via = _mm_xor_si128(_mm_set1_epi16(-1), _mm_shufflelo_epi16(vs, _MM_SHUFFLE(3, 3, 3, 3)));
415 const __m128i vr = _mm_add_epi16(vs, multiplyAlpha65535(vd, via));
416 _mm_storel_epi64(reinterpret_cast<__m128i *>(&dst), vr);
417#elif defined(__ARM_NEON__)
418 const uint16x4_t vd = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&dst)));
419 const uint16x4_t vs = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&src)));
420 const uint16x4_t via = veor_u16(vdup_n_u16(0xffff), vdup_lane_u16(vs, 3));
421 const uint16x4_t vr = vadd_u16(vs, multiplyAlpha65535(vd, via));
422 vst1_u64(reinterpret_cast<uint64_t *>(&dst), vreinterpret_u64_u16(vr));
423#elif defined(__loongarch_sx)
424 const __m128i vd = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&dst), 0);
425 const __m128i vs = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&src), 0);
426 const __m128i shuffleMask = (__m128i)(v8i16){3, 3, 3, 3, 4, 5, 6, 7};
427 const __m128i via = __lsx_vxor_v(__lsx_vreplgr2vr_h(-1), __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), vs));
428 const __m128i vr = __lsx_vadd_h(vs, multiplyAlpha65535(vd, via));
429 __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&dst), 0, 0);
430#else
431 dst = src + multiplyAlpha65535(dst, 65535 - src.alpha());
432#endif
433 }
434}
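// blend_pixel() is premultiplied source-over, dst' = src + dst * (65535 - src.alpha()) / 65535,
// with early outs for fully opaque and fully transparent sources. A quick sketch
// of the effect, assuming premultiplied inputs:
//     QRgba64 dst = QRgba64::fromRgba64(0, 0, 0, 65535);          // opaque black
//     blend_pixel(dst, QRgba64::fromRgba64(32768, 0, 0, 32768));  // ~50% premultiplied red
//     // dst is now roughly (32768, 0, 0, 65535): half red over black, still opaque.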
435
436static inline void blend_pixel(QRgba64 &dst, QRgba64 src, const int const_alpha)
437{
438 if (const_alpha == 255)
439 return blend_pixel(dst, src);
440 if (!src.isTransparent()) {
441#if defined(__SSE2__)
442 const __m128i vd = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&dst));
443 __m128i vs = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&src));
444 vs = multiplyAlpha255(vs, const_alpha);
445 const __m128i via = _mm_xor_si128(_mm_set1_epi16(-1), _mm_shufflelo_epi16(vs, _MM_SHUFFLE(3, 3, 3, 3)));
446 const __m128i vr = _mm_add_epi16(vs, multiplyAlpha65535(vd, via));
447 _mm_storel_epi64(reinterpret_cast<__m128i *>(&dst), vr);
448#elif defined(__ARM_NEON__)
449 const uint16x4_t vd = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&dst)));
450 uint16x4_t vs = vreinterpret_u16_u64(vld1_u64(reinterpret_cast<const uint64_t *>(&src)));
451 vs = multiplyAlpha255(vs, const_alpha);
452 const uint16x4_t via = veor_u16(vdup_n_u16(0xffff), vdup_lane_u16(vs, 3));
453 const uint16x4_t vr = vadd_u16(vs, multiplyAlpha65535(vd, via));
454 vst1_u64(reinterpret_cast<uint64_t *>(&dst), vreinterpret_u64_u16(vr));
455#elif defined(__loongarch_sx)
456 const __m128i vd = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&dst), 0);
457 __m128i vs = __lsx_vldrepl_d(reinterpret_cast<const __m128i *>(&src), 0);
458 vs = multiplyAlpha255(vs, const_alpha);
459 const __m128i shuffleMask = (__m128i)(v8i16){3, 3, 3, 3, 4, 5, 6, 7};
460 const __m128i via = __lsx_vxor_v(__lsx_vreplgr2vr_h(-1), __lsx_vshuf_h(shuffleMask, __lsx_vldi(0), vs));
461 const __m128i vr = __lsx_vadd_h(vs, multiplyAlpha65535(vd, via));
462 __lsx_vstelm_d(vr, reinterpret_cast<__m128i *>(&dst), 0, 0);
463#else
464 src = multiplyAlpha255(src, const_alpha);
465 dst = src + multiplyAlpha65535(dst, 65535 - src.alpha());
466#endif
467 }
468}
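// The const_alpha overload first scales the source by an extra 0..255 coverage
// factor (multiplyAlpha255) and then performs the same premultiplied source-over
// as above; const_alpha == 255 short-circuits to the plain blend_pixel().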
469
470QT_END_NAMESPACE
471
472#endif // QRGBA64_P_H