Eigen 3.4.90 (git rev 5a9f66fb35d03a4da9ef8976e67a61b30aa16dcf)
AVX/PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner ([email protected])
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

// IWYU pragma: private
#include "../../InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif

#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m256 Packet8f;
typedef eigen_packet_wrapper<__m256i, 0> Packet8i;
typedef __m256d Packet4d;
#ifndef EIGEN_VECTORIZE_AVX512FP16
typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
#endif
typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;
typedef eigen_packet_wrapper<__m256i, 4> Packet8ui;

#ifdef EIGEN_VECTORIZE_AVX2
// Start from 3 to be compatible with AVX512
typedef eigen_packet_wrapper<__m256i, 3> Packet4l;
typedef eigen_packet_wrapper<__m256i, 5> Packet4ul;
#endif

template <>
struct is_arithmetic<__m256> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m256i> {
  enum { value = true };
};
template <>
struct is_arithmetic<__m256d> {
  enum { value = true };
};
template <>
struct is_arithmetic<Packet8i> {
  enum { value = true };
};
// Note that `Packet8ui` uses the underlying type `__m256i`, which is
// interpreted as a vector of _signed_ `int32`s, which breaks some arithmetic
// operations used in `GenericPacketMath.h`.
template <>
struct is_arithmetic<Packet8ui> {
  enum { value = false };
};
#ifndef EIGEN_VECTORIZE_AVX512FP16
template <>
struct is_arithmetic<Packet8h> {
  enum { value = true };
};
#endif
template <>
struct is_arithmetic<Packet8bf> {
  enum { value = true };
};
#ifdef EIGEN_VECTORIZE_AVX2
template <>
struct is_arithmetic<Packet4l> {
  enum { value = true };
};
// Note that `Packet4ul` uses the underlying type `__m256i`, which is
// interpreted as a vector of _signed_ `int32`s, which breaks some arithmetic
// operations used in `GenericPacketMath.h`.
template <>
struct is_arithmetic<Packet4ul> {
  enum { value = false };
};
#endif

// Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
// to leverage AVX512 instructions.
#ifndef EIGEN_VECTORIZE_AVX512
template <>
struct packet_traits<float> : default_packet_traits {
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasCmp = 1,
    HasDiv = 1,
    HasReciprocal = EIGEN_FAST_MATH,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasACos = 1,
    HasASin = 1,
    HasATan = 1,
    HasATanh = 1,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasNdtri = 1,
    HasBessel = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBlend = 1
  };
};
template <>
struct packet_traits<double> : default_packet_traits {
  typedef Packet4d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,

    HasCmp = 1,
    HasDiv = 1,
#ifdef EIGEN_VECTORIZE_AVX2
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
#endif
    HasLog = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasATan = 1,
    HasBlend = 1
  };
};

template <>
struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet8h type;
  // There is no half-size packet for Packet8h.
  typedef Packet8h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasCmp = 1,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasNegate = 1,
    HasAbs = 1,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBlend = 0,
    HasBessel = 1,
    HasNdtri = 1
  };
};

template <>
struct packet_traits<bfloat16> : default_packet_traits {
  typedef Packet8bf type;
  // There is no half-size packet for current Packet8bf.
  // TODO: support as SSE path.
  typedef Packet8bf half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasCmp = 1,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasNegate = 1,
    HasAbs = 1,
    HasAbs2 = 0,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBlend = 0,
    HasBessel = 1,
    HasNdtri = 1
  };
};

template <>
struct packet_traits<int> : default_packet_traits {
  typedef Packet8i type;
  typedef Packet4i half;
  enum { Vectorizable = 1, AlignedOnScalar = 1, HasCmp = 1, HasDiv = 1, size = 8 };
};
template <>
struct packet_traits<uint32_t> : default_packet_traits {
  typedef Packet8ui type;
  typedef Packet4ui half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,

    HasDiv = 0,
    HasNegate = 0,
    HasSqrt = 0,

    HasCmp = 1,
    HasMin = 1,
    HasMax = 1,
    HasShift = 1
  };
};

#ifdef EIGEN_VECTORIZE_AVX2
template <>
struct packet_traits<int64_t> : default_packet_traits {
  typedef Packet4l type;
  typedef Packet2l half;
  enum { Vectorizable = 1, AlignedOnScalar = 1, HasCmp = 1, size = 4 };
};
template <>
struct packet_traits<uint64_t> : default_packet_traits {
  typedef Packet4ul type;
  // There is no half-size packet for current Packet4ul.
  // TODO: support as SSE path.
  typedef Packet4ul half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,

    // HasMin = 0,
    // HasMax = 0,
    HasDiv = 0,
    HasBlend = 0,
    HasTranspose = 0,
    HasNegate = 0,
    HasSqrt = 0,
    HasCmp = 1,
    HasShift = 1
  };
};
#endif

#endif

template <>
struct scalar_div_cost<float, true> {
  enum { value = 14 };
};
template <>
struct scalar_div_cost<double, true> {
  enum { value = 16 };
};

template <>
struct unpacket_traits<Packet8f> {
  typedef float type;
  typedef Packet4f half;
  typedef Packet8i integer_packet;
  typedef uint8_t mask_t;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = true,
    masked_store_available = true
#ifdef EIGEN_VECTORIZE_AVX512
    ,
    masked_fpops_available = true
#endif
  };
};
template <>
struct unpacket_traits<Packet4d> {
  typedef double type;
  typedef Packet2d half;
#ifdef EIGEN_VECTORIZE_AVX2
  typedef Packet4l integer_packet;
#endif
  enum {
    size = 4,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet8i> {
  typedef int type;
  typedef Packet4i half;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet8ui> {
  typedef uint32_t type;
  typedef Packet4ui half;
  enum {
    size = 8,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
#ifdef EIGEN_VECTORIZE_AVX2
template <>
struct unpacket_traits<Packet4l> {
  typedef int64_t type;
  typedef Packet2l half;
  enum {
    size = 4,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
template <>
struct unpacket_traits<Packet4ul> {
  typedef uint64_t type;
  typedef Packet4ul half;
  enum {
    size = 4,
    alignment = Aligned32,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};
#endif
template <>
struct unpacket_traits<Packet8bf> {
  typedef bfloat16 type;
  typedef Packet8bf half;
  enum {
    size = 8,
    alignment = Aligned16,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};

// Helper function for bit packing snippet of low precision comparison.
// It packs the flags from 16x16 to 8x16.
EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
  return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
                         _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
}
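// A sketch of how Pack16To8 is presumably consumed (by the half/bfloat16
// comparison kernels elsewhere in this backend): each 32-bit lane of `rf`
// holds an all-ones/all-zeros comparison flag, and the saturating pack
// narrows those eight 32-bit flags into eight 16-bit flags matching a
// Packet8h/Packet8bf lane layout.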

#ifdef EIGEN_VECTORIZE_AVX2
template <>
EIGEN_STRONG_INLINE Packet4l pset1<Packet4l>(const int64_t& from) {
  return _mm256_set1_epi64x(from);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pset1<Packet4ul>(const uint64_t& from) {
  return _mm256_set1_epi64x(numext::bit_cast<int64_t>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4l pzero(const Packet4l& /*a*/) {
  return _mm256_setzero_si256();
}
template <>
EIGEN_STRONG_INLINE Packet4ul pzero(const Packet4ul& /*a*/) {
  return _mm256_setzero_si256();
}
template <>
EIGEN_STRONG_INLINE Packet4l peven_mask(const Packet4l& /*a*/) {
  return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
}
template <>
EIGEN_STRONG_INLINE Packet4ul peven_mask(const Packet4ul& /*a*/) {
  return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
}
template <>
EIGEN_STRONG_INLINE Packet4l pload1<Packet4l>(const int64_t* from) {
  return _mm256_set1_epi64x(*from);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pload1<Packet4ul>(const uint64_t* from) {
  return _mm256_set1_epi64x(*from);
}
template <>
EIGEN_STRONG_INLINE Packet4l padd<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_add_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul padd<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_add_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l plset<Packet4l>(const int64_t& a) {
  return padd(pset1<Packet4l>(a), Packet4l(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
}
template <>
EIGEN_STRONG_INLINE Packet4ul plset<Packet4ul>(const uint64_t& a) {
  return padd(pset1<Packet4ul>(a), Packet4ul(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
}
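// plset yields the linear sequence {a, a+1, a+2, a+3}: _mm256_set_epi64x
// lists elements from index 3 down to index 0, so the {3, 2, 1, 0} argument
// order above places 0 in the lowest lane.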
template <>
EIGEN_STRONG_INLINE Packet4l psub<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_sub_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul psub<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_sub_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l pnegate(const Packet4l& a) {
  return psub(pzero(a), a);
}
template <>
EIGEN_STRONG_INLINE Packet4l pconj(const Packet4l& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet4l pcmp_le(const Packet4l& a, const Packet4l& b) {
  return _mm256_xor_si256(_mm256_cmpgt_epi64(a, b), _mm256_set1_epi32(-1));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pcmp_le(const Packet4ul& a, const Packet4ul& b) {
  return (Packet4ul)pcmp_le((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                            (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
}
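// AVX2 has no unsigned 64-bit compare, so the unsigned variants bias both
// operands by 2^63 (subtracting 0x8000000000000000 flips the sign bit):
// x < y holds for uint64_t exactly when it holds for the biased values as
// int64_t, which lets the signed vpcmpgtq path be reused.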
template <>
EIGEN_STRONG_INLINE Packet4l pcmp_lt(const Packet4l& a, const Packet4l& b) {
  return _mm256_cmpgt_epi64(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pcmp_lt(const Packet4ul& a, const Packet4ul& b) {
  return (Packet4ul)pcmp_lt((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                            (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
}
template <>
EIGEN_STRONG_INLINE Packet4l pcmp_eq(const Packet4l& a, const Packet4l& b) {
  return _mm256_cmpeq_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pcmp_eq(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_cmpeq_epi64(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l ptrue<Packet4l>(const Packet4l& a) {
  return _mm256_cmpeq_epi64(a, a);
}
template <>
EIGEN_STRONG_INLINE Packet4ul ptrue<Packet4ul>(const Packet4ul& a) {
  return _mm256_cmpeq_epi64(a, a);
}
template <>
EIGEN_STRONG_INLINE Packet4l pand<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_and_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l por<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_or_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l pxor<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_xor_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4ul pxor<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return _mm256_xor_si256(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4l pandnot<Packet4l>(const Packet4l& a, const Packet4l& b) {
  return _mm256_andnot_si256(b, a);
}
template <int N>
EIGEN_STRONG_INLINE Packet4l plogical_shift_right(Packet4l a) {
  return _mm256_srli_epi64(a, N);
}
template <int N>
EIGEN_STRONG_INLINE Packet4l plogical_shift_left(Packet4l a) {
  return _mm256_slli_epi64(a, N);
}
#ifdef EIGEN_VECTORIZE_AVX512FP16
template <int N>
EIGEN_STRONG_INLINE Packet4l parithmetic_shift_right(Packet4l a) {
  return _mm256_srai_epi64(a, N);
}
#else
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N == 0), Packet4l> parithmetic_shift_right(Packet4l a) {
  return a;
}
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N > 0) && (N < 32), Packet4l> parithmetic_shift_right(Packet4l a) {
  __m256i hi_word = _mm256_srai_epi32(a, N);
  __m256i lo_word = _mm256_srli_epi64(a, N);
  return _mm256_blend_epi32(hi_word, lo_word, 0b01010101);
}
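// For 0 < N < 32 there is no vpsraq outside AVX-512, so the 64-bit
// arithmetic shift is stitched from 32-bit pieces: vpsrad sign-extends the
// upper halves (the odd 32-bit lanes), vpsrlq supplies the lower halves, and
// the 0b01010101 blend takes even lanes from the logical shift and odd lanes
// from the arithmetic one.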
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N >= 32) && (N < 63), Packet4l> parithmetic_shift_right(Packet4l a) {
  __m256i hi_word = _mm256_srai_epi32(a, 31);
  __m256i lo_word = _mm256_shuffle_epi32(_mm256_srai_epi32(a, N - 32), (shuffle_mask<1, 1, 3, 3>::mask));
  return _mm256_blend_epi32(hi_word, lo_word, 0b01010101);
}
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N == 63), Packet4l> parithmetic_shift_right(Packet4l a) {
  return _mm256_cmpgt_epi64(_mm256_setzero_si256(), a);
}
template <int N>
EIGEN_STRONG_INLINE std::enable_if_t<(N < 0) || (N > 63), Packet4l> parithmetic_shift_right(Packet4l a) {
  return parithmetic_shift_right<int(N & 63)>(a);
}
#endif
template <>
EIGEN_STRONG_INLINE Packet4l pload<Packet4l>(const int64_t* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pload<Packet4ul>(const uint64_t* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4l ploadu<Packet4l>(const int64_t* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4ul ploadu<Packet4ul>(const uint64_t* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}
// Loads 2 int64_ts from memory and returns the packet {a0, a0, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet4l ploaddup<Packet4l>(const int64_t* from) {
  const Packet4l a = _mm256_castsi128_si256(_mm_loadu_si128(reinterpret_cast<const __m128i*>(from)));
  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
}
// Loads 2 uint64_ts from memory and returns the packet {a0, a0, a1, a1}
template <>
EIGEN_STRONG_INLINE Packet4ul ploaddup<Packet4ul>(const uint64_t* from) {
  const Packet4ul a = _mm256_castsi128_si256(_mm_loadu_si128(reinterpret_cast<const __m128i*>(from)));
  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
}
template <>
EIGEN_STRONG_INLINE void pstore<int64_t>(int64_t* to, const Packet4l& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_STRONG_INLINE void pstore<uint64_t>(uint64_t* to, const Packet4ul& from) {
  EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<int64_t>(int64_t* to, const Packet4l& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_STRONG_INLINE void pstoreu<uint64_t>(uint64_t* to, const Packet4ul& from) {
  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
}
template <>
EIGEN_DEVICE_FUNC inline Packet4l pgather<int64_t, Packet4l>(const int64_t* from, Index stride) {
  return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
}
template <>
EIGEN_DEVICE_FUNC inline Packet4ul pgather<uint64_t, Packet4ul>(const uint64_t* from, Index stride) {
  return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
}
template <>
EIGEN_DEVICE_FUNC inline void pscatter<int64_t, Packet4l>(int64_t* to, const Packet4l& from, Index stride) {
  __m128i low = _mm256_extractf128_si256(from, 0);
  to[stride * 0] = _mm_extract_epi64_0(low);
  to[stride * 1] = _mm_extract_epi64_1(low);

  __m128i high = _mm256_extractf128_si256(from, 1);
  to[stride * 2] = _mm_extract_epi64_0(high);
  to[stride * 3] = _mm_extract_epi64_1(high);
}
template <>
EIGEN_DEVICE_FUNC inline void pscatter<uint64_t, Packet4ul>(uint64_t* to, const Packet4ul& from, Index stride) {
  __m128i low = _mm256_extractf128_si256(from, 0);
  to[stride * 0] = _mm_extract_epi64_0(low);
  to[stride * 1] = _mm_extract_epi64_1(low);

  __m128i high = _mm256_extractf128_si256(from, 1);
  to[stride * 2] = _mm_extract_epi64_0(high);
  to[stride * 3] = _mm_extract_epi64_1(high);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet4l>(int64_t* to, const int64_t& a) {
  Packet4l pa = pset1<Packet4l>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE void pstore1<Packet4ul>(uint64_t* to, const uint64_t& a) {
  Packet4ul pa = pset1<Packet4ul>(a);
  pstore(to, pa);
}
template <>
EIGEN_STRONG_INLINE int64_t pfirst<Packet4l>(const Packet4l& a) {
  return _mm_extract_epi64_0(_mm256_castsi256_si128(a));
}
template <>
EIGEN_STRONG_INLINE uint64_t pfirst<Packet4ul>(const Packet4ul& a) {
  return _mm_extract_epi64_0(_mm256_castsi256_si128(a));
}
template <>
EIGEN_STRONG_INLINE int64_t predux<Packet4l>(const Packet4l& a) {
  __m128i r = _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
  return _mm_extract_epi64_0(r) + _mm_extract_epi64_1(r);
}
template <>
EIGEN_STRONG_INLINE uint64_t predux<Packet4ul>(const Packet4ul& a) {
  __m128i r = _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
  return numext::bit_cast<uint64_t>(_mm_extract_epi64_0(r) + _mm_extract_epi64_1(r));
}
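// Both reductions fold the upper 128-bit half onto the lower one with a
// single vpaddq, then add the two surviving 64-bit lanes in scalar code.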
#define MM256_SHUFFLE_EPI64(A, B, M) _mm256_shuffle_pd(_mm256_castsi256_pd(A), _mm256_castsi256_pd(B), M)
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4l, 4>& kernel) {
  __m256d T0 = MM256_SHUFFLE_EPI64(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = MM256_SHUFFLE_EPI64(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = MM256_SHUFFLE_EPI64(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = MM256_SHUFFLE_EPI64(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_castpd_si256(_mm256_permute2f128_pd(T0, T2, 32));
  kernel.packet[3] = _mm256_castpd_si256(_mm256_permute2f128_pd(T0, T2, 49));
  kernel.packet[0] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 32));
  kernel.packet[2] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 49));
}
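// Two-stage 4x4 transpose: the shuffles interleave the 64-bit elements of
// rows 0/1 and rows 2/3 (mask 15 picks the odd elements of each 128-bit
// half, mask 0 the even ones), and the permute2f128 calls (0x20 = low
// halves, 0x31 = high halves) then exchange 128-bit blocks so that original
// column j ends up in kernel.packet[j].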
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4ul, 4>& kernel) {
  ptranspose((PacketBlock<Packet4l, 4>&)kernel);
}
template <>
EIGEN_STRONG_INLINE Packet4l pmin<Packet4l>(const Packet4l& a, const Packet4l& b) {
  __m256i cmp = _mm256_cmpgt_epi64(a, b);
  __m256i a_min = _mm256_andnot_si256(cmp, a);
  __m256i b_min = _mm256_and_si256(cmp, b);
  return Packet4l(_mm256_or_si256(a_min, b_min));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pmin<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return padd((Packet4ul)pmin((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                              (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
              pset1<Packet4ul>(0x8000000000000000UL));
}
template <>
EIGEN_STRONG_INLINE Packet4l pmax<Packet4l>(const Packet4l& a, const Packet4l& b) {
  __m256i cmp = _mm256_cmpgt_epi64(a, b);
  __m256i a_min = _mm256_and_si256(cmp, a);
  __m256i b_min = _mm256_andnot_si256(cmp, b);
  return Packet4l(_mm256_or_si256(a_min, b_min));
}
template <>
EIGEN_STRONG_INLINE Packet4ul pmax<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return padd((Packet4ul)pmax((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
                              (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
              pset1<Packet4ul>(0x8000000000000000UL));
}
template <>
EIGEN_STRONG_INLINE Packet4l pabs<Packet4l>(const Packet4l& a) {
  Packet4l pz = pzero<Packet4l>(a);
  Packet4l cmp = _mm256_cmpgt_epi64(a, pz);
  return psub(cmp, pxor(a, cmp));
}
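// Branchless abs: with m = (a > 0) ? ~0 : 0 as computed above, m - (a ^ m)
// equals -1 - ~a == a when a > 0 and 0 - a == -a otherwise, avoiding the
// need for a 64-bit abs instruction.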
template <>
EIGEN_STRONG_INLINE Packet4ul pabs<Packet4ul>(const Packet4ul& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet4l pmul<Packet4l>(const Packet4l& a, const Packet4l& b) {
  // 64-bit mul requires avx512, so do this with 32-bit multiplication
  __m256i upper32_a = _mm256_srli_epi64(a, 32);
  __m256i upper32_b = _mm256_srli_epi64(b, 32);

  // upper * lower
  __m256i mul1 = _mm256_mul_epu32(upper32_a, b);
  __m256i mul2 = _mm256_mul_epu32(upper32_b, a);
  // lower * lower (the upper * upper term would land at 2^64 and can be dropped)
  __m256i mul3 = _mm256_mul_epu32(a, b);

  __m256i high = _mm256_slli_epi64(_mm256_add_epi64(mul1, mul2), 32);
  return _mm256_add_epi64(high, mul3);
}
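// Derivation: writing a = 2^32*a_hi + a_lo and b = 2^32*b_hi + b_lo,
//   a*b mod 2^64 == ((a_hi*b_lo + b_hi*a_lo) << 32) + a_lo*b_lo,
// which is exactly mul1, mul2 and mul3 above; the a_hi*b_hi term carries a
// factor of 2^64 and drops out. vpmuludq reads only the low 32 bits of each
// 64-bit lane, so the inputs need no masking.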
template <>
EIGEN_STRONG_INLINE Packet4ul pmul<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
  return (Packet4ul)pmul<Packet4l>((Packet4l)a, (Packet4l)b);
}
#endif

template <>
EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) {
  return _mm256_set1_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) {
  return _mm256_set1_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) {
  return _mm256_set1_epi32(from);
}
template <>
EIGEN_STRONG_INLINE Packet8ui pset1<Packet8ui>(const uint32_t& from) {
  return _mm256_set1_epi32(from);
}

template <>
EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) {
  return _mm256_castsi256_ps(pset1<Packet8i>(from));
}
template <>
EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) {
  return _mm256_castsi256_pd(_mm256_set1_epi64x(from));
}
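// pset1frombits broadcasts a raw bit pattern reinterpreted as float/double;
// it is how sign masks and rounding constants are built further down (see
// pround below) without a value conversion.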

template <>
EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) {
  return _mm256_setzero_ps();
}
template <>
EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) {
  return _mm256_setzero_pd();
}
template <>
EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) {
  return _mm256_setzero_si256();
}
template <>
EIGEN_STRONG_INLINE Packet8ui pzero(const Packet8ui& /*a*/) {
  return _mm256_setzero_si256();
}

template <>
EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) {
  return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1));
}
template <>
EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) {
  return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
}
template <>
EIGEN_STRONG_INLINE Packet8ui peven_mask(const Packet8ui& /*a*/) {
  return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
}
template <>
EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) {
  return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1));
}
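// peven_mask sets all bits of the even-indexed lanes; _mm256_set_epi32 lists
// elements from index 7 down to 0, so the trailing -1 lands in lane 0. The
// double variant needs two -1 words per selected 64-bit lane.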

template <>
EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) {
  return _mm256_broadcast_ss(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) {
  return _mm256_broadcast_sd(from);
}

template <>
EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_add_ps(a, b);
}
#ifdef EIGEN_VECTORIZE_AVX512
template <>
EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b, uint8_t umask) {
  __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
  return _mm512_castps512_ps256(_mm512_maskz_add_ps(mask, _mm512_castps256_ps512(a), _mm512_castps256_ps512(b)));
}
#endif
template <>
EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_add_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_add_epi32(a, b);
#else
  __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui padd<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_add_epi32(a, b);
#else
  __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) {
  return padd(pset1<Packet8f>(a), _mm256_set_ps(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
}
template <>
EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) {
  return padd(pset1<Packet4d>(a), _mm256_set_pd(3.0, 2.0, 1.0, 0.0));
}
template <>
EIGEN_STRONG_INLINE Packet8i plset<Packet8i>(const int& a) {
  return padd(pset1<Packet8i>(a), (Packet8i)_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
}
template <>
EIGEN_STRONG_INLINE Packet8ui plset<Packet8ui>(const uint32_t& a) {
  return padd(pset1<Packet8ui>(a), (Packet8ui)_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
}

template <>
EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_sub_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_sub_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_sub_epi32(a, b);
#else
  __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui psub<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_sub_epi32(a, b);
#else
  __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a) {
  const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
  return _mm256_xor_ps(a, mask);
}
template <>
EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a) {
  const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x8000000000000000ULL));
  return _mm256_xor_pd(a, mask);
}
template <>
EIGEN_STRONG_INLINE Packet8i pnegate(const Packet8i& a) {
  return psub(pzero(a), a);
}

template <>
EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) {
  return a;
}
template <>
EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) {
  return a;
}

template <>
EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_mul_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_mul_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_mullo_epi32(a, b);
#else
  const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pmul<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_mullo_epi32(a, b);
#else
  const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_div_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_div_pd(a, b);
}

template <>
EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX512
  return _mm512_cvttpd_epi32(_mm512_div_pd(_mm512_cvtepi32_pd(a), _mm512_cvtepi32_pd(b)));
#else
  Packet4i lo = pdiv<Packet4i>(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  Packet4i hi = pdiv<Packet4i>(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
#endif
}
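// The AVX-512 branch divides in double precision: int32 operands convert
// exactly, and with |numerator| < 2^31 the correctly rounded quotient can
// never round across an integer boundary, so the truncating conversion back
// reproduces C++'s round-toward-zero integer division.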

#ifdef EIGEN_VECTORIZE_FMA
template <>
EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fmadd_ps(a, b, c);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fmadd_pd(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet8f pmsub(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fmsub_ps(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet4d pmsub(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fmsub_pd(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet8f pnmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fnmadd_ps(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet4d pnmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fnmadd_pd(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet8f pnmsub(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
  return _mm256_fnmsub_ps(a, b, c);
}

template <>
EIGEN_STRONG_INLINE Packet4d pnmsub(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
  return _mm256_fnmsub_pd(a, b, c);
}

#endif

template <>
EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_LE_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_LT_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_NGE_UQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) {
  return _mm256_cmp_ps(a, b, _CMP_EQ_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet8f pisnan(const Packet8f& a) {
  return _mm256_cmp_ps(a, a, _CMP_UNORD_Q);
}

template <>
EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_LE_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_LT_OQ);
}
template <>
EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_NGE_UQ);
}
template <>
EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) {
  return _mm256_cmp_pd(a, b, _CMP_EQ_OQ);
}

template <>
EIGEN_STRONG_INLINE Packet8i pcmp_le(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(_mm256_cmpgt_epi32(a, b), _mm256_set1_epi32(-1));
#else
  __m128i lo = _mm_cmpgt_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  lo = _mm_xor_si128(lo, _mm_set1_epi32(-1));
  __m128i hi = _mm_cmpgt_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  hi = _mm_xor_si128(hi, _mm_set1_epi32(-1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pcmp_lt(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpgt_epi32(b, a);
#else
  __m128i lo = _mm_cmpgt_epi32(_mm256_extractf128_si256(b, 0), _mm256_extractf128_si256(a, 0));
  __m128i hi = _mm_cmpgt_epi32(_mm256_extractf128_si256(b, 1), _mm256_extractf128_si256(a, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpeq_epi32(a, b);
#else
  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pcmp_eq(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cmpeq_epi32(a, b);
#else
  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // There appears to be a bug in GCC, by which the optimizer may flip
  // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  Packet8f res;
  asm("vminps %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::min.
  return _mm256_min_ps(b, a);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // See pmin above
  Packet4d res;
  asm("vminpd %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::min.
  return _mm256_min_pd(b, a);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pmin<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_min_epi32(a, b);
#else
  __m128i lo = _mm_min_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_min_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pmin<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_min_epu32(a, b);
#else
  __m128i lo = _mm_min_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_min_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // See pmin above
  Packet8f res;
  asm("vmaxps %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::max.
  return _mm256_max_ps(b, a);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
#if EIGEN_GNUC_STRICT_LESS_THAN(6, 3, 0)
  // See pmin above
  Packet4d res;
  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x"(res) : [a] "x"(a), [b] "x"(b));
  return res;
#else
  // Arguments are swapped to match NaN propagation behavior of std::max.
  return _mm256_max_pd(b, a);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8i pmax<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_max_epi32(a, b);
#else
  __m128i lo = _mm_max_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_max_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pmax<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_max_epu32(a, b);
#else
  __m128i lo = _mm_max_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
  __m128i hi = _mm_max_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

#ifdef EIGEN_VECTORIZE_AVX2
template <>
EIGEN_STRONG_INLINE Packet8i psign(const Packet8i& a) {
  return _mm256_sign_epi32(_mm256_set1_epi32(1), a);
}
#endif

// Add specializations for min/max with prescribed NaN propagation.
template <>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
}
template <>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
}
template <>
EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmin<Packet4d>);
}
template <>
EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet8f>);
}
template <>
EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
  return pminmax_propagate_nan(a, b, pmax<Packet4d>);
}

template <>
EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) {
  return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION);
}
template <>
EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) {
  return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION);
}

template <>
EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) {
  return _mm256_ceil_ps(a);
}
template <>
EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) {
  return _mm256_ceil_pd(a);
}

template <>
EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) {
  return _mm256_floor_ps(a);
}
template <>
EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) {
  return _mm256_floor_pd(a);
}

template <>
EIGEN_STRONG_INLINE Packet8f ptrunc<Packet8f>(const Packet8f& a) {
  return _mm256_round_ps(a, _MM_FROUND_TRUNC);
}
template <>
EIGEN_STRONG_INLINE Packet4d ptrunc<Packet4d>(const Packet4d& a) {
  return _mm256_round_pd(a, _MM_FROUND_TRUNC);
}

template <>
EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  return _mm256_cmpeq_epi32(a, a);
#else
  const __m256 b = _mm256_castsi256_ps(a);
  return _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_TRUE_UQ));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqd has lower latency than the more general vcmpps
  const __m256i b = _mm256_castps_si256(a);
  return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b, b));
#else
  return _mm256_cmp_ps(a, a, _CMP_TRUE_UQ);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  // vpcmpeqq has lower latency than the more general vcmppd
  const __m256i b = _mm256_castpd_si256(a);
  return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b, b));
#else
  return _mm256_cmp_pd(a, a, _CMP_TRUE_UQ);
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_and_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_and_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_and_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pand<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_and_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_or_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_or_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_or_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui por<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_or_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_xor_ps(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_xor_pd(a, b);
}
template <>
EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pxor<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_xor_si256(a, b);
#else
  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) {
  return _mm256_andnot_ps(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) {
  return _mm256_andnot_pd(b, a);
}
template <>
EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_andnot_si256(b, a);
#else
  return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a)));
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8ui pandnot<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_andnot_si256(b, a);
#else
  return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a)));
#endif
}

template <>
EIGEN_STRONG_INLINE Packet8ui pcmp_lt(const Packet8ui& a, const Packet8ui& b) {
  return pxor(pcmp_eq(a, pmax(a, b)), ptrue(a));
}
template <>
EIGEN_STRONG_INLINE Packet8ui pcmp_le(const Packet8ui& a, const Packet8ui& b) {
  return pcmp_eq(a, pmin(a, b));
}

template <>
EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) {
  const Packet8f mask = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x80000000u));
  const Packet8f prev0dot5 = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
  return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
template <>
EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) {
  const Packet4d mask = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
  const Packet4d prev0dot5 = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
  return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}
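// pround implements round-half-away-from-zero on top of truncation: the
// pand/por pair copies the sign of `a` onto prev0dot5 (the largest value
// strictly below 0.5), and adding that signed almost-half before rounding
// toward zero pushes half-way cases up in magnitude without disturbing
// values already nearer to an integer.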

template <>
EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b) {
  return _mm256_blendv_ps(b, a, mask);
}
template <>
EIGEN_STRONG_INLINE Packet8i pselect<Packet8i>(const Packet8i& mask, const Packet8i& a, const Packet8i& b) {
  return _mm256_castps_si256(
      _mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask)));
}
template <>
EIGEN_STRONG_INLINE Packet8ui pselect<Packet8ui>(const Packet8ui& mask, const Packet8ui& a, const Packet8ui& b) {
  return _mm256_castps_si256(
      _mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask)));
}

template <>
EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b) {
  return _mm256_blendv_pd(b, a, mask);
}
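// vblendvps/vblendvpd select from `a` wherever the most significant bit of
// the corresponding mask lane is set, which matches the pselect contract
// because the comparison primitives above produce all-ones or all-zeros
// lanes; the integer overloads simply reuse the float blend via bit casts.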

template <int N>
EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srai_epi32(a, N);
#else
  __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <int N>
EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_srli_epi32(a, N);
#else
  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <int N>
EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_slli_epi32(a, N);
#else
  __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
  __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}

template <int N>
EIGEN_STRONG_INLINE Packet8ui parithmetic_shift_right(Packet8ui a) {
  return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
}
template <int N>
EIGEN_STRONG_INLINE Packet8ui plogical_shift_right(Packet8ui a) {
  return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
}
template <int N>
EIGEN_STRONG_INLINE Packet8ui plogical_shift_left(Packet8ui a) {
  return (Packet8ui)plogical_shift_left<N>((Packet8i)a);
}

template <>
EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8ui pload<Packet8ui>(const uint32_t* from) {
  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
}

template <>
EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from);
}
template <>
EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from);
}
template <>
EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}
template <>
EIGEN_STRONG_INLINE Packet8ui ploadu<Packet8ui>(const uint32_t* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
}

template <>
EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
#ifdef EIGEN_VECTORIZE_AVX512
  __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_castps512_ps256(_mm512_maskz_loadu_ps(mask, from));
#else
  Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
  const Packet8i bit_mask =
      _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
  mask = por<Packet8i>(mask, bit_mask);
  mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask);
#endif
}
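// In the non-AVX-512 branch the 8-bit umask is widened into a lane mask:
// umask is broadcast to every byte, and lane i of bit_mask has every bit set
// except bit i of its low byte, so after the OR lane i equals 0xffffffff iff
// bit i of umask is set; pcmp_eq against all-ones then produces the sign-bit
// mask that vmaskmovps expects.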
1522
1523// Loads 4 floats from memory a returns the packet {a0, a0 a1, a1, a2, a2, a3, a3}
1524template <>
1525EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from) {
1526 // TODO try to find a way to avoid the need of a temporary register
1527 // Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
1528 // tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
1529 // return _mm256_unpacklo_ps(tmp,tmp);
1530
1531 // _mm256_insertf128_ps is very slow on Haswell, thus:
1532 Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
1533 // mimic an "inplace" permutation of the lower 128bits using a blend
1534 tmp = _mm256_blend_ps(
1535 tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
1536 // then we can perform a consistent permutation on the global register to get everything in shape:
1537 return _mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2));
1538}
1539// Loads 2 doubles from memory a returns the packet {a0, a0, a1, a1}
1540template <>
1541EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from) {
1542 Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
1543 return _mm256_permute_pd(tmp, 3 << 2);
1544}
1545// Loads 4 integers from memory a returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
1546template <>
1547EIGEN_STRONG_INLINE Packet8i ploaddup<Packet8i>(const int* from) {
1548#ifdef EIGEN_VECTORIZE_AVX2
1549 const Packet8i a = _mm256_castsi128_si256(ploadu<Packet4i>(from));
1550 return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 0, 1, 1, 2, 2, 3, 3));
1551#else
1552 __m256 tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
1553 // mimic an "inplace" permutation of the lower 128bits using a blend
1554 tmp = _mm256_blend_ps(
1555 tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
1556 // then we can perform a consistent permutation on the global register to get everything in shape:
1557 return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2)));
1558#endif
1559}
1560template <>
1561EIGEN_STRONG_INLINE Packet8ui ploaddup<Packet8ui>(const uint32_t* from) {
1562#ifdef EIGEN_VECTORIZE_AVX2
1563 const Packet8ui a = _mm256_castsi128_si256(ploadu<Packet4ui>(from));
1564 return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 0, 1, 1, 2, 2, 3, 3));
1565#else
1566 __m256 tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
1567 // mimic an "inplace" permutation of the lower 128bits using a blend
1568 tmp = _mm256_blend_ps(
1569 tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
1570 // then we can perform a consistent permutation on the global register to get
1571 // everything in shape:
1572 return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2)));
1573#endif
1574}
1575
1576// Loads 2 floats from memory a returns the packet {a0, a0 a0, a0, a1, a1, a1, a1}
1577template <>
1578EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from) {
1579 Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
1580 return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from + 1), 1);
1581}
1582template <>
1583EIGEN_STRONG_INLINE Packet8i ploadquad<Packet8i>(const int* from) {
1584 return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from + 1)), 1);
1585}
1586template <>
1587EIGEN_STRONG_INLINE Packet8ui ploadquad<Packet8ui>(const uint32_t* from) {
1588 return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from + 1)), 1);
1589}
1590
1591template <>
1592EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet8f& from) {
1593 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from);
1594}
1595template <>
1596EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) {
1597 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from);
1598}
1599template <>
1600EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet8i& from) {
1601 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
1602}
1603template <>
1604EIGEN_STRONG_INLINE void pstore<uint32_t>(uint32_t* to, const Packet8ui& from) {
1605 EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
1606}
1607
1608template <>
1609EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from) {
1610 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from);
1611}
1612template <>
1613EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) {
1614 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from);
1615}
1616template <>
1617EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i& from) {
1618 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
1619}
1620template <>
1621EIGEN_STRONG_INLINE void pstoreu<uint32_t>(uint32_t* to, const Packet8ui& from) {
1622 EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
1623}
1624
1625template <>
1626EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from, uint8_t umask) {
1627#ifdef EIGEN_VECTORIZE_AVX512
1628 __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
1629 EIGEN_DEBUG_UNALIGNED_STORE _mm512_mask_storeu_ps(to, mask, _mm512_castps256_ps512(from));
1630#else
1631 Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
1632 const Packet8i bit_mask =
1633 _mm256_set_epi32(0x7f7f7f7f, 0xbfbfbfbf, 0xdfdfdfdf, 0xefefefef, 0xf7f7f7f7, 0xfbfbfbfb, 0xfdfdfdfd, 0xfefefefe);
1634 mask = por<Packet8i>(mask, bit_mask);
1635 mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
1636#if EIGEN_COMP_MSVC
1637 // MSVC sometimes seems to use a bogus mask with maskstore.
1638 const __m256i ifrom = _mm256_castps_si256(from);
1639 EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 0), _mm256_extractf128_si256(mask, 0),
1640 reinterpret_cast<char*>(to));
1641 EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 1), _mm256_extractf128_si256(mask, 1),
1642 reinterpret_cast<char*>(to + 4));
1643#else
1644 EIGEN_DEBUG_UNALIGNED_STORE _mm256_maskstore_ps(to, mask, from);
1645#endif
1646#endif
1647}
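// Usage sketch (illustrative): with umask = 0x07 the overload above writes
// only the first three lanes and leaves to[3..7] untouched, e.g. to store the
// tail of a buffer whose length is not a multiple of 8:
//   pstoreu(ptr, p, static_cast<uint8_t>(0x07));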
1648
1649// NOTE: _mm256_i32gather_ps and _mm256_i32gather_pd could be leveraged when AVX2 instructions are available.
1650// NOTE: for the record, the following was measured to be slower:
1651// return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
1652template <>
1653EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride) {
1654 return _mm256_set_ps(from[7 * stride], from[6 * stride], from[5 * stride], from[4 * stride], from[3 * stride],
1655 from[2 * stride], from[1 * stride], from[0 * stride]);
1656}
1657template <>
1658EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride) {
1659 return _mm256_set_pd(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
1660}
1661template <>
1662EIGEN_DEVICE_FUNC inline Packet8i pgather<int, Packet8i>(const int* from, Index stride) {
1663 return _mm256_set_epi32(from[7 * stride], from[6 * stride], from[5 * stride], from[4 * stride], from[3 * stride],
1664 from[2 * stride], from[1 * stride], from[0 * stride]);
1665}
1666template <>
1667EIGEN_DEVICE_FUNC inline Packet8ui pgather<uint32_t, Packet8ui>(const uint32_t* from, Index stride) {
1668 return (Packet8ui)pgather<int, Packet8i>((int*)from, stride);
1669}
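// Usage sketch (illustrative): gathering column j of a row-major matrix with
// `cols` columns amounts to a stride of `cols`:
//   Packet8f column = pgather<float, Packet8f>(data + j, cols);  // data[i * cols + j], i = 0..7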
1670
1671template <>
1672EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride) {
1673 __m128 low = _mm256_extractf128_ps(from, 0);
1674 to[stride * 0] = _mm_cvtss_f32(low);
1675 to[stride * 1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
1676 to[stride * 2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
1677 to[stride * 3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));
1678
1679 __m128 high = _mm256_extractf128_ps(from, 1);
1680 to[stride * 4] = _mm_cvtss_f32(high);
1681 to[stride * 5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
1682 to[stride * 6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
1683 to[stride * 7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
1684}
1685template <>
1686EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride) {
1687 __m128d low = _mm256_extractf128_pd(from, 0);
1688 to[stride * 0] = _mm_cvtsd_f64(low);
1689 to[stride * 1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
1690 __m128d high = _mm256_extractf128_pd(from, 1);
1691 to[stride * 2] = _mm_cvtsd_f64(high);
1692 to[stride * 3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
1693}
1694template <>
1695EIGEN_DEVICE_FUNC inline void pscatter<int, Packet8i>(int* to, const Packet8i& from, Index stride) {
1696 __m128i low = _mm256_extractf128_si256(from, 0);
1697 to[stride * 0] = _mm_extract_epi32(low, 0);
1698 to[stride * 1] = _mm_extract_epi32(low, 1);
1699 to[stride * 2] = _mm_extract_epi32(low, 2);
1700 to[stride * 3] = _mm_extract_epi32(low, 3);
1701
1702 __m128i high = _mm256_extractf128_si256(from, 1);
1703 to[stride * 4] = _mm_extract_epi32(high, 0);
1704 to[stride * 5] = _mm_extract_epi32(high, 1);
1705 to[stride * 6] = _mm_extract_epi32(high, 2);
1706 to[stride * 7] = _mm_extract_epi32(high, 3);
1707}
1708template <>
1709EIGEN_DEVICE_FUNC inline void pscatter<uint32_t, Packet8ui>(uint32_t* to, const Packet8ui& from, Index stride) {
1710 pscatter<int, Packet8i>((int*)to, (Packet8i)from, stride);
1711}
1712
1713template <>
1714EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a) {
1715 Packet8f pa = pset1<Packet8f>(a);
1716 pstore(to, pa);
1717}
1718template <>
1719EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a) {
1720 Packet4d pa = pset1<Packet4d>(a);
1721 pstore(to, pa);
1722}
1723template <>
1724EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a) {
1725 Packet8i pa = pset1<Packet8i>(a);
1726 pstore(to, pa);
1727}
1728
1729#ifndef EIGEN_VECTORIZE_AVX512
1730template <>
1731EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) {
1732 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1733}
1734template <>
1735EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) {
1736 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1737}
1738template <>
1739EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) {
1740 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1741}
1742template <>
1743EIGEN_STRONG_INLINE void prefetch<uint32_t>(const uint32_t* addr) {
1744 _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1745}
1746#endif
1747
1748template <>
1749EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
1750 return _mm_cvtss_f32(_mm256_castps256_ps128(a));
1751}
1752template <>
1753EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
1754 return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
1755}
1756template <>
1757EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
1758 return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
1759}
1760template <>
1761EIGEN_STRONG_INLINE uint32_t pfirst<Packet8ui>(const Packet8ui& a) {
1762 return numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(_mm256_castsi256_si128(a)));
1763}
1764
1765template <>
1766EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a) {
1767 __m256 tmp = _mm256_shuffle_ps(a, a, 0x1b);
1768 return _mm256_permute2f128_ps(tmp, tmp, 1);
1769}
1770template <>
1771EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a) {
1772 __m256d tmp = _mm256_shuffle_pd(a, a, 5);
1773 return _mm256_permute2f128_pd(tmp, tmp, 1);
1774#if 0
1775 // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
1776 // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
1777 __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
1778 return _mm256_permute_pd(swap_halves,5);
1779#endif
1780}
1781template <>
1782EIGEN_STRONG_INLINE Packet8i preverse(const Packet8i& a) {
1783 return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
1784}
1785template <>
1786EIGEN_STRONG_INLINE Packet8ui preverse(const Packet8ui& a) {
1787 return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
1788}
1789
1790#ifdef EIGEN_VECTORIZE_AVX2
1791template <>
1792EIGEN_STRONG_INLINE Packet4l preverse(const Packet4l& a) {
1793 return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
1794}
1795template <>
1796EIGEN_STRONG_INLINE Packet4ul preverse(const Packet4ul& a) {
1797 return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
1798}
1799#endif
1800
1801// pabs: straightforward absolute-value implementations
1802template <>
1803EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a) {
1804 const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF));
1805 return _mm256_and_ps(a, mask);
1806}
1807template <>
1808EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a) {
1809 const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x7FFFFFFFFFFFFFFF));
1810 return _mm256_and_pd(a, mask);
1811}
1812template <>
1813EIGEN_STRONG_INLINE Packet8i pabs(const Packet8i& a) {
1814#ifdef EIGEN_VECTORIZE_AVX2
1815 return _mm256_abs_epi32(a);
1816#else
1817 __m128i lo = _mm_abs_epi32(_mm256_extractf128_si256(a, 0));
1818 __m128i hi = _mm_abs_epi32(_mm256_extractf128_si256(a, 1));
1819 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
1820#endif
1821}
1822template <>
1823EIGEN_STRONG_INLINE Packet8ui pabs(const Packet8ui& a) {
1824 return a;
1825}
1826
1827template <>
1828EIGEN_STRONG_INLINE Packet8h psignbit(const Packet8h& a) {
1829 return _mm_cmpgt_epi16(_mm_setzero_si128(), a);
1830}
1831template <>
1832EIGEN_STRONG_INLINE Packet8bf psignbit(const Packet8bf& a) {
1833 return _mm_cmpgt_epi16(_mm_setzero_si128(), a);
1834}
1835template <>
1836EIGEN_STRONG_INLINE Packet8f psignbit(const Packet8f& a) {
1837#ifdef EIGEN_VECTORIZE_AVX2
1838 return _mm256_castsi256_ps(_mm256_cmpgt_epi32(_mm256_setzero_si256(), _mm256_castps_si256(a)));
1839#else
1840 return _mm256_castsi256_ps(parithmetic_shift_right<31>(Packet8i(_mm256_castps_si256(a))));
1841#endif
1842}
1843template <>
1844EIGEN_STRONG_INLINE Packet8ui psignbit(const Packet8ui& /*unused*/) {
1845 return _mm256_setzero_si256();
1846}
1847#ifdef EIGEN_VECTORIZE_AVX2
1848template <>
1849EIGEN_STRONG_INLINE Packet4d psignbit(const Packet4d& a) {
1850 return _mm256_castsi256_pd(_mm256_cmpgt_epi64(_mm256_setzero_si256(), _mm256_castpd_si256(a)));
1851}
1852template <>
1853EIGEN_STRONG_INLINE Packet4ul psignbit(const Packet4ul& /*unused*/) {
1854 return _mm256_setzero_si256();
1855}
1856#endif
1857
1858template <>
1859EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
1860 return pfrexp_generic(a, exponent);
1861}
1862
1863// Extract the biased exponent without requiring a 64-bit integer packet (Packet4l).
1864template <>
1865EIGEN_STRONG_INLINE Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) {
1866 const Packet4d cst_exp_mask = pset1frombits<Packet4d>(static_cast<uint64_t>(0x7ff0000000000000ull));
1867 __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
1868#ifdef EIGEN_VECTORIZE_AVX2
1869 a_expo = _mm256_srli_epi64(a_expo, 52);
1870 __m128i lo = _mm256_extractf128_si256(a_expo, 0);
1871 __m128i hi = _mm256_extractf128_si256(a_expo, 1);
1872#else
1873 __m128i lo = _mm256_extractf128_si256(a_expo, 0);
1874 __m128i hi = _mm256_extractf128_si256(a_expo, 1);
1875 lo = _mm_srli_epi64(lo, 52);
1876 hi = _mm_srli_epi64(hi, 52);
1877#endif
1878 Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
1879 Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
1880 Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
1881 exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
1882 return exponent;
1883}
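// Note on the routine above: after the 64-bit shifts each quadword holds a
// small non-negative exponent, so only the low 32-bit word of every quadword
// is meaningful. vec4i_swizzle1(x, 0, 2, 1, 3) moves those two words to the
// front of each 128-bit half, where _mm_cvtepi32_pd widens them to double.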
1884
1885template <>
1886EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(const Packet4d& a, Packet4d& exponent) {
1887 return pfrexp_generic(a, exponent);
1888}
1889
1890template <>
1891EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
1892 return pldexp_generic(a, exponent);
1893}
1894
1895template <>
1896EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
1897 // Clamp exponent to [-2099, 2099]
1898 const Packet4d max_exponent = pset1<Packet4d>(2099.0);
1899 const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
1900
1901 // Split 2^e into four factors and multiply.
1902 const Packet4i bias = pset1<Packet4i>(1023);
1903 Packet4i b = parithmetic_shift_right<2>(e); // floor(e/4)
1904
1905 // 2^b
1906 Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
1907 Packet4i lo = _mm_slli_epi64(hi, 52);
1908 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1909 Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1910 Packet4d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
1911
1912 // 2^(e - 3b)
1913 b = psub(psub(psub(e, b), b), b); // e - 3b
1914 hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
1915 lo = _mm_slli_epi64(hi, 52);
1916 hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
1917 c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
1918 out = pmul(out, c); // a * 2^e
1919 return out;
1920}
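// Why the three-factor trick above is safe: with b = e >> 2 (arithmetic
// shift, i.e. floor(e/4)) we have e = 3b + r with a small remainder r, so the
// biased exponents of 2^b and 2^r stay well inside the normal double range
// even at the +/-2099 clamp (which covers the full double range, denormals
// included, since 1023 + 1024 + 52 = 2099). Multiplying by 2^b three times
// and then by 2^r thus reproduces a * 2^e with correct overflow/underflow.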
1921
1922template <>
1923EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a) {
1924 return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 1))));
1925}
1926template <>
1927EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a) {
1928 return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a), _mm256_extractf128_pd(a, 1))));
1929}
1930template <>
1931EIGEN_STRONG_INLINE int predux<Packet8i>(const Packet8i& a) {
1932 return predux(Packet4i(_mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1))));
1933}
1934template <>
1935EIGEN_STRONG_INLINE uint32_t predux<Packet8ui>(const Packet8ui& a) {
1936 return predux(Packet4ui(_mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1))));
1937}
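// All 256-bit sum reductions above follow the same pattern: fold the two
// 128-bit halves together once, then reuse the corresponding SSE predux on
// the 128-bit result. predux_mul, predux_min and predux_max further below do
// the folding inline with mul/min/max instead of add.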
1938
1939template <>
1940EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a) {
1941 return _mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 1));
1942}
1943template <>
1944EIGEN_STRONG_INLINE Packet4i predux_half_dowto4<Packet8i>(const Packet8i& a) {
1945 return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
1946}
1947template <>
1948EIGEN_STRONG_INLINE Packet4ui predux_half_dowto4<Packet8ui>(const Packet8ui& a) {
1949 return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
1950}
1951
1952template <>
1953EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a) {
1954 Packet8f tmp;
1955 tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a, a, 1));
1956 tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp, tmp, _MM_SHUFFLE(1, 0, 3, 2)));
1957 return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp, tmp, 1)));
1958}
1959template <>
1960EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a) {
1961 Packet4d tmp;
1962 tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a, a, 1));
1963 return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
1964}
1965
1966template <>
1967EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a) {
1968 Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a, a, 1));
1969 tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp, tmp, _MM_SHUFFLE(1, 0, 3, 2)));
1970 return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp, tmp, 1)));
1971}
1972template <>
1973EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a) {
1974 Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a, a, 1));
1975 return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
1976}
1977
1978template <>
1979EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a) {
1980 Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a, a, 1));
1981 tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp, tmp, _MM_SHUFFLE(1, 0, 3, 2)));
1982 return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp, tmp, 1)));
1983}
1984
1985template <>
1986EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a) {
1987 Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a, a, 1));
1988 return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
1989}
1990
1991// not needed yet
1992// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x)
1993// {
1994// return _mm256_movemask_ps(x)==0xFF;
1995// }
1996
1997template <>
1998EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x) {
1999 return _mm256_movemask_ps(x) != 0;
2000}
2001
2002template <>
2003EIGEN_STRONG_INLINE bool predux_any(const Packet8i& x) {
2004 return _mm256_movemask_ps(_mm256_castsi256_ps(x)) != 0;
2005}
2006template <>
2007EIGEN_STRONG_INLINE bool predux_any(const Packet8ui& x) {
2008 return _mm256_movemask_ps(_mm256_castsi256_ps(x)) != 0;
2009}
2010
2011EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8f, 8>& kernel) {
2012 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
2013 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
2014 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
2015 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
2016 __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
2017 __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
2018 __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
2019 __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
2020 __m256 S0 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2021 __m256 S1 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2022 __m256 S2 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2023 __m256 S3 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2024 __m256 S4 = _mm256_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
2025 __m256 S5 = _mm256_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
2026 __m256 S6 = _mm256_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
2027 __m256 S7 = _mm256_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
2028 kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
2029 kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
2030 kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
2031 kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
2032 kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
2033 kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
2034 kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
2035 kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
2036}
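// The 8x8 transpose above proceeds in three stages: the unpacklo/unpackhi
// pass transposes 2x2 blocks, the shuffle pass merges those into transposed
// 4x4 blocks within each 128-bit lane, and the final permute2f128 pass
// exchanges the 128-bit lanes to complete the transpose.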
2037
2038EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8f, 4>& kernel) {
2039 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
2040 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
2041 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
2042 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
2043
2044 __m256 S0 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2045 __m256 S1 = _mm256_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2046 __m256 S2 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2047 __m256 S3 = _mm256_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2048
2049 kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
2050 kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
2051 kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
2052 kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
2053}
2054
2055#define MM256_SHUFFLE_EPI32(A, B, M) \
2056 _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B), M))
2057
2058#ifndef EIGEN_VECTORIZE_AVX2
2059#define MM256_UNPACKLO_EPI32(A, B) \
2060 _mm256_castps_si256(_mm256_unpacklo_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B)))
2061#define MM256_UNPACKHI_EPI32(A, B) \
2062 _mm256_castps_si256(_mm256_unpackhi_ps(_mm256_castsi256_ps(A), _mm256_castsi256_ps(B)))
2063#else
2064#define MM256_UNPACKLO_EPI32(A, B) _mm256_unpacklo_epi32(A, B)
2065#define MM256_UNPACKHI_EPI32(A, B) _mm256_unpackhi_epi32(A, B)
2066#endif
2067
2068EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8i, 8>& kernel) {
2069 __m256i T0 = MM256_UNPACKLO_EPI32(kernel.packet[0], kernel.packet[1]);
2070 __m256i T1 = MM256_UNPACKHI_EPI32(kernel.packet[0], kernel.packet[1]);
2071 __m256i T2 = MM256_UNPACKLO_EPI32(kernel.packet[2], kernel.packet[3]);
2072 __m256i T3 = MM256_UNPACKHI_EPI32(kernel.packet[2], kernel.packet[3]);
2073 __m256i T4 = MM256_UNPACKLO_EPI32(kernel.packet[4], kernel.packet[5]);
2074 __m256i T5 = MM256_UNPACKHI_EPI32(kernel.packet[4], kernel.packet[5]);
2075 __m256i T6 = MM256_UNPACKLO_EPI32(kernel.packet[6], kernel.packet[7]);
2076 __m256i T7 = MM256_UNPACKHI_EPI32(kernel.packet[6], kernel.packet[7]);
2077 __m256i S0 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2078 __m256i S1 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2079 __m256i S2 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2080 __m256i S3 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2081 __m256i S4 = MM256_SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
2082 __m256i S5 = MM256_SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
2083 __m256i S6 = MM256_SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
2084 __m256i S7 = MM256_SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
2085 kernel.packet[0] = _mm256_permute2f128_si256(S0, S4, 0x20);
2086 kernel.packet[1] = _mm256_permute2f128_si256(S1, S5, 0x20);
2087 kernel.packet[2] = _mm256_permute2f128_si256(S2, S6, 0x20);
2088 kernel.packet[3] = _mm256_permute2f128_si256(S3, S7, 0x20);
2089 kernel.packet[4] = _mm256_permute2f128_si256(S0, S4, 0x31);
2090 kernel.packet[5] = _mm256_permute2f128_si256(S1, S5, 0x31);
2091 kernel.packet[6] = _mm256_permute2f128_si256(S2, S6, 0x31);
2092 kernel.packet[7] = _mm256_permute2f128_si256(S3, S7, 0x31);
2093}
2094EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8ui, 8>& kernel) {
2095 ptranspose((PacketBlock<Packet8i, 8>&)kernel);
2096}
2097
2098EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8i, 4>& kernel) {
2099 __m256i T0 = MM256_UNPACKLO_EPI32(kernel.packet[0], kernel.packet[1]);
2100 __m256i T1 = MM256_UNPACKHI_EPI32(kernel.packet[0], kernel.packet[1]);
2101 __m256i T2 = MM256_UNPACKLO_EPI32(kernel.packet[2], kernel.packet[3]);
2102 __m256i T3 = MM256_UNPACKHI_EPI32(kernel.packet[2], kernel.packet[3]);
2103
2104 __m256i S0 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2105 __m256i S1 = MM256_SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2106 __m256i S2 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2107 __m256i S3 = MM256_SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2108
2109 kernel.packet[0] = _mm256_permute2f128_si256(S0, S1, 0x20);
2110 kernel.packet[1] = _mm256_permute2f128_si256(S2, S3, 0x20);
2111 kernel.packet[2] = _mm256_permute2f128_si256(S0, S1, 0x31);
2112 kernel.packet[3] = _mm256_permute2f128_si256(S2, S3, 0x31);
2113}
2114EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8ui, 4>& kernel) {
2115 ptranspose((PacketBlock<Packet8i, 4>&)kernel);
2116}
2117
2118EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4d, 4>& kernel) {
2119 __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
2120 __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
2121 __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
2122 __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
2123
2124 kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
2125 kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
2126 kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
2127 kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
2128}
2129
2130EIGEN_STRONG_INLINE __m256i avx_blend_mask(const Selector<4>& ifPacket) {
2131 return _mm256_set_epi64x(0 - ifPacket.select[3], 0 - ifPacket.select[2], 0 - ifPacket.select[1],
2132 0 - ifPacket.select[0]);
2133}
2134
2135EIGEN_STRONG_INLINE __m256i avx_blend_mask(const Selector<8>& ifPacket) {
2136 return _mm256_set_epi32(0 - ifPacket.select[7], 0 - ifPacket.select[6], 0 - ifPacket.select[5],
2137 0 - ifPacket.select[4], 0 - ifPacket.select[3], 0 - ifPacket.select[2],
2138 0 - ifPacket.select[1], 0 - ifPacket.select[0]);
2139}
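// 0 - select[i] maps the boolean selector to an all-ones (-1) or all-zeros
// integer lane, which is exactly the sign-extended mask shape that the
// pselect calls below expect.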
2140
2141template <>
2142EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket,
2143 const Packet8f& elsePacket) {
2144 const __m256 true_mask = _mm256_castsi256_ps(avx_blend_mask(ifPacket));
2145 return pselect<Packet8f>(true_mask, thenPacket, elsePacket);
2146}
2147
2148template <>
2149EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket,
2150 const Packet4d& elsePacket) {
2151 const __m256d true_mask = _mm256_castsi256_pd(avx_blend_mask(ifPacket));
2152 return pselect<Packet4d>(true_mask, thenPacket, elsePacket);
2153}
2154
2155// Packet math for Eigen::half
2156#ifndef EIGEN_VECTORIZE_AVX512FP16
2157template <>
2158struct unpacket_traits<Packet8h> {
2159 typedef Eigen::half type;
2160 enum {
2161 size = 8,
2162 alignment = Aligned16,
2163 vectorizable = true,
2164 masked_load_available = false,
2165 masked_store_available = false
2166 };
2167 typedef Packet8h half;
2168};
2169#endif
2170
2171template <>
2172EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
2173 return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
2174}
2175
2176template <>
2177EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
2178 return numext::bit_cast<Eigen::half>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
2179}
2180
2181template <>
2182EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
2183 return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
2184}
2185
2186template <>
2187EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
2188 return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
2189}
2190
2191template <>
2192EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
2193 _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
2194}
2195
2196template <>
2197EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
2198 _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
2199}
2200
2201template <>
2202EIGEN_STRONG_INLINE Packet8h ploaddup<Packet8h>(const Eigen::half* from) {
2203 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2204 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2205 const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
2206 const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
2207 return _mm_set_epi16(d, d, c, c, b, b, a, a);
2208}
2209
2210template <>
2211EIGEN_STRONG_INLINE Packet8h ploadquad<Packet8h>(const Eigen::half* from) {
2212 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2213 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2214 return _mm_set_epi16(b, b, b, b, a, a, a, a);
2215}
2216
2217template <>
2218EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
2219 return _mm_cmpeq_epi32(a, a);
2220}
2221
2222template <>
2223EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) {
2224 const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2225 return _mm_andnot_si128(sign_mask, a);
2226}
2227
2228EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
2229#ifdef EIGEN_HAS_FP16_C
2230 return _mm256_cvtph_ps(a);
2231#else
2232 Eigen::internal::Packet8f pp = _mm256_castsi256_ps(
2233 _mm256_insertf128_si256(_mm256_castsi128_si256(half2floatsse(a)), half2floatsse(_mm_srli_si128(a, 8)), 1));
2234 return pp;
2235#endif
2236}
2237
2238EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
2239#ifdef EIGEN_HAS_FP16_C
2240 return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT);
2241#else
2242 __m128i lo = float2half(_mm256_extractf128_ps(a, 0));
2243 __m128i hi = float2half(_mm256_extractf128_ps(a, 1));
2244 return _mm_packus_epi32(lo, hi);
2245#endif
2246}
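// In the fallback above each 128-bit half is converted separately; the
// resulting 16-bit half patterns sit in the low half of 32-bit lanes, so the
// unsigned saturating pack _mm_packus_epi32 narrows them losslessly.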
2247
2248template <>
2249EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(const Packet8h& a, const Packet8h& b) {
2250 return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
2251}
2252
2253template <>
2254EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(const Packet8h& a, const Packet8h& b) {
2255 return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
2256}
2257
2258template <>
2259EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(const half& a) {
2260 return float2half(plset<Packet8f>(static_cast<float>(a)));
2261}
2262
2263template <>
2264EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a, const Packet8h& b) {
2265 // Packet8h is a wrapper around __m128i, so we can call the integer
2266 // intrinsics on it directly, as below:
2267 return _mm_or_si128(a, b);
2268}
2269template <>
2270EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a, const Packet8h& b) {
2271 return _mm_xor_si128(a, b);
2272}
2273template <>
2274EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a, const Packet8h& b) {
2275 return _mm_and_si128(a, b);
2276}
2277template <>
2278EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a, const Packet8h& b) {
2279 return _mm_andnot_si128(b, a);
2280}
2281
2282template <>
2283EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
2284 return _mm_blendv_epi8(b, a, mask);
2285}
2286
2287template <>
2288EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(const Packet8h& a) {
2289 return float2half(pround<Packet8f>(half2float(a)));
2290}
2291
2292template <>
2293EIGEN_STRONG_INLINE Packet8h print<Packet8h>(const Packet8h& a) {
2294 return float2half(print<Packet8f>(half2float(a)));
2295}
2296
2297template <>
2298EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(const Packet8h& a) {
2299 return float2half(pceil<Packet8f>(half2float(a)));
2300}
2301
2302template <>
2303EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(const Packet8h& a) {
2304 return float2half(pfloor<Packet8f>(half2float(a)));
2305}
2306
2307template <>
2308EIGEN_STRONG_INLINE Packet8h ptrunc<Packet8h>(const Packet8h& a) {
2309 return float2half(ptrunc<Packet8f>(half2float(a)));
2310}
2311
2312template <>
2313EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a, const Packet8h& b) {
2314 return Pack16To8(pcmp_eq(half2float(a), half2float(b)));
2315}
2316
2317template <>
2318EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a, const Packet8h& b) {
2319 return Pack16To8(pcmp_le(half2float(a), half2float(b)));
2320}
2321
2322template <>
2323EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a, const Packet8h& b) {
2324 return Pack16To8(pcmp_lt(half2float(a), half2float(b)));
2325}
2326
2327template <>
2328EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a, const Packet8h& b) {
2329 return Pack16To8(pcmp_lt_or_nan(half2float(a), half2float(b)));
2330}
2331
2332template <>
2333EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) {
2334 return a;
2335}
2336
2337template <>
2338EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
2339 Packet8h sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2340 return _mm_xor_si128(a, sign_mask);
2341}
2342
2343#ifndef EIGEN_VECTORIZE_AVX512FP16
2344template <>
2345EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
2346 Packet8f af = half2float(a);
2347 Packet8f bf = half2float(b);
2348 Packet8f rf = padd(af, bf);
2349 return float2half(rf);
2350}
2351
2352template <>
2353EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
2354 Packet8f af = half2float(a);
2355 Packet8f bf = half2float(b);
2356 Packet8f rf = psub(af, bf);
2357 return float2half(rf);
2358}
2359
2360template <>
2361EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
2362 Packet8f af = half2float(a);
2363 Packet8f bf = half2float(b);
2364 Packet8f rf = pmul(af, bf);
2365 return float2half(rf);
2366}
2367
2368template <>
2369EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
2370 Packet8f af = half2float(a);
2371 Packet8f bf = half2float(b);
2372 Packet8f rf = pdiv(af, bf);
2373 return float2half(rf);
2374}
2375#endif
2376
2377template <>
2378EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride) {
2379 const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0 * stride]);
2380 const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1 * stride]);
2381 const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2 * stride]);
2382 const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3 * stride]);
2383 const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4 * stride]);
2384 const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5 * stride]);
2385 const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6 * stride]);
2386 const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7 * stride]);
2387 return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
2388}
2389
2390template <>
2391EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride) {
2392 EIGEN_ALIGN32 Eigen::half aux[8];
2393 pstore(aux, from);
2394 to[stride * 0] = aux[0];
2395 to[stride * 1] = aux[1];
2396 to[stride * 2] = aux[2];
2397 to[stride * 3] = aux[3];
2398 to[stride * 4] = aux[4];
2399 to[stride * 5] = aux[5];
2400 to[stride * 6] = aux[6];
2401 to[stride * 7] = aux[7];
2402}
2403
2404#ifndef EIGEN_VECTORIZE_AVX512FP16
2405template <>
2406EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
2407 Packet8f af = half2float(a);
2408 float reduced = predux<Packet8f>(af);
2409 return Eigen::half(reduced);
2410}
2411#endif
2412
2413template <>
2414EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
2415 Packet8f af = half2float(a);
2416 float reduced = predux_max<Packet8f>(af);
2417 return Eigen::half(reduced);
2418}
2419
2420template <>
2421EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
2422 Packet8f af = half2float(a);
2423 float reduced = predux_min<Packet8f>(af);
2424 return Eigen::half(reduced);
2425}
2426
2427template <>
2428EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
2429 Packet8f af = half2float(a);
2430 float reduced = predux_mul<Packet8f>(af);
2431 return Eigen::half(reduced);
2432}
2433
2434template <>
2435EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a) {
2436 __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
2437 return _mm_shuffle_epi8(a, m);
2438}
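// The pshufb control above swaps 16-bit lanes rather than bytes: each byte
// pair (14,15), (12,13), ... keeps the two bytes of a half in order while
// reversing the order of the eight halves.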
2439
2440EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8h, 8>& kernel) {
2441 __m128i a = kernel.packet[0];
2442 __m128i b = kernel.packet[1];
2443 __m128i c = kernel.packet[2];
2444 __m128i d = kernel.packet[3];
2445 __m128i e = kernel.packet[4];
2446 __m128i f = kernel.packet[5];
2447 __m128i g = kernel.packet[6];
2448 __m128i h = kernel.packet[7];
2449
2450 __m128i a03b03 = _mm_unpacklo_epi16(a, b);
2451 __m128i c03d03 = _mm_unpacklo_epi16(c, d);
2452 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
2453 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
2454 __m128i a47b47 = _mm_unpackhi_epi16(a, b);
2455 __m128i c47d47 = _mm_unpackhi_epi16(c, d);
2456 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
2457 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
2458
2459 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
2460 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
2461 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
2462 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
2463 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
2464 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
2465 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
2466 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
2467
2468 __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
2469 __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
2470 __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
2471 __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
2472 __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
2473 __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
2474 __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
2475 __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
2476
2477 kernel.packet[0] = a0b0c0d0e0f0g0h0;
2478 kernel.packet[1] = a1b1c1d1e1f1g1h1;
2479 kernel.packet[2] = a2b2c2d2e2f2g2h2;
2480 kernel.packet[3] = a3b3c3d3e3f3g3h3;
2481 kernel.packet[4] = a4b4c4d4e4f4g4h4;
2482 kernel.packet[5] = a5b5c5d5e5f5g5h5;
2483 kernel.packet[6] = a6b6c6d6e6f6g6h6;
2484 kernel.packet[7] = a7b7c7d7e7f7g7h7;
2485}
2486
2487EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8h, 4>& kernel) {
2488 EIGEN_ALIGN32 Eigen::half in[4][8];
2489 pstore<Eigen::half>(in[0], kernel.packet[0]);
2490 pstore<Eigen::half>(in[1], kernel.packet[1]);
2491 pstore<Eigen::half>(in[2], kernel.packet[2]);
2492 pstore<Eigen::half>(in[3], kernel.packet[3]);
2493
2494 EIGEN_ALIGN32 Eigen::half out[4][8];
2495
2496 for (int i = 0; i < 4; ++i) {
2497 for (int j = 0; j < 4; ++j) {
2498 out[i][j] = in[j][2 * i];
2499 }
2500 for (int j = 0; j < 4; ++j) {
2501 out[i][j + 4] = in[j][2 * i + 1];
2502 }
2503 }
2504
2505 kernel.packet[0] = pload<Packet8h>(out[0]);
2506 kernel.packet[1] = pload<Packet8h>(out[1]);
2507 kernel.packet[2] = pload<Packet8h>(out[2]);
2508 kernel.packet[3] = pload<Packet8h>(out[3]);
2509}
2510
2511// BFloat16 implementation.
2512
2513EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) {
2514#ifdef EIGEN_VECTORIZE_AVX2
2515 __m256i extend = _mm256_cvtepu16_epi32(a);
2516 return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
2517#else
2518 __m128i lo = _mm_cvtepu16_epi32(a);
2519 __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
2520 __m128i lo_shift = _mm_slli_epi32(lo, 16);
2521 __m128i hi_shift = _mm_slli_epi32(hi, 16);
2522 return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
2523#endif
2524}
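// bfloat16 -> float is exact: a bfloat16 is simply the upper 16 bits of the
// corresponding float, so zero-extending to 32 bits and shifting left by 16
// recovers the original value bit for bit.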
2525
2526// Convert float to bfloat16 using round-to-nearest-even; NaN inputs are canonicalized to a quiet NaN.
2527EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) {
2528 __m256i input = _mm256_castps_si256(a);
2529
2530#ifdef EIGEN_VECTORIZE_AVX2
2531 // uint32_t lsb = (input >> 16);
2532 __m256i t = _mm256_srli_epi32(input, 16);
2533 // uint32_t lsb = lsb & 1;
2534 t = _mm256_and_si256(t, _mm256_set1_epi32(1));
2535 // uint32_t rounding_bias = 0x7fff + lsb;
2536 t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
2537 // input += rounding_bias;
2538 t = _mm256_add_epi32(t, input);
2539 // input = input >> 16;
2540 t = _mm256_srli_epi32(t, 16);
2541 // Check NaN before converting back to bf16
2542 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
2543 __m256i nan = _mm256_set1_epi32(0x7fc0);
2544 t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
2545 // output = numext::bit_cast<uint16_t>(input);
2546 return _mm_packus_epi32(_mm256_extractf128_si256(t, 0), _mm256_extractf128_si256(t, 1));
2547#else
2548 // uint32_t lsb = (input >> 16);
2549 __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
2550 __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
2551 // uint32_t lsb = lsb & 1;
2552 lo = _mm_and_si128(lo, _mm_set1_epi32(1));
2553 hi = _mm_and_si128(hi, _mm_set1_epi32(1));
2554 // uint32_t rounding_bias = 0x7fff + lsb;
2555 lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
2556 hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
2557 // input += rounding_bias;
2558 lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
2559 hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
2560 // input = input >> 16;
2561 lo = _mm_srli_epi32(lo, 16);
2562 hi = _mm_srli_epi32(hi, 16);
2563 // Check NaN before converting back to bf16
2564 __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
2565 __m128i nan = _mm_set1_epi32(0x7fc0);
2566 lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
2567 hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
2568 // output = numext::bit_cast<uint16_t>(input);
2569 return _mm_packus_epi32(lo, hi);
2570#endif
2571}
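// Worked example of the rounding above: the float 1 + 2^-8 has bit pattern
// 0x3F808000, so the discarded low half is exactly 0x8000 (a tie) and the
// kept lsb (bit 16) is 0; adding 0x7fff + 0 leaves the upper half at 0x3F80,
// rounding the tie down to the even mantissa. With lsb = 1 the bias would
// carry into the upper half and round up instead, as round-to-nearest-even
// requires.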
2572
2573template <>
2574EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
2575 return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
2576}
2577
2578template <>
2579EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(const Packet8bf& from) {
2580 return numext::bit_cast<bfloat16>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
2581}
2582
2583template <>
2584EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from) {
2585 return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
2586}
2587
2588template <>
2589EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from) {
2590 return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
2591}
2592
2593template <>
2594EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from) {
2595 _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
2596}
2597
2598template <>
2599EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from) {
2600 _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
2601}
2602
2603template <>
2604EIGEN_STRONG_INLINE Packet8bf ploaddup<Packet8bf>(const bfloat16* from) {
2605 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2606 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2607 const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
2608 const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
2609 return _mm_set_epi16(d, d, c, c, b, b, a, a);
2610}
2611
2612template <>
2613EIGEN_STRONG_INLINE Packet8bf ploadquad<Packet8bf>(const bfloat16* from) {
2614 const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
2615 const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
2616 return _mm_set_epi16(b, b, b, b, a, a, a, a);
2617}
2618
2619template <>
2620EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
2621 return _mm_cmpeq_epi32(a, a);
2622}
2623
2624template <>
2625EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
2626 const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2627 return _mm_andnot_si128(sign_mask, a);
2628}
2629
2630template <>
2631EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2632 return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2633}
2634
2635template <>
2636EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2637 return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2638}
2639
2640template <>
2641EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
2642 return F32ToBf16(plset<Packet8f>(static_cast<float>(a)));
2643}
2644
2645template <>
2646EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a, const Packet8bf& b) {
2647 return _mm_or_si128(a, b);
2648}
2649template <>
2650EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a, const Packet8bf& b) {
2651 return _mm_xor_si128(a, b);
2652}
2653template <>
2654EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a, const Packet8bf& b) {
2655 return _mm_and_si128(a, b);
2656}
2657template <>
2658EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a, const Packet8bf& b) {
2659 return _mm_andnot_si128(b, a);
2660}
2661
2662template <>
2663EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) {
2664 return _mm_blendv_epi8(b, a, mask);
2665}
2666
2667template <>
2668EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(const Packet8bf& a) {
2669 return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
2670}
2671
2672template <>
2673EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(const Packet8bf& a) {
2674 return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
2675}
2676
2677template <>
2678EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(const Packet8bf& a) {
2679 return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
2680}
2681
2682template <>
2683EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(const Packet8bf& a) {
2684 return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
2685}
2686
2687template <>
2688EIGEN_STRONG_INLINE Packet8bf ptrunc<Packet8bf>(const Packet8bf& a) {
2689 return F32ToBf16(ptrunc<Packet8f>(Bf16ToF32(a)));
2690}
2691
2692template <>
2693EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a, const Packet8bf& b) {
2694 return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
2695}
2696
2697template <>
2698EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a, const Packet8bf& b) {
2699 return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
2700}
2701
2702template <>
2703EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a, const Packet8bf& b) {
2704 return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
2705}
2706
2707template <>
2708EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a, const Packet8bf& b) {
2709 return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
2710}
2711
2712template <>
2713EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) {
2714 return a;
2715}
2716
2717template <>
2718EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) {
2719 Packet8bf sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2720 return _mm_xor_si128(a, sign_mask);
2721}
2722
2723template <>
2724EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2725 return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2726}
2727
2728template <>
2729EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2730 return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2731}
2732
2733template <>
2734EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2735 return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2736}
2737
2738template <>
2739EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
2740 return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
2741}
2742
2743template <>
2744EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride) {
2745 const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0 * stride]);
2746 const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1 * stride]);
2747 const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2 * stride]);
2748 const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3 * stride]);
2749 const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4 * stride]);
2750 const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5 * stride]);
2751 const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6 * stride]);
2752 const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7 * stride]);
2753 return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
2754}
2755
2756template <>
2757EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride) {
2758 EIGEN_ALIGN32 bfloat16 aux[8];
2759 pstore(aux, from);
2760 to[stride * 0] = aux[0];
2761 to[stride * 1] = aux[1];
2762 to[stride * 2] = aux[2];
2763 to[stride * 3] = aux[3];
2764 to[stride * 4] = aux[4];
2765 to[stride * 5] = aux[5];
2766 to[stride * 6] = aux[6];
2767 to[stride * 7] = aux[7];
2768}
2769
2770template <>
2771EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a) {
2772 return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
2773}
2774
2775template <>
2776EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a) {
2777 return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
2778}
2779
2780template <>
2781EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a) {
2782 return static_cast<bfloat16>(predux_min<Packet8f>(Bf16ToF32(a)));
2783}
2784
2785template <>
2786EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a) {
2787 return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
2788}
2789
2790template <>
2791EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a) {
2792 __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
2793 return _mm_shuffle_epi8(a, m);
2794}
2795
2796EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8bf, 8>& kernel) {
2797 __m128i a = kernel.packet[0];
2798 __m128i b = kernel.packet[1];
2799 __m128i c = kernel.packet[2];
2800 __m128i d = kernel.packet[3];
2801 __m128i e = kernel.packet[4];
2802 __m128i f = kernel.packet[5];
2803 __m128i g = kernel.packet[6];
2804 __m128i h = kernel.packet[7];
2805
2806 __m128i a03b03 = _mm_unpacklo_epi16(a, b);
2807 __m128i c03d03 = _mm_unpacklo_epi16(c, d);
2808 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
2809 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
2810 __m128i a47b47 = _mm_unpackhi_epi16(a, b);
2811 __m128i c47d47 = _mm_unpackhi_epi16(c, d);
2812 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
2813 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
2814
2815 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
2816 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
2817 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
2818 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
2819 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
2820 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
2821 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
2822 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
2823
2824 kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
2825 kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
2826 kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
2827 kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
2828 kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
2829 kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
2830 kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
2831 kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
2832}
2833
2834EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8bf, 4>& kernel) {
2835 __m128i a = kernel.packet[0];
2836 __m128i b = kernel.packet[1];
2837 __m128i c = kernel.packet[2];
2838 __m128i d = kernel.packet[3];
2839
2840 __m128i ab_03 = _mm_unpacklo_epi16(a, b);
2841 __m128i cd_03 = _mm_unpacklo_epi16(c, d);
2842 __m128i ab_47 = _mm_unpackhi_epi16(a, b);
2843 __m128i cd_47 = _mm_unpackhi_epi16(c, d);
2844
2845 kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
2846 kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
2847 kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
2848 kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
2849}
2850
2851} // end namespace internal
2852
2853} // end namespace Eigen
2854
2855#endif // EIGEN_PACKET_MATH_AVX_H