// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#ifndef OPENCV_HAL_VSX_UTILS_HPP
#define OPENCV_HAL_VSX_UTILS_HPP

#include "opencv2/core/cvdef.h"

#ifndef SKIP_INCLUDES
# include <assert.h>
#endif

//! @addtogroup core_utils_vsx
//! @{

#if CV_VSX

#define __VSX_S16__(c, v) (c){v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v}
#define __VSX_S8__(c, v) (c){v, v, v, v, v, v, v, v}
#define __VSX_S4__(c, v) (c){v, v, v, v}
#define __VSX_S2__(c, v) (c){v, v}

typedef __vector unsigned char vec_uchar16;
#define vec_uchar16_set(...) (vec_uchar16){__VA_ARGS__}
#define vec_uchar16_sp(c) (__VSX_S16__(vec_uchar16, c))
#define vec_uchar16_c(v) ((vec_uchar16)(v))
#define vec_uchar16_z vec_uchar16_sp(0)

typedef __vector signed char vec_char16;
#define vec_char16_set(...) (vec_char16){__VA_ARGS__}
#define vec_char16_sp(c) (__VSX_S16__(vec_char16, c))
#define vec_char16_c(v) ((vec_char16)(v))
#define vec_char16_z vec_char16_sp(0)

typedef __vector unsigned short vec_ushort8;
#define vec_ushort8_set(...) (vec_ushort8){__VA_ARGS__}
#define vec_ushort8_sp(c) (__VSX_S8__(vec_ushort8, c))
#define vec_ushort8_c(v) ((vec_ushort8)(v))
#define vec_ushort8_z vec_ushort8_sp(0)

typedef __vector signed short vec_short8;
#define vec_short8_set(...) (vec_short8){__VA_ARGS__}
#define vec_short8_sp(c) (__VSX_S8__(vec_short8, c))
#define vec_short8_c(v) ((vec_short8)(v))
#define vec_short8_z vec_short8_sp(0)

typedef __vector unsigned int vec_uint4;
#define vec_uint4_set(...) (vec_uint4){__VA_ARGS__}
#define vec_uint4_sp(c) (__VSX_S4__(vec_uint4, c))
#define vec_uint4_c(v) ((vec_uint4)(v))
#define vec_uint4_z vec_uint4_sp(0)

typedef __vector signed int vec_int4;
#define vec_int4_set(...) (vec_int4){__VA_ARGS__}
#define vec_int4_sp(c) (__VSX_S4__(vec_int4, c))
#define vec_int4_c(v) ((vec_int4)(v))
#define vec_int4_z vec_int4_sp(0)

typedef __vector float vec_float4;
#define vec_float4_set(...) (vec_float4){__VA_ARGS__}
#define vec_float4_sp(c) (__VSX_S4__(vec_float4, c))
#define vec_float4_c(v) ((vec_float4)(v))
#define vec_float4_z vec_float4_sp(0)

typedef __vector unsigned long long vec_udword2;
#define vec_udword2_set(...) (vec_udword2){__VA_ARGS__}
#define vec_udword2_sp(c) (__VSX_S2__(vec_udword2, c))
#define vec_udword2_c(v) ((vec_udword2)(v))
#define vec_udword2_z vec_udword2_sp(0)

typedef __vector signed long long vec_dword2;
#define vec_dword2_set(...) (vec_dword2){__VA_ARGS__}
#define vec_dword2_sp(c) (__VSX_S2__(vec_dword2, c))
#define vec_dword2_c(v) ((vec_dword2)(v))
#define vec_dword2_z vec_dword2_sp(0)

typedef __vector double vec_double2;
#define vec_double2_set(...) (vec_double2){__VA_ARGS__}
#define vec_double2_c(v) ((vec_double2)(v))
#define vec_double2_sp(c) (__VSX_S2__(vec_double2, c))
#define vec_double2_z vec_double2_sp(0)

#define vec_bchar16 __vector __bool char
#define vec_bchar16_set(...) (vec_bchar16){__VA_ARGS__}
#define vec_bchar16_c(v) ((vec_bchar16)(v))

#define vec_bshort8 __vector __bool short
#define vec_bshort8_set(...) (vec_bshort8){__VA_ARGS__}
#define vec_bshort8_c(v) ((vec_bshort8)(v))

#define vec_bint4 __vector __bool int
#define vec_bint4_set(...) (vec_bint4){__VA_ARGS__}
#define vec_bint4_c(v) ((vec_bint4)(v))

#define vec_bdword2 __vector __bool long long
#define vec_bdword2_set(...) (vec_bdword2){__VA_ARGS__}
#define vec_bdword2_c(v) ((vec_bdword2)(v))

#define VSX_FINLINE(tp) extern inline tp __attribute__((always_inline))

#define VSX_REDIRECT_1RG(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) { return fn2(a); }

#define VSX_REDIRECT_2RG(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a, const rg& b) { return fn2(a, b); }

/*
* GCC VSX compatibility
**/
#if defined(__GNUG__) && !defined(__clang__)

// inline asm helper
#define VSX_IMPL_1RG(rt, rto, rg, rgo, opc, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ rt rs; __asm__ __volatile__(#opc" %x0,%x1" : "="#rto (rs) : #rgo (a)); return rs; }

#define VSX_IMPL_1VRG(rt, rg, opc, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ rt rs; __asm__ __volatile__(#opc" %0,%1" : "=v" (rs) : "v" (a)); return rs; }

#define VSX_IMPL_2VRG_F(rt, rg, fopc, fnm) \
VSX_FINLINE(rt) fnm(const rg& a, const rg& b) \
{ rt rs; __asm__ __volatile__(fopc : "=v" (rs) : "v" (a), "v" (b)); return rs; }

#define VSX_IMPL_2VRG(rt, rg, opc, fnm) VSX_IMPL_2VRG_F(rt, rg, #opc" %0,%1,%2", fnm)
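// Illustrative note (not part of the original interface): each helper above wraps a single
// VSX/VMX instruction in an always-inline function. For example,
// VSX_IMPL_2VRG(vec_int4, vec_int4, vmuluwm, vec_mul) expands roughly to:
//   VSX_FINLINE(vec_int4) vec_mul(const vec_int4& a, const vec_int4& b)
//   { vec_int4 rs; __asm__ __volatile__("vmuluwm %0,%1,%2" : "=v" (rs) : "v" (a), "v" (b)); return rs; }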
#if __GNUG__ < 7
// up to GCC 6, vec_mul only supports single/double precision and long long
# ifdef vec_mul
# undef vec_mul
# endif
/*
* there is no direct instruction for 16-bit multiplication in ISA 2.07;
* XLC implements it using the "multiply even", "multiply odd" and "permute" instructions
* TODO: is 8-bit support needed?
**/
# define VSX_IMPL_MULH(Tvec, Tcast) \
VSX_FINLINE(Tvec) vec_mul(const Tvec& a, const Tvec& b) \
{ \
    static const vec_uchar16 even_perm = {0, 1, 16, 17, 4, 5, 20, 21, \
                                          8, 9, 24, 25, 12, 13, 28, 29}; \
    return vec_perm(Tcast(vec_mule(a, b)), Tcast(vec_mulo(a, b)), even_perm); \
}
VSX_IMPL_MULH(vec_short8, vec_short8_c)
VSX_IMPL_MULH(vec_ushort8, vec_ushort8_c)
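// Illustrative note (assuming the little-endian lane order OpenCV's VSX code targets):
// vec_mule/vec_mulo produce the full 32-bit products of the even/odd lanes and even_perm keeps
// only the low halfword of each product, so per lane the result behaves like
//   r[i] = (unsigned short)(a[i] * b[i]);
// i.e. an ordinary truncating 16-bit multiplication.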
// vmuluwm can be used for both unsigned and signed 32-bit integers
VSX_IMPL_2VRG(vec_int4, vec_int4, vmuluwm, vec_mul)
VSX_IMPL_2VRG(vec_uint4, vec_uint4, vmuluwm, vec_mul)
// redirect to the GCC builtin vec_mul, since it already supports single/double precision and long long
VSX_REDIRECT_2RG(vec_float4, vec_float4, vec_mul, __builtin_vec_mul)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mul, __builtin_vec_mul)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mul, __builtin_vec_mul)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mul, __builtin_vec_mul)
#endif // __GNUG__ < 7

#if __GNUG__ < 6
/*
* The "compare greater than or equal" instruction in ISA 2.07 only supports single
* and double precision.
* XLC and newer versions of GCC implement it for integers using "greater than" and NOR.
**/
# ifdef vec_cmpge
# undef vec_cmpge
# endif
# ifdef vec_cmple
# undef vec_cmple
# endif
# define vec_cmple(a, b) vec_cmpge(b, a)
# define VSX_IMPL_CMPGE(rt, rg, opc, fnm) \
VSX_IMPL_2VRG_F(rt, rg, #opc" %0,%2,%1\n\t xxlnor %x0,%x0,%x0", fnm)
VSX_IMPL_CMPGE(vec_bchar16, vec_char16, vcmpgtsb, vec_cmpge)
VSX_IMPL_CMPGE(vec_bchar16, vec_uchar16, vcmpgtub, vec_cmpge)
VSX_IMPL_CMPGE(vec_bshort8, vec_short8, vcmpgtsh, vec_cmpge)
VSX_IMPL_CMPGE(vec_bshort8, vec_ushort8, vcmpgtuh, vec_cmpge)
VSX_IMPL_CMPGE(vec_bint4, vec_int4, vcmpgtsw, vec_cmpge)
VSX_IMPL_CMPGE(vec_bint4, vec_uint4, vcmpgtuw, vec_cmpge)
VSX_IMPL_CMPGE(vec_bdword2, vec_dword2, vcmpgtsd, vec_cmpge)
VSX_IMPL_CMPGE(vec_bdword2, vec_udword2, vcmpgtud, vec_cmpge)
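// Illustrative note: the asm template above computes (b > a) with swapped operands
// ("%0,%2,%1") and then inverts the mask with xxlnor, using the identity a >= b == ~(b > a).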
// redirect to the GCC builtin cmpge, since it already supports single and double precision
VSX_REDIRECT_2RG(vec_bint4, vec_float4, vec_cmpge, __builtin_vec_cmpge)
VSX_REDIRECT_2RG(vec_bdword2, vec_double2, vec_cmpge, __builtin_vec_cmpge)
// up to GCC 5, vec_nor doesn't support bool long long
# undef vec_nor
template<typename T>
VSX_REDIRECT_2RG(T, T, vec_nor, __builtin_vec_nor)
VSX_FINLINE(vec_bdword2) vec_nor(const vec_bdword2& a, const vec_bdword2& b)
{ return vec_bdword2_c(__builtin_vec_nor(vec_dword2_c(a), vec_dword2_c(b))); }
// vec_packs doesn't support doublewords in GCC 4 and early versions of GCC 5
# undef vec_packs
VSX_REDIRECT_2RG(vec_char16, vec_short8, vec_packs, __builtin_vec_packs)
VSX_REDIRECT_2RG(vec_uchar16, vec_ushort8, vec_packs, __builtin_vec_packs)
VSX_REDIRECT_2RG(vec_short8, vec_int4, vec_packs, __builtin_vec_packs)
VSX_REDIRECT_2RG(vec_ushort8, vec_uint4, vec_packs, __builtin_vec_packs)
VSX_IMPL_2VRG_F(vec_int4, vec_dword2, "vpksdss %0,%2,%1", vec_packs)
VSX_IMPL_2VRG_F(vec_uint4, vec_udword2, "vpkudus %0,%2,%1", vec_packs)
#endif // __GNUG__ < 6

#if __GNUG__ < 5
// vec_xxpermdi in GCC 4 lacks little-endian support, just like clang
# define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ ((c & 1) << 1 | c >> 1)))
#else
# define vec_permi vec_xxpermdi
#endif // __GNUG__ < 5

// shift left double by word immediate
#ifndef vec_sldw
# define vec_sldw __builtin_vsx_xxsldwi
#endif

// vector population count
VSX_IMPL_1VRG(vec_uchar16, vec_uchar16, vpopcntb, vec_popcntu)
VSX_IMPL_1VRG(vec_uchar16, vec_char16, vpopcntb, vec_popcntu)
VSX_IMPL_1VRG(vec_ushort8, vec_ushort8, vpopcnth, vec_popcntu)
VSX_IMPL_1VRG(vec_ushort8, vec_short8, vpopcnth, vec_popcntu)
VSX_IMPL_1VRG(vec_uint4, vec_uint4, vpopcntw, vec_popcntu)
VSX_IMPL_1VRG(vec_uint4, vec_int4, vpopcntw, vec_popcntu)
VSX_IMPL_1VRG(vec_udword2, vec_udword2, vpopcntd, vec_popcntu)
VSX_IMPL_1VRG(vec_udword2, vec_dword2, vpopcntd, vec_popcntu)

// converts between single and double precision
VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp)
VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp)

// converts word and doubleword to double-precision
#ifdef vec_ctd
# undef vec_ctd
#endif
VSX_IMPL_1RG(vec_double2, wd, vec_int4, wa, xvcvsxwdp, vec_ctdo)
VSX_IMPL_1RG(vec_double2, wd, vec_uint4, wa, xvcvuxwdp, vec_ctdo)
VSX_IMPL_1RG(vec_double2, wd, vec_dword2, wi, xvcvsxddp, vec_ctd)
VSX_IMPL_1RG(vec_double2, wd, vec_udword2, wi, xvcvuxddp, vec_ctd)

// converts word and doubleword to single-precision
#undef vec_ctf
VSX_IMPL_1RG(vec_float4, wf, vec_int4, wa, xvcvsxwsp, vec_ctf)
VSX_IMPL_1RG(vec_float4, wf, vec_uint4, wa, xvcvuxwsp, vec_ctf)
VSX_IMPL_1RG(vec_float4, wf, vec_dword2, wi, xvcvsxdsp, vec_ctfo)
VSX_IMPL_1RG(vec_float4, wf, vec_udword2, wi, xvcvuxdsp, vec_ctfo)

// converts single and double precision to signed word
#undef vec_cts
VSX_IMPL_1RG(vec_int4, wa, vec_double2, wd, xvcvdpsxws, vec_ctso)
VSX_IMPL_1RG(vec_int4, wa, vec_float4, wf, xvcvspsxws, vec_cts)

// converts single and double precision to unsigned word
#undef vec_ctu
VSX_IMPL_1RG(vec_uint4, wa, vec_double2, wd, xvcvdpuxws, vec_ctuo)
VSX_IMPL_1RG(vec_uint4, wa, vec_float4, wf, xvcvspuxws, vec_ctu)

// converts single and double precision to signed doubleword
#ifdef vec_ctsl
# undef vec_ctsl
#endif
VSX_IMPL_1RG(vec_dword2, wi, vec_double2, wd, xvcvdpsxds, vec_ctsl)
VSX_IMPL_1RG(vec_dword2, wi, vec_float4, wf, xvcvspsxds, vec_ctslo)

// converts single and double precision to unsigned doubleword
#ifdef vec_ctul
# undef vec_ctul
#endif
VSX_IMPL_1RG(vec_udword2, wi, vec_double2, wd, xvcvdpuxds, vec_ctul)
VSX_IMPL_1RG(vec_udword2, wi, vec_float4, wf, xvcvspuxds, vec_ctulo)

// just in case GCC doesn't define them
#ifndef vec_xl
# define vec_xl vec_vsx_ld
# define vec_xst vec_vsx_st
#endif

#endif // GCC VSX compatibility

/*
* CLANG VSX compatibility
**/
#if defined(__clang__) && !defined(__IBMCPP__)

/*
* CLANG doesn't support %x<n> in inline asm templates, which is needed to fix the register
* number when using any of the register constraints wa, wd, wf.
*
* For more explanation check out PowerPC and IBM RS6000 in https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
* There is also an open bug: https://bugs.llvm.org/show_bug.cgi?id=31837
*
* So we can't use inline asm here; we only use the built-in functions that CLANG supports
* and fall back to __builtin_convertvector when clang is missing any of the vector conversion built-ins.
*/

// convert vector helper
#define VSX_IMPL_CONVERT(rt, rg, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) { return __builtin_convertvector(a, rt); }

#if __clang_major__ < 5
// implement vec_permi in a dirty way
# define VSX_IMPL_CLANG_4_PERMI(Tvec) \
VSX_FINLINE(Tvec) vec_permi(const Tvec& a, const Tvec& b, unsigned const char c) \
{ \
    switch (c) \
    { \
    case 0: \
        return vec_mergeh(a, b); \
    case 1: \
        return vec_mergel(vec_mergeh(a, a), b); \
    case 2: \
        return vec_mergeh(vec_mergel(a, a), b); \
    default: \
        return vec_mergel(a, b); \
    } \
}
VSX_IMPL_CLANG_4_PERMI(vec_udword2)
VSX_IMPL_CLANG_4_PERMI(vec_dword2)
VSX_IMPL_CLANG_4_PERMI(vec_double2)
// vec_xxsldwi is missing in clang 4
# define vec_xxsldwi(a, b, c) vec_sld(a, b, (c) * 4)
#else
// vec_xxpermdi lacks little-endian support in clang, just like GCC 4
# define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ ((c & 1) << 1 | c >> 1)))
#endif // __clang_major__ < 5

// shift left double by word immediate
#ifndef vec_sldw
# define vec_sldw vec_xxsldwi
#endif

// Implement vec_rsqrt since clang only supports vec_rsqrte
#ifndef vec_rsqrt
VSX_FINLINE(vec_float4) vec_rsqrt(const vec_float4& a)
{ return vec_div(vec_float4_sp(1), vec_sqrt(a)); }
VSX_FINLINE(vec_double2) vec_rsqrt(const vec_double2& a)
{ return vec_div(vec_double2_sp(1), vec_sqrt(a)); }
#endif

// vec_promote is missing doubleword support
VSX_FINLINE(vec_dword2) vec_promote(long long a, int b)
{
    vec_dword2 ret = vec_dword2_z;
    ret[b & 1] = a;
    return ret;
}
VSX_FINLINE(vec_udword2) vec_promote(unsigned long long a, int b)
{
    vec_udword2 ret = vec_udword2_z;
    ret[b & 1] = a;
    return ret;
}

// vec_popcnt should return an unsigned type, but clang disagrees, just like GCC's vec_vpopcnt
#define VSX_IMPL_POPCNTU(Tvec, Tvec2, ucast) \
VSX_FINLINE(Tvec) vec_popcntu(const Tvec2& a) \
{ return ucast(vec_popcnt(a)); }
VSX_IMPL_POPCNTU(vec_uchar16, vec_char16, vec_uchar16_c);
VSX_IMPL_POPCNTU(vec_ushort8, vec_short8, vec_ushort8_c);
VSX_IMPL_POPCNTU(vec_uint4, vec_int4, vec_uint4_c);
// redirect unsigned types
VSX_REDIRECT_1RG(vec_uchar16, vec_uchar16, vec_popcntu, vec_popcnt)
VSX_REDIRECT_1RG(vec_ushort8, vec_ushort8, vec_popcntu, vec_popcnt)
VSX_REDIRECT_1RG(vec_uint4, vec_uint4, vec_popcntu, vec_popcnt)

// converts between single and double precision
VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp)
VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp)

// converts word and doubleword to double-precision
#ifdef vec_ctd
# undef vec_ctd
#endif
VSX_REDIRECT_1RG(vec_double2, vec_int4, vec_ctdo, __builtin_vsx_xvcvsxwdp)
VSX_REDIRECT_1RG(vec_double2, vec_uint4, vec_ctdo, __builtin_vsx_xvcvuxwdp)
VSX_IMPL_CONVERT(vec_double2, vec_dword2, vec_ctd)
VSX_IMPL_CONVERT(vec_double2, vec_udword2, vec_ctd)

// converts word and doubleword to single-precision
#if __clang_major__ > 4
# undef vec_ctf
#endif
VSX_IMPL_CONVERT(vec_float4, vec_int4, vec_ctf)
VSX_IMPL_CONVERT(vec_float4, vec_uint4, vec_ctf)
VSX_REDIRECT_1RG(vec_float4, vec_dword2, vec_ctfo, __builtin_vsx_xvcvsxdsp)
VSX_REDIRECT_1RG(vec_float4, vec_udword2, vec_ctfo, __builtin_vsx_xvcvuxdsp)

// converts single and double precision to signed word
#if __clang_major__ > 4
# undef vec_cts
#endif
VSX_REDIRECT_1RG(vec_int4, vec_double2, vec_ctso, __builtin_vsx_xvcvdpsxws)
VSX_IMPL_CONVERT(vec_int4, vec_float4, vec_cts)

// converts single and double precision to unsigned word
#if __clang_major__ > 4
# undef vec_ctu
#endif
VSX_REDIRECT_1RG(vec_uint4, vec_double2, vec_ctuo, __builtin_vsx_xvcvdpuxws)
VSX_IMPL_CONVERT(vec_uint4, vec_float4, vec_ctu)

// converts single and double precision to signed doubleword
#ifdef vec_ctsl
# undef vec_ctsl
#endif
VSX_IMPL_CONVERT(vec_dword2, vec_double2, vec_ctsl)
// __builtin_convertvector is unable to do this conversion (xvcvspsxds is missing from it)
VSX_FINLINE(vec_dword2) vec_ctslo(const vec_float4& a)
{ return vec_ctsl(vec_cvfo(a)); }

// converts single and double precision to unsigned doubleword
#ifdef vec_ctul
# undef vec_ctul
#endif
VSX_IMPL_CONVERT(vec_udword2, vec_double2, vec_ctul)
// __builtin_convertvector is unable to do this conversion (xvcvspuxds is missing from it)
VSX_FINLINE(vec_udword2) vec_ctulo(const vec_float4& a)
{ return vec_ctul(vec_cvfo(a)); }

#endif // CLANG VSX compatibility

/*
* Common GCC, CLANG compatibility
**/
#if defined(__GNUG__) && !defined(__IBMCPP__)

#ifdef vec_cvf
# undef vec_cvf
#endif

#define VSX_IMPL_CONV_EVEN_4_2(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ return fn2(vec_sldw(a, a, 1)); }
VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_float4, vec_cvf, vec_cvfo)
VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_int4, vec_ctd, vec_ctdo)
VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_uint4, vec_ctd, vec_ctdo)
VSX_IMPL_CONV_EVEN_4_2(vec_dword2, vec_float4, vec_ctsl, vec_ctslo)
VSX_IMPL_CONV_EVEN_4_2(vec_udword2, vec_float4, vec_ctul, vec_ctulo)

#define VSX_IMPL_CONV_EVEN_2_4(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ \
    rt v4 = fn2(a); \
    return vec_sldw(v4, v4, 3); \
}
VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_double2, vec_cvf, vec_cvfo)
VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_dword2, vec_ctf, vec_ctfo)
VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_udword2, vec_ctf, vec_ctfo)
VSX_IMPL_CONV_EVEN_2_4(vec_int4, vec_double2, vec_cts, vec_ctso)
VSX_IMPL_CONV_EVEN_2_4(vec_uint4, vec_double2, vec_ctu, vec_ctuo)
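// Illustrative note: the underlying conversion instructions only read/write every other lane;
// the 'o'-suffixed helpers (vec_cvfo, vec_ctdo, ...) expose that raw odd-lane behavior, and the
// plain wrappers above convert the even-indexed lanes instead by rotating the input one word
// beforehand (the 4->2 cases) or the result three words afterwards (the 2->4 cases) via vec_sldw.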
// Only for Eigen!
/*
* changing the behavior of the conversion intrinsics for GCC affects Eigen,
* so we redefine the old behavior again, only for GCC and CLANG
*/
#if !defined(__clang__) || __clang_major__ > 4
// ignoring second arg since Eigen only truncates toward zero
# define VSX_IMPL_CONV_2VARIANT(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a, int only_truncate) \
{ \
    assert(only_truncate == 0); \
    (void)only_truncate; \
    return fn2(a); \
}
VSX_IMPL_CONV_2VARIANT(vec_int4, vec_float4, vec_cts, vec_cts)
VSX_IMPL_CONV_2VARIANT(vec_float4, vec_int4, vec_ctf, vec_ctf)
// define vec_cts for converting double precision to signed doubleword,
// which isn't compatible with XLC, but that's okay since Eigen only uses it with GCC
VSX_IMPL_CONV_2VARIANT(vec_dword2, vec_double2, vec_cts, vec_ctsl)
#endif // Eigen

#endif // Common GCC, CLANG compatibility

/*
* XLC VSX compatibility
**/
#if defined(__IBMCPP__)

// vector population count
#define vec_popcntu vec_popcnt

// overload and redirect, setting the second arg to zero,
// since we only support conversions without the second arg
#define VSX_IMPL_OVERLOAD_Z2(rt, rg, fnm) \
VSX_FINLINE(rt) fnm(const rg& a) { return fnm(a, 0); }
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_int4, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_uint4, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_dword2, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_udword2, vec_ctd)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_int4, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_uint4, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_dword2, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_udword2, vec_ctf)
VSX_IMPL_OVERLOAD_Z2(vec_int4, vec_double2, vec_cts)
VSX_IMPL_OVERLOAD_Z2(vec_int4, vec_float4, vec_cts)
VSX_IMPL_OVERLOAD_Z2(vec_uint4, vec_double2, vec_ctu)
VSX_IMPL_OVERLOAD_Z2(vec_uint4, vec_float4, vec_ctu)
VSX_IMPL_OVERLOAD_Z2(vec_dword2, vec_double2, vec_ctsl)
VSX_IMPL_OVERLOAD_Z2(vec_dword2, vec_float4, vec_ctsl)
VSX_IMPL_OVERLOAD_Z2(vec_udword2, vec_double2, vec_ctul)
VSX_IMPL_OVERLOAD_Z2(vec_udword2, vec_float4, vec_ctul)

// FIXME: implement conversions of odd-numbered elements in a dirty way,
// since XLC doesn't support VSX register operands in inline asm
#define VSX_IMPL_CONV_ODD_4_2(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) { return fn2(vec_sldw(a, a, 3)); }
VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_float4, vec_cvfo, vec_cvf)
VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_int4, vec_ctdo, vec_ctd)
VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_uint4, vec_ctdo, vec_ctd)
VSX_IMPL_CONV_ODD_4_2(vec_dword2, vec_float4, vec_ctslo, vec_ctsl)
VSX_IMPL_CONV_ODD_4_2(vec_udword2, vec_float4, vec_ctulo, vec_ctul)

#define VSX_IMPL_CONV_ODD_2_4(rt, rg, fnm, fn2) \
VSX_FINLINE(rt) fnm(const rg& a) \
{ \
    rt v4 = fn2(a); \
    return vec_sldw(v4, v4, 1); \
}
VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_double2, vec_cvfo, vec_cvf)
VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_dword2, vec_ctfo, vec_ctf)
VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_udword2, vec_ctfo, vec_ctf)
VSX_IMPL_CONV_ODD_2_4(vec_int4, vec_double2, vec_ctso, vec_cts)
VSX_IMPL_CONV_ODD_2_4(vec_uint4, vec_double2, vec_ctuo, vec_ctu)

#endif // XLC VSX compatibility

// ignore the GCC warning caused by -Wunused-but-set-variable in rare cases
#if defined(__GNUG__) && !defined(__clang__)
# define VSX_UNUSED(Tvec) Tvec __attribute__((__unused__))
#else // CLANG, XLC
# define VSX_UNUSED(Tvec) Tvec
#endif

// GCC can figure out the cast from long int on its own; for XLC and CLANG it is ambiguous
#if defined(__clang__) || defined(__IBMCPP__)
VSX_FINLINE(vec_udword2) vec_splats(uint64 v)
{ return vec_splats((unsigned long long) v); }
VSX_FINLINE(vec_dword2) vec_splats(int64 v)
{ return vec_splats((long long) v); }
VSX_FINLINE(vec_udword2) vec_promote(uint64 a, int b)
{ return vec_promote((unsigned long long) a, b); }
VSX_FINLINE(vec_dword2) vec_promote(int64 a, int b)
{ return vec_promote((long long) a, b); }
#endif

/*
* implement vsx_ld(offset, pointer), vsx_st(vector, offset, pointer)
* load and store with the offset scaled by the pointed-to element type
*
* implement vsx_ldf(offset, pointer), vsx_stf(vector, offset, pointer)
* load and store with the offset given in fixed byte units
*
* Note: in clang, vec_xl and vec_xst fail on unaligned addresses,
* so we use vec_vsx_ld, vec_vsx_st instead
*/
#if defined(__clang__) && !defined(__IBMCPP__)
# define vsx_ldf vec_vsx_ld
# define vsx_stf vec_vsx_st
#else // GCC, XLC
# define vsx_ldf vec_xl
# define vsx_stf vec_xst
#endif

#define VSX_OFFSET(o, p) ((o) * sizeof(*(p)))
#define vsx_ld(o, p) vsx_ldf(VSX_OFFSET(o, p), p)
#define vsx_st(v, o, p) vsx_stf(v, VSX_OFFSET(o, p), p)
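// Illustrative usage of the two offset conventions (hypothetical buffer 'buf' of at least 8 floats):
//   vec_float4 v = vsx_ld(4, buf);   // element offset: loads buf[4..7]
//   vec_float4 w = vsx_ldf(16, buf); // byte offset: loads the same 16 bytes
//   vsx_st(v, 0, buf);               // stores v into buf[0..3]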
/*
* implement vsx_ld2(offset, pointer), vsx_st2(vector, offset, pointer) to load and store doublewords
* In GCC, vec_xl and vec_xst map to vec_vsx_ld, vec_vsx_st, which don't support long long,
* and in CLANG we use vec_vsx_ld, vec_vsx_st because vec_xl, vec_xst fail on unaligned addresses
*
* In XLC, vec_xl and vec_xst fail to cast int64 (long int) to long long
*/
#if (defined(__GNUG__) || defined(__clang__)) && !defined(__IBMCPP__)
VSX_FINLINE(vec_udword2) vsx_ld2(long o, const uint64* p)
{ return vec_udword2_c(vsx_ldf(VSX_OFFSET(o, p), (unsigned int*)p)); }
VSX_FINLINE(vec_dword2) vsx_ld2(long o, const int64* p)
{ return vec_dword2_c(vsx_ldf(VSX_OFFSET(o, p), (int*)p)); }
VSX_FINLINE(void) vsx_st2(const vec_udword2& vec, long o, uint64* p)
{ vsx_stf(vec_uint4_c(vec), VSX_OFFSET(o, p), (unsigned int*)p); }
VSX_FINLINE(void) vsx_st2(const vec_dword2& vec, long o, int64* p)
{ vsx_stf(vec_int4_c(vec), VSX_OFFSET(o, p), (int*)p); }
#else // XLC
VSX_FINLINE(vec_udword2) vsx_ld2(long o, const uint64* p)
{ return vsx_ldf(VSX_OFFSET(o, p), (unsigned long long*)p); }
VSX_FINLINE(vec_dword2) vsx_ld2(long o, const int64* p)
{ return vsx_ldf(VSX_OFFSET(o, p), (long long*)p); }
VSX_FINLINE(void) vsx_st2(const vec_udword2& vec, long o, uint64* p)
{ vsx_stf(vec, VSX_OFFSET(o, p), (unsigned long long*)p); }
VSX_FINLINE(void) vsx_st2(const vec_dword2& vec, long o, int64* p)
{ vsx_stf(vec, VSX_OFFSET(o, p), (long long*)p); }
#endif

// Store the lower 8 bytes
#define vec_st_l8(v, p) *((uint64*)(p)) = vec_extract(vec_udword2_c(v), 0)
// Store the higher 8 bytes
#define vec_st_h8(v, p) *((uint64*)(p)) = vec_extract(vec_udword2_c(v), 1)

// Load 64 bits of integer data into the lower part
#define VSX_IMPL_LOAD_L8(Tvec, Tp) \
VSX_FINLINE(Tvec) vec_ld_l8(const Tp *p) \
{ return ((Tvec)vec_promote(*((uint64*)p), 0)); }
VSX_IMPL_LOAD_L8(vec_uchar16, uchar)
VSX_IMPL_LOAD_L8(vec_char16, schar)
VSX_IMPL_LOAD_L8(vec_ushort8, ushort)
VSX_IMPL_LOAD_L8(vec_short8, short)
VSX_IMPL_LOAD_L8(vec_uint4, uint)
VSX_IMPL_LOAD_L8(vec_int4, int)
VSX_IMPL_LOAD_L8(vec_float4, float)
VSX_IMPL_LOAD_L8(vec_udword2, uint64)
VSX_IMPL_LOAD_L8(vec_dword2, int64)
VSX_IMPL_LOAD_L8(vec_double2, double)

// logical not
#define vec_not(a) vec_nor(a, a)

// emulation of some POWER9 instructions
// not equal
#ifndef vec_cmpne
# define vec_cmpne(a, b) vec_not(vec_cmpeq(a, b))
#endif
// absolute difference
#ifndef vec_absd
# define vec_absd(a, b) vec_sub(vec_max(a, b), vec_min(a, b))
#endif

/*
* Implement vec_unpacklu and vec_unpackhu
* since vec_unpackl, vec_unpackh only support signed integers
**/
#define VSX_IMPL_UNPACKU(rt, rg, zero) \
VSX_FINLINE(rt) vec_unpacklu(const rg& a) \
{ return (rt)(vec_mergel(a, zero)); } \
VSX_FINLINE(rt) vec_unpackhu(const rg& a) \
{ return (rt)(vec_mergeh(a, zero)); }
VSX_IMPL_UNPACKU(vec_ushort8, vec_uchar16, vec_uchar16_z)
VSX_IMPL_UNPACKU(vec_uint4, vec_ushort8, vec_ushort8_z)
VSX_IMPL_UNPACKU(vec_udword2, vec_uint4, vec_uint4_z)
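// Illustrative note (a sketch assuming the little-endian lane order OpenCV's VSX code targets):
// for a vec_uchar16 a, vec_unpackhu(a) zero-extends a[0..7] into a vec_ushort8 and
// vec_unpacklu(a) zero-extends a[8..15]; interleaving with the zero vector supplies the
// high-order half of each widened lane.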
/*
* Implement vec_mergesqe and vec_mergesqo
* which merge the even-indexed and odd-indexed elements of two vectors in sequence
*/
#define VSX_IMPL_PERM(rt, fnm, ...) \
VSX_FINLINE(rt) fnm(const rt& a, const rt& b) \
{ static const vec_uchar16 perm = {__VA_ARGS__}; return vec_perm(a, b, perm); }
// 16
#define perm16_mergesqe 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
#define perm16_mergesqo 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
VSX_IMPL_PERM(vec_uchar16, vec_mergesqe, perm16_mergesqe)
VSX_IMPL_PERM(vec_uchar16, vec_mergesqo, perm16_mergesqo)
VSX_IMPL_PERM(vec_char16, vec_mergesqe, perm16_mergesqe)
VSX_IMPL_PERM(vec_char16, vec_mergesqo, perm16_mergesqo)
// 8
#define perm8_mergesqe 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29
#define perm8_mergesqo 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31
VSX_IMPL_PERM(vec_ushort8, vec_mergesqe, perm8_mergesqe)
VSX_IMPL_PERM(vec_ushort8, vec_mergesqo, perm8_mergesqo)
VSX_IMPL_PERM(vec_short8, vec_mergesqe, perm8_mergesqe)
VSX_IMPL_PERM(vec_short8, vec_mergesqo, perm8_mergesqo)
// 4
#define perm4_mergesqe 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
#define perm4_mergesqo 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
VSX_IMPL_PERM(vec_uint4, vec_mergesqe, perm4_mergesqe)
VSX_IMPL_PERM(vec_uint4, vec_mergesqo, perm4_mergesqo)
VSX_IMPL_PERM(vec_int4, vec_mergesqe, perm4_mergesqe)
VSX_IMPL_PERM(vec_int4, vec_mergesqo, perm4_mergesqo)
VSX_IMPL_PERM(vec_float4, vec_mergesqe, perm4_mergesqe)
VSX_IMPL_PERM(vec_float4, vec_mergesqo, perm4_mergesqo)
// 2
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqe, vec_mergeh)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqo, vec_mergel)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqe, vec_mergeh)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqo, vec_mergel)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqe, vec_mergeh)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqo, vec_mergel)
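// Illustrative note (byte-element case): with the permutes above,
//   vec_mergesqe(a, b) = {a0, a2, ..., a14, b0, b2, ..., b14}
//   vec_mergesqo(a, b) = {a1, a3, ..., a15, b1, b3, ..., b15}
// the 8- and 4-lane variants follow the same even/odd pattern on wider elements.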
/*
* Implement vec_mergesqh and vec_mergesql
* which merge the most and least significant halves of two vectors
*/
#define VSX_IMPL_MERGESQHL(Tvec) \
VSX_FINLINE(Tvec) vec_mergesqh(const Tvec& a, const Tvec& b) \
{ return (Tvec)vec_mergeh(vec_udword2_c(a), vec_udword2_c(b)); } \
VSX_FINLINE(Tvec) vec_mergesql(const Tvec& a, const Tvec& b) \
{ return (Tvec)vec_mergel(vec_udword2_c(a), vec_udword2_c(b)); }
VSX_IMPL_MERGESQHL(vec_uchar16)
VSX_IMPL_MERGESQHL(vec_char16)
VSX_IMPL_MERGESQHL(vec_ushort8)
VSX_IMPL_MERGESQHL(vec_short8)
VSX_IMPL_MERGESQHL(vec_uint4)
VSX_IMPL_MERGESQHL(vec_int4)
VSX_IMPL_MERGESQHL(vec_float4)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqh, vec_mergeh)
VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesql, vec_mergel)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqh, vec_mergeh)
VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesql, vec_mergel)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqh, vec_mergeh)
VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesql, vec_mergel)

// 2 and 4 channels interleave for all types except 2 lanes
#define VSX_IMPL_ST_INTERLEAVE(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, Tp* ptr) \
{ \
    vsx_stf(vec_mergeh(a, b), 0, ptr); \
    vsx_stf(vec_mergel(a, b), 16, ptr); \
} \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, const Tvec& d, Tp* ptr) \
{ \
    Tvec ac = vec_mergeh(a, c); \
    Tvec bd = vec_mergeh(b, d); \
    vsx_stf(vec_mergeh(ac, bd), 0, ptr); \
    vsx_stf(vec_mergel(ac, bd), 16, ptr); \
    ac = vec_mergel(a, c); \
    bd = vec_mergel(b, d); \
    vsx_stf(vec_mergeh(ac, bd), 32, ptr); \
    vsx_stf(vec_mergel(ac, bd), 48, ptr); \
}
VSX_IMPL_ST_INTERLEAVE(uchar, vec_uchar16)
VSX_IMPL_ST_INTERLEAVE(schar, vec_char16)
VSX_IMPL_ST_INTERLEAVE(ushort, vec_ushort8)
VSX_IMPL_ST_INTERLEAVE(short, vec_short8)
VSX_IMPL_ST_INTERLEAVE(uint, vec_uint4)
VSX_IMPL_ST_INTERLEAVE(int, vec_int4)
VSX_IMPL_ST_INTERLEAVE(float, vec_float4)
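// Illustrative note on the 2-channel form (shown for vec_uint4, the other types are analogous):
// vec_st_interleave(a, b, ptr) writes {a0, b0, a1, b1, a2, b2, a3, b3} to ptr, with vec_mergeh
// providing the first 16 bytes and vec_mergel the second 16 bytes.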
// 2 and 4 channels deinterleave for 16 lanes
#define VSX_IMPL_ST_DINTERLEAVE_8(Tp, Tvec) \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(16, ptr); \
    a = vec_mergesqe(v0, v1); \
    b = vec_mergesqo(v0, v1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(16, ptr); \
    Tvec v2 = vsx_ld(32, ptr); \
    Tvec v3 = vsx_ld(48, ptr); \
    Tvec m0 = vec_mergesqe(v0, v1); \
    Tvec m1 = vec_mergesqe(v2, v3); \
    a = vec_mergesqe(m0, m1); \
    c = vec_mergesqo(m0, m1); \
    m0 = vec_mergesqo(v0, v1); \
    m1 = vec_mergesqo(v2, v3); \
    b = vec_mergesqe(m0, m1); \
    d = vec_mergesqo(m0, m1); \
}
VSX_IMPL_ST_DINTERLEAVE_8(uchar, vec_uchar16)
VSX_IMPL_ST_DINTERLEAVE_8(schar, vec_char16)

// 2 and 4 channels deinterleave for 8 lanes
#define VSX_IMPL_ST_DINTERLEAVE_16(Tp, Tvec) \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(8, ptr); \
    a = vec_mergesqe(v0, v1); \
    b = vec_mergesqo(v0, v1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(8, ptr); \
    Tvec m0 = vec_mergeh(v0, v1); \
    Tvec m1 = vec_mergel(v0, v1); \
    Tvec ab0 = vec_mergeh(m0, m1); \
    Tvec cd0 = vec_mergel(m0, m1); \
    v0 = vsx_ld(16, ptr); \
    v1 = vsx_ld(24, ptr); \
    m0 = vec_mergeh(v0, v1); \
    m1 = vec_mergel(v0, v1); \
    Tvec ab1 = vec_mergeh(m0, m1); \
    Tvec cd1 = vec_mergel(m0, m1); \
    a = vec_mergesqh(ab0, ab1); \
    b = vec_mergesql(ab0, ab1); \
    c = vec_mergesqh(cd0, cd1); \
    d = vec_mergesql(cd0, cd1); \
}
VSX_IMPL_ST_DINTERLEAVE_16(ushort, vec_ushort8)
VSX_IMPL_ST_DINTERLEAVE_16(short, vec_short8)

// 2 and 4 channels deinterleave for 4 lanes
#define VSX_IMPL_ST_DINTERLEAVE_32(Tp, Tvec) \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    a = vsx_ld(0, ptr); \
    b = vsx_ld(4, ptr); \
    Tvec m0 = vec_mergeh(a, b); \
    Tvec m1 = vec_mergel(a, b); \
    a = vec_mergeh(m0, m1); \
    b = vec_mergel(m0, m1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = vsx_ld(0, ptr); \
    Tvec v1 = vsx_ld(4, ptr); \
    Tvec v2 = vsx_ld(8, ptr); \
    Tvec v3 = vsx_ld(12, ptr); \
    Tvec m0 = vec_mergeh(v0, v2); \
    Tvec m1 = vec_mergeh(v1, v3); \
    a = vec_mergeh(m0, m1); \
    b = vec_mergel(m0, m1); \
    m0 = vec_mergel(v0, v2); \
    m1 = vec_mergel(v1, v3); \
    c = vec_mergeh(m0, m1); \
    d = vec_mergel(m0, m1); \
}
VSX_IMPL_ST_DINTERLEAVE_32(uint, vec_uint4)
VSX_IMPL_ST_DINTERLEAVE_32(int, vec_int4)
VSX_IMPL_ST_DINTERLEAVE_32(float, vec_float4)

// 2 and 4 channels interleave and deinterleave for 2 lanes
#define VSX_IMPL_ST_D_INTERLEAVE_64(Tp, Tvec, ld_func, st_func) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, Tp* ptr) \
{ \
    st_func(vec_mergeh(a, b), 0, ptr); \
    st_func(vec_mergel(a, b), 2, ptr); \
} \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, const Tvec& d, Tp* ptr) \
{ \
    st_func(vec_mergeh(a, b), 0, ptr); \
    st_func(vec_mergeh(c, d), 2, ptr); \
    st_func(vec_mergel(a, b), 4, ptr); \
    st_func(vec_mergel(c, d), 6, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \
{ \
    Tvec m0 = ld_func(0, ptr); \
    Tvec m1 = ld_func(2, ptr); \
    a = vec_mergeh(m0, m1); \
    b = vec_mergel(m0, m1); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \
                                      Tvec& c, Tvec& d) \
{ \
    Tvec v0 = ld_func(0, ptr); \
    Tvec v1 = ld_func(2, ptr); \
    Tvec v2 = ld_func(4, ptr); \
    Tvec v3 = ld_func(6, ptr); \
    a = vec_mergeh(v0, v2); \
    b = vec_mergel(v0, v2); \
    c = vec_mergeh(v1, v3); \
    d = vec_mergel(v1, v3); \
}
VSX_IMPL_ST_D_INTERLEAVE_64(int64, vec_dword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_D_INTERLEAVE_64(uint64, vec_udword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_D_INTERLEAVE_64(double, vec_double2, vsx_ld, vsx_st)

/* 3 channels */
#define VSX_IMPL_ST_INTERLEAVE_3CH_16(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    static const vec_uchar16 a12 = {0, 16, 0, 1, 17, 0, 2, 18, 0, 3, 19, 0, 4, 20, 0, 5}; \
    static const vec_uchar16 a123 = {0, 1, 16, 3, 4, 17, 6, 7, 18, 9, 10, 19, 12, 13, 20, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, a12), c, a123), 0, ptr); \
    static const vec_uchar16 b12 = {21, 0, 6, 22, 0, 7, 23, 0, 8, 24, 0, 9, 25, 0, 10, 26}; \
    static const vec_uchar16 b123 = {0, 21, 2, 3, 22, 5, 6, 23, 8, 9, 24, 11, 12, 25, 14, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, b12), c, b123), 16, ptr); \
    static const vec_uchar16 c12 = {0, 11, 27, 0, 12, 28, 0, 13, 29, 0, 14, 30, 0, 15, 31, 0}; \
    static const vec_uchar16 c123 = {26, 1, 2, 27, 4, 5, 28, 7, 8, 29, 10, 11, 30, 13, 14, 31}; \
    vsx_st(vec_perm(vec_perm(a, b, c12), c, c123), 32, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \
{ \
    Tvec v1 = vsx_ld(0, ptr); \
    Tvec v2 = vsx_ld(16, ptr); \
    Tvec v3 = vsx_ld(32, ptr); \
    static const vec_uchar16 a12_perm = {0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 a123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 20, 23, 26, 29}; \
    a = vec_perm(vec_perm(v1, v2, a12_perm), v3, a123_perm); \
    static const vec_uchar16 b12_perm = {1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 b123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 18, 21, 24, 27, 30}; \
    b = vec_perm(vec_perm(v1, v2, b12_perm), v3, b123_perm); \
    static const vec_uchar16 c12_perm = {2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 0, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 c123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 19, 22, 25, 28, 31}; \
    c = vec_perm(vec_perm(v1, v2, c12_perm), v3, c123_perm); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_16(uchar, vec_uchar16)
VSX_IMPL_ST_INTERLEAVE_3CH_16(schar, vec_char16)

#define VSX_IMPL_ST_INTERLEAVE_3CH_8(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    static const vec_uchar16 a12 = {0, 1, 16, 17, 0, 0, 2, 3, 18, 19, 0, 0, 4, 5, 20, 21}; \
    static const vec_uchar16 a123 = {0, 1, 2, 3, 16, 17, 6, 7, 8, 9, 18, 19, 12, 13, 14, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, a12), c, a123), 0, ptr); \
    static const vec_uchar16 b12 = {0, 0, 6, 7, 22, 23, 0, 0, 8, 9, 24, 25, 0, 0, 10, 11}; \
    static const vec_uchar16 b123 = {20, 21, 2, 3, 4, 5, 22, 23, 8, 9, 10, 11, 24, 25, 14, 15}; \
    vsx_st(vec_perm(vec_perm(a, b, b12), c, b123), 8, ptr); \
    static const vec_uchar16 c12 = {26, 27, 0, 0, 12, 13, 28, 29, 0, 0, 14, 15, 30, 31, 0, 0}; \
    static const vec_uchar16 c123 = {0, 1, 26, 27, 4, 5, 6, 7, 28, 29, 10, 11, 12, 13, 30, 31}; \
    vsx_st(vec_perm(vec_perm(a, b, c12), c, c123), 16, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \
{ \
    Tvec v1 = vsx_ld(0, ptr); \
    Tvec v2 = vsx_ld(8, ptr); \
    Tvec v3 = vsx_ld(16, ptr); \
    static const vec_uchar16 a12_perm = {0, 1, 6, 7, 12, 13, 18, 19, 24, 25, 30, 31, 0, 0, 0, 0}; \
    static const vec_uchar16 a123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20, 21, 26, 27}; \
    a = vec_perm(vec_perm(v1, v2, a12_perm), v3, a123_perm); \
    static const vec_uchar16 b12_perm = {2, 3, 8, 9, 14, 15, 20, 21, 26, 27, 0, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 b123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 17, 22, 23, 28, 29}; \
    b = vec_perm(vec_perm(v1, v2, b12_perm), v3, b123_perm); \
    static const vec_uchar16 c12_perm = {4, 5, 10, 11, 16, 17, 22, 23, 28, 29, 0, 0, 0, 0, 0, 0}; \
    static const vec_uchar16 c123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 18, 19, 24, 25, 30, 31}; \
    c = vec_perm(vec_perm(v1, v2, c12_perm), v3, c123_perm); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_8(ushort, vec_ushort8)
VSX_IMPL_ST_INTERLEAVE_3CH_8(short, vec_short8)

#define VSX_IMPL_ST_INTERLEAVE_3CH_4(Tp, Tvec) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    Tvec hbc = vec_mergeh(b, c); \
    static const vec_uchar16 ahbc = {0, 1, 2, 3, 16, 17, 18, 19, 20, 21, 22, 23, 4, 5, 6, 7}; \
    vsx_st(vec_perm(a, hbc, ahbc), 0, ptr); \
    Tvec lab = vec_mergel(a, b); \
    vsx_st(vec_sld(lab, hbc, 8), 4, ptr); \
    static const vec_uchar16 clab = {8, 9, 10, 11, 24, 25, 26, 27, 28, 29, 30, 31, 12, 13, 14, 15}; \
    vsx_st(vec_perm(c, lab, clab), 8, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \
{ \
    Tvec v1 = vsx_ld(0, ptr); \
    Tvec v2 = vsx_ld(4, ptr); \
    Tvec v3 = vsx_ld(8, ptr); \
    static const vec_uchar16 flp = {0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19, 28, 29, 30, 31}; \
    a = vec_perm(v1, vec_sld(v3, v2, 8), flp); \
    static const vec_uchar16 flp2 = {28, 29, 30, 31, 0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19}; \
    b = vec_perm(v2, vec_sld(v1, v3, 8), flp2); \
    c = vec_perm(vec_sld(v2, v1, 8), v3, flp); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_4(uint, vec_uint4)
VSX_IMPL_ST_INTERLEAVE_3CH_4(int, vec_int4)
VSX_IMPL_ST_INTERLEAVE_3CH_4(float, vec_float4)

#define VSX_IMPL_ST_INTERLEAVE_3CH_2(Tp, Tvec, ld_func, st_func) \
VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \
                                    const Tvec& c, Tp* ptr) \
{ \
    st_func(vec_mergeh(a, b), 0, ptr); \
    st_func(vec_permi(c, a, 1), 2, ptr); \
    st_func(vec_mergel(b, c), 4, ptr); \
} \
VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, \
                                      Tvec& b, Tvec& c) \
{ \
    Tvec v1 = ld_func(0, ptr); \
    Tvec v2 = ld_func(2, ptr); \
    Tvec v3 = ld_func(4, ptr); \
    a = vec_permi(v1, v2, 1); \
    b = vec_permi(v1, v3, 2); \
    c = vec_permi(v2, v3, 1); \
}
VSX_IMPL_ST_INTERLEAVE_3CH_2(int64, vec_dword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_INTERLEAVE_3CH_2(uint64, vec_udword2, vsx_ld2, vsx_st2)
VSX_IMPL_ST_INTERLEAVE_3CH_2(double, vec_double2, vsx_ld, vsx_st)
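// Illustrative usage of the 3-channel helpers (hypothetical packed BGR 8-bit buffer 'bgr'
// holding at least 16 pixels, i.e. 48 bytes):
//   vec_uchar16 b, g, r;
//   vec_ld_deinterleave(bgr, b, g, r); // b = {bgr[0], bgr[3], ...}, g = {bgr[1], ...}, r = {bgr[2], ...}
//   vec_st_interleave(b, g, r, bgr);   // writes the 16 pixels back in packed B,G,R order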
#endif // CV_VSX

//! @}

#endif // OPENCV_HAL_VSX_UTILS_HPP