/*******************************************************************************
The content of this file includes portions of the AUDIOKINETIC Wwise Technology
released in source code form as part of the SDK installer package.

Commercial License Usage

Licensees holding valid commercial licenses to the AUDIOKINETIC Wwise Technology
may use this file in accordance with the end user license agreement provided
with the software or, alternatively, in accordance with the terms contained in a
written agreement between you and Audiokinetic Inc.

Apache License Usage

Alternatively, this file may be used under the Apache License, Version 2.0 (the
"Apache License"); you may not use this file except in compliance with the
Apache License. You may obtain a copy of the Apache License at
http://www.apache.org/licenses/LICENSE-2.0.

Unless required by applicable law or agreed to in writing, software distributed
under the Apache License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied. See the Apache License for
the specific language governing permissions and limitations under the License.

Copyright (c) 2023 Audiokinetic Inc.
*******************************************************************************/

// AkSimd.h

/// \file
/// AKSIMD - Generic (no SIMD support) implementation

#ifndef _AKSIMD_GENERIC_H_
#define _AKSIMD_GENERIC_H_

#include <math.h>
#include <string.h>
#include <AK/SoundEngine/Common/AkTypes.h>
#include <AK/Tools/Common/AkPlatformFuncs.h>
////////////////////////////////////////////////////////////////////////
/// @name AKSIMD types
//@{

typedef AkInt32 AKSIMD_I32;                            ///< 32-bit signed integer
typedef struct { AkInt32 m_data[4]; } AKSIMD_V4I32;    ///< Vector of 4 32-bit signed integers
typedef struct { AkUInt32 m_data[4]; } AKSIMD_V4UI32;  ///< Vector of 4 32-bit unsigned integers
typedef AkReal32 AKSIMD_F32;                           ///< 32-bit float
typedef struct { AkReal32 m_data[2]; } AKSIMD_V2F32;   ///< Vector of 2 32-bit floats
typedef struct { AkReal32 m_data[4]; } AKSIMD_V4F32;   ///< Vector of 4 32-bit floats

typedef AKSIMD_V4UI32 AKSIMD_V4COND;                   ///< Vector of 4 comparison results

#pragma pack(push,1)
typedef struct { AkInt32 m_data[4]; } AKSIMD_V4I32_UNALIGNED;   ///< Unaligned vector of 4 32-bit signed integers
typedef struct { AkUInt32 m_data[4]; } AKSIMD_V4UI32_UNALIGNED; ///< Unaligned vector of 4 32-bit unsigned integers
typedef struct { AkReal32 m_data[2]; } AKSIMD_V2F32_UNALIGNED;  ///< Unaligned vector of 2 32-bit floats
typedef struct { AkReal32 m_data[4]; } AKSIMD_V4F32_UNALIGNED;  ///< Unaligned vector of 4 32-bit floats
#pragma pack(pop)

//@}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
/// @name Platform specific defines for prefetching
//@{

#define AKSIMD_ARCHCACHELINESIZE (32)    ///< Assumed cache line width for architectures on this platform
#define AKSIMD_ARCHMAXPREFETCHSIZE (512) ///< Use this to control how much prefetching maximum is desirable (assuming 8-way cache)
/// Cross-platform memory prefetch of effective address assuming non-temporal data
#define AKSIMD_PREFETCHMEMORY( __offset__, __add__ )

//@}
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
/// @name Platform specific memory size alignment for allocation purposes
//@{
#define AKSIMD_ALIGNSIZE( __Size__ ) (((__Size__) + 15) & ~15)
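// Illustrative example (not part of the original header): AKSIMD_ALIGNSIZE rounds
// a byte count up to the next multiple of 16, e.g.
//   AKSIMD_ALIGNSIZE( 20 ) == 32
//   AKSIMD_ALIGNSIZE( 16 ) == 16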
//@}
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
/// @name AKSIMD loading / setting
//@{

#define AKSIMD_LOADU_V4I32( in_pData ) (*(in_pData))
#define AKSIMD_LOADU_V4F32( in_pValue ) (*(AKSIMD_V4F32*)(in_pValue))
#define AKSIMD_LOAD_V4F32( in_pValue ) (*(AKSIMD_V4F32*)(in_pValue))

AkForceInline AKSIMD_V4F32 AKSIMD_LOAD1_V4F32( AKSIMD_F32 in_value )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_value;
	vector.m_data[1] = in_value;
	vector.m_data[2] = in_value;
	vector.m_data[3] = in_value;
	return vector;
}

// _mm_set_ps1
AkForceInline AKSIMD_V4F32 AKSIMD_SET_V4F32( AKSIMD_F32 in_value )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_value;
	vector.m_data[1] = in_value;
	vector.m_data[2] = in_value;
	vector.m_data[3] = in_value;
	return vector;
}

AkForceInline AKSIMD_V2F32 AKSIMD_SET_V2F32( AKSIMD_F32 in_value )
{
	AKSIMD_V2F32 vector;
	vector.m_data[0] = in_value;
	vector.m_data[1] = in_value;
	return vector;
}

// _mm_setzero_ps()
AkForceInline AKSIMD_V4F32 AKSIMD_SETZERO_V4F32()
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = 0.f;
	vector.m_data[1] = 0.f;
	vector.m_data[2] = 0.f;
	vector.m_data[3] = 0.f;
	return vector;
}

AkForceInline AKSIMD_V2F32 AKSIMD_SETZERO_V2F32()
{
	AKSIMD_V2F32 vector;
	vector.m_data[0] = 0.f;
	vector.m_data[1] = 0.f;
	return vector;
}

// _mm_setzero_si128()
AkForceInline AKSIMD_V4I32 AKSIMD_SETZERO_V4I32()
{
	AKSIMD_V4I32 vector;
	vector.m_data[0] = 0;
	vector.m_data[1] = 0;
	vector.m_data[2] = 0;
	vector.m_data[3] = 0;
	return vector;
}

/// Loads a single-precision, floating-point value into the low word
/// and clears the upper three words.
/// r0 := *p; r1 := 0.0 ; r2 := 0.0 ; r3 := 0.0 (see _mm_load_ss)
AkForceInline AKSIMD_V4F32 AKSIMD_LOAD_SS_V4F32( const AKSIMD_F32* in_pData )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = *in_pData;
	vector.m_data[1] = 0.f;
	vector.m_data[2] = 0.f;
	vector.m_data[3] = 0.f;
	return vector;
}
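// Illustrative usage (not part of the original header); fGain is a hypothetical name:
//   AkReal32 fGain = 0.5f;
//   AKSIMD_V4F32 vfGain = AKSIMD_SET_V4F32( fGain );       // { 0.5f, 0.5f, 0.5f, 0.5f }
//   AKSIMD_V4F32 vfZero = AKSIMD_SETZERO_V4F32();          // { 0.f, 0.f, 0.f, 0.f }
//   AKSIMD_V4F32 vfLow  = AKSIMD_LOAD_SS_V4F32( &fGain );  // { 0.5f, 0.f, 0.f, 0.f }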
//@}
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
/// @name AKSIMD storing
//@{

// _mm_storeu_ps -- The address does not need to be 16-byte aligned.
#define AKSIMD_STOREU_V4F32( in_pTo, in_vec ) (*(AKSIMD_V4F32*)(in_pTo)) = (in_vec)

// _mm_store_ps -- The address must be 16-byte aligned. This generic
// implementation has no alignment requirement, so the aligned store simply
// maps to the unaligned store.
#define AKSIMD_STORE_V4F32( __addr__, __vName__ ) AKSIMD_STOREU_V4F32(__addr__, __vName__)
// _mm_storeu_si128
#define AKSIMD_STOREU_V4I32( in_pTo, in_vec ) (*(AKSIMD_V4I32*)(in_pTo)) = (in_vec)

/// Stores the lower single-precision, floating-point value.
/// *p := a0 (see _mm_store_ss)
AkForceInline void AKSIMD_STORE1_V4F32( AKSIMD_F32* in_pTo, const AKSIMD_V4F32& in_vec )
{
	((AKSIMD_V4F32*)in_pTo)->m_data[0] = in_vec.m_data[0];
}

//@}
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
/// @name AKSIMD conversion
//@{

// _mm_cvtepi32_ps
AkForceInline AKSIMD_V4F32 AKSIMD_CONVERT_V4I32_TO_V4F32( const AKSIMD_V4I32& in_from )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = (AkReal32)in_from.m_data[0];
	vector.m_data[1] = (AkReal32)in_from.m_data[1];
	vector.m_data[2] = (AkReal32)in_from.m_data[2];
	vector.m_data[3] = (AkReal32)in_from.m_data[3];
	return vector;
}

// _mm_cvtps_epi32
AkForceInline AKSIMD_V4I32 AKSIMD_TRUNCATE_V4F32_TO_V4I32( const AKSIMD_V4F32& in_from )
{
	AKSIMD_V4I32 vector;
	vector.m_data[0] = (AkInt32)in_from.m_data[0];
	vector.m_data[1] = (AkInt32)in_from.m_data[1];
	vector.m_data[2] = (AkInt32)in_from.m_data[2];
	vector.m_data[3] = (AkInt32)in_from.m_data[3];
	return vector;
}
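// Illustrative note (not part of the original header): in this generic version the
// float-to-int cast truncates toward zero lane by lane, e.g. a vector holding
// { 1.7f, -1.7f, 0.2f, -0.2f } converts to { 1, -1, 0, 0 }.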
//@}
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
/// @name AKSIMD logical operations
//@{

// _mm_and_si128
AkForceInline AKSIMD_V4I32 AKSIMD_AND_V4I32( const AKSIMD_V4I32& in_vec1, const AKSIMD_V4I32& in_vec2 )
{
	AKSIMD_V4I32 vector;
	vector.m_data[0] = in_vec1.m_data[0] & in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1] & in_vec2.m_data[1];
	vector.m_data[2] = in_vec1.m_data[2] & in_vec2.m_data[2];
	vector.m_data[3] = in_vec1.m_data[3] & in_vec2.m_data[3];
	return vector;
}

/// Compares the 8 signed 16-bit integers in a and the 8 signed
/// 16-bit integers in b for greater than (see _mm_cmpgt_epi16)
AkForceInline AKSIMD_V4I32 AKSIMD_CMPGT_V8I16( const AKSIMD_V4I32& in_vec1, const AKSIMD_V4I32& in_vec2 )
{
	AKSIMD_V4I32 vector;
	AkInt16 *pVec1, *pVec2, *pVec3;
	pVec1 = (AkInt16*)&in_vec1;
	pVec2 = (AkInt16*)&in_vec2;
	pVec3 = (AkInt16*)&vector;
	pVec3[0] = (pVec1[0] > pVec2[0]) ? 0xffff : 0x0;
	pVec3[1] = (pVec1[1] > pVec2[1]) ? 0xffff : 0x0;
	pVec3[2] = (pVec1[2] > pVec2[2]) ? 0xffff : 0x0;
	pVec3[3] = (pVec1[3] > pVec2[3]) ? 0xffff : 0x0;
	pVec3[4] = (pVec1[4] > pVec2[4]) ? 0xffff : 0x0;
	pVec3[5] = (pVec1[5] > pVec2[5]) ? 0xffff : 0x0;
	pVec3[6] = (pVec1[6] > pVec2[6]) ? 0xffff : 0x0;
	pVec3[7] = (pVec1[7] > pVec2[7]) ? 0xffff : 0x0;
	return vector;
}

/// Compares for less than or equal (see _mm_cmple_ps)
AkForceInline AKSIMD_V4UI32 AKSIMD_CMPLE_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4UI32 vector;
	vector.m_data[0] = (in_vec1.m_data[0] <= in_vec2.m_data[0]) ? 0xffffffff : 0x0;
	vector.m_data[1] = (in_vec1.m_data[1] <= in_vec2.m_data[1]) ? 0xffffffff : 0x0;
	vector.m_data[2] = (in_vec1.m_data[2] <= in_vec2.m_data[2]) ? 0xffffffff : 0x0;
	vector.m_data[3] = (in_vec1.m_data[3] <= in_vec2.m_data[3]) ? 0xffffffff : 0x0;
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_GTEQ_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = (AkReal32)((in_vec1.m_data[0] >= in_vec2.m_data[0]) ? 0xffffffff : 0x0);
	vector.m_data[1] = (AkReal32)((in_vec1.m_data[1] >= in_vec2.m_data[1]) ? 0xffffffff : 0x0);
	vector.m_data[2] = (AkReal32)((in_vec1.m_data[2] >= in_vec2.m_data[2]) ? 0xffffffff : 0x0);
	vector.m_data[3] = (AkReal32)((in_vec1.m_data[3] >= in_vec2.m_data[3]) ? 0xffffffff : 0x0);
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_GT_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = (AkReal32)((in_vec1.m_data[0] > in_vec2.m_data[0]) ? 0xffffffff : 0x0);
	vector.m_data[1] = (AkReal32)((in_vec1.m_data[1] > in_vec2.m_data[1]) ? 0xffffffff : 0x0);
	vector.m_data[2] = (AkReal32)((in_vec1.m_data[2] > in_vec2.m_data[2]) ? 0xffffffff : 0x0);
	vector.m_data[3] = (AkReal32)((in_vec1.m_data[3] > in_vec2.m_data[3]) ? 0xffffffff : 0x0);
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_LTEQ_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = (AkReal32)((in_vec1.m_data[0] <= in_vec2.m_data[0]) ? 0xffffffff : 0x0);
	vector.m_data[1] = (AkReal32)((in_vec1.m_data[1] <= in_vec2.m_data[1]) ? 0xffffffff : 0x0);
	vector.m_data[2] = (AkReal32)((in_vec1.m_data[2] <= in_vec2.m_data[2]) ? 0xffffffff : 0x0);
	vector.m_data[3] = (AkReal32)((in_vec1.m_data[3] <= in_vec2.m_data[3]) ? 0xffffffff : 0x0);
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_LT_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = (AkReal32)((in_vec1.m_data[0] < in_vec2.m_data[0]) ? 0xffffffff : 0x0);
	vector.m_data[1] = (AkReal32)((in_vec1.m_data[1] < in_vec2.m_data[1]) ? 0xffffffff : 0x0);
	vector.m_data[2] = (AkReal32)((in_vec1.m_data[2] < in_vec2.m_data[2]) ? 0xffffffff : 0x0);
	vector.m_data[3] = (AkReal32)((in_vec1.m_data[3] < in_vec2.m_data[3]) ? 0xffffffff : 0x0);
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_EQ_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = (AkReal32)((in_vec1.m_data[0] == in_vec2.m_data[0]) ? 0xffffffff : 0x0);
	vector.m_data[1] = (AkReal32)((in_vec1.m_data[1] == in_vec2.m_data[1]) ? 0xffffffff : 0x0);
	vector.m_data[2] = (AkReal32)((in_vec1.m_data[2] == in_vec2.m_data[2]) ? 0xffffffff : 0x0);
	vector.m_data[3] = (AkReal32)((in_vec1.m_data[3] == in_vec2.m_data[3]) ? 0xffffffff : 0x0);
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_XOR_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = (AkReal32)(((AkUInt32)in_vec1.m_data[0]) ^ ((AkUInt32)in_vec2.m_data[0]));
	vector.m_data[1] = (AkReal32)(((AkUInt32)in_vec1.m_data[1]) ^ ((AkUInt32)in_vec2.m_data[1]));
	vector.m_data[2] = (AkReal32)(((AkUInt32)in_vec1.m_data[2]) ^ ((AkUInt32)in_vec2.m_data[2]));
	vector.m_data[3] = (AkReal32)(((AkUInt32)in_vec1.m_data[3]) ^ ((AkUInt32)in_vec2.m_data[3]));
	return vector;
}

AkForceInline AKSIMD_V4I32 AKSIMD_SHIFTLEFT_V4I32( AKSIMD_V4I32 in_vector, int in_shiftBy )
{
	in_vector.m_data[0] <<= in_shiftBy;
	in_vector.m_data[1] <<= in_shiftBy;
	in_vector.m_data[2] <<= in_shiftBy;
	in_vector.m_data[3] <<= in_shiftBy;
	return in_vector;
}

AkForceInline AKSIMD_V4I32 AKSIMD_SHIFTRIGHT_V4I32( AKSIMD_V4I32 in_vector, int in_shiftBy )
{
	in_vector.m_data[0] = (AkInt32)((AkUInt32)in_vector.m_data[0] >> in_shiftBy);
	in_vector.m_data[1] = (AkInt32)((AkUInt32)in_vector.m_data[1] >> in_shiftBy);
	in_vector.m_data[2] = (AkInt32)((AkUInt32)in_vector.m_data[2] >> in_shiftBy);
	in_vector.m_data[3] = (AkInt32)((AkUInt32)in_vector.m_data[3] >> in_shiftBy);
	return in_vector;
}

AkForceInline AKSIMD_V4I32 AKSIMD_SHIFTRIGHTARITH_V4I32( AKSIMD_V4I32 in_vector, int in_shiftBy )
{
	in_vector.m_data[0] >>= in_shiftBy;
	in_vector.m_data[1] >>= in_shiftBy;
	in_vector.m_data[2] >>= in_shiftBy;
	in_vector.m_data[3] >>= in_shiftBy;
	return in_vector;
}
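// Illustrative note (not part of the original header): AKSIMD_SHIFTRIGHT_V4I32 is a
// logical shift (zero-filled) while AKSIMD_SHIFTRIGHTARITH_V4I32 is arithmetic
// (sign-extending). For a lane holding -8 (0xFFFFFFF8) shifted right by 1:
//   logical    -> 0x7FFFFFFC (2147483644)
//   arithmetic -> 0xFFFFFFFC (-4)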
//@}
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
/// @name AKSIMD arithmetic
//@{

// _mm_sub_ps
AkForceInline AKSIMD_V4F32 AKSIMD_SUB_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_vec1.m_data[0] - in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1] - in_vec2.m_data[1];
	vector.m_data[2] = in_vec1.m_data[2] - in_vec2.m_data[2];
	vector.m_data[3] = in_vec1.m_data[3] - in_vec2.m_data[3];
	return vector;
}

/// Subtracts the lower single-precision, floating-point values of a and b.
/// The upper three single-precision, floating-point values are passed through from a.
/// r0 := a0 - b0 ; r1 := a1 ; r2 := a2 ; r3 := a3 (see _mm_sub_ss)
AkForceInline AKSIMD_V4F32 AKSIMD_SUB_SS_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_vec1.m_data[0] - in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1];
	vector.m_data[2] = in_vec1.m_data[2];
	vector.m_data[3] = in_vec1.m_data[3];
	return vector;
}

// _mm_add_ps
AkForceInline AKSIMD_V4F32 AKSIMD_ADD_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_vec1.m_data[0] + in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1] + in_vec2.m_data[1];
	vector.m_data[2] = in_vec1.m_data[2] + in_vec2.m_data[2];
	vector.m_data[3] = in_vec1.m_data[3] + in_vec2.m_data[3];
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_DIV_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_vec1.m_data[0] / in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1] / in_vec2.m_data[1];
	vector.m_data[2] = in_vec1.m_data[2] / in_vec2.m_data[2];
	vector.m_data[3] = in_vec1.m_data[3] / in_vec2.m_data[3];
	return vector;
}

AkForceInline AKSIMD_V2F32 AKSIMD_ADD_V2F32( const AKSIMD_V2F32& in_vec1, const AKSIMD_V2F32& in_vec2 )
{
	AKSIMD_V2F32 vector;
	vector.m_data[0] = in_vec1.m_data[0] + in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1] + in_vec2.m_data[1];
	return vector;
}

/// Adds the lower single-precision, floating-point values of a and b; the
/// upper three single-precision, floating-point values are passed through from a.
/// r0 := a0 + b0; r1 := a1; r2 := a2; r3 := a3 (see _mm_add_ss)
AkForceInline AKSIMD_V4F32 AKSIMD_ADD_SS_V4F32( const AKSIMD_V4F32& a, const AKSIMD_V4F32& b )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = a.m_data[0] + b.m_data[0];
	vector.m_data[1] = a.m_data[1];
	vector.m_data[2] = a.m_data[2];
	vector.m_data[3] = a.m_data[3];
	return vector;
}

// _mm_mul_ps
AkForceInline AKSIMD_V4F32 AKSIMD_MUL_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_vec1.m_data[0] * in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1] * in_vec2.m_data[1];
	vector.m_data[2] = in_vec1.m_data[2] * in_vec2.m_data[2];
	vector.m_data[3] = in_vec1.m_data[3] * in_vec2.m_data[3];
	return vector;
}

AkForceInline AKSIMD_V2F32 AKSIMD_MUL_V2F32( const AKSIMD_V2F32& in_vec1, const AKSIMD_V2F32& in_vec2 )
{
	AKSIMD_V2F32 vector;
	vector.m_data[0] = in_vec1.m_data[0] * in_vec2.m_data[0];
	vector.m_data[1] = in_vec1.m_data[1] * in_vec2.m_data[1];
	return vector;
}

/// Multiplies the lower single-precision, floating-point values of
/// a and b; the upper three single-precision, floating-point values
/// are passed through from a.
/// r0 := a0 * b0; r1 := a1; r2 := a2; r3 := a3 (see _mm_mul_ss)
AkForceInline AKSIMD_V4F32 AKSIMD_MUL_SS_V4F32( const AKSIMD_V4F32& a, const AKSIMD_V4F32& b )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = a.m_data[0] * b.m_data[0];
	vector.m_data[1] = a.m_data[1];
	vector.m_data[2] = a.m_data[2];
	vector.m_data[3] = a.m_data[3];
	return vector;
}

/// Vector multiply-add operation.
#define AKSIMD_MADD_V4F32( __a__, __b__, __c__ ) AKSIMD_ADD_V4F32( AKSIMD_MUL_V4F32( (__a__), (__b__) ), (__c__) )
/// Vector multiply-subtract operation.
#define AKSIMD_MSUB_V4F32( __a__, __b__, __c__ ) AKSIMD_SUB_V4F32( AKSIMD_MUL_V4F32( (__a__), (__b__) ), (__c__) )
/// Vector multiply-add operation on the lower single-precision value.
#define AKSIMD_MADD_SS_V4F32( __a__, __b__, __c__ ) AKSIMD_ADD_SS_V4F32( AKSIMD_MUL_SS_V4F32( (__a__), (__b__) ), (__c__) )
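// Illustrative usage (not part of the original header); vIn, vGain and vAccum are
// hypothetical names:
//   // vOut[i] = vIn[i] * vGain[i] + vAccum[i] for each of the 4 lanes
//   AKSIMD_V4F32 vOut = AKSIMD_MADD_V4F32( vIn, vGain, vAccum );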
// _mm_min_ps
AkForceInline AKSIMD_V4F32 AKSIMD_MIN_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = AkMin(in_vec1.m_data[0], in_vec2.m_data[0]);
	vector.m_data[1] = AkMin(in_vec1.m_data[1], in_vec2.m_data[1]);
	vector.m_data[2] = AkMin(in_vec1.m_data[2], in_vec2.m_data[2]);
	vector.m_data[3] = AkMin(in_vec1.m_data[3], in_vec2.m_data[3]);
	return vector;
}

AkForceInline AKSIMD_V2F32 AKSIMD_MIN_V2F32( const AKSIMD_V2F32& in_vec1, const AKSIMD_V2F32& in_vec2 )
{
	AKSIMD_V2F32 vector;
	vector.m_data[0] = AkMin(in_vec1.m_data[0], in_vec2.m_data[0]);
	vector.m_data[1] = AkMin(in_vec1.m_data[1], in_vec2.m_data[1]);
	return vector;
}

// _mm_max_ps
AkForceInline AKSIMD_V4F32 AKSIMD_MAX_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = AkMax(in_vec1.m_data[0], in_vec2.m_data[0]);
	vector.m_data[1] = AkMax(in_vec1.m_data[1], in_vec2.m_data[1]);
	vector.m_data[2] = AkMax(in_vec1.m_data[2], in_vec2.m_data[2]);
	vector.m_data[3] = AkMax(in_vec1.m_data[3], in_vec2.m_data[3]);
	return vector;
}

AkForceInline AKSIMD_V2F32 AKSIMD_MAX_V2F32( const AKSIMD_V2F32& in_vec1, const AKSIMD_V2F32& in_vec2 )
{
	AKSIMD_V2F32 vector;
	vector.m_data[0] = AkMax(in_vec1.m_data[0], in_vec2.m_data[0]);
	vector.m_data[1] = AkMax(in_vec1.m_data[1], in_vec2.m_data[1]);
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_ABS_V4F32( const AKSIMD_V4F32& in_vec1 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = fabsf(in_vec1.m_data[0]);
	vector.m_data[1] = fabsf(in_vec1.m_data[1]);
	vector.m_data[2] = fabsf(in_vec1.m_data[2]);
	vector.m_data[3] = fabsf(in_vec1.m_data[3]);
	return vector;
}

AkForceInline AKSIMD_V4F32 AKSIMD_NEG_V4F32( const AKSIMD_V4F32& in_vec1 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = -in_vec1.m_data[0];
	vector.m_data[1] = -in_vec1.m_data[1];
	vector.m_data[2] = -in_vec1.m_data[2];
	vector.m_data[3] = -in_vec1.m_data[3];
	return vector;
}

// _mm_sqrt_ps
AkForceInline AKSIMD_V4F32 AKSIMD_SQRT_V4F32( const AKSIMD_V4F32& in_vec )
{
	AKSIMD_V4F32 vCompare;
	AKSIMD_GETELEMENT_V4F32(vCompare,0) = sqrtf( AKSIMD_GETELEMENT_V4F32(in_vec,0) );
	AKSIMD_GETELEMENT_V4F32(vCompare,1) = sqrtf( AKSIMD_GETELEMENT_V4F32(in_vec,1) );
	AKSIMD_GETELEMENT_V4F32(vCompare,2) = sqrtf( AKSIMD_GETELEMENT_V4F32(in_vec,2) );
	AKSIMD_GETELEMENT_V4F32(vCompare,3) = sqrtf( AKSIMD_GETELEMENT_V4F32(in_vec,3) );
	//AKSIMD_V4F32 res = vrecpeq_f32( vrsqrteq_f32( in_vec ) );
	return vCompare /*res*/;
}

/// Vector reciprocal square root approximation 1/sqrt(a), or equivalently, sqrt(1/a)
AkForceInline AKSIMD_V4F32 AKSIMD_RSQRT_V4F32( const AKSIMD_V4F32& in_vec )
{
	AKSIMD_V4F32 vCompare;
	AKSIMD_GETELEMENT_V4F32(vCompare, 0) = 1.f / sqrtf(AKSIMD_GETELEMENT_V4F32(in_vec, 0));
	AKSIMD_GETELEMENT_V4F32(vCompare, 1) = 1.f / sqrtf(AKSIMD_GETELEMENT_V4F32(in_vec, 1));
	AKSIMD_GETELEMENT_V4F32(vCompare, 2) = 1.f / sqrtf(AKSIMD_GETELEMENT_V4F32(in_vec, 2));
	AKSIMD_GETELEMENT_V4F32(vCompare, 3) = 1.f / sqrtf(AKSIMD_GETELEMENT_V4F32(in_vec, 3));
	return vCompare;
}

AkForceInline AKSIMD_V2F32 AKSIMD_SQRT_V2F32( const AKSIMD_V2F32& in_vec )
{
	AKSIMD_V2F32 vCompare;
	AKSIMD_GETELEMENT_V4F32(vCompare,0) = sqrtf( AKSIMD_GETELEMENT_V4F32(in_vec,0) );
	AKSIMD_GETELEMENT_V4F32(vCompare,1) = sqrtf( AKSIMD_GETELEMENT_V4F32(in_vec,1) );
	//AKSIMD_V4F32 res = vrecpeq_f32( vrsqrteq_f32( in_vec ) );
	return vCompare /*res*/;
}

//@}
////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
/// @name AKSIMD packing / unpacking
//@{

// _mm_unpacklo_epi16
// r0 := a0
// r1 := b0
// r2 := a1
// r3 := b1
// r4 := a2
// r5 := b2
// r6 := a3
// r7 := b3
AkForceInline AKSIMD_V4I32 AKSIMD_UNPACKLO_VECTOR8I16( const AKSIMD_V4I32& in_vec1, const AKSIMD_V4I32& in_vec2 )
{
	AKSIMD_V4I32 vector;
	AkInt16 *pVec1, *pVec2, *pDest;
	pVec1 = (AkInt16*)&in_vec1;
	pVec2 = (AkInt16*)&in_vec2;
	pDest = (AkInt16*)&vector;
	pDest[0] = pVec1[0];
	pDest[1] = pVec2[0];
	pDest[2] = pVec1[1];
	pDest[3] = pVec2[1];
	pDest[4] = pVec1[2];
	pDest[5] = pVec2[2];
	pDest[6] = pVec1[3];
	pDest[7] = pVec2[3];
	return vector;
}

// _mm_unpackhi_epi16
AkForceInline AKSIMD_V4I32 AKSIMD_UNPACKHI_VECTOR8I16( const AKSIMD_V4I32& in_vec1, const AKSIMD_V4I32& in_vec2 )
{
	AKSIMD_V4I32 vector;
	AkInt16 *pVec1, *pVec2, *pDest;
	pVec1 = (AkInt16*)&in_vec1;
	pVec2 = (AkInt16*)&in_vec2;
	pDest = (AkInt16*)&vector;
	pDest[0] = pVec1[4];
	pDest[1] = pVec2[4];
	pDest[2] = pVec1[5];
	pDest[3] = pVec2[5];
	pDest[4] = pVec1[6];
	pDest[5] = pVec2[6];
	pDest[6] = pVec1[7];
	pDest[7] = pVec2[7];
	return vector;
}

// _mm_unpacklo_ps
AkForceInline AKSIMD_V4F32 AKSIMD_UNPACKLO_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_vec1.m_data[0];
	vector.m_data[1] = in_vec2.m_data[0];
	vector.m_data[2] = in_vec1.m_data[1];
	vector.m_data[3] = in_vec2.m_data[1];
	return vector;
}

// _mm_unpackhi_ps
AkForceInline AKSIMD_V4F32 AKSIMD_UNPACKHI_V4F32( const AKSIMD_V4F32& in_vec1, const AKSIMD_V4F32& in_vec2 )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = in_vec1.m_data[2];
	vector.m_data[1] = in_vec2.m_data[2];
	vector.m_data[2] = in_vec1.m_data[3];
	vector.m_data[3] = in_vec2.m_data[3];
	return vector;
}
  550. AkForceInline AKSIMD_V4I32 AKSIMD_PACKS_V4I32( const AKSIMD_V4I32& in_vec1, const AKSIMD_V4I32& in_vec2 )
  551. {
  552. AKSIMD_V4I32 vector;
  553. AkInt16 *pDest = (AkInt16*)&vector;
  554. pDest[0] = (AkInt16)AkClamp((AkInt16)in_vec1.m_data[0], -32768, 32767);
  555. pDest[1] = (AkInt16)AkClamp((AkInt16)in_vec1.m_data[1], -32768, 32767);
  556. pDest[2] = (AkInt16)AkClamp((AkInt16)in_vec1.m_data[2], -32768, 32767);
  557. pDest[3] = (AkInt16)AkClamp((AkInt16)in_vec1.m_data[3], -32768, 32767);
  558. pDest[4] = (AkInt16)AkClamp((AkInt16)in_vec2.m_data[0], -32768, 32767);
  559. pDest[5] = (AkInt16)AkClamp((AkInt16)in_vec2.m_data[1], -32768, 32767);
  560. pDest[6] = (AkInt16)AkClamp((AkInt16)in_vec2.m_data[2], -32768, 32767);
  561. pDest[7] = (AkInt16)AkClamp((AkInt16)in_vec2.m_data[3], -32768, 32767);
  562. return vector;
  563. }
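// Illustrative note (not part of the original header): like _mm_packs_epi32, the
// pack saturates, e.g. a 32-bit lane holding 100000 becomes 32767 and a lane
// holding -100000 becomes -32768 in the packed 16-bit result.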
//@}
////////////////////////////////////////////////////////////////////////

//#define AKSIMD_GET_ITEM( vec, index ) vec[index]

////////////////////////////////////////////////////////////////////////
/// @name AKSIMD shuffling
//@{

// See _MM_SHUFFLE
#define AKSIMD_SHUFFLE( fp3, fp2, fp1, fp0 ) \
	(((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))

// See _mm_shuffle_ps
// Usage: AKSIMD_SHUFFLE_V4F32( vec1, vec2, AKSIMD_SHUFFLE( z, y, x, w ) )
//#define AKSIMD_SHUFFLE_V4F32( a, b, zyxw )
AkForceInline AKSIMD_V4F32 AKSIMD_SHUFFLE_V4F32( const AKSIMD_V4F32& xyzw, const AKSIMD_V4F32& abcd, int mask )
{
	AKSIMD_V4F32 vector;
	vector.m_data[0] = xyzw.m_data[(mask) & 0x3];
	vector.m_data[1] = xyzw.m_data[(mask >> 2) & 0x3];
	vector.m_data[2] = abcd.m_data[(mask >> 4) & 0x3];
	vector.m_data[3] = abcd.m_data[(mask >> 6) & 0x3];
	return vector;
}
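// Illustrative note (not part of the original header): the two low selectors of the
// mask pick lanes from the first argument and the two high selectors pick lanes
// from the second, so with hypothetical vectors a and b:
//   AKSIMD_SHUFFLE_V4F32( a, b, AKSIMD_SHUFFLE(3, 2, 1, 0) )
// yields { a[0], a[1], b[2], b[3] }.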
/// Moves the upper two single-precision, floating-point values of b to
/// the lower two single-precision, floating-point values of the result.
/// The upper two single-precision, floating-point values of a are passed
/// through to the result.
/// r3 := a3; r2 := a2; r1 := b3; r0 := b2 (see _mm_movehl_ps)
#define AKSIMD_MOVEHL_V4F32( a, b ) \
	AKSIMD_SHUFFLE_V4F32( (b), (a), AKSIMD_SHUFFLE(3, 2, 3, 2) )

/// Moves the lower two single-precision, floating-point values of b to
/// the upper two single-precision, floating-point values of the result.
/// The lower two single-precision, floating-point values of a are passed
/// through to the result.
/// r3 := b1 ; r2 := b0 ; r1 := a1 ; r0 := a0 (see _mm_movelh_ps)
#define AKSIMD_MOVELH_V4F32( a, b ) \
	AKSIMD_SHUFFLE_V4F32( (a), (b), AKSIMD_SHUFFLE(1, 0, 1, 0) )

/// Swap the 2 lower floats together and the 2 higher floats together.
#define AKSIMD_SHUFFLE_BADC( __a__ ) AKSIMD_SHUFFLE_V4F32( (__a__), (__a__), AKSIMD_SHUFFLE(2,3,0,1))

/// Swap the 2 lower floats with the 2 higher floats.
#define AKSIMD_SHUFFLE_CDAB( __a__ ) AKSIMD_SHUFFLE_V4F32( (__a__), (__a__), AKSIMD_SHUFFLE(1,0,3,2))

/// Barrel-shift all floats by one.
#define AKSIMD_SHUFFLE_BCDA( __a__ ) AKSIMD_SHUFFLE_V4F32( (__a__), (__a__), AKSIMD_SHUFFLE(0,3,2,1))

/// Duplicates the odd items into the even items (d c b a -> d d b b)
#define AKSIMD_DUP_ODD(__vv) AKSIMD_SHUFFLE_V4F32(__vv, __vv, AKSIMD_SHUFFLE(3,3,1,1))

/// Duplicates the even items into the odd items (d c b a -> c c a a)
#define AKSIMD_DUP_EVEN(__vv) AKSIMD_SHUFFLE_V4F32(__vv, __vv, AKSIMD_SHUFFLE(2,2,0,0))

//#include <AK/SoundEngine/Platforms/Generic/AkSimdShuffle.h>

//@}
////////////////////////////////////////////////////////////////////////

// Old AKSIMD -- will search-and-replace later
#define AkReal32Vector AKSIMD_V4F32
#define AKSIMD_LOAD1( __scalar__ ) AKSIMD_LOAD1_V4F32( &__scalar__ )
#define AKSIMD_LOADVEC(v) AKSIMD_LOAD_V4F32((const AKSIMD_F32*)((v)))
#define AKSIMD_MUL AKSIMD_MUL_V4F32
#define AKSIMD_STOREVEC AKSIMD_STORE_V4F32

/// Faked in-place vector horizontal add.
/// \akwarning
/// Don't expect this to be very efficient.
/// \endakwarning
static AkForceInline AKSIMD_V4F32 AKSIMD_HORIZONTALADD_V4F32( AKSIMD_V4F32 vVec )
{
	AKSIMD_V4F32 vAb = AKSIMD_SHUFFLE_V4F32(vVec, vVec, 0xB1);
	AKSIMD_V4F32 vHaddAb = AKSIMD_ADD_V4F32(vVec, vAb);
	AKSIMD_V4F32 vHaddCd = AKSIMD_SHUFFLE_V4F32(vHaddAb, vHaddAb, 0x4E);
	AKSIMD_V4F32 vHaddAbcd = AKSIMD_ADD_V4F32(vHaddAb, vHaddCd);
	return vHaddAbcd;
}
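// Illustrative note (not part of the original header): every lane of the result holds
// the sum of all four input lanes, e.g. { 1.f, 2.f, 3.f, 4.f } becomes
// { 10.f, 10.f, 10.f, 10.f }. The raw masks 0xB1 and 0x4E are the BADC and CDAB
// lane swaps written as shuffle constants.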
/// Cross-platform SIMD multiplication of 2 complex data elements with interleaved real and imaginary parts
static AkForceInline AKSIMD_V4F32 AKSIMD_COMPLEXMUL_V4F32( const AKSIMD_V4F32 vCIn1, const AKSIMD_V4F32 vCIn2 )
{
	static const AKSIMD_V4F32 vSign = { 1.f, -1.f, 1.f, -1.f };

	AKSIMD_V4F32 vTmp1 = AKSIMD_SHUFFLE_V4F32( vCIn1, vCIn1, AKSIMD_SHUFFLE(2,2,0,0));
	vTmp1 = AKSIMD_MUL_V4F32( vTmp1, vCIn2 );
	AKSIMD_V4F32 vTmp2 = AKSIMD_SHUFFLE_V4F32( vCIn1, vCIn1, AKSIMD_SHUFFLE(3,3,1,1));
	vTmp2 = AKSIMD_MUL_V4F32( vTmp2, vSign );
	vTmp2 = AKSIMD_MUL_V4F32( vTmp2, vCIn2 );
	vTmp2 = AKSIMD_SHUFFLE_BADC( vTmp2 );
	vTmp2 = AKSIMD_ADD_V4F32( vTmp2, vTmp1 );
	return vTmp2;
}
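// Illustrative note (not part of the original header): each vector holds two complex
// numbers as { re0, im0, re1, im1 }, and the routine computes the usual complex
// product (a + bi)(c + di) = (ac - bd) + (ad + bc)i independently for both pairs,
// e.g. { 1.f, 2.f, 0.f, 1.f } * { 3.f, 4.f, 5.f, 6.f } gives { -5.f, 10.f, -6.f, 5.f }.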
#define AKSIMD_SPLAT_V4F32(var, idx) AKSIMD_SHUFFLE_V4F32(var,var, AKSIMD_SHUFFLE(idx,idx,idx,idx))

#define AK_SIGN_BIT( val ) (((AkUInt32)val) >> 31)

static AkForceInline int AKSIMD_MASK_V4F32( const AKSIMD_V4F32& in_vec )
{
	return AK_SIGN_BIT(in_vec.m_data[0]) | AK_SIGN_BIT(in_vec.m_data[1]) << 1 | AK_SIGN_BIT(in_vec.m_data[2]) << 2 | AK_SIGN_BIT(in_vec.m_data[3]) << 3;
}

static AkForceInline AKSIMD_V4F32 AKSIMD_RECIP_V4F32( const AKSIMD_V4F32& v )
{
	AKSIMD_V4F32 r;
	r.m_data[0] = 1.f / v.m_data[0];
	r.m_data[1] = 1.f / v.m_data[1];
	r.m_data[2] = 1.f / v.m_data[2];
	r.m_data[3] = 1.f / v.m_data[3];
	return r;
}

static AkForceInline AKSIMD_V4F32 AKSIMD_CEIL_V4F32( const AKSIMD_V4F32& x )
{
	AKSIMD_V4F32 r;
	r.m_data[0] = ceil(x.m_data[0]);
	r.m_data[1] = ceil(x.m_data[1]);
	r.m_data[2] = ceil(x.m_data[2]);
	r.m_data[3] = ceil(x.m_data[3]);
	return r;
}

#endif //_AKSIMD_GENERIC_H_