NMSIS-Core  Version 1.2.0
NMSIS-Core support for Nuclei processor-based devices
core_feature_dsp.h
1 /*
2  * Copyright (c) 2019 Nuclei Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 #ifndef __CORE_FEATURE_DSP__
19 #define __CORE_FEATURE_DSP__
20 
25 /*
26  * DSP Feature Configuration Macro:
27  * 1. __DSP_PRESENT: Define whether Digital Signal Processing Unit(DSP) is present or not
28  * * 0: Not present
29  * * 1: Present
30  */
31 #ifdef __cplusplus
32  extern "C" {
33 #endif
34 
35 #include "core_feature_base.h"
36 
37 #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1)
38 
39 #if defined(__INC_INTRINSIC_API) && (__INC_INTRINSIC_API == 1)
40 #ifndef __ICCRISCV__
41 #include <rvp_intrinsic.h>
42 #endif
43 #endif
44 
45 #ifndef __ICCRISCV__
46 /* ########################### CPU SIMD DSP Intrinsic Functions ########################### */ /* End of Doxygen Group NMSIS_Core_DSP_Intrinsic */
83 
/* ===== Inline Function Start for 3.1. ADD8 ===== */
/* Emits 'add8': element-wise addition of packed 8-bit values (RISC-V P-extension). */
__STATIC_FORCEINLINE unsigned long __RV_ADD8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("add8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.1. ADD8 ===== */

/* ===== Inline Function Start for 3.2. ADD16 ===== */
/* Emits 'add16': element-wise addition of packed 16-bit values (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_ADD16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("add16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.2. ADD16 ===== */

/* ===== Inline Function Start for 3.3. ADD64 ===== */
/* Emits 'add64': 64-bit addition; on RV32 the 64-bit operands occupy
 * register pairs selected by the compiler (P-ext — confirm against spec). */
__STATIC_FORCEINLINE unsigned long long __RV_ADD64(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("add64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.3. ADD64 ===== */

/* ===== Inline Function Start for 3.4. AVE ===== */
/* Emits 'ave': signed average of a and b (rounding per P-ext spec — confirm). */
__STATIC_FORCEINLINE long __RV_AVE(long a, long b)
{
    long result;
    __ASM volatile("ave %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.4. AVE ===== */

/* ===== Inline Function Start for 3.5. BITREV ===== */
/* Emits 'bitrev': bit reversal of a, width controlled by b (see P-ext spec). */
__STATIC_FORCEINLINE unsigned long __RV_BITREV(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("bitrev %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.5. BITREV ===== */

/* ===== Inline Function Start for 3.6. BITREVI ===== */
/* Immediate form of BITREV: 'b' must be a constant expression because it is
 * encoded as an immediate (machine 'K' constraint); macro, not a function. */
#define __RV_BITREVI(a, b) \
    ({ \
        unsigned long result; \
        unsigned long __a = (unsigned long)(a); \
        __ASM volatile("bitrevi %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.6. BITREVI ===== */

/* ===== Inline Function Start for 3.7. BPICK ===== */
/* Emits 'bpick': selects bits from a or b under control of mask c (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_BPICK(unsigned long a, unsigned long b, unsigned long c)
{
    unsigned long result;
    __ASM volatile("bpick %0, %1, %2, %3" : "=r"(result) : "r"(a), "r"(b), "r"(c));
    return result;
}
/* ===== Inline Function End for 3.7. BPICK ===== */
713 
714 /* ===== Inline Function Start for 3.8. CLROV ===== */
732 {
733  __ASM volatile("clrov ");
734 }
735 /* ===== Inline Function End for 3.8. CLROV ===== */
736 
/* ===== Inline Function Start for 3.9. CLRS8 ===== */
/* Emits 'clrs8': count leading redundant sign bits in each 8-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLRS8(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clrs8 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.9. CLRS8 ===== */

/* ===== Inline Function Start for 3.10. CLRS16 ===== */
/* Emits 'clrs16': count leading redundant sign bits per 16-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLRS16(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clrs16 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.10. CLRS16 ===== */

/* ===== Inline Function Start for 3.11. CLRS32 ===== */
/* Emits 'clrs32': count leading redundant sign bits per 32-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLRS32(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clrs32 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.11. CLRS32 ===== */

/* ===== Inline Function Start for 3.12. CLO8 ===== */
/* Emits 'clo8': count leading one bits in each 8-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLO8(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clo8 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.12. CLO8 ===== */

/* ===== Inline Function Start for 3.13. CLO16 ===== */
/* Emits 'clo16': count leading one bits per 16-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLO16(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clo16 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.13. CLO16 ===== */

/* ===== Inline Function Start for 3.14. CLO32 ===== */
/* Emits 'clo32': count leading one bits per 32-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLO32(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clo32 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.14. CLO32 ===== */

/* ===== Inline Function Start for 3.15. CLZ8 ===== */
/* Emits 'clz8': count leading zero bits in each 8-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLZ8(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clz8 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.15. CLZ8 ===== */

/* ===== Inline Function Start for 3.16. CLZ16 ===== */
/* Emits 'clz16': count leading zero bits per 16-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLZ16(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clz16 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.16. CLZ16 ===== */

/* ===== Inline Function Start for 3.17. CLZ32 ===== */
/* Emits 'clz32': count leading zero bits per 32-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CLZ32(unsigned long a)
{
    unsigned long result;
    __ASM volatile("clz32 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.17. CLZ32 ===== */

/* ===== Inline Function Start for 3.18. CMPEQ8 ===== */
/* Emits 'cmpeq8': per-8-bit-element compare-equal of a and b (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CMPEQ8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("cmpeq8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.18. CMPEQ8 ===== */

/* ===== Inline Function Start for 3.19. CMPEQ16 ===== */
/* Emits 'cmpeq16': per-16-bit-element compare-equal of a and b (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CMPEQ16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("cmpeq16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.19. CMPEQ16 ===== */

/* ===== Inline Function Start for 3.20. CRAS16 ===== */
/* Emits 'cras16': cross add/subtract on packed 16-bit halves (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CRAS16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("cras16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.20. CRAS16 ===== */

/* ===== Inline Function Start for 3.21. CRSA16 ===== */
/* Emits 'crsa16': cross subtract/add on packed 16-bit halves (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_CRSA16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("crsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.21. CRSA16 ===== */

/* ===== Inline Function Start for 3.22. INSB ===== */
/* Emits 'insb': inserts the low byte of a into byte position b of t.
 * Macro because b is encoded as an immediate ('K' constraint, must be a
 * constant expression); t is read-modify-write ("+r"). */
#define __RV_INSB(t, a, b) \
    ({ \
        unsigned long __t = (unsigned long)(t); \
        unsigned long __a = (unsigned long)(a); \
        __ASM volatile("insb %0, %1, %2" : "+r"(__t) : "r"(__a), "K"(b)); \
        __t; \
    })
/* ===== Inline Function End for 3.22. INSB ===== */

/* ===== Inline Function Start for 3.23. KABS8 ===== */
/* Emits 'kabs8': saturating absolute value of each 8-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KABS8(unsigned long a)
{
    unsigned long result;
    __ASM volatile("kabs8 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.23. KABS8 ===== */

/* ===== Inline Function Start for 3.24. KABS16 ===== */
/* Emits 'kabs16': saturating absolute value of each 16-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KABS16(unsigned long a)
{
    unsigned long result;
    __ASM volatile("kabs16 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.24. KABS16 ===== */

/* ===== Inline Function Start for 3.25. KABSW ===== */
/* Emits 'kabsw': saturating absolute value of a signed word (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KABSW(signed long a)
{
    unsigned long result;
    __ASM volatile("kabsw %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.25. KABSW ===== */

/* ===== Inline Function Start for 3.26. KADD8 ===== */
/* Emits 'kadd8': saturating addition of packed 8-bit elements (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KADD8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kadd8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.26. KADD8 ===== */

/* ===== Inline Function Start for 3.27. KADD16 ===== */
/* Emits 'kadd16': saturating addition of packed 16-bit elements (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KADD16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kadd16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.27. KADD16 ===== */

/* ===== Inline Function Start for 3.28. KADD64 ===== */
/* Emits 'kadd64': saturating 64-bit addition (register pairs on RV32). */
__STATIC_FORCEINLINE long long __RV_KADD64(long long a, long long b)
{
    long long result;
    __ASM volatile("kadd64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.28. KADD64 ===== */
1673 
1674 /* ===== Inline Function Start for 3.29. KADDH ===== */
1714 {
1715  long result;
1716  __ASM volatile("kaddh %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
1717  return result;
1718 }
1719 /* ===== Inline Function End for 3.29. KADDH ===== */
1720 
1721 /* ===== Inline Function Start for 3.30. KADDW ===== */
1762 {
1763  long result;
1764  __ASM volatile("kaddw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
1765  return result;
1766 }
1767 /* ===== Inline Function End for 3.30. KADDW ===== */
1768 
/* ===== Inline Function Start for 3.31. KCRAS16 ===== */
/* Emits 'kcras16': saturating cross add/subtract on 16-bit halves (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KCRAS16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kcras16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.31. KCRAS16 ===== */

/* ===== Inline Function Start for 3.32. KCRSA16 ===== */
/* Emits 'kcrsa16': saturating cross subtract/add on 16-bit halves (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KCRSA16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kcrsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.32. KCRSA16 ===== */

/* ===== Inline Function Start for 3.33.1. KDMBB ===== */
/* Emits 'kdmbb': saturating doubled multiply, bottom(a) x bottom(b) halves (P-ext). */
__STATIC_FORCEINLINE long __RV_KDMBB(unsigned int a, unsigned int b)
{
    long result;
    __ASM volatile("kdmbb %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.33.1. KDMBB ===== */

/* ===== Inline Function Start for 3.33.2. KDMBT ===== */
/* Emits 'kdmbt': saturating doubled multiply, bottom(a) x top(b) halves (P-ext). */
__STATIC_FORCEINLINE long __RV_KDMBT(unsigned int a, unsigned int b)
{
    long result;
    __ASM volatile("kdmbt %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.33.2. KDMBT ===== */

/* ===== Inline Function Start for 3.33.3. KDMTT ===== */
/* Emits 'kdmtt': saturating doubled multiply, top(a) x top(b) halves (P-ext). */
__STATIC_FORCEINLINE long __RV_KDMTT(unsigned int a, unsigned int b)
{
    long result;
    __ASM volatile("kdmtt %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.33.3. KDMTT ===== */

/* ===== Inline Function Start for 3.34.1. KDMABB ===== */
/* Emits 'kdmabb': KDMBB result accumulated into t with saturation;
 * t is read-modify-write ("+r"). */
__STATIC_FORCEINLINE long __RV_KDMABB(long t, unsigned int a, unsigned int b)
{
    __ASM volatile("kdmabb %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.34.1. KDMABB ===== */

/* ===== Inline Function Start for 3.34.2. KDMABT ===== */
/* Emits 'kdmabt': KDMBT result accumulated into t with saturation. */
__STATIC_FORCEINLINE long __RV_KDMABT(long t, unsigned int a, unsigned int b)
{
    __ASM volatile("kdmabt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.34.2. KDMABT ===== */

/* ===== Inline Function Start for 3.34.3. KDMATT ===== */
/* Emits 'kdmatt': KDMTT result accumulated into t with saturation. */
__STATIC_FORCEINLINE long __RV_KDMATT(long t, unsigned int a, unsigned int b)
{
    __ASM volatile("kdmatt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.34.3. KDMATT ===== */

/* ===== Inline Function Start for 3.35.1. KHM8 ===== */
/* Emits 'khm8': saturating fractional (Q7) multiply per 8-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KHM8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("khm8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.35.1. KHM8 ===== */

/* ===== Inline Function Start for 3.35.2. KHMX8 ===== */
/* Emits 'khmx8': as KHM8 but with crossed element pairing (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KHMX8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("khmx8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.35.2. KHMX8 ===== */

/* ===== Inline Function Start for 3.36.1. KHM16 ===== */
/* Emits 'khm16': saturating fractional (Q15) multiply per 16-bit element (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KHM16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("khm16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.36.1. KHM16 ===== */

/* ===== Inline Function Start for 3.36.2. KHMX16 ===== */
/* Emits 'khmx16': as KHM16 but with crossed element pairing (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KHMX16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("khmx16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.36.2. KHMX16 ===== */

/* ===== Inline Function Start for 3.37.1. KHMBB ===== */
/* Emits 'khmbb': saturating Q15 multiply of bottom(a) x bottom(b) halves (P-ext). */
__STATIC_FORCEINLINE long __RV_KHMBB(unsigned int a, unsigned int b)
{
    long result;
    __ASM volatile("khmbb %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.37.1. KHMBB ===== */

/* ===== Inline Function Start for 3.37.2. KHMBT ===== */
/* Emits 'khmbt': saturating Q15 multiply of bottom(a) x top(b) halves (P-ext). */
__STATIC_FORCEINLINE long __RV_KHMBT(unsigned int a, unsigned int b)
{
    long result;
    __ASM volatile("khmbt %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.37.2. KHMBT ===== */

/* ===== Inline Function Start for 3.37.3. KHMTT ===== */
/* Emits 'khmtt': saturating Q15 multiply of top(a) x top(b) halves (P-ext). */
__STATIC_FORCEINLINE long __RV_KHMTT(unsigned int a, unsigned int b)
{
    long result;
    __ASM volatile("khmtt %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.37.3. KHMTT ===== */

/* ===== Inline Function Start for 3.38.1. KMABB ===== */
/* Emits 'kmabb': saturating multiply-accumulate of bottom 16-bit halves into t. */
__STATIC_FORCEINLINE long __RV_KMABB(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmabb %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.38.1. KMABB ===== */

/* ===== Inline Function Start for 3.38.2. KMABT ===== */
/* Emits 'kmabt': saturating multiply-accumulate, bottom(a) x top(b), into t. */
__STATIC_FORCEINLINE long __RV_KMABT(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmabt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.38.2. KMABT ===== */

/* ===== Inline Function Start for 3.38.3. KMATT ===== */
/* Emits 'kmatt': saturating multiply-accumulate, top(a) x top(b), into t. */
__STATIC_FORCEINLINE long __RV_KMATT(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmatt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.38.3. KMATT ===== */

/* ===== Inline Function Start for 3.39.1. KMADA ===== */
/* Emits 'kmada': saturating dual 16-bit multiply, both products added into t. */
__STATIC_FORCEINLINE long __RV_KMADA(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmada %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.39.1. KMADA ===== */

/* ===== Inline Function Start for 3.39.2. KMAXDA ===== */
/* Emits 'kmaxda': as KMADA but with crossed 16-bit pairing (P-ext). */
__STATIC_FORCEINLINE long __RV_KMAXDA(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmaxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.39.2. KMAXDA ===== */

/* ===== Inline Function Start for 3.40.1. KMADS ===== */
/* Emits 'kmads': saturating dual 16-bit multiply, product difference added into t. */
__STATIC_FORCEINLINE long __RV_KMADS(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmads %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.40.1. KMADS ===== */

/* ===== Inline Function Start for 3.40.2. KMADRS ===== */
/* Emits 'kmadrs': as KMADS with reversed subtraction order (P-ext — see spec). */
__STATIC_FORCEINLINE long __RV_KMADRS(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmadrs %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.40.2. KMADRS ===== */

/* ===== Inline Function Start for 3.40.3. KMAXDS ===== */
/* Emits 'kmaxds': as KMADS but with crossed 16-bit pairing (P-ext). */
__STATIC_FORCEINLINE long __RV_KMAXDS(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmaxds %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.40.3. KMAXDS ===== */

/* ===== Inline Function Start for 3.41. KMAR64 ===== */
/* Emits 'kmar64': multiply a x b and accumulate into the 64-bit t with
 * saturation; t occupies a register pair on RV32. */
__STATIC_FORCEINLINE long long __RV_KMAR64(long long t, long a, long b)
{
    __ASM volatile("kmar64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.41. KMAR64 ===== */

/* ===== Inline Function Start for 3.42.1. KMDA ===== */
/* Emits 'kmda': dual 16-bit multiply, products added together (saturating). */
__STATIC_FORCEINLINE long __RV_KMDA(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("kmda %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.42.1. KMDA ===== */

/* ===== Inline Function Start for 3.42.2. KMXDA ===== */
/* Emits 'kmxda': as KMDA but with crossed 16-bit pairing (P-ext). */
__STATIC_FORCEINLINE long __RV_KMXDA(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("kmxda %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.42.2. KMXDA ===== */

/* ===== Inline Function Start for 3.43.1. KMMAC ===== */
/* Emits 'kmmac': saturating MSW 32x32 multiply-accumulate into t. */
__STATIC_FORCEINLINE long __RV_KMMAC(long t, long a, long b)
{
    __ASM volatile("kmmac %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.43.1. KMMAC ===== */

/* ===== Inline Function Start for 3.43.2. KMMAC.u ===== */
/* Emits 'kmmac.u': KMMAC with the '.u' (rounding-up) variant — see P-ext spec. */
__STATIC_FORCEINLINE long __RV_KMMAC_U(long t, long a, long b)
{
    __ASM volatile("kmmac.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.43.2. KMMAC.u ===== */

/* ===== Inline Function Start for 3.44.1. KMMAWB ===== */
/* Emits 'kmmawb': MSW multiply of word a by bottom half of b, accumulated into t. */
__STATIC_FORCEINLINE long __RV_KMMAWB(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawb %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.44.1. KMMAWB ===== */

/* ===== Inline Function Start for 3.44.2. KMMAWB.u ===== */
/* Emits 'kmmawb.u': rounding variant of KMMAWB (see P-ext spec). */
__STATIC_FORCEINLINE long __RV_KMMAWB_U(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawb.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.44.2. KMMAWB.u ===== */

/* ===== Inline Function Start for 3.45.1. KMMAWB2 ===== */
/* Emits 'kmmawb2': doubled variant of KMMAWB, saturating accumulate into t. */
__STATIC_FORCEINLINE long __RV_KMMAWB2(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawb2 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.45.1. KMMAWB2 ===== */

/* ===== Inline Function Start for 3.45.2. KMMAWB2.u ===== */
/* Emits 'kmmawb2.u': rounding variant of KMMAWB2 (see P-ext spec). */
__STATIC_FORCEINLINE long __RV_KMMAWB2_U(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawb2.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.45.2. KMMAWB2.u ===== */

/* ===== Inline Function Start for 3.46.1. KMMAWT ===== */
/* Emits 'kmmawt': MSW multiply of word a by top half of b, accumulated into t. */
__STATIC_FORCEINLINE long __RV_KMMAWT(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.46.1. KMMAWT ===== */

/* ===== Inline Function Start for 3.46.2. KMMAWT.u ===== */
/* Emits 'kmmawt.u': rounding variant of KMMAWT (see P-ext spec). */
__STATIC_FORCEINLINE long __RV_KMMAWT_U(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawt.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.46.2. KMMAWT.u ===== */

/* ===== Inline Function Start for 3.47.1. KMMAWT2 ===== */
/* Emits 'kmmawt2': doubled variant of KMMAWT, saturating accumulate into t. */
__STATIC_FORCEINLINE long __RV_KMMAWT2(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawt2 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.47.1. KMMAWT2 ===== */

/* ===== Inline Function Start for 3.47.2. KMMAWT2.u ===== */
/* Emits 'kmmawt2.u': rounding variant of KMMAWT2 (see P-ext spec). */
__STATIC_FORCEINLINE long __RV_KMMAWT2_U(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmmawt2.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.47.2. KMMAWT2.u ===== */

/* ===== Inline Function Start for 3.48.1. KMMSB ===== */
/* Emits 'kmmsb': saturating MSW 32x32 multiply-subtract from t. */
__STATIC_FORCEINLINE long __RV_KMMSB(long t, long a, long b)
{
    __ASM volatile("kmmsb %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.48.1. KMMSB ===== */

/* ===== Inline Function Start for 3.48.2. KMMSB.u ===== */
/* Emits 'kmmsb.u': rounding variant of KMMSB (see P-ext spec). */
__STATIC_FORCEINLINE long __RV_KMMSB_U(long t, long a, long b)
{
    __ASM volatile("kmmsb.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.48.2. KMMSB.u ===== */

/* ===== Inline Function Start for 3.49.1. KMMWB2 ===== */
/* Emits 'kmmwb2': saturating doubled MSW multiply of word a by bottom half of b. */
__STATIC_FORCEINLINE long __RV_KMMWB2(long a, unsigned long b)
{
    long result;
    __ASM volatile("kmmwb2 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.49.1. KMMWB2 ===== */

/* ===== Inline Function Start for 3.49.2. KMMWB2.u ===== */
/* Emits 'kmmwb2.u': rounding variant of KMMWB2 (see P-ext spec). */
__STATIC_FORCEINLINE long __RV_KMMWB2_U(long a, unsigned long b)
{
    long result;
    __ASM volatile("kmmwb2.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.49.2. KMMWB2.u ===== */

/* ===== Inline Function Start for 3.50.1. KMMWT2 ===== */
/* Emits 'kmmwt2': saturating doubled MSW multiply of word a by top half of b. */
__STATIC_FORCEINLINE long __RV_KMMWT2(long a, unsigned long b)
{
    long result;
    __ASM volatile("kmmwt2 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.50.1. KMMWT2 ===== */

/* ===== Inline Function Start for 3.50.2. KMMWT2.u ===== */
/* Emits 'kmmwt2.u': rounding variant of KMMWT2 (see P-ext spec). */
__STATIC_FORCEINLINE long __RV_KMMWT2_U(long a, unsigned long b)
{
    long result;
    __ASM volatile("kmmwt2.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.50.2. KMMWT2.u ===== */

/* ===== Inline Function Start for 3.51.1. KMSDA ===== */
/* Emits 'kmsda': saturating dual 16-bit multiply, both products subtracted from t. */
__STATIC_FORCEINLINE long __RV_KMSDA(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmsda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.51.1. KMSDA ===== */

/* ===== Inline Function Start for 3.51.2. KMSXDA ===== */
/* Emits 'kmsxda': as KMSDA but with crossed 16-bit pairing (P-ext). */
__STATIC_FORCEINLINE long __RV_KMSXDA(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmsxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.51.2. KMSXDA ===== */

/* ===== Inline Function Start for 3.52. KMSR64 ===== */
/* Emits 'kmsr64': multiply a x b and subtract from the 64-bit t with saturation. */
__STATIC_FORCEINLINE long long __RV_KMSR64(long long t, long a, long b)
{
    __ASM volatile("kmsr64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.52. KMSR64 ===== */

/* ===== Inline Function Start for 3.53. KSLLW ===== */
/* Emits 'ksllw': saturating left shift of a word by b bits (P-ext). */
__STATIC_FORCEINLINE long __RV_KSLLW(long a, unsigned int b)
{
    long result;
    __ASM volatile("ksllw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.53. KSLLW ===== */

/* ===== Inline Function Start for 3.54. KSLLIW ===== */
/* Immediate form of KSLLW: 'b' must be a constant expression (machine 'K'
 * immediate constraint); macro, not a function. */
#define __RV_KSLLIW(a, b) \
    ({ \
        long result; \
        long __a = (long)(a); \
        __ASM volatile("kslliw %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.54. KSLLIW ===== */

/* ===== Inline Function Start for 3.55. KSLL8 ===== */
/* Emits 'ksll8': saturating left shift of each 8-bit element by b bits. */
__STATIC_FORCEINLINE unsigned long __RV_KSLL8(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("ksll8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.55. KSLL8 ===== */

/* ===== Inline Function Start for 3.56. KSLLI8 ===== */
/* Immediate form of KSLL8: 'b' must be a constant expression ('K' constraint). */
#define __RV_KSLLI8(a, b) \
    ({ \
        unsigned long result; \
        unsigned long __a = (unsigned long)(a); \
        __ASM volatile("kslli8 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.56. KSLLI8 ===== */

/* ===== Inline Function Start for 3.57. KSLL16 ===== */
/* Emits 'ksll16': saturating left shift of each 16-bit element by b bits. */
__STATIC_FORCEINLINE unsigned long __RV_KSLL16(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("ksll16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.57. KSLL16 ===== */

/* ===== Inline Function Start for 3.58. KSLLI16 ===== */
/* Immediate form of KSLL16: 'b' must be a constant expression ('K' constraint). */
#define __RV_KSLLI16(a, b) \
    ({ \
        unsigned long result; \
        unsigned long __a = (unsigned long)(a); \
        __ASM volatile("kslli16 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.58. KSLLI16 ===== */

/* ===== Inline Function Start for 3.59.1. KSLRA8 ===== */
/* Emits 'kslra8': per-8-bit-element shift; b is signed — direction selected by
 * its sign (saturating left / arithmetic right per P-ext spec — confirm). */
__STATIC_FORCEINLINE unsigned long __RV_KSLRA8(unsigned long a, int b)
{
    unsigned long result;
    __ASM volatile("kslra8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.59.1. KSLRA8 ===== */

/* ===== Inline Function Start for 3.59.2. KSLRA8.u ===== */
/* Emits 'kslra8.u': rounding variant of KSLRA8 (see P-ext spec). */
__STATIC_FORCEINLINE unsigned long __RV_KSLRA8_U(unsigned long a, int b)
{
    unsigned long result;
    __ASM volatile("kslra8.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.59.2. KSLRA8.u ===== */

/* ===== Inline Function Start for 3.60.1. KSLRA16 ===== */
/* Emits 'kslra16': per-16-bit-element shift; direction selected by sign of b. */
__STATIC_FORCEINLINE unsigned long __RV_KSLRA16(unsigned long a, int b)
{
    unsigned long result;
    __ASM volatile("kslra16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.60.1. KSLRA16 ===== */

/* ===== Inline Function Start for 3.60.2. KSLRA16.u ===== */
/* Emits 'kslra16.u': rounding variant of KSLRA16 (see P-ext spec). */
__STATIC_FORCEINLINE unsigned long __RV_KSLRA16_U(unsigned long a, int b)
{
    unsigned long result;
    __ASM volatile("kslra16.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.60.2. KSLRA16.u ===== */
5105 
5106 /* ===== Inline Function Start for 3.61. KSLRAW ===== */
5159 {
5160  long result;
5161  __ASM volatile("kslraw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5162  return result;
5163 }
5164 /* ===== Inline Function End for 3.61. KSLRAW ===== */
5165 
5166 /* ===== Inline Function Start for 3.62. KSLRAW.u ===== */
5222 {
5223  long result;
5224  __ASM volatile("kslraw.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5225  return result;
5226 }
5227 /* ===== Inline Function End for 3.62. KSLRAW.u ===== */
5228 
/* ===== Inline Function Start for 3.63. KSTAS16 ===== */
/* Emits 'kstas16': saturating straight add/subtract on 16-bit halves (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KSTAS16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kstas16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.63. KSTAS16 ===== */

/* ===== Inline Function Start for 3.64. KSTSA16 ===== */
/* Emits 'kstsa16': saturating straight subtract/add on 16-bit halves (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KSTSA16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kstsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.64. KSTSA16 ===== */

/* ===== Inline Function Start for 3.65. KSUB8 ===== */
/* Emits 'ksub8': saturating subtraction of packed 8-bit elements (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KSUB8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ksub8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.65. KSUB8 ===== */

/* ===== Inline Function Start for 3.66. KSUB16 ===== */
/* Emits 'ksub16': saturating subtraction of packed 16-bit elements (P-ext). */
__STATIC_FORCEINLINE unsigned long __RV_KSUB16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ksub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.66. KSUB16 ===== */

/* ===== Inline Function Start for 3.67. KSUB64 ===== */
/* Emits 'ksub64': saturating 64-bit subtraction (register pairs on RV32). */
__STATIC_FORCEINLINE long long __RV_KSUB64(long long a, long long b)
{
    long long result;
    __ASM volatile("ksub64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.67. KSUB64 ===== */
5503 
5504 /* ===== Inline Function Start for 3.68. KSUBH ===== */
5544 {
5545  long result;
5546  __ASM volatile("ksubh %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5547  return result;
5548 }
5549 /* ===== Inline Function End for 3.68. KSUBH ===== */
5550 
5551 /* ===== Inline Function Start for 3.69. KSUBW ===== */
5592 {
5593  long result;
5594  __ASM volatile("ksubw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5595  return result;
5596 }
5597 /* ===== Inline Function End for 3.69. KSUBW ===== */
5598 
5599 /* ===== Inline Function Start for 3.70.1. KWMMUL ===== */
/* __RV_KWMMUL: emit 'kwmmul' — per P-extension spec, saturating most-significant-word multiply of a and b, doubled (x2). */
5647 __STATIC_FORCEINLINE long __RV_KWMMUL(long a, long b)
5648 {
5649  long result;
5650  __ASM volatile("kwmmul %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5651  return result;
5652 }
5653 /* ===== Inline Function End for 3.70.1. KWMMUL ===== */
5654 
5655 /* ===== Inline Function Start for 3.70.2. KWMMUL.u ===== */
/* __RV_KWMMUL_U: emit 'kwmmul.u' — rounding (".u") variant of KWMMUL, the saturating
 * doubled MSW multiply. Signature line restored: it was dropped from this listing;
 * this prototype matches NMSIS core_feature_dsp.h. */
5703 __STATIC_FORCEINLINE long __RV_KWMMUL_U(long a, long b)
5704 {
5705  long result;
5706  __ASM volatile("kwmmul.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5707  return result;
5708 }
5709 /* ===== Inline Function End for 3.70.2. KWMMUL.u ===== */
5710 
5711 /* ===== Inline Function Start for 3.71. MADDR32 ===== */
/* __RV_MADDR32: emit 'maddr32' — multiply a*b and add into accumulator t in place ("+r" marks t read-modify-write). */
5749 __STATIC_FORCEINLINE unsigned long __RV_MADDR32(unsigned long t, unsigned long a, unsigned long b)
5750 {
5751  __ASM volatile("maddr32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
5752  return t;
5753 }
5754 /* ===== Inline Function End for 3.71. MADDR32 ===== */
5755 
5756 /* ===== Inline Function Start for 3.72. MAXW ===== */
/* __RV_MAXW: emit 'maxw' — signed 32-bit maximum of a and b. Signature line restored:
 * it was dropped from this listing; this prototype matches NMSIS core_feature_dsp.h. */
5788 __STATIC_FORCEINLINE long __RV_MAXW(int a, int b)
5789 {
5790  long result;
5791  __ASM volatile("maxw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5792  return result;
5793 }
5794 /* ===== Inline Function End for 3.72. MAXW ===== */
5795 
5796 /* ===== Inline Function Start for 3.73. MINW ===== */
/* __RV_MINW: emit 'minw' — signed 32-bit minimum of a and b. Signature line restored:
 * it was dropped from this listing; this prototype matches NMSIS core_feature_dsp.h. */
5824 __STATIC_FORCEINLINE long __RV_MINW(int a, int b)
5825 {
5826  long result;
5827  __ASM volatile("minw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5828  return result;
5829 }
5830 /* ===== Inline Function End for 3.73. MINW ===== */
5831 
5832 /* ===== Inline Function Start for 3.74. MSUBR32 ===== */
/* __RV_MSUBR32: emit 'msubr32' — multiply a*b and subtract from accumulator t in place ("+r" marks t read-modify-write). */
5871 __STATIC_FORCEINLINE unsigned long __RV_MSUBR32(unsigned long t, unsigned long a, unsigned long b)
5872 {
5873  __ASM volatile("msubr32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
5874  return t;
5875 }
5876 /* ===== Inline Function End for 3.74. MSUBR32 ===== */
5877 
5878 /* ===== Inline Function Start for 3.75. MULR64 ===== */
/* __RV_MULR64: emit 'mulr64' — unsigned 32x32 multiply producing the full 64-bit product. */
5922 __STATIC_FORCEINLINE unsigned long long __RV_MULR64(unsigned long a, unsigned long b)
5923 {
5924  unsigned long long result;
5925  __ASM volatile("mulr64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5926  return result;
5927 }
5928 /* ===== Inline Function End for 3.75. MULR64 ===== */
5929 
5930 /* ===== Inline Function Start for 3.76. MULSR64 ===== */
/* __RV_MULSR64: emit 'mulsr64' — signed 32x32 multiply producing the full 64-bit product. */
5974 __STATIC_FORCEINLINE long long __RV_MULSR64(long a, long b)
5975 {
5976  long long result;
5977  __ASM volatile("mulsr64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
5978  return result;
5979 }
5980 /* ===== Inline Function End for 3.76. MULSR64 ===== */
5981 
5982 /* ===== Inline Function Start for 3.77. PBSAD ===== */
/* __RV_PBSAD: emit 'pbsad' — parallel byte sum of absolute differences between the lanes of a and b. */
6013 __STATIC_FORCEINLINE unsigned long __RV_PBSAD(unsigned long a, unsigned long b)
6014 {
6015  unsigned long result;
6016  __ASM volatile("pbsad %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6017  return result;
6018 }
6019 /* ===== Inline Function End for 3.77. PBSAD ===== */
6020 
6021 /* ===== Inline Function Start for 3.78. PBSADA ===== */
/* __RV_PBSADA: emit 'pbsada' — PBSAD accumulated into t, which is updated in place ("+r"). */
6055 __STATIC_FORCEINLINE unsigned long __RV_PBSADA(unsigned long t, unsigned long a, unsigned long b)
6056 {
6057  __ASM volatile("pbsada %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
6058  return t;
6059 }
6060 /* ===== Inline Function End for 3.78. PBSADA ===== */
6061 
6062 /* ===== Inline Function Start for 3.79.1. PKBB16 ===== */
/* __RV_PKBB16: emit 'pkbb16' — pack the bottom 16-bit halves of a and b into one word. */
6105 __STATIC_FORCEINLINE unsigned long __RV_PKBB16(unsigned long a, unsigned long b)
6106 {
6107  unsigned long result;
6108  __ASM volatile("pkbb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6109  return result;
6110 }
6111 /* ===== Inline Function End for 3.79.1. PKBB16 ===== */
6112 
6113 /* ===== Inline Function Start for 3.79.2. PKBT16 ===== */
/* __RV_PKBT16: emit 'pkbt16' — pack bottom half of a with top half of b. */
6156 __STATIC_FORCEINLINE unsigned long __RV_PKBT16(unsigned long a, unsigned long b)
6157 {
6158  unsigned long result;
6159  __ASM volatile("pkbt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6160  return result;
6161 }
6162 /* ===== Inline Function End for 3.79.2. PKBT16 ===== */
6163 
6164 /* ===== Inline Function Start for 3.79.3. PKTT16 ===== */
/* __RV_PKTT16: emit 'pktt16' — pack the top 16-bit halves of a and b into one word. */
6207 __STATIC_FORCEINLINE unsigned long __RV_PKTT16(unsigned long a, unsigned long b)
6208 {
6209  unsigned long result;
6210  __ASM volatile("pktt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6211  return result;
6212 }
6213 /* ===== Inline Function End for 3.79.3. PKTT16 ===== */
6214 
6215 /* ===== Inline Function Start for 3.79.4. PKTB16 ===== */
/* __RV_PKTB16: emit 'pktb16' — pack top half of a with bottom half of b. */
6258 __STATIC_FORCEINLINE unsigned long __RV_PKTB16(unsigned long a, unsigned long b)
6259 {
6260  unsigned long result;
6261  __ASM volatile("pktb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6262  return result;
6263 }
6264 /* ===== Inline Function End for 3.79.4. PKTB16 ===== */
6265 
6266 /* ===== Inline Function Start for 3.80. RADD8 ===== */
/* __RV_RADD8: emit 'radd8' — signed halving add per 8-bit lane: (a+b)>>1, no overflow possible. */
6303 __STATIC_FORCEINLINE unsigned long __RV_RADD8(unsigned long a, unsigned long b)
6304 {
6305  unsigned long result;
6306  __ASM volatile("radd8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6307  return result;
6308 }
6309 /* ===== Inline Function End for 3.80. RADD8 ===== */
6310 
6311 /* ===== Inline Function Start for 3.81. RADD16 ===== */
/* __RV_RADD16: emit 'radd16' — signed halving add per 16-bit lane: (a+b)>>1. */
6348 __STATIC_FORCEINLINE unsigned long __RV_RADD16(unsigned long a, unsigned long b)
6349 {
6350  unsigned long result;
6351  __ASM volatile("radd16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6352  return result;
6353 }
6354 /* ===== Inline Function End for 3.81. RADD16 ===== */
6355 
6356 /* ===== Inline Function Start for 3.82. RADD64 ===== */
/* __RV_RADD64: emit 'radd64' — 64-bit signed halving add: (a+b)>>1. */
6401 __STATIC_FORCEINLINE long long __RV_RADD64(long long a, long long b)
6402 {
6403  long long result;
6404  __ASM volatile("radd64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6405  return result;
6406 }
6407 /* ===== Inline Function End for 3.82. RADD64 ===== */
6408 
6409 /* ===== Inline Function Start for 3.83. RADDW ===== */
/* __RV_RADDW: emit 'raddw' — 32-bit signed halving add: (a+b)>>1. Signature line
 * restored: it was dropped from this listing; this prototype matches NMSIS
 * core_feature_dsp.h. */
6449 __STATIC_FORCEINLINE long __RV_RADDW(int a, int b)
6450 {
6451  long result;
6452  __ASM volatile("raddw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6453  return result;
6454 }
6455 /* ===== Inline Function End for 3.83. RADDW ===== */
6456 
6457 /* ===== Inline Function Start for 3.84. RCRAS16 ===== */
/* __RV_RCRAS16: emit 'rcras16' — halving 16-bit cross add/subtract of the packed halfwords of a and b. */
6498 __STATIC_FORCEINLINE unsigned long __RV_RCRAS16(unsigned long a, unsigned long b)
6499 {
6500  unsigned long result;
6501  __ASM volatile("rcras16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6502  return result;
6503 }
6504 /* ===== Inline Function End for 3.84. RCRAS16 ===== */
6505 
6506 /* ===== Inline Function Start for 3.85. RCRSA16 ===== */
/* __RV_RCRSA16: emit 'rcrsa16' — halving 16-bit cross subtract/add of the packed halfwords of a and b. */
6547 __STATIC_FORCEINLINE unsigned long __RV_RCRSA16(unsigned long a, unsigned long b)
6548 {
6549  unsigned long result;
6550  __ASM volatile("rcrsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6551  return result;
6552 }
6553 /* ===== Inline Function End for 3.85. RCRSA16 ===== */
6554 
6555 /* ===== Inline Function Start for 3.86. RDOV ===== */
/* __RV_RDOV: emit 'rdov' — read the OV (overflow/saturation) flag; takes no operands. */
6574 __STATIC_FORCEINLINE unsigned long __RV_RDOV(void)
6575 {
6576  unsigned long result;
6577  __ASM volatile("rdov %0" : "=r"(result));
6578  return result;
6579 }
6580 /* ===== Inline Function End for 3.86. RDOV ===== */
6581 
6582 /* ===== Inline Function Start for 3.87. RSTAS16 ===== */
/* __RV_RSTAS16: emit 'rstas16' — halving 16-bit straight add/subtract of the packed halfwords of a and b. */
6623 __STATIC_FORCEINLINE unsigned long __RV_RSTAS16(unsigned long a, unsigned long b)
6624 {
6625  unsigned long result;
6626  __ASM volatile("rstas16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6627  return result;
6628 }
6629 /* ===== Inline Function End for 3.87. RSTAS16 ===== */
6630 
6631 /* ===== Inline Function Start for 3.88. RSTSA16 ===== */
/* __RV_RSTSA16: emit 'rstsa16' — halving 16-bit straight subtract/add of the packed halfwords of a and b. */
6672 __STATIC_FORCEINLINE unsigned long __RV_RSTSA16(unsigned long a, unsigned long b)
6673 {
6674  unsigned long result;
6675  __ASM volatile("rstsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6676  return result;
6677 }
6678 /* ===== Inline Function End for 3.88. RSTSA16 ===== */
6679 
6680 /* ===== Inline Function Start for 3.89. RSUB8 ===== */
/* __RV_RSUB8: emit 'rsub8' — signed halving subtract per 8-bit lane: (a-b)>>1. */
6719 __STATIC_FORCEINLINE unsigned long __RV_RSUB8(unsigned long a, unsigned long b)
6720 {
6721  unsigned long result;
6722  __ASM volatile("rsub8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6723  return result;
6724 }
6725 /* ===== Inline Function End for 3.89. RSUB8 ===== */
6726 
6727 /* ===== Inline Function Start for 3.90. RSUB16 ===== */
/* __RV_RSUB16: emit 'rsub16' — signed halving subtract per 16-bit lane: (a-b)>>1. */
6766 __STATIC_FORCEINLINE unsigned long __RV_RSUB16(unsigned long a, unsigned long b)
6767 {
6768  unsigned long result;
6769  __ASM volatile("rsub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6770  return result;
6771 }
6772 /* ===== Inline Function End for 3.90. RSUB16 ===== */
6773 
6774 /* ===== Inline Function Start for 3.91. RSUB64 ===== */
/* __RV_RSUB64: emit 'rsub64' — 64-bit signed halving subtract: (a-b)>>1. */
6820 __STATIC_FORCEINLINE long long __RV_RSUB64(long long a, long long b)
6821 {
6822  long long result;
6823  __ASM volatile("rsub64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6824  return result;
6825 }
6826 /* ===== Inline Function End for 3.91. RSUB64 ===== */
6827 
6828 /* ===== Inline Function Start for 3.92. RSUBW ===== */
/* __RV_RSUBW: emit 'rsubw' — 32-bit signed halving subtract: (a-b)>>1. Signature line
 * restored: it was dropped from this listing; this prototype matches NMSIS
 * core_feature_dsp.h. */
6868 __STATIC_FORCEINLINE long __RV_RSUBW(int a, int b)
6869 {
6870  long result;
6871  __ASM volatile("rsubw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
6872  return result;
6873 }
6874 /* ===== Inline Function End for 3.92. RSUBW ===== */
6875 
6876 /* ===== Inline Function Start for 3.93. SCLIP8 ===== */
/* __RV_SCLIP8: macro (not a function) because 'sclip8' takes an immediate shift amount —
 * the "K" constraint requires b to be a compile-time constant. Clips each signed 8-bit
 * lane of a to the signed range selected by b (per P-extension spec: [-2^b, 2^b-1]). */
6916 #define __RV_SCLIP8(a, b) \
6917  ({ \
6918  unsigned long result; \
6919  unsigned long __a = (unsigned long)(a); \
6920  __ASM volatile("sclip8 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
6921  result; \
6922  })
6923 /* ===== Inline Function End for 3.93. SCLIP8 ===== */
6924 
6925 /* ===== Inline Function Start for 3.94. SCLIP16 ===== */
/* __RV_SCLIP16: macro because 'sclip16' takes an immediate ("K" = compile-time constant b).
 * Clips each signed 16-bit lane of a to the signed range selected by b. */
6965 #define __RV_SCLIP16(a, b) \
6966  ({ \
6967  unsigned long result; \
6968  unsigned long __a = (unsigned long)(a); \
6969  __ASM volatile("sclip16 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
6970  result; \
6971  })
6972 /* ===== Inline Function End for 3.94. SCLIP16 ===== */
6973 
6974 /* ===== Inline Function Start for 3.95. SCLIP32 ===== */
/* __RV_SCLIP32: macro because 'sclip32' takes an immediate ("K" = compile-time constant b).
 * Clips the signed word a to the signed range selected by b. */
7014 #define __RV_SCLIP32(a, b) \
7015  ({ \
7016  long result; \
7017  long __a = (long)(a); \
7018  __ASM volatile("sclip32 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
7019  result; \
7020  })
7021 /* ===== Inline Function End for 3.95. SCLIP32 ===== */
7022 
7023 /* ===== Inline Function Start for 3.96. SCMPLE8 ===== */
/* __RV_SCMPLE8: emit 'scmple8' — per-lane signed compare a<=b on 8-bit lanes; each result lane is all-ones or zero. */
7055 __STATIC_FORCEINLINE unsigned long __RV_SCMPLE8(unsigned long a, unsigned long b)
7056 {
7057  unsigned long result;
7058  __ASM volatile("scmple8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
7059  return result;
7060 }
7061 /* ===== Inline Function End for 3.96. SCMPLE8 ===== */
7062 
7063 /* ===== Inline Function Start for 3.97. SCMPLE16 ===== */
/* __RV_SCMPLE16: emit 'scmple16' — per-lane signed compare a<=b on 16-bit lanes. */
7095 __STATIC_FORCEINLINE unsigned long __RV_SCMPLE16(unsigned long a, unsigned long b)
7096 {
7097  unsigned long result;
7098  __ASM volatile("scmple16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
7099  return result;
7100 }
7101 /* ===== Inline Function End for 3.97. SCMPLE16 ===== */
7102 
7103 /* ===== Inline Function Start for 3.98. SCMPLT8 ===== */
/* __RV_SCMPLT8: emit 'scmplt8' — per-lane signed compare a<b on 8-bit lanes. */
7134 __STATIC_FORCEINLINE unsigned long __RV_SCMPLT8(unsigned long a, unsigned long b)
7135 {
7136  unsigned long result;
7137  __ASM volatile("scmplt8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
7138  return result;
7139 }
7140 /* ===== Inline Function End for 3.98. SCMPLT8 ===== */
7141 
7142 /* ===== Inline Function Start for 3.99. SCMPLT16 ===== */
/* __RV_SCMPLT16: emit 'scmplt16' — per-lane signed compare a<b on 16-bit lanes. */
7173 __STATIC_FORCEINLINE unsigned long __RV_SCMPLT16(unsigned long a, unsigned long b)
7174 {
7175  unsigned long result;
7176  __ASM volatile("scmplt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
7177  return result;
7178 }
7179 /* ===== Inline Function End for 3.99. SCMPLT16 ===== */
7180 
7181 /* ===== Inline Function Start for 3.100. SLL8 ===== */
/* __RV_SLL8: emit 'sll8' — logical left shift of each 8-bit lane of a by b (register shift amount). */
7214 __STATIC_FORCEINLINE unsigned long __RV_SLL8(unsigned long a, unsigned int b)
7215 {
7216  unsigned long result;
7217  __ASM volatile("sll8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
7218  return result;
7219 }
7220 /* ===== Inline Function End for 3.100. SLL8 ===== */
7221 
7222 /* ===== Inline Function Start for 3.101. SLLI8 ===== */
/* __RV_SLLI8: immediate form of SLL8 — macro because "K" requires b to be a compile-time constant. */
7254 #define __RV_SLLI8(a, b) \
7255  ({ \
7256  unsigned long result; \
7257  unsigned long __a = (unsigned long)(a); \
7258  __ASM volatile("slli8 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
7259  result; \
7260  })
7261 /* ===== Inline Function End for 3.101. SLLI8 ===== */
7262 
7263 /* ===== Inline Function Start for 3.102. SLL16 ===== */
/* __RV_SLL16: emit 'sll16' — logical left shift of each 16-bit lane of a by b (register shift amount). */
7296 __STATIC_FORCEINLINE unsigned long __RV_SLL16(unsigned long a, unsigned int b)
7297 {
7298  unsigned long result;
7299  __ASM volatile("sll16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
7300  return result;
7301 }
7302 /* ===== Inline Function End for 3.102. SLL16 ===== */
7303 
7304 /* ===== Inline Function Start for 3.103. SLLI16 ===== */
/* __RV_SLLI16: immediate form of SLL16 — macro because "K" requires b to be a compile-time constant. */
7336 #define __RV_SLLI16(a, b) \
7337  ({ \
7338  unsigned long result; \
7339  unsigned long __a = (unsigned long)(a); \
7340  __ASM volatile("slli16 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
7341  result; \
7342  })
7343 /* ===== Inline Function End for 3.103. SLLI16 ===== */
7344 
7345 /* ===== Inline Function Start for 3.104. SMAL ===== */
/* __RV_SMAL: emit 'smal' — signed multiply of the halfwords in b, product added to 64-bit a. */
7397 __STATIC_FORCEINLINE long long __RV_SMAL(long long a, unsigned long b)
7398 {
7399  long long result;
7400  __ASM volatile("smal %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
7401  return result;
7402 }
7403 /* ===== Inline Function End for 3.104. SMAL ===== */
7404 
7405 /* ===== Inline Function Start for 3.105.1. SMALBB ===== */
/* __RV_SMALBB: emit 'smalbb' — signed multiply of the bottom halfwords of a and b, accumulated into 64-bit t ("+r" = read-modify-write). */
7480 __STATIC_FORCEINLINE long long __RV_SMALBB(long long t, unsigned long a, unsigned long b)
7481 {
7482  __ASM volatile("smalbb %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
7483  return t;
7484 }
7485 /* ===== Inline Function End for 3.105.1. SMALBB ===== */
7486 
7487 /* ===== Inline Function Start for 3.105.2. SMALBT ===== */
/* __RV_SMALBT: emit 'smalbt' — signed multiply bottom(a) x top(b), accumulated into 64-bit t. */
7562 __STATIC_FORCEINLINE long long __RV_SMALBT(long long t, unsigned long a, unsigned long b)
7563 {
7564  __ASM volatile("smalbt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
7565  return t;
7566 }
7567 /* ===== Inline Function End for 3.105.2. SMALBT ===== */
7568 
7569 /* ===== Inline Function Start for 3.105.3. SMALTT ===== */
/* __RV_SMALTT: emit 'smaltt' — signed multiply of the top halfwords of a and b, accumulated into 64-bit t. */
7644 __STATIC_FORCEINLINE long long __RV_SMALTT(long long t, unsigned long a, unsigned long b)
7645 {
7646  __ASM volatile("smaltt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
7647  return t;
7648 }
7649 /* ===== Inline Function End for 3.105.3. SMALTT ===== */
7650 
7651 /* ===== Inline Function Start for 3.106.1. SMALDA ===== */
/* __RV_SMALDA: emit 'smalda' — dual signed halfword multiplies of a and b, both products added into 64-bit t. */
7728 __STATIC_FORCEINLINE long long __RV_SMALDA(long long t, unsigned long a, unsigned long b)
7729 {
7730  __ASM volatile("smalda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
7731  return t;
7732 }
7733 /* ===== Inline Function End for 3.106.1. SMALDA ===== */
7734 
7735 /* ===== Inline Function Start for 3.106.2. SMALXDA ===== */
/* __RV_SMALXDA: emit 'smalxda' — crossed variant of SMALDA: dual crossed halfword multiplies added into 64-bit t. */
7812 __STATIC_FORCEINLINE long long __RV_SMALXDA(long long t, unsigned long a, unsigned long b)
7813 {
7814  __ASM volatile("smalxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
7815  return t;
7816 }
7817 /* ===== Inline Function End for 3.106.2. SMALXDA ===== */
7818 
7819 /* ===== Inline Function Start for 3.107.1. SMALDS ===== */
/* __RV_SMALDS: emit 'smalds' — dual signed halfword multiplies, difference of products added into 64-bit t. */
7903 __STATIC_FORCEINLINE long long __RV_SMALDS(long long t, unsigned long a, unsigned long b)
7904 {
7905  __ASM volatile("smalds %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
7906  return t;
7907 }
7908 /* ===== Inline Function End for 3.107.1. SMALDS ===== */
7909 
7910 /* ===== Inline Function Start for 3.107.2. SMALDRS ===== */
/* __RV_SMALDRS: emit 'smaldrs' — reversed-subtraction variant of SMALDS, accumulated into 64-bit t. */
7994 __STATIC_FORCEINLINE long long __RV_SMALDRS(long long t, unsigned long a, unsigned long b)
7995 {
7996  __ASM volatile("smaldrs %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
7997  return t;
7998 }
7999 /* ===== Inline Function End for 3.107.2. SMALDRS ===== */
8000 
8001 /* ===== Inline Function Start for 3.107.3. SMALXDS ===== */
/* __RV_SMALXDS: emit 'smalxds' — crossed variant of SMALDS, accumulated into 64-bit t. */
8085 __STATIC_FORCEINLINE long long __RV_SMALXDS(long long t, unsigned long a, unsigned long b)
8086 {
8087  __ASM volatile("smalxds %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
8088  return t;
8089 }
8090 /* ===== Inline Function End for 3.107.3. SMALXDS ===== */
8091 
8092 /* ===== Inline Function Start for 3.108. SMAR64 ===== */
/* __RV_SMAR64: emit 'smar64' — signed multiply a*b, product added into the 64-bit accumulator t. */
8137 __STATIC_FORCEINLINE long long __RV_SMAR64(long long t, long a, long b)
8138 {
8139  __ASM volatile("smar64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
8140  return t;
8141 }
8142 /* ===== Inline Function End for 3.108. SMAR64 ===== */
8143 
8144 /* ===== Inline Function Start for 3.109. SMAQA ===== */
/* __RV_SMAQA: emit 'smaqa' — four signed 8-bit multiplies of the byte lanes of a and b, all products added into t. */
8181 __STATIC_FORCEINLINE long __RV_SMAQA(long t, unsigned long a, unsigned long b)
8182 {
8183  __ASM volatile("smaqa %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
8184  return t;
8185 }
8186 /* ===== Inline Function End for 3.109. SMAQA ===== */
8187 
8188 /* ===== Inline Function Start for 3.110. SMAQA.SU ===== */
/* __RV_SMAQA_SU: emit 'smaqa.su' — SMAQA with signed(a) x unsigned(b) byte lanes, accumulated into t. */
8226 __STATIC_FORCEINLINE long __RV_SMAQA_SU(long t, unsigned long a, unsigned long b)
8227 {
8228  __ASM volatile("smaqa.su %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
8229  return t;
8230 }
8231 /* ===== Inline Function End for 3.110. SMAQA.SU ===== */
8232 
8233 /* ===== Inline Function Start for 3.111. SMAX8 ===== */
/* __RV_SMAX8: emit 'smax8' — per-lane signed maximum of the 8-bit lanes of a and b. */
8264 __STATIC_FORCEINLINE unsigned long __RV_SMAX8(unsigned long a, unsigned long b)
8265 {
8266  unsigned long result;
8267  __ASM volatile("smax8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8268  return result;
8269 }
8270 /* ===== Inline Function End for 3.111. SMAX8 ===== */
8271 
8272 /* ===== Inline Function Start for 3.112. SMAX16 ===== */
/* __RV_SMAX16: emit 'smax16' — per-lane signed maximum of the 16-bit lanes of a and b. */
8303 __STATIC_FORCEINLINE unsigned long __RV_SMAX16(unsigned long a, unsigned long b)
8304 {
8305  unsigned long result;
8306  __ASM volatile("smax16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8307  return result;
8308 }
8309 /* ===== Inline Function End for 3.112. SMAX16 ===== */
8310 
8311 /* ===== Inline Function Start for 3.113.1. SMBB16 ===== */
/* __RV_SMBB16: emit 'smbb16' — signed multiply of the bottom halfwords of a and b. */
8355 __STATIC_FORCEINLINE long __RV_SMBB16(unsigned long a, unsigned long b)
8356 {
8357  long result;
8358  __ASM volatile("smbb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8359  return result;
8360 }
8361 /* ===== Inline Function End for 3.113.1. SMBB16 ===== */
8362 
8363 /* ===== Inline Function Start for 3.113.2. SMBT16 ===== */
/* __RV_SMBT16: emit 'smbt16' — signed multiply bottom(a) x top(b) halfwords. */
8407 __STATIC_FORCEINLINE long __RV_SMBT16(unsigned long a, unsigned long b)
8408 {
8409  long result;
8410  __ASM volatile("smbt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8411  return result;
8412 }
8413 /* ===== Inline Function End for 3.113.2. SMBT16 ===== */
8414 
8415 /* ===== Inline Function Start for 3.113.3. SMTT16 ===== */
/* __RV_SMTT16: emit 'smtt16' — signed multiply of the top halfwords of a and b. */
8459 __STATIC_FORCEINLINE long __RV_SMTT16(unsigned long a, unsigned long b)
8460 {
8461  long result;
8462  __ASM volatile("smtt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8463  return result;
8464 }
8465 /* ===== Inline Function End for 3.113.3. SMTT16 ===== */
8466 
8467 /* ===== Inline Function Start for 3.114.1. SMDS ===== */
/* __RV_SMDS: emit 'smds' — dual signed halfword multiplies, returning the difference of the products. */
8518 __STATIC_FORCEINLINE long __RV_SMDS(unsigned long a, unsigned long b)
8519 {
8520  long result;
8521  __ASM volatile("smds %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8522  return result;
8523 }
8524 /* ===== Inline Function End for 3.114.1. SMDS ===== */
8525 
8526 /* ===== Inline Function Start for 3.114.2. SMDRS ===== */
/* __RV_SMDRS: emit 'smdrs' — reversed-subtraction variant of SMDS. */
8577 __STATIC_FORCEINLINE long __RV_SMDRS(unsigned long a, unsigned long b)
8578 {
8579  long result;
8580  __ASM volatile("smdrs %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8581  return result;
8582 }
8583 /* ===== Inline Function End for 3.114.2. SMDRS ===== */
8584 
8585 /* ===== Inline Function Start for 3.114.3. SMXDS ===== */
/* __RV_SMXDS: emit 'smxds' — crossed variant of SMDS: crossed halfword multiplies, difference of products. */
8636 __STATIC_FORCEINLINE long __RV_SMXDS(unsigned long a, unsigned long b)
8637 {
8638  long result;
8639  __ASM volatile("smxds %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8640  return result;
8641 }
8642 /* ===== Inline Function End for 3.114.3. SMXDS ===== */
8643 
8644 /* ===== Inline Function Start for 3.115. SMIN8 ===== */
/* __RV_SMIN8: emit 'smin8' — per-lane signed minimum of the 8-bit lanes of a and b. */
8675 __STATIC_FORCEINLINE unsigned long __RV_SMIN8(unsigned long a, unsigned long b)
8676 {
8677  unsigned long result;
8678  __ASM volatile("smin8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8679  return result;
8680 }
8681 /* ===== Inline Function End for 3.115. SMIN8 ===== */
8682 
8683 /* ===== Inline Function Start for 3.116. SMIN16 ===== */
/* __RV_SMIN16: emit 'smin16' — per-lane signed minimum of the 16-bit lanes of a and b. */
8714 __STATIC_FORCEINLINE unsigned long __RV_SMIN16(unsigned long a, unsigned long b)
8715 {
8716  unsigned long result;
8717  __ASM volatile("smin16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8718  return result;
8719 }
8720 /* ===== Inline Function End for 3.116. SMIN16 ===== */
8721 
8722 /* ===== Inline Function Start for 3.117.1. SMMUL ===== */
/* __RV_SMMUL: emit 'smmul' — signed multiply returning the most significant word of the product. */
8765 __STATIC_FORCEINLINE long __RV_SMMUL(long a, long b)
8766 {
8767  long result;
8768  __ASM volatile("smmul %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8769  return result;
8770 }
8771 /* ===== Inline Function End for 3.117.1. SMMUL ===== */
8772 
8773 /* ===== Inline Function Start for 3.117.2. SMMUL.u ===== */
/* __RV_SMMUL_U: emit 'smmul.u' — rounding (".u") variant of SMMUL, the MSW signed
 * multiply. Signature line restored: it was dropped from this listing; this prototype
 * matches NMSIS core_feature_dsp.h. */
8816 __STATIC_FORCEINLINE long __RV_SMMUL_U(long a, long b)
8817 {
8818  long result;
8819  __ASM volatile("smmul.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8820  return result;
8821 }
8822 /* ===== Inline Function End for 3.117.2. SMMUL.u ===== */
8823 
8824 /* ===== Inline Function Start for 3.118.1. SMMWB ===== */
/* __RV_SMMWB: emit 'smmwb' — MSW of signed word(a) x bottom-halfword(b) multiply. */
8866 __STATIC_FORCEINLINE long __RV_SMMWB(long a, unsigned long b)
8867 {
8868  long result;
8869  __ASM volatile("smmwb %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8870  return result;
8871 }
8872 /* ===== Inline Function End for 3.118.1. SMMWB ===== */
8873 
8874 /* ===== Inline Function Start for 3.118.2. SMMWB.u ===== */
/* __RV_SMMWB_U: emit 'smmwb.u' — rounding (".u") variant of SMMWB. */
8916 __STATIC_FORCEINLINE long __RV_SMMWB_U(long a, unsigned long b)
8917 {
8918  long result;
8919  __ASM volatile("smmwb.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8920  return result;
8921 }
8922 /* ===== Inline Function End for 3.118.2. SMMWB.u ===== */
8923 
8924 /* ===== Inline Function Start for 3.119.1. SMMWT ===== */
/* __RV_SMMWT: emit 'smmwt' — MSW of signed word(a) x top-halfword(b) multiply. */
8966 __STATIC_FORCEINLINE long __RV_SMMWT(long a, unsigned long b)
8967 {
8968  long result;
8969  __ASM volatile("smmwt %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
8970  return result;
8971 }
8972 /* ===== Inline Function End for 3.119.1. SMMWT ===== */
8973 
8974 /* ===== Inline Function Start for 3.119.2. SMMWT.u ===== */
/* __RV_SMMWT_U: emit 'smmwt.u' — rounding (".u") variant of SMMWT. */
9016 __STATIC_FORCEINLINE long __RV_SMMWT_U(long a, unsigned long b)
9017 {
9018  long result;
9019  __ASM volatile("smmwt.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9020  return result;
9021 }
9022 /* ===== Inline Function End for 3.119.2. SMMWT.u ===== */
9023 
9024 /* ===== Inline Function Start for 3.120.1. SMSLDA ===== */
/* __RV_SMSLDA: emit 'smslda' — dual signed halfword multiplies, both products subtracted from 64-bit t ("+r"). */
9099 __STATIC_FORCEINLINE long long __RV_SMSLDA(long long t, unsigned long a, unsigned long b)
9100 {
9101  __ASM volatile("smslda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
9102  return t;
9103 }
9104 /* ===== Inline Function End for 3.120.1. SMSLDA ===== */
9105 
9106 /* ===== Inline Function Start for 3.120.2. SMSLXDA ===== */
/* __RV_SMSLXDA: emit 'smslxda' — crossed variant of SMSLDA, products subtracted from 64-bit t. */
9181 __STATIC_FORCEINLINE long long __RV_SMSLXDA(long long t, unsigned long a, unsigned long b)
9182 {
9183  __ASM volatile("smslxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
9184  return t;
9185 }
9186 /* ===== Inline Function End for 3.120.2. SMSLXDA ===== */
9187 
9188 /* ===== Inline Function Start for 3.121. SMSR64 ===== */
/* __RV_SMSR64: emit 'smsr64' — signed multiply a*b, product subtracted from the 64-bit accumulator t. */
9234 __STATIC_FORCEINLINE long long __RV_SMSR64(long long t, long a, long b)
9235 {
9236  __ASM volatile("smsr64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
9237  return t;
9238 }
9239 /* ===== Inline Function End for 3.121. SMSR64 ===== */
9240 
9241 /* ===== Inline Function Start for 3.122.1. SMUL8 ===== */
/* __RV_SMUL8: emit 'smul8' — widening signed multiply of the 8-bit lanes of 32-bit a and b; products packed into the 64-bit result. */
9316 __STATIC_FORCEINLINE unsigned long long __RV_SMUL8(unsigned int a, unsigned int b)
9317 {
9318  unsigned long long result;
9319  __ASM volatile("smul8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9320  return result;
9321 }
9322 /* ===== Inline Function End for 3.122.1. SMUL8 ===== */
9323 
9324 /* ===== Inline Function Start for 3.122.2. SMULX8 ===== */
/* __RV_SMULX8: emit 'smulx8' — crossed-lane variant of SMUL8. */
9399 __STATIC_FORCEINLINE unsigned long long __RV_SMULX8(unsigned int a, unsigned int b)
9400 {
9401  unsigned long long result;
9402  __ASM volatile("smulx8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9403  return result;
9404 }
9405 /* ===== Inline Function End for 3.122.2. SMULX8 ===== */
9406 
9407 /* ===== Inline Function Start for 3.123.1. SMUL16 ===== */
/* __RV_SMUL16: emit 'smul16' — widening signed multiply of the 16-bit lanes of 32-bit a and b; products packed into the 64-bit result. */
9484 __STATIC_FORCEINLINE unsigned long long __RV_SMUL16(unsigned int a, unsigned int b)
9485 {
9486  unsigned long long result;
9487  __ASM volatile("smul16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9488  return result;
9489 }
9490 /* ===== Inline Function End for 3.123.1. SMUL16 ===== */
9491 
9492 /* ===== Inline Function Start for 3.123.2. SMULX16 ===== */
/* __RV_SMULX16: emit 'smulx16' — crossed-lane variant of SMUL16. */
9569 __STATIC_FORCEINLINE unsigned long long __RV_SMULX16(unsigned int a, unsigned int b)
9570 {
9571  unsigned long long result;
9572  __ASM volatile("smulx16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9573  return result;
9574 }
9575 /* ===== Inline Function End for 3.123.2. SMULX16 ===== */
9576 
9577 /* ===== Inline Function Start for 3.124. SRA.u ===== */
/* __RV_SRA_U: emit 'sra.u' — rounding arithmetic right shift of a by b (register shift amount). */
9623 __STATIC_FORCEINLINE long __RV_SRA_U(long a, unsigned int b)
9624 {
9625  long result;
9626  __ASM volatile("sra.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9627  return result;
9628 }
9629 /* ===== Inline Function End for 3.124. SRA.u ===== */
9630 
9631 /* ===== Inline Function Start for 3.125. SRAI.u ===== */
/* __RV_SRAI_U: immediate form of SRA.u — macro because "K" requires b to be a compile-time constant. */
9678 #define __RV_SRAI_U(a, b) \
9679  ({ \
9680  long result; \
9681  long __a = (long)(a); \
9682  __ASM volatile("srai.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
9683  result; \
9684  })
9685 /* ===== Inline Function End for 3.125. SRAI.u ===== */
9686 
9687 /* ===== Inline Function Start for 3.126.1. SRA8 ===== */
/* __RV_SRA8: emit 'sra8' — arithmetic right shift of each 8-bit lane of a by b (register shift amount). */
9733 __STATIC_FORCEINLINE unsigned long __RV_SRA8(unsigned long a, unsigned int b)
9734 {
9735  unsigned long result;
9736  __ASM volatile("sra8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9737  return result;
9738 }
9739 /* ===== Inline Function End for 3.126.1. SRA8 ===== */
9740 
9741 /* ===== Inline Function Start for 3.126.2. SRA8.u ===== */
/* __RV_SRA8_U: emit 'sra8.u' — rounding (".u") variant of SRA8. */
9787 __STATIC_FORCEINLINE unsigned long __RV_SRA8_U(unsigned long a, unsigned int b)
9788 {
9789  unsigned long result;
9790  __ASM volatile("sra8.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9791  return result;
9792 }
9793 /* ===== Inline Function End for 3.126.2. SRA8.u ===== */
9794 
9795 /* ===== Inline Function Start for 3.127.1. SRAI8 ===== */
/* __RV_SRAI8: immediate form of SRA8 — macro because "K" requires b to be a compile-time constant. */
9840 #define __RV_SRAI8(a, b) \
9841  ({ \
9842  unsigned long result; \
9843  unsigned long __a = (unsigned long)(a); \
9844  __ASM volatile("srai8 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
9845  result; \
9846  })
9847 /* ===== Inline Function End for 3.127.1. SRAI8 ===== */
9848 
9849 /* ===== Inline Function Start for 3.127.2. SRAI8.u ===== */
/* __RV_SRAI8_U: rounding immediate form of SRA8 — macro because "K" requires b to be a compile-time constant. */
9894 #define __RV_SRAI8_U(a, b) \
9895  ({ \
9896  unsigned long result; \
9897  unsigned long __a = (unsigned long)(a); \
9898  __ASM volatile("srai8.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
9899  result; \
9900  })
9901 /* ===== Inline Function End for 3.127.2. SRAI8.u ===== */
9902 
9903 /* ===== Inline Function Start for 3.128.1. SRA16 ===== */
/* __RV_SRA16: emit 'sra16' — arithmetic right shift of each 16-bit lane of a by b (register shift amount). */
9949 __STATIC_FORCEINLINE unsigned long __RV_SRA16(unsigned long a, unsigned long b)
9950 {
9951  unsigned long result;
9952  __ASM volatile("sra16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
9953  return result;
9954 }
9955 /* ===== Inline Function End for 3.128.1. SRA16 ===== */
9956 
9957 /* ===== Inline Function Start for 3.128.2. SRA16.u ===== */
/* __RV_SRA16_U: emit 'sra16.u' — rounding (".u") variant of SRA16. */
10003 __STATIC_FORCEINLINE unsigned long __RV_SRA16_U(unsigned long a, unsigned long b)
10004 {
10005  unsigned long result;
10006  __ASM volatile("sra16.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10007  return result;
10008 }
10009 /* ===== Inline Function End for 3.128.2. SRA16.u ===== */
10010 
10011 /* ===== Inline Function Start for 3.129.1. SRAI16 ===== */
/* __RV_SRAI16: immediate form of SRA16 — macro because "K" requires b to be a compile-time constant. */
10057 #define __RV_SRAI16(a, b) \
10058  ({ \
10059  unsigned long result; \
10060  unsigned long __a = (unsigned long)(a); \
10061  __ASM volatile("srai16 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
10062  result; \
10063  })
10064 /* ===== Inline Function End for 3.129.1. SRAI16 ===== */
10065 
10066 /* ===== Inline Function Start for 3.129.2. SRAI16.u ===== */
/* __RV_SRAI16_U: rounding immediate form of SRA16 — macro because "K" requires b to be a compile-time constant. */
10112 #define __RV_SRAI16_U(a, b) \
10113  ({ \
10114  unsigned long result; \
10115  unsigned long __a = (unsigned long)(a); \
10116  __ASM volatile("srai16.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
10117  result; \
10118  })
10119 /* ===== Inline Function End for 3.129.2. SRAI16.u ===== */
10120 
10121 /* ===== Inline Function Start for 3.130.1. SRL8 ===== */
/* __RV_SRL8: emit 'srl8' — logical right shift of each 8-bit lane of a by b (register shift amount). */
10166 __STATIC_FORCEINLINE unsigned long __RV_SRL8(unsigned long a, unsigned int b)
10167 {
10168  unsigned long result;
10169  __ASM volatile("srl8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10170  return result;
10171 }
10172 /* ===== Inline Function End for 3.130.1. SRL8 ===== */
10173 
10174 /* ===== Inline Function Start for 3.130.2. SRL8.u ===== */
/* __RV_SRL8_U: emit 'srl8.u' — rounding (".u") variant of SRL8. */
10219 __STATIC_FORCEINLINE unsigned long __RV_SRL8_U(unsigned long a, unsigned int b)
10220 {
10221  unsigned long result;
10222  __ASM volatile("srl8.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10223  return result;
10224 }
10225 /* ===== Inline Function End for 3.130.2. SRL8.u ===== */
10226 
10227 /* ===== Inline Function Start for 3.131.1. SRLI8 ===== */
/* __RV_SRLI8: immediate form of SRL8 — macro because "K" requires b to be a compile-time constant. */
10271 #define __RV_SRLI8(a, b) \
10272  ({ \
10273  unsigned long result; \
10274  unsigned long __a = (unsigned long)(a); \
10275  __ASM volatile("srli8 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
10276  result; \
10277  })
10278 /* ===== Inline Function End for 3.131.1. SRLI8 ===== */
10279 
10280 /* ===== Inline Function Start for 3.131.2. SRLI8.u ===== */
/* __RV_SRLI8_U: rounding immediate form of SRL8 — macro because "K" requires b to be a compile-time constant. */
10324 #define __RV_SRLI8_U(a, b) \
10325  ({ \
10326  unsigned long result; \
10327  unsigned long __a = (unsigned long)(a); \
10328  __ASM volatile("srli8.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
10329  result; \
10330  })
10331 /* ===== Inline Function End for 3.131.2. SRLI8.u ===== */
10332 
10333 /* ===== Inline Function Start for 3.132.1. SRL16 ===== */
/* __RV_SRL16: emit 'srl16' — logical right shift of each 16-bit lane of a by b (register shift amount). */
10377 __STATIC_FORCEINLINE unsigned long __RV_SRL16(unsigned long a, unsigned int b)
10378 {
10379  unsigned long result;
10380  __ASM volatile("srl16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10381  return result;
10382 }
10383 /* ===== Inline Function End for 3.132.1. SRL16 ===== */
10384 
10385 /* ===== Inline Function Start for 3.132.2. SRL16.u ===== */
/* __RV_SRL16_U: emit 'srl16.u' — rounding (".u") variant of SRL16. */
10429 __STATIC_FORCEINLINE unsigned long __RV_SRL16_U(unsigned long a, unsigned int b)
10430 {
10431  unsigned long result;
10432  __ASM volatile("srl16.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10433  return result;
10434 }
10435 /* ===== Inline Function End for 3.132.2. SRL16.u ===== */
10436 
10437 /* ===== Inline Function Start for 3.133.1. SRLI16 ===== */
/* __RV_SRLI16: immediate form of SRL16 — macro because "K" requires b to be a compile-time constant. */
10481 #define __RV_SRLI16(a, b) \
10482  ({ \
10483  unsigned long result; \
10484  unsigned long __a = (unsigned long)(a); \
10485  __ASM volatile("srli16 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
10486  result; \
10487  })
10488 /* ===== Inline Function End for 3.133.1. SRLI16 ===== */
10489 
10490 /* ===== Inline Function Start for 3.133.2. SRLI16.u ===== */
/* __RV_SRLI16_U: rounding immediate form of SRL16 — macro because "K" requires b to be a compile-time constant. */
10534 #define __RV_SRLI16_U(a, b) \
10535  ({ \
10536  unsigned long result; \
10537  unsigned long __a = (unsigned long)(a); \
10538  __ASM volatile("srli16.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
10539  result; \
10540  })
10541 /* ===== Inline Function End for 3.133.2. SRLI16.u ===== */
10542 
10543 /* ===== Inline Function Start for 3.134. STAS16 ===== */
/* __RV_STAS16: emit 'stas16' — 16-bit straight add/subtract (no saturation) on the packed halfwords of a and b. */
10581 __STATIC_FORCEINLINE unsigned long __RV_STAS16(unsigned long a, unsigned long b)
10582 {
10583  unsigned long result;
10584  __ASM volatile("stas16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10585  return result;
10586 }
10587 /* ===== Inline Function End for 3.134. STAS16 ===== */
10588 
10589 /* ===== Inline Function Start for 3.135. STSA16 ===== */
/* __RV_STSA16: emit 'stsa16' — 16-bit straight subtract/add (no saturation) on the packed halfwords of a and b. */
10627 __STATIC_FORCEINLINE unsigned long __RV_STSA16(unsigned long a, unsigned long b)
10628 {
10629  unsigned long result;
10630  __ASM volatile("stsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10631  return result;
10632 }
10633 /* ===== Inline Function End for 3.135. STSA16 ===== */
10634 
10635 /* ===== Inline Function Start for 3.136. SUB8 ===== */
/* __RV_SUB8: emit 'sub8' — wrapping subtraction of each 8-bit lane of b from a. */
10668 __STATIC_FORCEINLINE unsigned long __RV_SUB8(unsigned long a, unsigned long b)
10669 {
10670  unsigned long result;
10671  __ASM volatile("sub8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10672  return result;
10673 }
10674 /* ===== Inline Function End for 3.136. SUB8 ===== */
10675 
10676 /* ===== Inline Function Start for 3.137. SUB16 ===== */
/* __RV_SUB16: emit 'sub16' — wrapping subtraction of each 16-bit lane of b from a. */
10709 __STATIC_FORCEINLINE unsigned long __RV_SUB16(unsigned long a, unsigned long b)
10710 {
10711  unsigned long result;
10712  __ASM volatile("sub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10713  return result;
10714 }
10715 /* ===== Inline Function End for 3.137. SUB16 ===== */
10716 
10717 /* ===== Inline Function Start for 3.138. SUB64 ===== */
10763 __STATIC_FORCEINLINE unsigned long long __RV_SUB64(unsigned long long a, unsigned long long b)
10764 {
10765  unsigned long long result;
10766  __ASM volatile("sub64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
10767  return result;
10768 }
10769 /* ===== Inline Function End for 3.138. SUB64 ===== */
10770 
/* ===== Inline Function Start for 3.139.1. SUNPKD810 ===== */
/* `sunpkd8xy` family: sign-extend ("S") and unpack two selected 8-bit lanes
 * of each 32-bit word into its two 16-bit halves; the digits name the source
 * byte indices (e.g. "10" -> bytes 1 and 0). See P-extension spec. */
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD810(unsigned long a)
{
    unsigned long result;
    __ASM volatile("sunpkd810 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.139.1. SUNPKD810 ===== */

/* ===== Inline Function Start for 3.139.2. SUNPKD820 ===== */
/* Signed unpack of bytes 2 and 0 (see SUNPKD810 note above for the family). */
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD820(unsigned long a)
{
    unsigned long result;
    __ASM volatile("sunpkd820 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.139.2. SUNPKD820 ===== */

/* ===== Inline Function Start for 3.139.3. SUNPKD830 ===== */
/* Signed unpack of bytes 3 and 0. */
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD830(unsigned long a)
{
    unsigned long result;
    __ASM volatile("sunpkd830 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.139.3. SUNPKD830 ===== */

/* ===== Inline Function Start for 3.139.4. SUNPKD831 ===== */
/* Signed unpack of bytes 3 and 1. */
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD831(unsigned long a)
{
    unsigned long result;
    __ASM volatile("sunpkd831 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.139.4. SUNPKD831 ===== */

/* ===== Inline Function Start for 3.139.5. SUNPKD832 ===== */
/* Signed unpack of bytes 3 and 2. */
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD832(unsigned long a)
{
    unsigned long result;
    __ASM volatile("sunpkd832 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.139.5. SUNPKD832 ===== */
11000 
/* ===== Inline Function Start for 3.140. SWAP8 ===== */
/* Emits `swap8`: swap the two bytes inside every 16-bit halfword of `a`. */
__STATIC_FORCEINLINE unsigned long __RV_SWAP8(unsigned long a)
{
    unsigned long result;
    __ASM volatile("swap8 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.140. SWAP8 ===== */

/* ===== Inline Function Start for 3.141. SWAP16 ===== */
/* Emits `swap16`: swap the two 16-bit halves inside every 32-bit word of `a`. */
__STATIC_FORCEINLINE unsigned long __RV_SWAP16(unsigned long a)
{
    unsigned long result;
    __ASM volatile("swap16 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.141. SWAP16 ===== */
11074 
/* ===== Inline Function Start for 3.142. UCLIP8 ===== */
/* Emits `uclip8`: clamp each 8-bit lane of (a) into the unsigned range
 * [0, 2^b - 1]. Macro form because the clip width (b) must be a
 * compile-time constant ("K" constraint). */
#define __RV_UCLIP8(a, b) \
    ({ \
        unsigned long result; \
        unsigned long __a = (unsigned long)(a); \
        __ASM volatile("uclip8 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.142. UCLIP8 ===== */

/* ===== Inline Function Start for 3.143. UCLIP16 ===== */
/* Emits `uclip16`: as UCLIP8 but clamps each 16-bit lane to [0, 2^b - 1]. */
#define __RV_UCLIP16(a, b) \
    ({ \
        unsigned long result; \
        unsigned long __a = (unsigned long)(a); \
        __ASM volatile("uclip16 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.143. UCLIP16 ===== */

/* ===== Inline Function Start for 3.144. UCLIP32 ===== */
/* Emits `uclip32`: as UCLIP8 but clamps each 32-bit lane to [0, 2^b - 1]. */
#define __RV_UCLIP32(a, b) \
    ({ \
        unsigned long result; \
        unsigned long __a = (unsigned long)(a); \
        __ASM volatile("uclip32 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.144. UCLIP32 ===== */
11221 
/* ===== Inline Function Start for 3.145. UCMPLE8 ===== */
/* Emits `ucmple8`: unsigned lane-wise compare of 8-bit elements; each result
 * lane is all-ones when a <= b, else zero (per P-extension compare spec). */
__STATIC_FORCEINLINE unsigned long __RV_UCMPLE8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ucmple8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.145. UCMPLE8 ===== */

/* ===== Inline Function Start for 3.146. UCMPLE16 ===== */
/* Emits `ucmple16`: as UCMPLE8 on 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_UCMPLE16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ucmple16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.146. UCMPLE16 ===== */

/* ===== Inline Function Start for 3.147. UCMPLT8 ===== */
/* Emits `ucmplt8`: unsigned lane-wise "less than" on 8-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_UCMPLT8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ucmplt8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.147. UCMPLT8 ===== */

/* ===== Inline Function Start for 3.148. UCMPLT16 ===== */
/* Emits `ucmplt16`: unsigned lane-wise "less than" on 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_UCMPLT16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ucmplt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.148. UCMPLT16 ===== */
11379 
/* ===== Inline Function Start for 3.149. UKADD8 ===== */
/* "UK" prefix = Unsigned saturating ("K" = saturation in the DSP spec).
 * Emits `ukadd8`: 8-bit lane-wise add, results clamped at the unsigned max. */
__STATIC_FORCEINLINE unsigned long __RV_UKADD8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukadd8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.149. UKADD8 ===== */

/* ===== Inline Function Start for 3.150. UKADD16 ===== */
/* Emits `ukadd16`: unsigned saturating add of 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_UKADD16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukadd16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.150. UKADD16 ===== */

/* ===== Inline Function Start for 3.151. UKADD64 ===== */
/* Emits `ukadd64`: unsigned saturating 64-bit add (register pair on RV32). */
__STATIC_FORCEINLINE unsigned long long __RV_UKADD64(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("ukadd64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.151. UKADD64 ===== */

/* ===== Inline Function Start for 3.152. UKADDH ===== */
/* Emits `ukaddh`: 32-bit add whose result is saturated to the unsigned
 * 16-bit (halfword) range — note the 32-bit `unsigned int` operands. */
__STATIC_FORCEINLINE unsigned long __RV_UKADDH(unsigned int a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("ukaddh %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.152. UKADDH ===== */

/* ===== Inline Function Start for 3.153. UKADDW ===== */
/* Emits `ukaddw`: 32-bit add saturated to the unsigned 32-bit (word) range. */
__STATIC_FORCEINLINE unsigned long __RV_UKADDW(unsigned int a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("ukaddw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.153. UKADDW ===== */
11616 
/* ===== Inline Function Start for 3.154. UKCRAS16 ===== */
/* Emits `ukcras16`: unsigned saturating "cross" add/subtract on 16-bit lanes
 * (high half combined with the other operand's low half, and vice versa). */
__STATIC_FORCEINLINE unsigned long __RV_UKCRAS16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukcras16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.154. UKCRAS16 ===== */

/* ===== Inline Function Start for 3.155. UKCRSA16 ===== */
/* Emits `ukcrsa16`: mirror of UKCRAS16 (cross subtract/add), saturating. */
__STATIC_FORCEINLINE unsigned long __RV_UKCRSA16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukcrsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.155. UKCRSA16 ===== */

/* ===== Inline Function Start for 3.156. UKMAR64 ===== */
/* Emits `ukmar64`: unsigned multiply of a and b accumulated into the 64-bit
 * accumulator t with saturation; "+r"(t) makes t both input and output. */
__STATIC_FORCEINLINE unsigned long long __RV_UKMAR64(unsigned long long t, unsigned long a, unsigned long b)
{
    __ASM volatile("ukmar64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.156. UKMAR64 ===== */

/* ===== Inline Function Start for 3.157. UKMSR64 ===== */
/* Emits `ukmsr64`: as UKMAR64 but the product is subtracted from t
 * (saturating). */
__STATIC_FORCEINLINE unsigned long long __RV_UKMSR64(unsigned long long t, unsigned long a, unsigned long b)
{
    __ASM volatile("ukmsr64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.157. UKMSR64 ===== */

/* ===== Inline Function Start for 3.158. UKSTAS16 ===== */
/* Emits `ukstas16`: unsigned saturating "straight" add(high)/subtract(low)
 * on 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_UKSTAS16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukstas16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.158. UKSTAS16 ===== */

/* ===== Inline Function Start for 3.159. UKSTSA16 ===== */
/* Emits `ukstsa16`: mirror of UKSTAS16 (subtract high / add low), saturating. */
__STATIC_FORCEINLINE unsigned long __RV_UKSTSA16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukstsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.159. UKSTSA16 ===== */
11967 
/* ===== Inline Function Start for 3.160. UKSUB8 ===== */
/* Emits `uksub8`: unsigned saturating subtraction of 8-bit lanes
 * (results clamp at 0 instead of wrapping). */
__STATIC_FORCEINLINE unsigned long __RV_UKSUB8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("uksub8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.160. UKSUB8 ===== */

/* ===== Inline Function Start for 3.161. UKSUB16 ===== */
/* Emits `uksub16`: unsigned saturating subtraction of 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_UKSUB16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("uksub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.161. UKSUB16 ===== */

/* ===== Inline Function Start for 3.162. UKSUB64 ===== */
/* Emits `uksub64`: unsigned saturating 64-bit subtraction. */
__STATIC_FORCEINLINE unsigned long long __RV_UKSUB64(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("uksub64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.162. UKSUB64 ===== */

/* ===== Inline Function Start for 3.163. UKSUBH ===== */
/* Emits `uksubh`: 32-bit subtraction saturated to the unsigned halfword
 * (16-bit) range. */
__STATIC_FORCEINLINE unsigned long __RV_UKSUBH(unsigned int a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("uksubh %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.163. UKSUBH ===== */

/* ===== Inline Function Start for 3.164. UKSUBW ===== */
/* Emits `uksubw`: 32-bit subtraction saturated to the unsigned word range. */
__STATIC_FORCEINLINE unsigned long __RV_UKSUBW(unsigned int a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("uksubw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.164. UKSUBW ===== */
12210 
/* ===== Inline Function Start for 3.165. UMAR64 ===== */
/* Emits `umar64`: unsigned multiply-add into the 64-bit accumulator t,
 * wrap-around (non-saturating counterpart of UKMAR64). */
__STATIC_FORCEINLINE unsigned long long __RV_UMAR64(unsigned long long t, unsigned long a, unsigned long b)
{
    __ASM volatile("umar64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.165. UMAR64 ===== */

/* ===== Inline Function Start for 3.166. UMAQA ===== */
/* Emits `umaqa`: per 32-bit word, the four unsigned 8x8 byte products of a
 * and b are summed and accumulated into t. */
__STATIC_FORCEINLINE unsigned long __RV_UMAQA(unsigned long t, unsigned long a, unsigned long b)
{
    __ASM volatile("umaqa %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.166. UMAQA ===== */
12307 
/* ===== Inline Function Start for 3.167. UMAX8 ===== */
/* Emits `umax8`: lane-wise unsigned maximum of 8-bit elements. */
__STATIC_FORCEINLINE unsigned long __RV_UMAX8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("umax8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.167. UMAX8 ===== */

/* ===== Inline Function Start for 3.168. UMAX16 ===== */
/* Emits `umax16`: lane-wise unsigned maximum of 16-bit elements. */
__STATIC_FORCEINLINE unsigned long __RV_UMAX16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("umax16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.168. UMAX16 ===== */

/* ===== Inline Function Start for 3.169. UMIN8 ===== */
/* Emits `umin8`: lane-wise unsigned minimum of 8-bit elements. */
__STATIC_FORCEINLINE unsigned long __RV_UMIN8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("umin8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.169. UMIN8 ===== */

/* ===== Inline Function Start for 3.170. UMIN16 ===== */
/* Emits `umin16`: lane-wise unsigned minimum of 16-bit elements. */
__STATIC_FORCEINLINE unsigned long __RV_UMIN16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("umin16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.170. UMIN16 ===== */
12463 
/* ===== Inline Function Start for 3.171. UMSR64 ===== */
/* Emits `umsr64`: unsigned multiply, product subtracted from the 64-bit
 * accumulator t, wrap-around (non-saturating counterpart of UKMSR64). */
__STATIC_FORCEINLINE unsigned long long __RV_UMSR64(unsigned long long t, unsigned long a, unsigned long b)
{
    __ASM volatile("umsr64 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 3.171. UMSR64 ===== */
12516 
/* ===== Inline Function Start for 3.172.1. UMUL8 ===== */
/* Emits `umul8`: four unsigned 8x8 multiplies of corresponding byte lanes,
 * widening to four 16-bit products packed into the 64-bit result. */
__STATIC_FORCEINLINE unsigned long long __RV_UMUL8(unsigned int a, unsigned int b)
{
    unsigned long long result;
    __ASM volatile("umul8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.172.1. UMUL8 ===== */

/* ===== Inline Function Start for 3.172.2. UMULX8 ===== */
/* Emits `umulx8`: as UMUL8 but with crossed byte pairings ("x" = crossed). */
__STATIC_FORCEINLINE unsigned long long __RV_UMULX8(unsigned int a, unsigned int b)
{
    unsigned long long result;
    __ASM volatile("umulx8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.172.2. UMULX8 ===== */

/* ===== Inline Function Start for 3.173.1. UMUL16 ===== */
/* Emits `umul16`: two unsigned 16x16 multiplies of corresponding halfword
 * lanes, widening to two 32-bit products in the 64-bit result. */
__STATIC_FORCEINLINE unsigned long long __RV_UMUL16(unsigned int a, unsigned int b)
{
    unsigned long long result;
    __ASM volatile("umul16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.173.1. UMUL16 ===== */

/* ===== Inline Function Start for 3.173.2. UMULX16 ===== */
/* Emits `umulx16`: as UMUL16 but with crossed halfword pairings. */
__STATIC_FORCEINLINE unsigned long long __RV_UMULX16(unsigned int a, unsigned int b)
{
    unsigned long long result;
    __ASM volatile("umulx16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.173.2. UMULX16 ===== */
12854 
/* ===== Inline Function Start for 3.174. URADD8 ===== */
/* "UR" prefix = Unsigned halving ("R" = rounded-down average in the DSP
 * spec): result lanes are (a + b) >> 1 without overflow.
 * Emits `uradd8` on 8-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URADD8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("uradd8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.174. URADD8 ===== */

/* ===== Inline Function Start for 3.175. URADD16 ===== */
/* Emits `uradd16`: unsigned halving add of 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URADD16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("uradd16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.175. URADD16 ===== */

/* ===== Inline Function Start for 3.176. URADD64 ===== */
/* Emits `uradd64`: unsigned halving add of full 64-bit values. */
__STATIC_FORCEINLINE unsigned long long __RV_URADD64(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("uradd64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.176. URADD64 ===== */

/* ===== Inline Function Start for 3.177. URADDW ===== */
/* Emits `uraddw`: unsigned halving add of two 32-bit words. */
__STATIC_FORCEINLINE unsigned long __RV_URADDW(unsigned int a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("uraddw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.177. URADDW ===== */
13048 
/* ===== Inline Function Start for 3.178. URCRAS16 ===== */
/* Emits `urcras16`: unsigned halving cross add/subtract on 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URCRAS16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urcras16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.178. URCRAS16 ===== */

/* ===== Inline Function Start for 3.179. URCRSA16 ===== */
/* Emits `urcrsa16`: unsigned halving cross subtract/add on 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URCRSA16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urcrsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.179. URCRSA16 ===== */

/* ===== Inline Function Start for 3.180. URSTAS16 ===== */
/* Emits `urstas16`: unsigned halving straight add/subtract on 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URSTAS16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urstas16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.180. URSTAS16 ===== */

/* ===== Inline Function Start for 3.181. URSTSA16 ===== */
/* Emits `urstsa16`: unsigned halving straight subtract/add on 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URSTSA16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urstsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.181. URSTSA16 ===== */
13244 
/* ===== Inline Function Start for 3.182. URSUB8 ===== */
/* Emits `ursub8`: unsigned halving subtraction ((a - b) >> 1) of 8-bit
 * lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URSUB8(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ursub8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.182. URSUB8 ===== */

/* ===== Inline Function Start for 3.183. URSUB16 ===== */
/* Emits `ursub16`: unsigned halving subtraction of 16-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_URSUB16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ursub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.183. URSUB16 ===== */

/* ===== Inline Function Start for 3.184. URSUB64 ===== */
/* Emits `ursub64`: unsigned halving subtraction of full 64-bit values. */
__STATIC_FORCEINLINE unsigned long long __RV_URSUB64(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("ursub64 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.184. URSUB64 ===== */

/* ===== Inline Function Start for 3.185. URSUBW ===== */
/* Emits `ursubw`: unsigned halving subtraction of two 32-bit words. */
__STATIC_FORCEINLINE unsigned long __RV_URSUBW(unsigned int a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("ursubw %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.185. URSUBW ===== */
13440 
/* ===== Inline Function Start for 3.186. WEXTI ===== */
/* Emits `wexti`: extract a 32-bit word from the 64-bit value (a) starting at
 * the immediate bit position (b). Macro because "K" requires a constant.
 * NOTE(review): __a is 64-bit under a single "r" constraint — on RV32 this
 * relies on the toolchain's register-pair convention for the P extension. */
#define __RV_WEXTI(a, b) \
    ({ \
        unsigned long result; \
        long long __a = (long long)(a); \
        __ASM volatile("wexti %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for 3.186. WEXTI ===== */

/* ===== Inline Function Start for 3.187. WEXT ===== */
/* Emits `wext`: as WEXTI but the extraction bit position comes from
 * register operand b. */
__STATIC_FORCEINLINE unsigned long __RV_WEXT(long long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("wext %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 3.187. WEXT ===== */
13544 
/* ===== Inline Function Start for 3.188.1. ZUNPKD810 ===== */
/* `zunpkd8xy` family: zero-extend ("Z", unsigned) counterpart of sunpkd8xy —
 * unpack the two named byte lanes of each 32-bit word into 16-bit halves. */
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD810(unsigned long a)
{
    unsigned long result;
    __ASM volatile("zunpkd810 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.188.1. ZUNPKD810 ===== */

/* ===== Inline Function Start for 3.188.2. ZUNPKD820 ===== */
/* Zero-extending unpack of bytes 2 and 0. */
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD820(unsigned long a)
{
    unsigned long result;
    __ASM volatile("zunpkd820 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.188.2. ZUNPKD820 ===== */

/* ===== Inline Function Start for 3.188.3. ZUNPKD830 ===== */
/* Zero-extending unpack of bytes 3 and 0. */
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD830(unsigned long a)
{
    unsigned long result;
    __ASM volatile("zunpkd830 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.188.3. ZUNPKD830 ===== */

/* ===== Inline Function Start for 3.188.4. ZUNPKD831 ===== */
/* Zero-extending unpack of bytes 3 and 1. */
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD831(unsigned long a)
{
    unsigned long result;
    __ASM volatile("zunpkd831 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.188.4. ZUNPKD831 ===== */

/* ===== Inline Function Start for 3.188.5. ZUNPKD832 ===== */
/* Zero-extending unpack of bytes 3 and 2. */
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD832(unsigned long a)
{
    unsigned long result;
    __ASM volatile("zunpkd832 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 3.188.5. ZUNPKD832 ===== */
13774 
13775 #if (__RISCV_XLEN == 64) || defined(__ONLY_FOR_DOXYGEN_DOCUMENT_GENERATION__)
13776 
/* ===== Inline Function Start for 4.1. ADD32 ===== */
/* RV64-only section (see the __RISCV_XLEN guard above): 32-bit-lane SIMD ops.
 * Emits `add32`: wrap-around add of the two 32-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_ADD32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("add32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.1. ADD32 ===== */

/* ===== Inline Function Start for 4.2. CRAS32 ===== */
/* Emits `cras32`: cross add/subtract on 32-bit lanes (high of a with low of
 * b, and vice versa), wrap-around. */
__STATIC_FORCEINLINE unsigned long __RV_CRAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("cras32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.2. CRAS32 ===== */

/* ===== Inline Function Start for 4.3. CRSA32 ===== */
/* Emits `crsa32`: mirror of CRAS32 (cross subtract/add), wrap-around. */
__STATIC_FORCEINLINE unsigned long __RV_CRSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("crsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.3. CRSA32 ===== */
13901 
/* ===== Inline Function Start for 4.4. KABS32 ===== */
/* "K" prefix = signed saturating. Emits `kabs32`: saturating absolute value
 * of each 32-bit lane (|INT32_MIN| clamps to INT32_MAX). */
__STATIC_FORCEINLINE unsigned long __RV_KABS32(unsigned long a)
{
    unsigned long result;
    __ASM volatile("kabs32 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for 4.4. KABS32 ===== */

/* ===== Inline Function Start for 4.5. KADD32 ===== */
/* Emits `kadd32`: signed saturating add of 32-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_KADD32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kadd32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.5. KADD32 ===== */

/* ===== Inline Function Start for 4.6. KCRAS32 ===== */
/* Emits `kcras32`: signed saturating cross add/subtract on 32-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_KCRAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kcras32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.6. KCRAS32 ===== */

/* ===== Inline Function Start for 4.7. KCRSA32 ===== */
/* Emits `kcrsa32`: signed saturating cross subtract/add on 32-bit lanes. */
__STATIC_FORCEINLINE unsigned long __RV_KCRSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kcrsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.7. KCRSA32 ===== */
14108 
/* ===== Inline Function Start for 4.8.1. KDMBB16 ===== */
/* `kdm{bb,bt,tt}16` family: signed saturating *doubled* Q15 multiply of
 * selected 16-bit halves per 32-bit lane (b=bottom, t=top half).
 * Emits `kdmbb16`: bottom x bottom. */
__STATIC_FORCEINLINE unsigned long __RV_KDMBB16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kdmbb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.8.1. KDMBB16 ===== */

/* ===== Inline Function Start for 4.8.2. KDMBT16 ===== */
/* Emits `kdmbt16`: bottom half of a times top half of b (see family note). */
__STATIC_FORCEINLINE unsigned long __RV_KDMBT16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kdmbt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.8.2. KDMBT16 ===== */

/* ===== Inline Function Start for 4.8.3. KDMTT16 ===== */
/* Emits `kdmtt16`: top x top halves (see family note). */
__STATIC_FORCEINLINE unsigned long __RV_KDMTT16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kdmtt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.8.3. KDMTT16 ===== */
14264 
/* ===== Inline Function Start for 4.9.1. KDMABB16 ===== */
/* `kdma{bb,bt,tt}16` family: as kdmXX16 (doubled Q15 multiply of selected
 * halves) but accumulated into t with saturation; "+r"(t) is in/out.
 * Emits `kdmabb16`: bottom x bottom, accumulate. */
__STATIC_FORCEINLINE unsigned long __RV_KDMABB16(unsigned long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kdmabb16 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.9.1. KDMABB16 ===== */

/* ===== Inline Function Start for 4.9.2. KDMABT16 ===== */
/* Emits `kdmabt16`: bottom x top, accumulate (see family note). */
__STATIC_FORCEINLINE unsigned long __RV_KDMABT16(unsigned long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kdmabt16 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.9.2. KDMABT16 ===== */

/* ===== Inline Function Start for 4.9.3. KDMATT16 ===== */
/* Emits `kdmatt16`: top x top, accumulate (see family note). */
__STATIC_FORCEINLINE unsigned long __RV_KDMATT16(unsigned long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kdmatt16 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.9.3. KDMATT16 ===== */
14459 
/* ===== Inline Function Start for 4.10.1. KHMBB16 ===== */
/* `khm{bb,bt,tt}16` family: signed saturating Q15 "half" multiply (product
 * shifted right by 15) of selected 16-bit halves per 32-bit lane.
 * Emits `khmbb16`: bottom x bottom. */
__STATIC_FORCEINLINE unsigned long __RV_KHMBB16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("khmbb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.10.1. KHMBB16 ===== */

/* ===== Inline Function Start for 4.10.2. KHMBT16 ===== */
/* Emits `khmbt16`: bottom x top (see family note). */
__STATIC_FORCEINLINE unsigned long __RV_KHMBT16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("khmbt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.10.2. KHMBT16 ===== */

/* ===== Inline Function Start for 4.10.3. KHMTT16 ===== */
/* Emits `khmtt16`: top x top (see family note). */
__STATIC_FORCEINLINE unsigned long __RV_KHMTT16(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("khmtt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.10.3. KHMTT16 ===== */
14618 
/* ===== Inline Function Start for 4.11.1. KMABB32 ===== */
/* `kma{bb,bt,tt}32` family (RV64): signed saturating multiply-accumulate of
 * selected 32-bit halves of a and b into the 64-bit accumulator t.
 * Emits `kmabb32`: bottom x bottom. */
__STATIC_FORCEINLINE long __RV_KMABB32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmabb32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.11.1. KMABB32 ===== */

/* ===== Inline Function Start for 4.11.2. KMABT32 ===== */
/* Emits `kmabt32`: bottom x top, accumulate (see family note). */
__STATIC_FORCEINLINE long __RV_KMABT32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmabt32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.11.2. KMABT32 ===== */

/* ===== Inline Function Start for 4.11.3. KMATT32 ===== */
/* Emits `kmatt32`: top x top, accumulate (see family note). */
__STATIC_FORCEINLINE long __RV_KMATT32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmatt32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.11.3. KMATT32 ===== */
14804 
/* ===== Inline Function Start for 4.12.1. KMADA32 ===== */
/* Emits `kmada32`: both 32x32 lane products of a and b are summed and added
 * to accumulator t with saturation. */
__STATIC_FORCEINLINE long __RV_KMADA32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmada32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.12.1. KMADA32 ===== */

/* ===== Inline Function Start for 4.12.2. KMAXDA32 ===== */
/* Emits `kmaxda32`: as KMADA32 but with crossed lane pairings ("x"). */
__STATIC_FORCEINLINE long __RV_KMAXDA32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmaxda32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.12.2. KMAXDA32 ===== */
14916 
/* ===== Inline Function Start for 4.13.1. KMDA32 ===== */
/* Emits `kmda32`: saturating sum of the two 32x32 lane products (no
 * accumulator) — signed result, hence the `long` return. */
__STATIC_FORCEINLINE long __RV_KMDA32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("kmda32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.13.1. KMDA32 ===== */

/* ===== Inline Function Start for 4.13.2. KMXDA32 ===== */
/* Emits `kmxda32`: as KMDA32 but with crossed lane pairings. */
__STATIC_FORCEINLINE long __RV_KMXDA32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("kmxda32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.13.2. KMXDA32 ===== */
15020 
/* ===== Inline Function Start for 4.14.1. KMADS32 ===== */
/* `kmads32`/`kmadrs32`/`kmaxds32`: saturating accumulate of the *difference*
 * of the two 32x32 lane products into t; the three variants differ in which
 * product is subtracted and whether lanes are crossed — consult the
 * P-extension spec for the exact lane order before relying on sign. */
__STATIC_FORCEINLINE long __RV_KMADS32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmads32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.14.1. KMADS32 ===== */

/* ===== Inline Function Start for 4.14.2. KMADRS32 ===== */
/* Emits `kmadrs32`: reversed-subtraction variant (see family note above). */
__STATIC_FORCEINLINE long __RV_KMADRS32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmadrs32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.14.2. KMADRS32 ===== */

/* ===== Inline Function Start for 4.14.3. KMAXDS32 ===== */
/* Emits `kmaxds32`: crossed-lane variant (see family note above). */
__STATIC_FORCEINLINE long __RV_KMAXDS32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmaxds32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.14.3. KMAXDS32 ===== */
15212 
/* ===== Inline Function Start for 4.15.1. KMSDA32 ===== */
/* Emits `kmsda32`: the sum of the two 32x32 lane products is *subtracted*
 * from accumulator t with saturation. */
__STATIC_FORCEINLINE long __RV_KMSDA32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmsda32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.15.1. KMSDA32 ===== */

/* ===== Inline Function Start for 4.15.2. KMSXDA32 ===== */
/* Emits `kmsxda32`: as KMSDA32 but with crossed lane pairings. */
__STATIC_FORCEINLINE long __RV_KMSXDA32(long t, unsigned long a, unsigned long b)
{
    __ASM volatile("kmsxda32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for 4.15.2. KMSXDA32 ===== */
15322 
/* 32-bit element shift wrappers (saturating left / shift-left-or-right-arith
 * variants). Each emits one instruction; semantics per the P-extension spec. */

/* ===== Inline Function Start for 4.16. KSLL32 ===== */
/**
 * \brief Emit `ksll32` with a register shift amount \p b.
 */
__STATIC_FORCEINLINE unsigned long __RV_KSLL32(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("ksll32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.16. KSLL32 ===== */

/* ===== Inline Function Start for 4.17. KSLLI32 ===== */
/**
 * \brief Emit `kslli32` with an immediate shift amount: the "K" asm
 *        constraint requires \p b to be a compile-time constant, which is
 *        why this is a macro rather than an inline function.
 */
#define __RV_KSLLI32(a, b)    \
    ({    \
        unsigned long result;    \
        unsigned long __a = (unsigned long)(a);    \
        __ASM volatile("kslli32 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b));    \
        result;    \
    })
/* ===== Inline Function End for 4.17. KSLLI32 ===== */

/* ===== Inline Function Start for 4.18.1. KSLRA32 ===== */
/**
 * \brief Emit `kslra32`; the shift amount \p b is signed (int), matching the
 *        instruction's left-or-right behavior per the P-extension spec.
 */
__STATIC_FORCEINLINE unsigned long __RV_KSLRA32(unsigned long a, int b)
{
    unsigned long result;
    __ASM volatile("kslra32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.18.1. KSLRA32 ===== */

/* ===== Inline Function Start for 4.18.2. KSLRA32.u ===== */
/**
 * \brief Emit `kslra32.u` (rounding variant of KSLRA32); signed shift amount.
 */
__STATIC_FORCEINLINE unsigned long __RV_KSLRA32_U(unsigned long a, int b)
{
    unsigned long result;
    __ASM volatile("kslra32.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.18.2. KSLRA32.u ===== */
15558 
/* Saturating 32-bit straight add/sub wrappers; one instruction each. */

/* ===== Inline Function Start for 4.19. KSTAS32 ===== */
/**
 * \brief Emit the `kstas32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_KSTAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kstas32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.19. KSTAS32 ===== */

/* ===== Inline Function Start for 4.20. KSTSA32 ===== */
/**
 * \brief Emit the `kstsa32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_KSTSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("kstsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.20. KSTSA32 ===== */

/* ===== Inline Function Start for 4.21. KSUB32 ===== */
/**
 * \brief Emit the `ksub32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_KSUB32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ksub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.21. KSUB32 ===== */
15708 
/* 32-bit pack wrappers: the BB/BT/TT/TB suffix selects which (Bottom/Top)
 * 32-bit halves of the two operands are packed, per the P-extension spec. */

/* ===== Inline Function Start for 4.22.1. PKBB32 ===== */
/**
 * \brief Emit the `pkbb32` instruction (pack bottom/bottom halves).
 */
__STATIC_FORCEINLINE unsigned long __RV_PKBB32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("pkbb32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.22.1. PKBB32 ===== */

/* ===== Inline Function Start for 4.22.2. PKBT32 ===== */
/**
 * \brief Emit the `pkbt32` instruction (pack bottom/top halves).
 */
__STATIC_FORCEINLINE unsigned long __RV_PKBT32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("pkbt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.22.2. PKBT32 ===== */

/* ===== Inline Function Start for 4.22.3. PKTT32 ===== */
/**
 * \brief Emit the `pktt32` instruction (pack top/top halves).
 */
__STATIC_FORCEINLINE unsigned long __RV_PKTT32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("pktt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.22.3. PKTT32 ===== */

/* ===== Inline Function Start for 4.22.4. PKTB32 ===== */
/**
 * \brief Emit the `pktb32` instruction (pack top/bottom halves).
 */
__STATIC_FORCEINLINE unsigned long __RV_PKTB32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("pktb32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.22.4. PKTB32 ===== */
15900 
/* Halving ("R" = rounding/halved result) 32-bit add/sub wrappers; straight,
 * cross, and straddle variants. One instruction each; see P-extension spec. */

/* ===== Inline Function Start for 4.23. RADD32 ===== */
/**
 * \brief Emit the `radd32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_RADD32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("radd32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.23. RADD32 ===== */

/* ===== Inline Function Start for 4.24. RCRAS32 ===== */
/**
 * \brief Emit the `rcras32` instruction (cross add/sub variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_RCRAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("rcras32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.24. RCRAS32 ===== */

/* ===== Inline Function Start for 4.25. RCRSA32 ===== */
/**
 * \brief Emit the `rcrsa32` instruction (cross sub/add variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_RCRSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("rcrsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.25. RCRSA32 ===== */

/* ===== Inline Function Start for 4.26. RSTAS32 ===== */
/**
 * \brief Emit the `rstas32` instruction (straight add/sub variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_RSTAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("rstas32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.26. RSTAS32 ===== */

/* ===== Inline Function Start for 4.27. RSTSA32 ===== */
/**
 * \brief Emit the `rstsa32` instruction (straight sub/add variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_RSTSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("rstsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.27. RSTSA32 ===== */

/* ===== Inline Function Start for 4.28. RSUB32 ===== */
/**
 * \brief Emit the `rsub32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_RSUB32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("rsub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.28. RSUB32 ===== */
16179 
/* ===== Inline Function Start for 4.29. SLL32 ===== */
/**
 * \brief Emit `sll32` (32-bit element logical left shift) with a register
 *        shift amount \p b.
 */
__STATIC_FORCEINLINE unsigned long __RV_SLL32(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("sll32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.29. SLL32 ===== */

/* ===== Inline Function Start for 4.30. SLLI32 ===== */
/**
 * \brief Emit `slli32` with an immediate shift amount: the "K" constraint
 *        requires \p b to be a compile-time constant (hence a macro).
 */
#define __RV_SLLI32(a, b)    \
    ({    \
        unsigned long result;    \
        unsigned long __a = (unsigned long)(a);    \
        __ASM volatile("slli32 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b));    \
        result;    \
    })
/* ===== Inline Function End for 4.30. SLLI32 ===== */
16259 
/* Signed 32-bit max/min and 32x32 multiply(-subtract) wrappers. NOTE: the
 * max/min wrappers take and return unsigned long even though the underlying
 * instructions are the signed variants -- the raw register bit pattern is
 * passed straight through to/from the asm. */

/* ===== Inline Function Start for 4.31. SMAX32 ===== */
/**
 * \brief Emit the `smax32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_SMAX32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("smax32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.31. SMAX32 ===== */

/* ===== Inline Function Start for 4.32.1. SMBB32 ===== */
/**
 * \brief Emit `smbb32` (bottom x bottom 32-bit signed multiply); the signed
 *        product is returned as long.
 */
__STATIC_FORCEINLINE long __RV_SMBB32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("smbb32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.32.1. SMBB32 ===== */

/* ===== Inline Function Start for 4.32.2. SMBT32 ===== */
/**
 * \brief Emit `smbt32` (bottom x top 32-bit signed multiply).
 */
__STATIC_FORCEINLINE long __RV_SMBT32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("smbt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.32.2. SMBT32 ===== */

/* ===== Inline Function Start for 4.32.3. SMTT32 ===== */
/**
 * \brief Emit `smtt32` (top x top 32-bit signed multiply).
 */
__STATIC_FORCEINLINE long __RV_SMTT32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("smtt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.32.3. SMTT32 ===== */

/* ===== Inline Function Start for 4.33.1. SMDS32 ===== */
/**
 * \brief Emit the `smds32` instruction; signed result.
 */
__STATIC_FORCEINLINE long __RV_SMDS32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("smds32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.33.1. SMDS32 ===== */

/* ===== Inline Function Start for 4.33.2. SMDRS32 ===== */
/**
 * \brief Emit the `smdrs32` instruction; signed result.
 */
__STATIC_FORCEINLINE long __RV_SMDRS32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("smdrs32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.33.2. SMDRS32 ===== */

/* ===== Inline Function Start for 4.33.3. SMXDS32 ===== */
/**
 * \brief Emit the `smxds32` instruction; signed result.
 */
__STATIC_FORCEINLINE long __RV_SMXDS32(unsigned long a, unsigned long b)
{
    long result;
    __ASM volatile("smxds32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.33.3. SMXDS32 ===== */

/* ===== Inline Function Start for 4.34. SMIN32 ===== */
/**
 * \brief Emit the `smin32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_SMIN32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("smin32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.34. SMIN32 ===== */
16641 
/* Arithmetic right shift wrappers; ".u" suffix = rounding variant, *AI* forms
 * take an immediate shift (constraint "K" => compile-time constant). */

/* ===== Inline Function Start for 4.35.1. SRA32 ===== */
/**
 * \brief Emit `sra32` with a register shift amount \p b.
 */
__STATIC_FORCEINLINE unsigned long __RV_SRA32(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("sra32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.35.1. SRA32 ===== */

/* ===== Inline Function Start for 4.35.2. SRA32.u ===== */
/**
 * \brief Emit `sra32.u` (rounding variant) with a register shift amount.
 */
__STATIC_FORCEINLINE unsigned long __RV_SRA32_U(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("sra32.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.35.2. SRA32.u ===== */

/* ===== Inline Function Start for 4.36.1. SRAI32 ===== */
/**
 * \brief Emit `srai32` with an immediate shift; \p b must be constant ("K").
 */
#define __RV_SRAI32(a, b)    \
    ({    \
        unsigned long result;    \
        unsigned long __a = (unsigned long)(a);    \
        __ASM volatile("srai32 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b));    \
        result;    \
    })
/* ===== Inline Function End for 4.36.1. SRAI32 ===== */

/* ===== Inline Function Start for 4.36.2. SRAI32.u ===== */
/**
 * \brief Emit `srai32.u` with an immediate shift; \p b must be constant ("K").
 */
#define __RV_SRAI32_U(a, b)    \
    ({    \
        unsigned long result;    \
        unsigned long __a = (unsigned long)(a);    \
        __ASM volatile("srai32.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b));    \
        result;    \
    })
/* ===== Inline Function End for 4.36.2. SRAI32.u ===== */

/* ===== Inline Function Start for 4.37. SRAIW.u ===== */
/**
 * \brief Emit `sraiw.u`. Unlike the 32-element forms above, the operand is
 *        narrowed to int (one 32-bit word) and a signed long is returned;
 *        \p b must be a compile-time constant ("K").
 */
#define __RV_SRAIW_U(a, b)    \
    ({    \
        long result;    \
        int __a = (int)(a);    \
        __ASM volatile("sraiw.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b));    \
        result;    \
    })
/* ===== Inline Function End for 4.37. SRAIW.u ===== */
16901 
/* Logical right shift wrappers; same register/immediate split as SRA32. */

/* ===== Inline Function Start for 4.38.1. SRL32 ===== */
/**
 * \brief Emit `srl32` with a register shift amount \p b.
 */
__STATIC_FORCEINLINE unsigned long __RV_SRL32(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("srl32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.38.1. SRL32 ===== */

/* ===== Inline Function Start for 4.38.2. SRL32.u ===== */
/**
 * \brief Emit `srl32.u` (rounding variant) with a register shift amount.
 */
__STATIC_FORCEINLINE unsigned long __RV_SRL32_U(unsigned long a, unsigned int b)
{
    unsigned long result;
    __ASM volatile("srl32.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.38.2. SRL32.u ===== */

/* ===== Inline Function Start for 4.39.1. SRLI32 ===== */
/**
 * \brief Emit `srli32` with an immediate shift; \p b must be constant ("K").
 */
#define __RV_SRLI32(a, b)    \
    ({    \
        unsigned long result;    \
        unsigned long __a = (unsigned long)(a);    \
        __ASM volatile("srli32 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b));    \
        result;    \
    })
/* ===== Inline Function End for 4.39.1. SRLI32 ===== */

/* ===== Inline Function Start for 4.39.2. SRLI32.u ===== */
/**
 * \brief Emit `srli32.u` with an immediate shift; \p b must be constant ("K").
 */
#define __RV_SRLI32_U(a, b)    \
    ({    \
        unsigned long result;    \
        unsigned long __a = (unsigned long)(a);    \
        __ASM volatile("srli32.u %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b));    \
        result;    \
    })
/* ===== Inline Function End for 4.39.2. SRLI32.u ===== */
17111 
/* Non-saturating 32-bit straight add/sub and subtract wrappers. */

/* ===== Inline Function Start for 4.40. STAS32 ===== */
/**
 * \brief Emit the `stas32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_STAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("stas32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.40. STAS32 ===== */

/* ===== Inline Function Start for 4.41. STSA32 ===== */
/**
 * \brief Emit the `stsa32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_STSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("stsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.41. STSA32 ===== */

/* ===== Inline Function Start for 4.42. SUB32 ===== */
/**
 * \brief Emit the `sub32` instruction (element-wise 32-bit subtract).
 */
__STATIC_FORCEINLINE unsigned long __RV_SUB32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("sub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.42. SUB32 ===== */
17236 
/* Unsigned saturating ("UK" prefix) 32-bit add/sub wrappers; one instruction
 * each, semantics per the P-extension spec. */

/* ===== Inline Function Start for 4.43. UKADD32 ===== */
/**
 * \brief Emit the `ukadd32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_UKADD32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukadd32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.43. UKADD32 ===== */

/* ===== Inline Function Start for 4.44. UKCRAS32 ===== */
/**
 * \brief Emit the `ukcras32` instruction (cross add/sub variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_UKCRAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukcras32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.44. UKCRAS32 ===== */

/* ===== Inline Function Start for 4.45. UKCRSA32 ===== */
/**
 * \brief Emit the `ukcrsa32` instruction (cross sub/add variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_UKCRSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukcrsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.45. UKCRSA32 ===== */

/* ===== Inline Function Start for 4.46. UKSTAS32 ===== */
/**
 * \brief Emit the `ukstas32` instruction (straight add/sub variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_UKSTAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukstas32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.46. UKSTAS32 ===== */

/* ===== Inline Function Start for 4.47. UKSTSA32 ===== */
/**
 * \brief Emit the `ukstsa32` instruction (straight sub/add variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_UKSTSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ukstsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.47. UKSTSA32 ===== */

/* ===== Inline Function Start for 4.48. UKSUB32 ===== */
/**
 * \brief Emit the `uksub32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_UKSUB32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("uksub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.48. UKSUB32 ===== */
17534 
/* ===== Inline Function Start for 4.49. UMAX32 ===== */
/**
 * \brief Emit the `umax32` instruction (unsigned 32-bit element maximum).
 */
__STATIC_FORCEINLINE unsigned long __RV_UMAX32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("umax32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.49. UMAX32 ===== */

/* ===== Inline Function Start for 4.50. UMIN32 ===== */
/**
 * \brief Emit the `umin32` instruction (unsigned 32-bit element minimum).
 */
__STATIC_FORCEINLINE unsigned long __RV_UMIN32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("umin32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.50. UMIN32 ===== */
17610 
/* Unsigned halving ("UR" prefix) 32-bit add/sub wrappers. */

/* ===== Inline Function Start for 4.51. URADD32 ===== */
/**
 * \brief Emit the `uradd32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_URADD32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("uradd32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.51. URADD32 ===== */

/* ===== Inline Function Start for 4.52. URCRAS32 ===== */
/**
 * \brief Emit the `urcras32` instruction (cross add/sub variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_URCRAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urcras32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.52. URCRAS32 ===== */

/* ===== Inline Function Start for 4.53. URCRSA32 ===== */
/**
 * \brief Emit the `urcrsa32` instruction (cross sub/add variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_URCRSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urcrsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.53. URCRSA32 ===== */

/* ===== Inline Function Start for 4.54. URSTAS32 ===== */
/**
 * \brief Emit the `urstas32` instruction (straight add/sub variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_URSTAS32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urstas32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.54. URSTAS32 ===== */

/* ===== Inline Function Start for 4.55. URSTSA32 ===== */
/**
 * \brief Emit the `urstsa32` instruction (straight sub/add variant).
 */
__STATIC_FORCEINLINE unsigned long __RV_URSTSA32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("urstsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.55. URSTSA32 ===== */

/* ===== Inline Function Start for 4.56. URSUB32 ===== */
/**
 * \brief Emit the `ursub32` instruction on packed 32-bit operands.
 */
__STATIC_FORCEINLINE unsigned long __RV_URSUB32(unsigned long a, unsigned long b)
{
    unsigned long result;
    __ASM volatile("ursub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for 4.56. URSUB32 ===== */
17890 
17891 #endif /* __RISCV_XLEN == 64 */
17892 
/* EXPD8x wrappers (Nuclei extension): single-operand instructions indexed
 * 0..3 here; the 4..7 variants below are gated behind __RISCV_XLEN == 64.
 * Each is a one-instruction asm wrapper. */

/* ===== Inline Function Start for EXPD80 ===== */
/**
 * \brief Emit the `expd80` instruction on operand \p a.
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD80(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd80 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD80 ===== */

/* ===== Inline Function Start for EXPD81 ===== */
/**
 * \brief Emit the `expd81` instruction on operand \p a.
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD81(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd81 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD81 ===== */

/* ===== Inline Function Start for EXPD82 ===== */
/**
 * \brief Emit the `expd82` instruction on operand \p a.
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD82(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd82 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD82 ===== */

/* ===== Inline Function Start for EXPD83 ===== */
/**
 * \brief Emit the `expd83` instruction on operand \p a.
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD83(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd83 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD83 ===== */
18040 
18041 #if (__RISCV_XLEN == 64)
/* EXPD84..EXPD87: RV64-only variants (this span sits inside the
 * `#if (__RISCV_XLEN == 64)` guard opened just above). */

/* ===== Inline Function Start for EXPD84 ===== */
/**
 * \brief Emit the `expd84` instruction on operand \p a (RV64 only).
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD84(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd84 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD84 ===== */

/* ===== Inline Function Start for EXPD85 ===== */
/**
 * \brief Emit the `expd85` instruction on operand \p a (RV64 only).
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD85(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd85 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD85 ===== */

/* ===== Inline Function Start for EXPD86 ===== */
/**
 * \brief Emit the `expd86` instruction on operand \p a (RV64 only).
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD86(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd86 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD86 ===== */

/* ===== Inline Function Start for EXPD87 ===== */
/**
 * \brief Emit the `expd87` instruction on operand \p a (RV64 only).
 */
__STATIC_FORCEINLINE unsigned long __RV_EXPD87(unsigned long a)
{
    unsigned long result;
    __ASM volatile("expd87 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for EXPD87 ===== */
18181 #endif /* __RISCV_XLEN == 64 */
18182 
18183 #if (__RISCV_XLEN == 32) || defined(__ONLY_FOR_DOXYGEN_DOCUMENT_GENERATION__)
18184 /* XXXXX Nuclei Extended DSP Instructions for RV32 XXXXX */
18185 
/* Nuclei "D" (64-bit datapath) extended DSP wrappers for RV32: operands are
 * carried in unsigned long long (a register pair on RV32, per the enclosing
 * `#if (__RISCV_XLEN == 32)` guard). One instruction per wrapper. */

/* ===== Inline Function Start for DKHM8 ===== */
/**
 * \brief Emit the `dkhm8` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKHM8(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkhm8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKHM8 ===== */

/* ===== Inline Function Start for DKHM16 ===== */
/**
 * \brief Emit the `dkhm16` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKHM16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkhm16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKHM16 ===== */

/* ===== Inline Function Start for DKABS8 ===== */
/**
 * \brief Emit the `dkabs8` instruction on one 64-bit packed operand.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKABS8(unsigned long long a)
{
    unsigned long long result;
    __ASM volatile("dkabs8 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for DKABS8 ===== */

/* ===== Inline Function Start for DKABS16 ===== */
/**
 * \brief Emit the `dkabs16` instruction on one 64-bit packed operand.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKABS16(unsigned long long a)
{
    unsigned long long result;
    __ASM volatile("dkabs16 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for DKABS16 ===== */

/* ===== Inline Function Start for DKSLRA8 ===== */
/**
 * \brief Emit the `dkslra8` instruction; the shift amount \p b is signed.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKSLRA8(unsigned long long a, int b)
{
    unsigned long long result;
    __ASM volatile("dkslra8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSLRA8 ===== */

/* ===== Inline Function Start for DKSLRA16 ===== */
/**
 * \brief Emit the `dkslra16` instruction; the shift amount \p b is signed.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKSLRA16(unsigned long long a, int b)
{
    unsigned long long result;
    __ASM volatile("dkslra16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSLRA16 ===== */
18520 
/* 64-bit-datapath saturating add/sub and crossed-half multiply wrappers. */

/* ===== Inline Function Start for DKADD8 ===== */
/**
 * \brief Emit the `dkadd8` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKADD8(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkadd8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKADD8 ===== */

/* ===== Inline Function Start for DKADD16 ===== */
/**
 * \brief Emit the `dkadd16` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKADD16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkadd16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKADD16 ===== */

/* ===== Inline Function Start for DKSUB8 ===== */
/**
 * \brief Emit the `dksub8` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKSUB8(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dksub8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSUB8 ===== */

/* ===== Inline Function Start for DKSUB16 ===== */
/**
 * \brief Emit the `dksub16` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKSUB16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dksub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSUB16 ===== */

/* ===== Inline Function Start for DKHMX8 ===== */
/**
 * \brief Emit the `dkhmx8` instruction (crossed variant of DKHM8).
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKHMX8(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkhmx8 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKHMX8 ===== */

/* ===== Inline Function Start for DKHMX16 ===== */
/**
 * \brief Emit the `dkhmx16` instruction (crossed variant of DKHM16).
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKHMX16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkhmx16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKHMX16 ===== */
18815 
/* 64-bit MSW-multiply wrappers; the ".u" asm suffix selects the rounding
 * ("u") variant of the same base instruction. */

/* ===== Inline Function Start for DSMMUL ===== */
/**
 * \brief Emit the `dsmmul` instruction on two 64-bit operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DSMMUL(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsmmul %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMMUL ===== */

/* ===== Inline Function Start for DSMMULU ===== */
/**
 * \brief Emit the `dsmmul.u` instruction (rounding variant of DSMMUL).
 */
__STATIC_FORCEINLINE unsigned long long __RV_DSMMUL_U(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsmmul.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMMULU ===== */

/* ===== Inline Function Start for DKWMMUL ===== */
/**
 * \brief Emit the `dkwmmul` instruction on two 64-bit operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKWMMUL(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkwmmul %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKWMMUL ===== */

/* ===== Inline Function Start for DKWMMULU ===== */
/**
 * \brief Emit the `dkwmmul.u` instruction (rounding variant of DKWMMUL).
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKWMMUL_U(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkwmmul.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKWMMULU ===== */
18999 
/* 32-bit-element operations on the 64-bit datapath, plus halving ("R")
 * add and plain subtract forms for 16/32-bit elements. */

/* ===== Inline Function Start for DKABS32 ===== */
/**
 * \brief Emit the `dkabs32` instruction on one 64-bit packed operand.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKABS32(unsigned long long a)
{
    unsigned long long result;
    __ASM volatile("dkabs32 %0, %1" : "=r"(result) : "r"(a));
    return result;
}
/* ===== Inline Function End for DKABS32 ===== */

/* ===== Inline Function Start for DKSLRA32 ===== */
/**
 * \brief Emit the `dkslra32` instruction; the shift amount \p b is signed.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKSLRA32(unsigned long long a, int b)
{
    unsigned long long result;
    __ASM volatile("dkslra32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSLRA32 ===== */

/* ===== Inline Function Start for DKADD32 ===== */
/**
 * \brief Emit the `dkadd32` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKADD32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkadd32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKADD32 ===== */

/* ===== Inline Function Start for DKSUB32 ===== */
/**
 * \brief Emit the `dksub32` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKSUB32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dksub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSUB32 ===== */

/* ===== Inline Function Start for DRADD16 ===== */
/**
 * \brief Emit the `dradd16` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DRADD16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dradd16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DRADD16 ===== */

/* ===== Inline Function Start for DSUB16 ===== */
/**
 * \brief Emit the `dsub16` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DSUB16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSUB16 ===== */

/* ===== Inline Function Start for DRADD32 ===== */
/**
 * \brief Emit the `dradd32` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DRADD32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dradd32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DRADD32 ===== */

/* ===== Inline Function Start for DSUB32 ===== */
/**
 * \brief Emit the `dsub32` instruction on two 64-bit packed operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DSUB32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSUB32 ===== */
19343 
/* DMSR/DMXSR wrappers. Note the operand-width asymmetry: DMSR16/DMSR17 take
 * two XLEN-sized `unsigned long` inputs and produce a 64-bit result, while
 * DMSR33/DMXSR33 take 64-bit inputs. */

/* ===== Inline Function Start for DMSR16 ===== */
/**
 * \brief Emit the `dmsr16` instruction; two unsigned long inputs,
 *        64-bit result.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DMSR16(unsigned long a, unsigned long b)
{
    unsigned long long result;
    __ASM volatile("dmsr16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DMSR16 ===== */

/* ===== Inline Function Start for DMSR17 ===== */
/**
 * \brief Emit the `dmsr17` instruction; two unsigned long inputs,
 *        64-bit result.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DMSR17(unsigned long a, unsigned long b)
{
    unsigned long long result;
    __ASM volatile("dmsr17 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DMSR17 ===== */

/* ===== Inline Function Start for DMSR33 ===== */
/**
 * \brief Emit the `dmsr33` instruction on two 64-bit operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DMSR33(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dmsr33 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DMSR33 ===== */

/* ===== Inline Function Start for DMXSR33 ===== */
/**
 * \brief Emit the `dmxsr33` instruction on two 64-bit operands.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DMXSR33(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dmxsr33 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DMXSR33 ===== */
19516 
19517 /* ===== Inline Function Start for DREDAS16 ===== */
19547 __STATIC_FORCEINLINE unsigned long __RV_DREDAS16(unsigned long long a)
19548 {
19549  unsigned long result;
19550  __ASM volatile("dredas16 %0, %1" : "=r"(result) : "r"(a));
19551  return result;
19552 }
19553 /* ===== Inline Function End for DREDAS16 ===== */
19554 
19555 /* ===== Inline Function Start for DREDSA16 ===== */
19583 __STATIC_FORCEINLINE unsigned long __RV_DREDSA16(unsigned long long a)
19584 {
19585  unsigned long result;
19586  __ASM volatile("dredsa16 %0, %1" : "=r"(result) : "r"(a));
19587  return result;
19588 }
19589 /* ===== Inline Function End for DREDSA16 ===== */
19590 
19591 /* ===== Inline Function Start for DKCLIP64 ===== */
19630 __STATIC_FORCEINLINE int16_t __RV_DKCLIP64(unsigned long long a)
19631 {
19632  int16_t result;
19633  __ASM volatile("dkclip64 %0, %1" : "=r"(result) : "r"(a));
19634  return result;
19635 }
19636 /* ===== Inline Function End for DKCLIP64 ===== */
19637 
/* ===== Inline Function Start for DKMDA ===== */
/*
 * 16-bit multiply/accumulate family (dkmda/dkmxda/dsmdrs/dsmxds): two
 * 64-bit packed operands in, 64-bit result out, via the named instruction.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKMDA(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkmda %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKMDA ===== */

/* ===== Inline Function Start for DKMXDA ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMXDA(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkmxda %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKMXDA ===== */

/* ===== Inline Function Start for DSMDRS ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSMDRS(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsmdrs %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMDRS ===== */

/* ===== Inline Function Start for DSMXDS ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSMXDS(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsmxds %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMXDS ===== */

/* ===== Inline Function Start for DSMBB32 ===== */
/*
 * 32x32 multiply family (dsmbb32/dsmbt32/dsmtt32 and their .sra14/.sra32
 * right-shifted variants): operands are packed-32-bit pairs, the product is
 * signed, hence the 'long long' return type.
 */
__STATIC_FORCEINLINE long long __RV_DSMBB32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmbb32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBB32 ===== */

/* ===== Inline Function Start for DSMBB32.sra14 ===== */
__STATIC_FORCEINLINE long long __RV_DSMBB32_SRA14(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmbb32.sra14 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBB32.sra14 ===== */

/* ===== Inline Function Start for DSMBB32.sra32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMBB32_SRA32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmbb32.sra32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBB32.sra32 ===== */

/* ===== Inline Function Start for DSMBT32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMBT32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmbt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBT32 ===== */

/* ===== Inline Function Start for DSMBT32.sra14 ===== */
__STATIC_FORCEINLINE long long __RV_DSMBT32_SRA14(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmbt32.sra14 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBT32.sra14 ===== */

/* ===== Inline Function Start for DSMBT32.sra32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMBT32_SRA32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmbt32.sra32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBT32.sra32 ===== */

/* ===== Inline Function Start for DSMTT32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMTT32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmtt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMTT32 ===== */

/* ===== Inline Function Start for DSMTT32.sra14 ===== */
__STATIC_FORCEINLINE long long __RV_DSMTT32_SRA14(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmtt32.sra14 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMTT32.sra14 ===== */

/* ===== Inline Function Start for DSMTT32.sra32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMTT32_SRA32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmtt32.sra32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMTT32.sra32 ===== */
20170 
/* ===== Inline Function Start for DPKBB32 ===== */
/*
 * Pack family (dpk{bb,bt,tt,tb}{16,32}): combines lanes of the two 64-bit
 * operands into one packed 64-bit result; the B/T letters in the mnemonic
 * select bottom/top lanes (see NMSIS DSP reference for the exact layout).
 */
__STATIC_FORCEINLINE unsigned long long __RV_DPKBB32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpkbb32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKBB32 ===== */

/* ===== Inline Function Start for DPKBT32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DPKBT32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpkbt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKBT32 ===== */

/* ===== Inline Function Start for DPKTT32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DPKTT32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpktt32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKTT32 ===== */

/* ===== Inline Function Start for DPKTB32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DPKTB32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpktb32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKTB32 ===== */

/* ===== Inline Function Start for DPKTB16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DPKTB16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpktb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKTB16 ===== */

/* ===== Inline Function Start for DPKBB16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DPKBB16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpkbb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKBB16 ===== */

/* ===== Inline Function Start for DPKBT16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DPKBT16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpkbt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKBT16 ===== */

/* ===== Inline Function Start for DPKTT16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DPKTT16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dpktt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DPKTT16 ===== */

/* ===== Inline Function Start for DSRA16 ===== */
/* Shift: first operand is the packed 64-bit value, second is the XLEN-wide
 * shift amount ('unsigned long b'), hence the asymmetric signature. */
__STATIC_FORCEINLINE unsigned long long __RV_DSRA16(unsigned long long a, unsigned long b)
{
    unsigned long long result;
    __ASM volatile("dsra16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSRA16 ===== */
20515 
/* ===== Inline Function Start for DADD16 ===== */
/*
 * Straight binary wrappers: two 64-bit packed operands, 64-bit result, one
 * instruction each (dadd16/dadd32, dsm{bb,bt,tt}16, drcrsa/drcras pairs).
 */
__STATIC_FORCEINLINE unsigned long long __RV_DADD16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dadd16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DADD16 ===== */

/* ===== Inline Function Start for DADD32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DADD32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dadd32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DADD32 ===== */

/* ===== Inline Function Start for DSMBB16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSMBB16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsmbb16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBB16 ===== */

/* ===== Inline Function Start for DSMBT16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSMBT16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsmbt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMBT16 ===== */

/* ===== Inline Function Start for DSMTT16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSMTT16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dsmtt16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMTT16 ===== */

/* ===== Inline Function Start for DRCRSA16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DRCRSA16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("drcrsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DRCRSA16 ===== */

/* ===== Inline Function Start for DRCRSA32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DRCRSA32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("drcrsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DRCRSA32 ===== */

/* ===== Inline Function Start for DRCRAS16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DRCRAS16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("drcras16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DRCRAS16 ===== */
20839 
20840 /* ===== Inline Function Start for DRCRAS32 ===== */
20873 __STATIC_FORCEINLINE unsigned long long __RV_DRCRAS32(unsigned long long a, unsigned long long b)
20874 {
20875  unsigned long long result;
20876  __ASM volatile("DRCRAS32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
20877  return result;
20878 }
20879 /* ===== Inline Function End for DRCRAS32 ===== */
20880 
/* ===== Inline Function Start for DKCRAS16 ===== */
/* Saturating cross add/sub family (dkcras16/dkcrsa16) plus drsub16 and
 * dstsa32: two 64-bit packed operands, 64-bit result. */
__STATIC_FORCEINLINE unsigned long long __RV_DKCRAS16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkcras16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKCRAS16 ===== */

/* ===== Inline Function Start for DKCRSA16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKCRSA16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkcrsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKCRSA16 ===== */

/* ===== Inline Function Start for DRSUB16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DRSUB16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("drsub16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DRSUB16 ===== */

/* ===== Inline Function Start for DSTSA32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSTSA32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dstsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSTSA32 ===== */
21068 
21069 /* ===== Inline Function Start for DSTAS32 ===== */
21101 __STATIC_FORCEINLINE unsigned long long __RV_DSTAS32(unsigned long long a, unsigned long long b)
21102 {
21103  unsigned long long result;
21104  __ASM volatile("DSTAS32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
21105  return result;
21106 }
21107 /* ===== Inline Function End for DSTAS32 ===== */
21108 
/* ===== Inline Function Start for DKCRSA32 ===== */
/* 32-bit cross add/sub wrappers: two 64-bit packed operands, 64-bit
 * result, one instruction each. */
__STATIC_FORCEINLINE unsigned long long __RV_DKCRSA32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkcrsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKCRSA32 ===== */

/* ===== Inline Function Start for DKCRAS32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKCRAS32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkcras32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKCRAS32 ===== */

/* ===== Inline Function Start for DCRSA32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DCRSA32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dcrsa32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DCRSA32 ===== */

/* ===== Inline Function Start for DCRAS32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DCRAS32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dcras32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DCRAS32 ===== */

/* ===== Inline Function Start for DKSTSA16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKSTSA16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkstsa16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSTSA16 ===== */

/* ===== Inline Function Start for DKSTAS16 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKSTAS16(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("dkstas16 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKSTAS16 ===== */

/* ===== Inline Function Start for DSCLIP8 ===== */
/*
 * The dsclip{8,16,32} wrappers are statement-expression macros rather than
 * inline functions because the clip operand uses the "K" (small immediate)
 * asm constraint: 'b' must be a compile-time constant so the assembler can
 * encode it directly into the instruction.
 */
#define __RV_DSCLIP8(a, b) \
    ({ \
        unsigned long long result; \
        unsigned long long __a = (unsigned long long)(a); \
        __ASM volatile("dsclip8 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for DSCLIP8 ===== */

/* ===== Inline Function Start for DSCLIP16 ===== */
/* See DSCLIP8: 'b' must be a compile-time constant ("K" constraint). */
#define __RV_DSCLIP16(a, b) \
    ({ \
        unsigned long long result; \
        unsigned long long __a = (unsigned long long)(a); \
        __ASM volatile("dsclip16 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for DSCLIP16 ===== */

/* ===== Inline Function Start for DSCLIP32 ===== */
/* See DSCLIP8: 'b' must be a compile-time constant ("K" constraint). */
#define __RV_DSCLIP32(a, b) \
    ({ \
        unsigned long long result; \
        unsigned long long __a = (unsigned long long)(a); \
        __ASM volatile("dsclip32 %0, %1, %2" : "=r"(result) : "r"(__a), "K"(b)); \
        result; \
    })
/* ===== Inline Function End for DSCLIP32 ===== */
21542 
/* ===== Inline Function Start for DRSUB32 ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DRSUB32(unsigned long long a, unsigned long long b)
{
    unsigned long long result;
    __ASM volatile("drsub32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DRSUB32 ===== */

/* ===== Inline Function Start for DKMMAC ===== */
/*
 * Accumulating wrappers: the accumulator 't' is bound with the "+r"
 * (read-modify-write) constraint, so the instruction both reads the old
 * accumulator value and writes the new one; the updated 't' is returned.
 */
__STATIC_FORCEINLINE unsigned long long __RV_DKMMAC(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmmac %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMMAC ===== */

/* ===== Inline Function Start for DKMMACU ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMMAC_U(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmmac.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMMACU ===== */

/* ===== Inline Function Start for DKMMSB ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMMSB(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmmsb %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMMSB ===== */

/* ===== Inline Function Start for DKMMSBU ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMMSB_U(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmmsb.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMMSBU ===== */

/* ===== Inline Function Start for DKMADA ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMADA(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmada %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMADA ===== */

/* ===== Inline Function Start for DKMAXDA ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMAXDA(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmaxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMAXDA ===== */

/* ===== Inline Function Start for DKMADS ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMADS(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmads %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMADS ===== */

/* ===== Inline Function Start for DKMADRS ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMADRS(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmadrs %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMADRS ===== */

/* ===== Inline Function Start for DKMAXDS ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMAXDS(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmaxds %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMAXDS ===== */

/* ===== Inline Function Start for DKMSDA ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMSDA(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmsda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMSDA ===== */

/* ===== Inline Function Start for DKMSXDA ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKMSXDA(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmsxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMSXDA ===== */

/* ===== Inline Function Start for DSMAQA ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSMAQA(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmaqa %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMAQA ===== */

/* ===== Inline Function Start for DSMAQASU ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DSMAQA_SU(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmaqasu %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMAQASU ===== */

/* ===== Inline Function Start for DUMAQA ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DUMAQA(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dumaqa %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DUMAQA ===== */
22240 
/* ===== Inline Function Start for DKMDA32 ===== */
/*
 * 32-bit dual multiply family: results and accumulators are signed
 * ('long long'); accumulating variants bind 't' with "+r"
 * (read-modify-write) and return the updated value.
 */
__STATIC_FORCEINLINE long long __RV_DKMDA32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dkmda32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKMDA32 ===== */

/* ===== Inline Function Start for DKMXDA32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMXDA32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dkmxda32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DKMXDA32 ===== */

/* ===== Inline Function Start for DKMADA32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMADA32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmada32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMADA32 ===== */

/* ===== Inline Function Start for DKMAXDA32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMAXDA32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmaxda32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMAXDA32 ===== */

/* ===== Inline Function Start for DKMADS32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMADS32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmads32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMADS32 ===== */

/* ===== Inline Function Start for DKMADRS32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMADRS32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmadrs32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMADRS32 ===== */

/* ===== Inline Function Start for DKMAXDS32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMAXDS32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmaxds32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMAXDS32 ===== */

/* ===== Inline Function Start for DKMSDA32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMSDA32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmsda32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMSDA32 ===== */

/* ===== Inline Function Start for DKMSXDA32 ===== */
__STATIC_FORCEINLINE long long __RV_DKMSXDA32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dkmsxda32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKMSXDA32 ===== */

/* ===== Inline Function Start for DSMDS32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMDS32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmds32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMDS32 ===== */

/* ===== Inline Function Start for DSMDRS32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMDRS32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmdrs32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMDRS32 ===== */

/* ===== Inline Function Start for DSMXDS32 ===== */
__STATIC_FORCEINLINE long long __RV_DSMXDS32(unsigned long long a, unsigned long long b)
{
    long long result;
    __ASM volatile("dsmxds32 %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMXDS32 ===== */
22758 
/* ===== Inline Function Start for DSMALDA ===== */
/*
 * 64-bit signed accumulate family (dsmal*/dsmsl*/dd*): the 'long long'
 * accumulator 't' is read-modify-write ("+r") and returned updated.
 */
__STATIC_FORCEINLINE long long __RV_DSMALDA(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmalda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMALDA ===== */

/* ===== Inline Function Start for DSMALXDA ===== */
__STATIC_FORCEINLINE long long __RV_DSMALXDA(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmalxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMALXDA ===== */

/* ===== Inline Function Start for DSMALDS ===== */
__STATIC_FORCEINLINE long long __RV_DSMALDS(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmalds %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMALDS ===== */

/* ===== Inline Function Start for DSMALDRS ===== */
__STATIC_FORCEINLINE long long __RV_DSMALDRS(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmaldrs %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMALDRS ===== */

/* ===== Inline Function Start for DSMALXDS ===== */
__STATIC_FORCEINLINE long long __RV_DSMALXDS(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmalxds %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMALXDS ===== */

/* ===== Inline Function Start for DSMSLDA ===== */
__STATIC_FORCEINLINE long long __RV_DSMSLDA(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmslda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMSLDA ===== */

/* ===== Inline Function Start for DSMSLXDA ===== */
__STATIC_FORCEINLINE long long __RV_DSMSLXDA(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmslxda %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMSLXDA ===== */

/* ===== Inline Function Start for DDSMAQA ===== */
__STATIC_FORCEINLINE long long __RV_DDSMAQA(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("ddsmaqa %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DDSMAQA ===== */

/* ===== Inline Function Start for DDSMAQASU ===== */
__STATIC_FORCEINLINE long long __RV_DDSMAQASU(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("ddsmaqasu %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DDSMAQASU ===== */

/* ===== Inline Function Start for DDUMAQA ===== */
__STATIC_FORCEINLINE long long __RV_DDUMAQA(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("ddumaqa %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DDUMAQA ===== */
23231 
/* ===== Inline Function Start for DSMA32.u ===== */
/* The dsm*32.u wrappers narrow to an XLEN-wide signed 'long' result. */
__STATIC_FORCEINLINE long __RV_DSMA32_U(unsigned long long a, unsigned long long b)
{
    long result;
    __ASM volatile("dsma32.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMA32.u ===== */

/* ===== Inline Function Start for DSMXS32.u ===== */
__STATIC_FORCEINLINE long __RV_DSMXS32_U(unsigned long long a, unsigned long long b)
{
    long result;
    __ASM volatile("dsmxs32.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMXS32.u ===== */

/* ===== Inline Function Start for DSMXA32.u ===== */
__STATIC_FORCEINLINE long __RV_DSMXA32_U(unsigned long long a, unsigned long long b)
{
    long result;
    __ASM volatile("dsmxa32.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMXA32.u ===== */

/* ===== Inline Function Start for DSMS32.u ===== */
__STATIC_FORCEINLINE long __RV_DSMS32_U(unsigned long long a, unsigned long long b)
{
    long result;
    __ASM volatile("dsms32.u %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
    return result;
}
/* ===== Inline Function End for DSMS32.u ===== */

/* ===== Inline Function Start for DSMADA16 ===== */
/* Accumulates into the 64-bit 't' ("+r", read-modify-write) but returns
 * only the XLEN-wide low part via the explicit (long) narrowing cast. */
__STATIC_FORCEINLINE long __RV_DSMADA16(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmada16 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return (long)t;
}
/* ===== Inline Function End for DSMADA16 ===== */

/* ===== Inline Function Start for DSMAXDA16 ===== */
/* Same contract as DSMADA16: 64-bit accumulator, XLEN-wide return. */
__STATIC_FORCEINLINE long __RV_DSMAXDA16(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmaxda16 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return (long)t;
}
/* ===== Inline Function End for DSMAXDA16 ===== */

/* ===== Inline Function Start for DKSMS32.u ===== */
__STATIC_FORCEINLINE unsigned long long __RV_DKSMS32_U(unsigned long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dksms32.u %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DKSMS32.u ===== */

/* ===== Inline Function Start for DMADA32 ===== */
/* Same contract as DSMADA16: 64-bit accumulator, XLEN-wide return. */
__STATIC_FORCEINLINE long __RV_DMADA32(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dmada32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return (long)t;
}
/* ===== Inline Function End for DMADA32 ===== */

/* ===== Inline Function Start for DSMALBB ===== */
__STATIC_FORCEINLINE long long __RV_DSMALBB(long long t, unsigned long long a, unsigned long long b)
{
    __ASM volatile("dsmalbb %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
    return t;
}
/* ===== Inline Function End for DSMALBB ===== */
23602 
23603 /* ===== Inline Function Start for DSMALBT ===== */
23639 __STATIC_FORCEINLINE long long __RV_DSMALBT(long long t, unsigned long long a, unsigned long long b)
23640 {
23641  __ASM volatile("dsmalbt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
23642  return t;
23643 }
23644 /* ===== Inline Function End for DSMALBT ===== */
23645 
23646 /* ===== Inline Function Start for DSMALTT ===== */
23682 __STATIC_FORCEINLINE long long __RV_DSMALTT(long long t, unsigned long long a, unsigned long long b)
23683 {
23684  __ASM volatile("dsmaltt %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
23685  return t;
23686 }
23687 /* ===== Inline Function End for DSMALTT ===== */
23688 
23689 /* ===== Inline Function Start for DKMABB32 ===== */
23730 __STATIC_FORCEINLINE long long __RV_DKMABB32(long long t, unsigned long long a, unsigned long long b)
23731 {
23732  __ASM volatile("dkmabb32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
23733  return t;
23734 }
23735 /* ===== Inline Function End for DKMABB32 ===== */
23736 
23737 /* ===== Inline Function Start for DKMABT32 ===== */
23778 __STATIC_FORCEINLINE long long __RV_DKMABT32(long long t, unsigned long long a, unsigned long long b)
23779 {
23780  __ASM volatile("dkmabt32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
23781  return t;
23782 }
23783 /* ===== Inline Function End for DKMABT32 ===== */
23784 
23785 /* ===== Inline Function Start for DKMATT32 ===== */
23826 __STATIC_FORCEINLINE long long __RV_DKMATT32(long long t, unsigned long long a, unsigned long long b)
23827 {
23828  __ASM volatile("dkmatt32 %0, %1, %2" : "+r"(t) : "r"(a), "r"(b));
23829  return t;
23830 }
23831 /* ===== Inline Function End for DKMATT32 ===== */
23832 #endif /* __RISCV_XLEN == 32 */
23833 
23834 #elif defined (__ICCRISCV__)
23835 
23836 #if __riscv_xlen == 32
23837 #include "iar_nds32_intrinsic.h"
23838 #elif __riscv_xlen == 64
23839 #include "iar_nds64_intrinsic.h"
23840 #else
23841 #error "Unexpected RISC-V XLEN size."
23842 #endif /* __riscv_xlen == 32 */
23843 
23844 #pragma language=save
23845 #pragma language=extended
23846 
// Map the NMSIS __RV_* intrinsic names onto the compatible intrinsics supplied by IAR.
// NOTE: immediate-shift variants (SRAI8/SRAI16) map to the register-shift IAR
// intrinsics — the shift amount is simply passed as the second argument.
#define __RV_CLROV __nds__clrov
#define __RV_RDOV __nds__rdov
#define __RV_ADD8 __nds__add8
#define __RV_SUB8 __nds__sub8
#define __RV_ADD16 __nds__add16
#define __RV_SUB16 __nds__sub16
#define __RV_ADD64 __nds__add64
#define __RV_SUB64 __nds__sub64
#define __RV_RADD8 __nds__radd8
#define __RV_RSUB8 __nds__rsub8
#define __RV_RADD16 __nds__radd16
#define __RV_RSUB16 __nds__rsub16
#define __RV_RADD64 __nds__radd64
#define __RV_RSUB64 __nds__rsub64
#define __RV_RADDW __nds__raddw
#define __RV_RSUBW __nds__rsubw
#define __RV_URADD8 __nds__uradd8
#define __RV_URSUB8 __nds__ursub8
#define __RV_URADD16 __nds__uradd16
#define __RV_URSUB16 __nds__ursub16
#define __RV_URADD64 __nds__uradd64
#define __RV_URSUB64 __nds__ursub64
#define __RV_URADDW __nds__uraddw
#define __RV_URSUBW __nds__ursubw
#define __RV_KADD8 __nds__kadd8
#define __RV_KSUB8 __nds__ksub8
#define __RV_KADD16 __nds__kadd16
#define __RV_KSUB16 __nds__ksub16
#define __RV_KADD64 __nds__kadd64
#define __RV_KSUB64 __nds__ksub64
#define __RV_KADDH __nds__kaddh
#define __RV_KSUBH __nds__ksubh
#define __RV_KADDW __nds__kaddw
#define __RV_KSUBW __nds__ksubw
#define __RV_UKADD8 __nds__ukadd8
#define __RV_UKSUB8 __nds__uksub8
#define __RV_UKADD16 __nds__ukadd16
#define __RV_UKSUB16 __nds__uksub16
#define __RV_UKADD64 __nds__ukadd64
#define __RV_UKSUB64 __nds__uksub64
#define __RV_UKADDH __nds__ukaddh
#define __RV_UKSUBH __nds__uksubh
#define __RV_UKADDW __nds__ukaddw
#define __RV_UKSUBW __nds__uksubw
#define __RV_CRAS16 __nds__cras16
#define __RV_CRSA16 __nds__crsa16
#define __RV_RCRAS16 __nds__rcras16
#define __RV_RCRSA16 __nds__rcrsa16
#define __RV_URCRAS16 __nds__urcras16
#define __RV_URCRSA16 __nds__urcrsa16
#define __RV_KCRAS16 __nds__kcras16
#define __RV_KCRSA16 __nds__kcrsa16
#define __RV_UKCRAS16 __nds__ukcras16
#define __RV_UKCRSA16 __nds__ukcrsa16
#define __RV_SRA8 __nds__sra8
#define __RV_SRAI8 __nds__sra8
#define __RV_SRA16 __nds__sra16
#define __RV_SRAI16 __nds__sra16
#define __RV_SRL8 __nds__srl8
#define __RV_SRL16 __nds__srl16
#define __RV_SLL8 __nds__sll8
#define __RV_SLL16 __nds__sll16
#define __RV_SRA_U __nds__sra_u
#define __RV_SRA8_U __nds__sra8_u
#define __RV_SRA16_U __nds__sra16_u
#define __RV_SRL8_U __nds__srl8_u
#define __RV_SRL16_U __nds__srl16_u
#define __RV_KSLL8 __nds__ksll8
#define __RV_KSLL16 __nds__ksll16
#define __RV_KSLLW __nds__ksllw
#define __RV_KSLRA8 __nds__kslra8
#define __RV_KSLRA8_U __nds__kslra8_u
#define __RV_KSLRA16 __nds__kslra16
#define __RV_KSLRA16_U __nds__kslra16_u
#define __RV_KSLRAW __nds__kslraw
#define __RV_KSLRAW_U __nds__kslraw_u
#define __RV_CMPEQ8 __nds__cmpeq8
#define __RV_CMPEQ16 __nds__cmpeq16
#define __RV_SCMPLE8 __nds__scmple8
#define __RV_SCMPLE16 __nds__scmple16
#define __RV_SCMPLT8 __nds__scmplt8
#define __RV_SCMPLT16 __nds__scmplt16
#define __RV_UCMPLE8 __nds__ucmple8
#define __RV_UCMPLE16 __nds__ucmple16
#define __RV_UCMPLT8 __nds__ucmplt8
#define __RV_UCMPLT16 __nds__ucmplt16
#define __RV_SMUL8 __nds__smul8
#define __RV_UMUL8 __nds__umul8
#define __RV_SMUL16 __nds__smul16
#define __RV_UMUL16 __nds__umul16
#define __RV_SMULX8 __nds__smulx8
#define __RV_UMULX8 __nds__umulx8
#define __RV_SMULX16 __nds__smulx16
#define __RV_UMULX16 __nds__umulx16
#define __RV_KHM8 __nds__khm8
#define __RV_KHMX8 __nds__khmx8
#define __RV_KHM16 __nds__khm16
#define __RV_KHMX16 __nds__khmx16
#define __RV_MULR64 __nds__mulr64
#define __RV_MULSR64 __nds__mulsr64
#define __RV_SMMUL __nds__smmul
#define __RV_SMMUL_U __nds__smmul_u
#define __RV_WEXT __nds__wext
#define __RV_SUNPKD810 __nds__sunpkd810
#define __RV_SUNPKD820 __nds__sunpkd820
#define __RV_SUNPKD830 __nds__sunpkd830
#define __RV_SUNPKD831 __nds__sunpkd831
#define __RV_SUNPKD832 __nds__sunpkd832
#define __RV_ZUNPKD810 __nds__zunpkd810
#define __RV_ZUNPKD820 __nds__zunpkd820
#define __RV_ZUNPKD830 __nds__zunpkd830
#define __RV_ZUNPKD831 __nds__zunpkd831
#define __RV_ZUNPKD832 __nds__zunpkd832
#define __RV_PKBB16 __nds__pkbb16
#define __RV_PKBT16 __nds__pkbt16
#define __RV_PKTT16 __nds__pktt16
#define __RV_PKTB16 __nds__pktb16
#define __RV_KMMAC __nds__kmmac
#define __RV_KMMAC_U __nds__kmmac_u
#define __RV_KMMSB __nds__kmmsb
#define __RV_KMMSB_U __nds__kmmsb_u
#define __RV_KWMMUL __nds__kwmmul
#define __RV_KWMMUL_U __nds__kwmmul_u
#define __RV_SMMWB __nds__smmwb
#define __RV_SMMWB_U __nds__smmwb_u
#define __RV_SMMWT __nds__smmwt
#define __RV_SMMWT_U __nds__smmwt_u
#define __RV_KMMAWB __nds__kmmawb
#define __RV_KMMAWB_U __nds__kmmawb_u
#define __RV_KMMAWT __nds__kmmawt
#define __RV_KMMAWT_U __nds__kmmawt_u
#define __RV_KMMWB2 __nds__kmmwb2
#define __RV_KMMWB2_U __nds__kmmwb2_u
#define __RV_KMMWT2 __nds__kmmwt2
#define __RV_KMMWT2_U __nds__kmmwt2_u
#define __RV_KMMAWB2 __nds__kmmawb2
#define __RV_KMMAWB2_U __nds__kmmawb2_u
#define __RV_KMMAWT2 __nds__kmmawt2
#define __RV_KMMAWT2_U __nds__kmmawt2_u
#define __RV_SMBB16 __nds__smbb16
#define __RV_SMBT16 __nds__smbt16
#define __RV_SMTT16 __nds__smtt16
#define __RV_KMDA __nds__kmda
#define __RV_KMXDA __nds__kmxda
#define __RV_SMDS __nds__smds
#define __RV_SMDRS __nds__smdrs
#define __RV_SMXDS __nds__smxds
#define __RV_KMABB __nds__kmabb
#define __RV_KMABT __nds__kmabt
#define __RV_KMATT __nds__kmatt
#define __RV_KMADA __nds__kmada
#define __RV_KMAXDA __nds__kmaxda
#define __RV_KMADS __nds__kmads
#define __RV_KMADRS __nds__kmadrs
#define __RV_KMAXDS __nds__kmaxds
#define __RV_KMSDA __nds__kmsda
#define __RV_KMSXDA __nds__kmsxda
#define __RV_SMAL __nds__smal
#define __RV_SMAQA __nds__smaqa
#define __RV_UMAQA __nds__umaqa
#define __RV_SMAQA_SU __nds__smaqa_su
#define __RV_SMAR64 __nds__smar64
#define __RV_SMSR64 __nds__smsr64
#define __RV_UMAR64 __nds__umar64
#define __RV_UMSR64 __nds__umsr64
#define __RV_KMAR64 __nds__kmar64
#define __RV_KMSR64 __nds__kmsr64
#define __RV_UKMAR64 __nds__ukmar64
#define __RV_UKMSR64 __nds__ukmsr64
#define __RV_SMALBB __nds__smalbb
#define __RV_SMALBT __nds__smalbt
#define __RV_SMALTT __nds__smaltt
#define __RV_SMALDA __nds__smalda
#define __RV_SMALXDA __nds__smalxda
#define __RV_SMALDS __nds__smalds
#define __RV_SMALDRS __nds__smaldrs
#define __RV_SMALXDS __nds__smalxds
#define __RV_SMSLDA __nds__smslda
#define __RV_SMSLXDA __nds__smslxda
#define __RV_MINW __nds__minw
#define __RV_MAXW __nds__maxw
#define __RV_SMIN8 __nds__smin8
#define __RV_SMAX8 __nds__smax8
#define __RV_SMIN16 __nds__smin16
#define __RV_SMAX16 __nds__smax16
#define __RV_UMIN8 __nds__umin8
#define __RV_UMAX8 __nds__umax8
#define __RV_UMIN16 __nds__umin16
#define __RV_UMAX16 __nds__umax16
#define __RV_KABS8 __nds__kabs8
#define __RV_KABS16 __nds__kabs16
#define __RV_KABSW __nds__kabsw
#define __RV_SCLIP8 __nds__sclip8
#define __RV_SCLIP16 __nds__sclip16
#define __RV_SCLIP32 __nds__sclip32
#define __RV_UCLIP8 __nds__uclip8
#define __RV_UCLIP16 __nds__uclip16
#define __RV_UCLIP32 __nds__uclip32
#define __RV_CLO8 __nds__clo8
#define __RV_CLO16 __nds__clo16
#define __RV_CLO32 __nds__clo32
#define __RV_CLZ8 __nds__clz8
#define __RV_CLZ16 __nds__clz16
#define __RV_CLZ32 __nds__clz32
#define __RV_CLRS8 __nds__clrs8
#define __RV_CLRS16 __nds__clrs16
#define __RV_CLRS32 __nds__clrs32
#define __RV_SWAP8 __nds__swap8
#define __RV_SWAP16 __nds__swap16
#define __RV_KHMBB __nds__khmbb
#define __RV_KHMBT __nds__khmbt
#define __RV_KHMTT __nds__khmtt
#define __RV_KDMBB __nds__kdmbb
#define __RV_KDMBT __nds__kdmbt
#define __RV_KDMTT __nds__kdmtt
#define __RV_KDMABB __nds__kdmabb
#define __RV_KDMABT __nds__kdmabt
#define __RV_KDMATT __nds__kdmatt
#define __RV_MADDR32 __nds__maddr32
#define __RV_MSUBR32 __nds__msubr32
#define __RV_PBSAD __nds__pbsad
#define __RV_PBSADA __nds__pbsada
#define __RV_AVE __nds__ave
#define __RV_BITREV __nds__bitrev
#define __RV_INSB __nds__insb
24073 
#if (__riscv_xlen == 64)
// RV64-only intrinsic name mappings. As above, immediate-shift variants
// (SRAI32/SLLI32) map to the register-shift IAR intrinsics.
#define __RV_ADD32 __nds__add32
#define __RV_SUB32 __nds__sub32
#define __RV_RADD32 __nds__radd32
#define __RV_RSUB32 __nds__rsub32
#define __RV_URADD32 __nds__uradd32
#define __RV_URSUB32 __nds__ursub32
#define __RV_KADD32 __nds__kadd32
#define __RV_KSUB32 __nds__ksub32
#define __RV_UKADD32 __nds__ukadd32
#define __RV_UKSUB32 __nds__uksub32
#define __RV_CRAS32 __nds__cras32
#define __RV_CRSA32 __nds__crsa32
#define __RV_RCRAS32 __nds__rcras32
#define __RV_RCRSA32 __nds__rcrsa32
#define __RV_URCRAS32 __nds__urcras32
#define __RV_URCRSA32 __nds__urcrsa32
#define __RV_KCRAS32 __nds__kcras32
#define __RV_KCRSA32 __nds__kcrsa32
#define __RV_UKCRAS32 __nds__ukcras32
#define __RV_UKCRSA32 __nds__ukcrsa32
#define __RV_SRA32 __nds__sra32
#define __RV_SRAI32 __nds__sra32
#define __RV_SRL32 __nds__srl32
#define __RV_SLL32 __nds__sll32
#define __RV_SLLI32 __nds__sll32
#define __RV_SRAW_U __nds__sraw_u
#define __RV_SRA32_U __nds__sra32_u
#define __RV_SRL32_U __nds__srl32_u
#define __RV_KSLL32 __nds__ksll32
#define __RV_KSLRA32 __nds__kslra32
#define __RV_KSLRA32_U __nds__kslra32_u
#define __RV_SMBB32 __nds__smbb32
#define __RV_SMBT32 __nds__smbt32
#define __RV_SMTT32 __nds__smtt32
#define __RV_PKBB32 __nds__pkbb32
#define __RV_PKBT32 __nds__pkbt32
#define __RV_PKTT32 __nds__pktt32
#define __RV_PKTB32 __nds__pktb32
#define __RV_SMIN32 __nds__smin32
#define __RV_SMAX32 __nds__smax32
#define __RV_UMIN32 __nds__umin32
#define __RV_UMAX32 __nds__umax32
#define __RV_KABS32 __nds__kabs32
#define __RV_KHMBB16 __nds__khmbb16
#define __RV_KHMBT16 __nds__khmbt16
#define __RV_KHMTT16 __nds__khmtt16
#define __RV_KDMBB16 __nds__kdmbb16
#define __RV_KDMBT16 __nds__kdmbt16
#define __RV_KDMTT16 __nds__kdmtt16
#define __RV_KDMABB16 __nds__kdmabb16
#define __RV_KDMABT16 __nds__kdmabt16
#define __RV_KDMATT16 __nds__kdmatt16
#define __RV_KMABB32 __nds__kmabb32
#define __RV_KMABT32 __nds__kmabt32
#define __RV_KMATT32 __nds__kmatt32
#define __RV_KMDA32 __nds__kmda32
#define __RV_KMXDA32 __nds__kmxda32
#define __RV_KMADA32 __nds__kmada32
#define __RV_KMAXDA32 __nds__kmaxda32
#define __RV_KMADS32 __nds__kmads32
#define __RV_KMADRS32 __nds__kmadrs32
#define __RV_KMAXDS32 __nds__kmaxds32
#define __RV_KMSDA32 __nds__kmsda32
#define __RV_KMSXDA32 __nds__kmsxda32
#define __RV_SMDS32 __nds__smds32
#define __RV_SMDRS32 __nds__smdrs32
#define __RV_SMXDS32 __nds__smxds32
#endif /* __riscv_xlen == 64 */
24143 
// For now, the P-extension version supported by the IAR toolchain is 0.5.0, while
// Nuclei supports 0.5.4, so Nuclei supplies a workaround that hand-encodes, via
// `.insn`, the instructions not natively supported by the IAR assembler. Only the
// Nuclei Xxldsp custom instruction set is covered here: `bpick` is not implemented
// (__RV_BPICK remains to be done) and the expd8x helpers are implemented in C,
// not via `.insn`.

/** STAS16 — encoded via `.insn r` (opcode 0x7F, funct3 0x2, funct7 0x7A). */
#pragma inline=forced_no_body
unsigned long __RV_STAS16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x7A, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** RSTAS16 — `.insn r` encoding, funct7 0x5A. */
#pragma inline=forced_no_body
unsigned long __RV_RSTAS16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x5A, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** KSTAS16 — `.insn r` encoding, funct7 0x62. */
#pragma inline=forced_no_body
unsigned long __RV_KSTAS16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x62, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** URSTAS16 — `.insn r` encoding, funct7 0x6A. */
#pragma inline=forced_no_body
unsigned long __RV_URSTAS16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x6A, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** UKSTAS16 — `.insn r` encoding, funct7 0x72. */
#pragma inline=forced_no_body
unsigned long __RV_UKSTAS16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x72, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** STSA16 — `.insn r` encoding, funct7 0x7B. */
#pragma inline=forced_no_body
unsigned long __RV_STSA16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x7B, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** RSTSA16 — `.insn r` encoding, funct7 0x5B. */
#pragma inline=forced_no_body
unsigned long __RV_RSTSA16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x5B, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** KSTSA16 — `.insn r` encoding, funct7 0x63. */
#pragma inline=forced_no_body
unsigned long __RV_KSTSA16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x63, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** URSTSA16 — `.insn r` encoding, funct7 0x6B. */
#pragma inline=forced_no_body
unsigned long __RV_URSTSA16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x6B, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** UKSTSA16 — `.insn r` encoding, funct7 0x73. */
#pragma inline=forced_no_body
unsigned long __RV_UKSTSA16(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x73, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
24219 
// #pragma inline=forced_no_body
// unsigned long __RV_BPICK(unsigned long a, unsigned long b, unsigned long c) {
    // TODO: remains to be done
// }

// RV64 only.
// NOTE(review): these RV64-only encodings are not wrapped in an
// `#if __riscv_xlen == 64` guard like the define map above — confirm whether
// they are intended to be visible (but unusable) on RV32 builds.

/** STAS32 — `.insn r` encoding, funct7 0x78. */
#pragma inline=forced_no_body
unsigned long __RV_STAS32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x78, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** RSTAS32 — `.insn r` encoding, funct7 0x58. */
#pragma inline=forced_no_body
unsigned long __RV_RSTAS32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x58, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** KSTAS32 — `.insn r` encoding, funct7 0x60. */
#pragma inline=forced_no_body
unsigned long __RV_KSTAS32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x60, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** URSTAS32 — `.insn r` encoding, funct7 0x68. */
#pragma inline=forced_no_body
unsigned long __RV_URSTAS32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x68, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** UKSTAS32 — `.insn r` encoding, funct7 0x70. */
#pragma inline=forced_no_body
unsigned long __RV_UKSTAS32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x70, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** STSA32 — `.insn r` encoding, funct7 0x79. */
#pragma inline=forced_no_body
unsigned long __RV_STSA32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x79, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** RSTSA32 — `.insn r` encoding, funct7 0x59. */
#pragma inline=forced_no_body
unsigned long __RV_RSTSA32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x59, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** KSTSA32 — `.insn r` encoding, funct7 0x61. */
#pragma inline=forced_no_body
unsigned long __RV_KSTSA32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x61, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** URSTSA32 — `.insn r` encoding, funct7 0x69. */
#pragma inline=forced_no_body
unsigned long __RV_URSTSA32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x69, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}

/** UKSTSA32 — `.insn r` encoding, funct7 0x71. */
#pragma inline=forced_no_body
unsigned long __RV_UKSTSA32(unsigned long a, unsigned long b) {
    unsigned long r;
    __asm(".insn r 0x7F, 0x2, 0x71, %0,%1,%2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
24295 
24296 #pragma inline=forced_no_body
24297 unsigned long __RV_EXPD80(unsigned long a)
24298 {
24299  return __EXPD_BYTE((uint8_t)(a & 0xff));
24300 }
24301 
24302 #pragma inline=forced_no_body
24303 unsigned long __RV_EXPD81(unsigned long a)
24304 {
24305  return __EXPD_BYTE((uint8_t)((a >> 8) & 0xff));
24306 }
24307 
24308 #pragma inline=forced_no_body
24309 unsigned long __RV_EXPD82(unsigned long a)
24310 {
24311  return __EXPD_BYTE((uint8_t)((a >> 16) & 0xff));
24312 }
24313 
24314 #pragma inline=forced_no_body
24315 unsigned long __RV_EXPD83(unsigned long a)
24316 {
24317  return __EXPD_BYTE((uint8_t)((a >> 24) & 0xff));
24318 }
24319 
24320 #if __RISCV_XLEN == 64
24321 // RV64 only
24322 #pragma inline=forced_no_body
24323 unsigned long __RV_EXPD84(unsigned long a)
24324 {
24325  return __EXPD_BYTE((uint8_t)((a >> 32) & 0xff));
24326 }
24327 
24328 #pragma inline=forced_no_body
24329 unsigned long __RV_EXPD85(unsigned long a)
24330 {
24331  return __EXPD_BYTE((uint8_t)((a >> 40) & 0xff));
24332 }
24333 
24334 #pragma inline=forced_no_body
24335 unsigned long __RV_EXPD86(unsigned long a)
24336 {
24337  return __EXPD_BYTE((uint8_t)((a >> 48) & 0xff));
24338 }
24339 
24340 #pragma inline=forced_no_body
24341 unsigned long __RV_EXPD87(unsigned long a)
24342 {
24343  return __EXPD_BYTE((uint8_t)((a >> 56) & 0xff));
24344 }
24345 #endif
24346 #pragma language=restore
24347 
24348 #else
24349  #error Unknown compiler
24350 #endif /* __ICCRISCV__ */
24351 
24352 
/* XXXXX ARM Compatible SIMD API XXXXX */
/* ARM CMSIS-style saturating/halving SIMD helpers mapped onto RISC-V DSP
 * intrinsics. All macro arguments are parenthesized for expansion safety
 * (__QADD8 previously lacked the parentheses its siblings have). */
#define __QADD8(x, y) __RV_KADD8((x), (y))

#define __QSUB8(x, y) __RV_KSUB8((x), (y))

#define __QADD16(x, y) __RV_KADD16((x), (y))

#define __SHADD16(x, y) __RV_RADD16((x), (y))

#define __QSUB16(x, y) __RV_KSUB16((x), (y))

#define __SHSUB16(x, y) __RV_RSUB16((x), (y))

#define __QASX(x, y) __RV_KCRAS16((x), (y))

#define __SHASX(x, y) __RV_RCRAS16((x), (y))

#define __QSAX(x, y) __RV_KCRSA16((x), (y))

#define __SHSAX(x, y) __RV_RCRSA16((x), (y))

/* Operand order is deliberately swapped for SMUSDX. */
#define __SMUSDX(x, y) __RV_SMXDS((y), (x))
24376 
24377 __STATIC_FORCEINLINE long __SMUADX (unsigned long op1, unsigned long op2)
24378 {
24379  return __RV_KMXDA(op1, op2);
24380 }
24382 #define __QADD(x, y) __RV_KADDW((x), (y))
24383 
24384 #define __QSUB(x, y) __RV_KSUBW((x), (y))
24385 
24386 __STATIC_FORCEINLINE long __SMLAD(unsigned long op1, unsigned long op2, long acc)
24387 {
24388  return __RV_KMADA(acc, op1, op2);
24389 }
24391 __STATIC_FORCEINLINE long __SMLADX(unsigned long op1, unsigned long op2, long acc)
24392 {
24393  return __RV_KMAXDA(acc, op1, op2);
24394 }
24396 __STATIC_FORCEINLINE long __SMLSDX(unsigned long op1, unsigned long op2, long acc)
24397 {
24398  return (acc - __RV_SMXDS(op1, op2));
24399 }
24401 __STATIC_FORCEINLINE long long __SMLALD(unsigned long op1, unsigned long op2, long long acc)
24402 {
24403  return __RV_SMALDA(acc, op1, op2);
24404 }
24406 __STATIC_FORCEINLINE long long __SMLALDX(unsigned long op1, unsigned long op2, long long acc)
24407 {
24408  return __RV_SMALXDA(acc, op1, op2);
24409 }
24411 __STATIC_FORCEINLINE long __SMUAD(unsigned long op1, unsigned long op2)
24412 {
24413  return __RV_KMDA(op1, op2);
24414 }
24416 __STATIC_FORCEINLINE long __SMUSD(unsigned long op1, unsigned long op2)
24417 {
24418  return __RV_SMDRS(op1, op2);
24419 }
24421 #define __SXTB16(x) __RV_SUNPKD820(x)
24422 
24423 __STATIC_FORCEINLINE unsigned long __SXTAB16(unsigned long op1, unsigned long op2)
24424 {
24425  return __RV_ADD16(op1, __RV_SUNPKD820(op2));
24426 }
24427 #define __SXTAB16_RORn(ARG1, ARG2, ROTATE) __SXTAB16(ARG1, __ROR(ARG2, ROTATE))
24428 
24430 __STATIC_FORCEINLINE long __SMMLA(long op1, long op2, long acc)
24431 {
24432  long mul;
24433  mul = __RV_SMMUL(op1, op2);
24434  return (acc + mul);
24435 }
/* Short aliases for NMSIS-DSP library use.
 * NOTE(review): __SADD8/__SSUB8 are mapped to the saturating KADD8/KSUB8
 * rather than modular add/sub — confirm this is the intended ARM-compat
 * behavior before relying on wraparound semantics. */
#define __DKHM8 __RV_DKHM8
#define __DKHM16 __RV_DKHM16
#define __DKSUB16 __RV_DKSUB16
#define __SMAQA __RV_SMAQA
#define __MULSR64 __RV_MULSR64
#define __DQADD8 __RV_DKADD8
#define __DQSUB8 __RV_DKSUB8
#define __DKADD16 __RV_DKADD16
#define __PKBB16 __RV_PKBB16
#define __DKSLRA16 __RV_DKSLRA16
#define __DKSLRA8 __RV_DKSLRA8
#define __KABSW __RV_KABSW
#define __DKABS8 __RV_DKABS8
#define __DKABS16 __RV_DKABS16
#define __SMALDA __RV_SMALDA
#define __SMSLDA __RV_SMSLDA
#define __SMALBB __RV_SMALBB
#define __SUB64 __RV_SUB64
#define __ADD64 __RV_ADD64
#define __SMBB16 __RV_SMBB16
#define __SMBT16 __RV_SMBT16
#define __SMTT16 __RV_SMTT16
#define __EXPD80 __RV_EXPD80
#define __SMAX8 __RV_SMAX8
#define __SMAX16 __RV_SMAX16
#define __PKTT16 __RV_PKTT16
#define __KADD16 __RV_KADD16
#define __SADD16 __RV_ADD16
#define __SSUB8 __RV_KSUB8
#define __SADD8 __RV_KADD8
#define __USAT16 __RV_UCLIP16
#define __SMALTT __RV_SMALTT
24468 
/* __PKHBT: pack bottom halfword of ARG1 with ARG2 shifted left by ARG3.
 * ARG3 is parenthesized in the comparisons (previously bare, which broke
 * for expression arguments such as `n + 1`). */
#define __PKHBT(ARG1, ARG2, ARG3) (((ARG3) == 0) ? __RV_PKTB16((ARG2), (ARG1)) : \
                                   ((ARG3) == 16) ? __RV_PKBB16((ARG2), (ARG1)) : \
                                   (((((uint32_t)(ARG1))) & 0x0000FFFFUL) | \
                                    ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)))

/* __PKHTB: pack top halfword of ARG1 with ARG2 shifted right by ARG3. */
#define __PKHTB(ARG1, ARG2, ARG3) (((ARG3) == 0) ? __RV_PKTB16((ARG1), (ARG2)) : \
                                   ((ARG3) == 16) ? __RV_PKTT16((ARG1), (ARG2)) : \
                                   (((((uint32_t)(ARG1))) & 0xFFFF0000UL) | \
                                    ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)))

/* __SXTB16 applied to ARG1 rotated right by ARG2. */
#define __SXTB16_RORn(ARG1, ARG2) __RV_SUNPKD820(__ROR((ARG1), (ARG2)))
24483 
24484 #endif /* defined(__DSP_PRESENT) && (__DSP_PRESENT == 1) */
24485 
24486 #ifdef __cplusplus
24487 }
24488 #endif
24489 
24490 #endif /* __CORE_FEATURE_DSP__ */
__RV_CLZ16
__STATIC_FORCEINLINE unsigned long __RV_CLZ16(unsigned long a)
CLZ16 (SIMD 16-bit Count Leading Zero)
Definition: core_feature_dsp.h:1105
__RV_CLRS8
__STATIC_FORCEINLINE unsigned long __RV_CLRS8(unsigned long a)
CLRS8 (SIMD 8-bit Count Leading Redundant Sign)
Definition: core_feature_dsp.h:776
__RV_DKMAXDA
__STATIC_FORCEINLINE unsigned long long __RV_DKMAXDA(unsigned long long t, unsigned long long a, unsigned long long b)
DKMAXDA (Two Cross 16x16 with 32-bit Signed Double Add)
Definition: core_feature_dsp.h:21855
__RV_DRCRAS16
__STATIC_FORCEINLINE unsigned long long __RV_DRCRAS16(unsigned long long a, unsigned long long b)
DRCRAS16 (16-bit Signed Halving Cross Addition & Subtraction)
Definition: core_feature_dsp.h:20832
__RV_UCMPLE8
__STATIC_FORCEINLINE unsigned long __RV_UCMPLE8(unsigned long a, unsigned long b)
UCMPLE8 (SIMD 8-bit Unsigned Compare Less Than & Equal)
Definition: core_feature_dsp.h:11254
__RV_UKCRSA16
__STATIC_FORCEINLINE unsigned long __RV_UKCRSA16(unsigned long a, unsigned long b)
UKCRSA16 (SIMD 16-bit Unsigned Saturating Cross Subtraction & Addition)
Definition: core_feature_dsp.h:11720
__RV_UKMSR64
__STATIC_FORCEINLINE unsigned long long __RV_UKMSR64(unsigned long long t, unsigned long a, unsigned long b)
UKMSR64 (Unsigned Multiply and Saturating Subtract from 64-Bit Data)
Definition: core_feature_dsp.h:11850
__RV_CLROV
__STATIC_FORCEINLINE void __RV_CLROV(void)
CLROV (Clear OV flag)
Definition: core_feature_dsp.h:731
__RV_DPKTB32
__STATIC_FORCEINLINE unsigned long long __RV_DPKTB32(unsigned long long a, unsigned long long b)
DPKTB32 (Pack Two 32-bit Data from Top and Bottom Half)
Definition: core_feature_dsp.h:20311
__RV_PKBT16
__STATIC_FORCEINLINE unsigned long __RV_PKBT16(unsigned long a, unsigned long b)
PKBT16 (Pack Two 16-bit Data from Bottom and Top Half)
Definition: core_feature_dsp.h:6156
__RV_KMADRS
__STATIC_FORCEINLINE long __RV_KMADRS(long t, unsigned long a, unsigned long b)
KMADRS (SIMD Saturating Signed Multiply Two Halfs & Reverse Subtract & Add)
Definition: core_feature_dsp.h:3103
__RV_RCRAS32
__STATIC_FORCEINLINE unsigned long __RV_RCRAS32(unsigned long a, unsigned long b)
RCRAS32 (SIMD 32-bit Signed Halving Cross Addition & Subtraction)
Definition: core_feature_dsp.h:15986
__RV_DSMXS32_U
__STATIC_FORCEINLINE long __RV_DSMXS32_U(unsigned long long a, unsigned long long b)
DSMXS32.u (64-bit SIMD 32-bit Signed Multiply Cross Subtraction With Rounding and Clip)
Definition: core_feature_dsp.h:23306
__RV_PBSADA
__STATIC_FORCEINLINE unsigned long __RV_PBSADA(unsigned long t, unsigned long a, unsigned long b)
PBSADA (Parallel Byte Sum of Absolute Difference Accum)
Definition: core_feature_dsp.h:6055
__RV_KMABB
__STATIC_FORCEINLINE long __RV_KMABB(long t, unsigned long a, unsigned long b)
KMABB (SIMD Saturating Signed Multiply Bottom Halfs & Add)
Definition: core_feature_dsp.h:2705
__RV_DKHM8
__STATIC_FORCEINLINE unsigned long long __RV_DKHM8(unsigned long long a, unsigned long long b)
DKHM8 (64-bit SIMD Signed Saturating Q7 Multiply)
Definition: core_feature_dsp.h:18251
__RV_WEXT
__STATIC_FORCEINLINE unsigned long __RV_WEXT(long long a, unsigned int b)
WEXT (Extract Word from 64-bit)
Definition: core_feature_dsp.h:13537
__RV_PKBB16
__STATIC_FORCEINLINE unsigned long __RV_PKBB16(unsigned long a, unsigned long b)
PKBB16 (Pack Two 16-bit Data from Both Bottom Half)
Definition: core_feature_dsp.h:6105
__RV_SRL16
__STATIC_FORCEINLINE unsigned long __RV_SRL16(unsigned long a, unsigned int b)
SRL16 (SIMD 16-bit Shift Right Logical)
Definition: core_feature_dsp.h:10377
__RV_KADD64
__STATIC_FORCEINLINE long long __RV_KADD64(long long a, long long b)
KADD64 (64-bit Signed Saturating Addition)
Definition: core_feature_dsp.h:1666
__RV_RSTAS16
__STATIC_FORCEINLINE unsigned long __RV_RSTAS16(unsigned long a, unsigned long b)
RSTAS16 (SIMD 16-bit Signed Halving Straight Addition & Subtraction)
Definition: core_feature_dsp.h:6623
__RV_SMMUL_U
__STATIC_FORCEINLINE long __RV_SMMUL_U(long a, long b)
SMMUL.u (SIMD MSW Signed Multiply Word with Rounding)
Definition: core_feature_dsp.h:8816
__RV_KMDA
__STATIC_FORCEINLINE long __RV_KMDA(unsigned long a, unsigned long b)
KMDA (SIMD Signed Multiply Two Halfs and Add)
Definition: core_feature_dsp.h:3293
__RV_SMXDS
__STATIC_FORCEINLINE long __RV_SMXDS(unsigned long a, unsigned long b)
SMXDS (SIMD Signed Crossed Multiply Two Halfs and Subtract)
Definition: core_feature_dsp.h:8636
__RV_DKMMSB
__STATIC_FORCEINLINE unsigned long long __RV_DKMMSB(unsigned long long t, unsigned long long a, unsigned long long b)
DKMMSB (64-bit MSW 32x32 Signed Multiply and Saturating Sub)
Definition: core_feature_dsp.h:21715
__RV_KMMSB
__STATIC_FORCEINLINE long __RV_KMMSB(long t, long a, long b)
KMMSB (SIMD Saturating MSW Signed Multiply Word and Subtract)
Definition: core_feature_dsp.h:4047
__RV_SMAX32
__STATIC_FORCEINLINE unsigned long __RV_SMAX32(unsigned long a, unsigned long b)
SMAX32 (SIMD 32-bit Signed Maximum)
Definition: core_feature_dsp.h:16290
__RV_KDMBT
__STATIC_FORCEINLINE long __RV_KDMBT(unsigned int a, unsigned int b)
KDMBT (Signed Saturating Double Multiply B16 x T16)
Definition: core_feature_dsp.h:1985
__RV_SRA16_U
__STATIC_FORCEINLINE unsigned long __RV_SRA16_U(unsigned long a, unsigned long b)
SRA16.u (SIMD 16-bit Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:10003
__RV_DSMTT32_SRA14
__STATIC_FORCEINLINE long long __RV_DSMTT32_SRA14(unsigned long long a, unsigned long long b)
DSMTT32.sra14 (Signed Multiply Top Word & Top Word with Right Shift 14-bit)
Definition: core_feature_dsp.h:20123
__RV_SMALBB
__STATIC_FORCEINLINE long long __RV_SMALBB(long long t, unsigned long a, unsigned long b)
SMALBB (Signed Multiply Bottom Halfs & Add 64-bit)
Definition: core_feature_dsp.h:7480
__RV_KMABT32
__STATIC_FORCEINLINE long __RV_KMABT32(long t, unsigned long a, unsigned long b)
KMABT32 (Saturating Signed Multiply Bottom & Top Words & Add)
Definition: core_feature_dsp.h:14736
__RV_SMIN32
__STATIC_FORCEINLINE unsigned long __RV_SMIN32(unsigned long a, unsigned long b)
SMIN32 (SIMD 32-bit Signed Minimum)
Definition: core_feature_dsp.h:16634
__RV_KMAXDS
__STATIC_FORCEINLINE long __RV_KMAXDS(long t, unsigned long a, unsigned long b)
KMAXDS (SIMD Saturating Signed Crossed Multiply Two Halfs & Subtract & Add)
Definition: core_feature_dsp.h:3175
__RV_KHMTT16
__STATIC_FORCEINLINE unsigned long __RV_KHMTT16(unsigned long a, unsigned long b)
KHMTT16 (SIMD Signed Saturating Half Multiply T16 x T16)
Definition: core_feature_dsp.h:14611
__RV_KSLRA32
__STATIC_FORCEINLINE unsigned long __RV_KSLRA32(unsigned long a, int b)
KSLRA32 (SIMD 32-bit Shift Left Logical with Saturation or Shift Right Arithmetic)
Definition: core_feature_dsp.h:15485
__RV_DADD16
__STATIC_FORCEINLINE unsigned long long __RV_DADD16(unsigned long long a, unsigned long long b)
DADD16 (16-bit Addition)
Definition: core_feature_dsp.h:20546
__RV_PKBT32
__STATIC_FORCEINLINE unsigned long __RV_PKBT32(unsigned long a, unsigned long b)
PKBT32 (Pack Two 32-bit Data from Bottom and Top Half)
Definition: core_feature_dsp.h:15797
__RV_URSUB64
__STATIC_FORCEINLINE unsigned long long __RV_URSUB64(unsigned long long a, unsigned long long b)
URSUB64 (64-bit Unsigned Halving Subtraction)
Definition: core_feature_dsp.h:13385
__RV_KSLL8
__STATIC_FORCEINLINE unsigned long __RV_KSLL8(unsigned long a, unsigned int b)
KSLL8 (SIMD 8-bit Saturating Shift Left Logical)
Definition: core_feature_dsp.h:4671
__RV_KMMAC_U
__STATIC_FORCEINLINE long __RV_KMMAC_U(long t, long a, long b)
KMMAC.u (SIMD Saturating MSW Signed Multiply Word and Add with Rounding)
Definition: core_feature_dsp.h:3467
__RV_DSMBT32_SRA32
__STATIC_FORCEINLINE long long __RV_DSMBT32_SRA32(unsigned long long a, unsigned long long b)
DSMBT32.sra32 (Signed Multiply Bottom Word & Top Word with Right Shift 32)
Definition: core_feature_dsp.h:20043
__RV_UKSTSA32
__STATIC_FORCEINLINE unsigned long __RV_UKSTSA32(unsigned long a, unsigned long b)
UKSTSA32 (SIMD 32-bit Unsigned Saturating Straight Subtraction & Addition)
Definition: core_feature_dsp.h:17483
__RV_UKSUB16
__STATIC_FORCEINLINE unsigned long __RV_UKSUB16(unsigned long a, unsigned long b)
UKSUB16 (SIMD 16-bit Unsigned Saturating Subtraction)
Definition: core_feature_dsp.h:12050
__RV_SRL32_U
__STATIC_FORCEINLINE unsigned long __RV_SRL32_U(unsigned long a, unsigned int b)
SRL32.u (SIMD 32-bit Rounding Shift Right Logical)
Definition: core_feature_dsp.h:17000
__RV_KHMX8
__STATIC_FORCEINLINE unsigned long __RV_KHMX8(unsigned long a, unsigned long b)
KHMX8 (SIMD Signed Saturating Crossed Q7 Multiply)
Definition: core_feature_dsp.h:2356
__RV_KMATT
__STATIC_FORCEINLINE long __RV_KMATT(long t, unsigned long a, unsigned long b)
KMATT (SIMD Saturating Signed Multiply Top Halfs & Add)
Definition: core_feature_dsp.h:2831
__RV_URSUB32
__STATIC_FORCEINLINE unsigned long __RV_URSUB32(unsigned long a, unsigned long b)
URSUB32 (SIMD 32-bit Unsigned Halving Subtraction)
Definition: core_feature_dsp.h:17883
__RV_SRL8
__STATIC_FORCEINLINE unsigned long __RV_SRL8(unsigned long a, unsigned int b)
SRL8 (SIMD 8-bit Shift Right Logical)
Definition: core_feature_dsp.h:10166
__RV_BITREV
__STATIC_FORCEINLINE unsigned long __RV_BITREV(unsigned long a, unsigned long b)
BITREV (Bit Reverse)
Definition: core_feature_dsp.h:623
__RV_DKMADRS
__STATIC_FORCEINLINE unsigned long long __RV_DKMADRS(unsigned long long t, unsigned long long a, unsigned long long b)
DKMADRS (Two 16x16 with 32-bit Signed Add and Reversed Sub)
Definition: core_feature_dsp.h:21949
__RV_SRA8_U
__STATIC_FORCEINLINE unsigned long __RV_SRA8_U(unsigned long a, unsigned int b)
SRA8.u (SIMD 8-bit Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:9787
__RV_AVE
__STATIC_FORCEINLINE long __RV_AVE(long a, long b)
AVE (Average with Rounding)
Definition: core_feature_dsp.h:582
__RV_DSMTT32
__STATIC_FORCEINLINE long long __RV_DSMTT32(unsigned long long a, unsigned long long b)
DSMTT32 (Signed Multiply Top Word & Top Word)
Definition: core_feature_dsp.h:20083
__RV_DKHMX8
__STATIC_FORCEINLINE unsigned long long __RV_DKHMX8(unsigned long long a, unsigned long long b)
DKHMX8 (64-bit SIMD Signed Crossed Saturating Q7 Multiply)
Definition: core_feature_dsp.h:18755
__RV_URSUBW
__STATIC_FORCEINLINE unsigned long __RV_URSUBW(unsigned int a, unsigned int b)
URSUBW (32-bit Unsigned Halving Subtraction)
Definition: core_feature_dsp.h:13433
__RV_MSUBR32
__STATIC_FORCEINLINE unsigned long __RV_MSUBR32(unsigned long t, unsigned long a, unsigned long b)
MSUBR32 (Multiply and Subtract from 32-Bit Word)
Definition: core_feature_dsp.h:5871
__RV_SMBB16
__STATIC_FORCEINLINE long __RV_SMBB16(unsigned long a, unsigned long b)
SMBB16 (SIMD Signed Multiply Bottom Half & Bottom Half)
Definition: core_feature_dsp.h:8355
__RV_SMMWB
__STATIC_FORCEINLINE long __RV_SMMWB(long a, unsigned long b)
SMMWB (SIMD MSW Signed Multiply Word and Bottom Half)
Definition: core_feature_dsp.h:8866
__RV_DSMBT16
__STATIC_FORCEINLINE unsigned long long __RV_DSMBT16(unsigned long long a, unsigned long long b)
DSMBT16 (Signed Multiply Bottom Half & Top Half)
Definition: core_feature_dsp.h:20666
__RV_SLL32
__STATIC_FORCEINLINE unsigned long __RV_SLL32(unsigned long a, unsigned int b)
SLL32 (SIMD 32-bit Shift Left Logical)
Definition: core_feature_dsp.h:16212
__RV_SMUL8
__STATIC_FORCEINLINE unsigned long long __RV_SMUL8(unsigned int a, unsigned int b)
SMUL8 (SIMD Signed 8-bit Multiply)
Definition: core_feature_dsp.h:9316
__RV_KHMBT16
__STATIC_FORCEINLINE unsigned long __RV_KHMBT16(unsigned long a, unsigned long b)
KHMBT16 (SIMD Signed Saturating Half Multiply B16 x T16)
Definition: core_feature_dsp.h:14558
__RV_EXPD83
__STATIC_FORCEINLINE unsigned long __RV_EXPD83(unsigned long a)
EXPD83 (Expand and Copy Byte 3 to 32bit(rv32) or 64bit(rv64))
Definition: core_feature_dsp.h:18033
__RV_UKCRSA32
__STATIC_FORCEINLINE unsigned long __RV_UKCRSA32(unsigned long a, unsigned long b)
UKCRSA32 (SIMD 32-bit Unsigned Saturating Cross Subtraction & Addition)
Definition: core_feature_dsp.h:17378
__RV_KSUBH
__STATIC_FORCEINLINE long __RV_KSUBH(int a, int b)
KSUBH (Signed Subtraction with Q15 Saturation)
Definition: core_feature_dsp.h:5543
__RV_DSMBT32_SRA14
__STATIC_FORCEINLINE long long __RV_DSMBT32_SRA14(unsigned long long a, unsigned long long b)
DSMBT32.sra14 (Signed Multiply Bottom Word & Top Word with Right Shift 14)
Definition: core_feature_dsp.h:20003
__RV_CMPEQ8
__STATIC_FORCEINLINE unsigned long __RV_CMPEQ8(unsigned long a, unsigned long b)
CMPEQ8 (SIMD 8-bit Integer Compare Equal)
Definition: core_feature_dsp.h:1194
__RV_KDMATT
__STATIC_FORCEINLINE long __RV_KDMATT(long t, unsigned int a, unsigned int b)
KDMATT (Signed Saturating Double Multiply Addition T16 x T16)
Definition: core_feature_dsp.h:2233
__RV_KHMTT
__STATIC_FORCEINLINE long __RV_KHMTT(unsigned int a, unsigned int b)
KHMTT (Signed Saturating Half Multiply T16 x T16)
Definition: core_feature_dsp.h:2641
__RV_KMXDA32
__STATIC_FORCEINLINE long __RV_KMXDA32(unsigned long a, unsigned long b)
KMXDA32 (Signed Crossed Multiply Two Words and Add)
Definition: core_feature_dsp.h:15013
__RV_KMMWB2_U
__STATIC_FORCEINLINE long __RV_KMMWB2_U(long a, unsigned long b)
KMMWB2.u (SIMD Saturating MSW Signed Multiply Word and Bottom Half & 2 with Rounding)
Definition: core_feature_dsp.h:4218
__RV_KDMABB
__STATIC_FORCEINLINE long __RV_KDMABB(long t, unsigned int a, unsigned int b)
KDMABB (Signed Saturating Double Multiply Addition B16 x B16)
Definition: core_feature_dsp.h:2105
__RV_RSUB16
__STATIC_FORCEINLINE unsigned long __RV_RSUB16(unsigned long a, unsigned long b)
RSUB16 (SIMD 16-bit Signed Halving Subtraction)
Definition: core_feature_dsp.h:6766
__RV_SMTT32
__STATIC_FORCEINLINE long __RV_SMTT32(unsigned long a, unsigned long b)
SMTT32 (Signed Multiply Top Word & Top Word)
Definition: core_feature_dsp.h:16437
__RV_DKABS32
__STATIC_FORCEINLINE unsigned long long __RV_DKABS32(unsigned long long a)
DKABS32 (64-bit SIMD 32-bit Saturating Absolute)
Definition: core_feature_dsp.h:19037
__RV_KMAXDS32
__STATIC_FORCEINLINE long __RV_KMAXDS32(long t, unsigned long a, unsigned long b)
KMAXDS32 (Saturating Signed Crossed Multiply Two Words & Subtract & Add)
Definition: core_feature_dsp.h:15206
__RV_KMSXDA32
__STATIC_FORCEINLINE long __RV_KMSXDA32(long t, unsigned long a, unsigned long b)
KMSXDA32 (Saturating Signed Crossed Multiply Two Words & Add & Subtract)
Definition: core_feature_dsp.h:15316
__RV_DSMMUL_U
__STATIC_FORCEINLINE unsigned long long __RV_DSMMUL_U(unsigned long long a, unsigned long long b)
DSMMULU (64-bit MSW 32x32 Unsigned Multiply)
Definition: core_feature_dsp.h:18898
__RV_SMALXDA
__STATIC_FORCEINLINE long long __RV_SMALXDA(long long t, unsigned long a, unsigned long b)
SMALXDA (Signed Crossed Multiply Two Halfs and Two Adds 64-bit)
Definition: core_feature_dsp.h:7812
__RV_ZUNPKD820
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD820(unsigned long a)
ZUNPKD820 (Unsigned Unpacking Bytes 2 & 0)
Definition: core_feature_dsp.h:13629
__RV_KHMX16
__STATIC_FORCEINLINE unsigned long __RV_KHMX16(unsigned long a, unsigned long b)
KHMX16 (SIMD Signed Saturating Crossed Q15 Multiply)
Definition: core_feature_dsp.h:2482
__RV_KMMAWB2
__STATIC_FORCEINLINE long __RV_KMMAWB2(long t, unsigned long a, unsigned long b)
KMMAWB2 (SIMD Saturating MSW Signed Multiply Word and Bottom Half & 2 and Add)
Definition: core_feature_dsp.h:3659
__RV_RSTAS32
__STATIC_FORCEINLINE unsigned long __RV_RSTAS32(unsigned long a, unsigned long b)
RSTAS32 (SIMD 32-bit Signed Halving Straight Addition & Subtraction)
Definition: core_feature_dsp.h:16080
__RV_EXPD82
__STATIC_FORCEINLINE unsigned long __RV_EXPD82(unsigned long a)
EXPD82 (Expand and Copy Byte 2 to 32bit(rv32) or 64bit(rv64))
Definition: core_feature_dsp.h:17998
__RV_UKSTAS16
__STATIC_FORCEINLINE unsigned long __RV_UKSTAS16(unsigned long a, unsigned long b)
UKSTAS16 (SIMD 16-bit Unsigned Saturating Straight Addition & Subtraction)
Definition: core_feature_dsp.h:11905
__RV_DSMDRS
__STATIC_FORCEINLINE unsigned long long __RV_DSMDRS(unsigned long long a, unsigned long long b)
DSMDRS (Signed Multiply Two Halfs and Reverse Subtract)
Definition: core_feature_dsp.h:19765
__RV_RADDW
__STATIC_FORCEINLINE long __RV_RADDW(int a, int b)
RADDW (32-bit Signed Halving Addition)
Definition: core_feature_dsp.h:6449
__RV_KSUB16
__STATIC_FORCEINLINE unsigned long __RV_KSUB16(unsigned long a, unsigned long b)
KSUB16 (SIMD 16-bit Signed Saturating Subtraction)
Definition: core_feature_dsp.h:5430
__RV_UKSTSA16
__STATIC_FORCEINLINE unsigned long __RV_UKSTSA16(unsigned long a, unsigned long b)
UKSTSA16 (SIMD 16-bit Unsigned Saturating Straight Subtraction & Addition)
Definition: core_feature_dsp.h:11960
__RV_SMULX8
__STATIC_FORCEINLINE unsigned long long __RV_SMULX8(unsigned int a, unsigned int b)
SMULX8 (SIMD Signed Crossed 8-bit Multiply)
Definition: core_feature_dsp.h:9399
__RV_DRADD32
__STATIC_FORCEINLINE unsigned long long __RV_DRADD32(unsigned long long a, unsigned long long b)
DRADD32 (64-bit SIMD 32-bit Halving Signed Addition)
Definition: core_feature_dsp.h:19298
__RV_PKTT16
__STATIC_FORCEINLINE unsigned long __RV_PKTT16(unsigned long a, unsigned long b)
PKTT16 (Pack Two 16-bit Data from Both Top Half)
Definition: core_feature_dsp.h:6207
__RV_SWAP16
__STATIC_FORCEINLINE unsigned long __RV_SWAP16(unsigned long a)
SWAP16 (Swap Halfword within Word)
Definition: core_feature_dsp.h:11067
__RV_DKMADA32
__STATIC_FORCEINLINE long long __RV_DKMADA32(long long t, unsigned long long a, unsigned long long b)
DKMADA32 (Two Signed 32x32 with 64-bit Saturation Add)
Definition: core_feature_dsp.h:22360
__RV_DSRA16
__STATIC_FORCEINLINE unsigned long long __RV_DSRA16(unsigned long long a, unsigned long b)
DSRA16 (64-bit SIMD 16-bit Shift Right Arithmetic)
Definition: core_feature_dsp.h:20508
__RV_KMMWB2
__STATIC_FORCEINLINE long __RV_KMMWB2(long a, unsigned long b)
KMMWB2 (SIMD Saturating MSW Signed Multiply Word and Bottom Half & 2)
Definition: core_feature_dsp.h:4162
__RV_RCRAS16
__STATIC_FORCEINLINE unsigned long __RV_RCRAS16(unsigned long a, unsigned long b)
RCRAS16 (SIMD 16-bit Signed Halving Cross Addition & Subtraction)
Definition: core_feature_dsp.h:6498
__RV_RADD16
__STATIC_FORCEINLINE unsigned long __RV_RADD16(unsigned long a, unsigned long b)
RADD16 (SIMD 16-bit Signed Halving Addition)
Definition: core_feature_dsp.h:6348
__RV_UMULX16
__STATIC_FORCEINLINE unsigned long long __RV_UMULX16(unsigned int a, unsigned int b)
UMULX16 (SIMD Unsigned Crossed 16-bit Multiply)
Definition: core_feature_dsp.h:12847
__RV_KMDA32
__STATIC_FORCEINLINE long __RV_KMDA32(unsigned long a, unsigned long b)
KMDA32 (Signed Multiply Two Words and Add)
Definition: core_feature_dsp.h:14961
__RV_CLZ8
__STATIC_FORCEINLINE unsigned long __RV_CLZ8(unsigned long a)
CLZ8 (SIMD 8-bit Count Leading Zero)
Definition: core_feature_dsp.h:1058
__RV_UMULX8
__STATIC_FORCEINLINE unsigned long long __RV_UMULX8(unsigned int a, unsigned int b)
UMULX8 (SIMD Unsigned Crossed 8-bit Multiply)
Definition: core_feature_dsp.h:12677
__RV_KSLRA8_U
__STATIC_FORCEINLINE unsigned long __RV_KSLRA8_U(unsigned long a, int b)
KSLRA8.u (SIMD 8-bit Shift Left Logical with Saturation or Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:4964
__RV_KMADS32
__STATIC_FORCEINLINE long __RV_KMADS32(long t, unsigned long a, unsigned long b)
KMADS32 (Saturating Signed Multiply Two Words & Subtract & Add)
Definition: core_feature_dsp.h:15078
__RV_SMXDS32
__STATIC_FORCEINLINE long __RV_SMXDS32(unsigned long a, unsigned long b)
SMXDS32 (Signed Crossed Multiply Two Words and Subtract)
Definition: core_feature_dsp.h:16596
__RV_DKMABB32
__STATIC_FORCEINLINE long long __RV_DKMABB32(long long t, unsigned long long a, unsigned long long b)
DKMABB32 (Saturating Signed Multiply Bottom Words & Add)
Definition: core_feature_dsp.h:23730
__RV_MULSR64
__STATIC_FORCEINLINE long long __RV_MULSR64(long a, long b)
MULSR64 (Multiply Word Signed to 64-bit Data)
Definition: core_feature_dsp.h:5974
__RV_KMABB32
__STATIC_FORCEINLINE long __RV_KMABB32(long t, unsigned long a, unsigned long b)
KMABB32 (Saturating Signed Multiply Bottom Words & Add)
Definition: core_feature_dsp.h:14674
__RV_SMSR64
__STATIC_FORCEINLINE long long __RV_SMSR64(long long t, long a, long b)
SMSR64 (Signed Multiply and Subtract from 64-Bit Data)
Definition: core_feature_dsp.h:9234
__RV_DKMDA
__STATIC_FORCEINLINE unsigned long long __RV_DKMDA(unsigned long long a, unsigned long long b)
DKMDA (Signed Multiply Two Halfs and Add)
Definition: core_feature_dsp.h:19677
__RV_CLO32
__STATIC_FORCEINLINE unsigned long __RV_CLO32(unsigned long a)
CLO32 (SIMD 32-bit Count Leading One)
Definition: core_feature_dsp.h:1011
__RV_KDMBB16
__STATIC_FORCEINLINE unsigned long __RV_KDMBB16(unsigned long a, unsigned long b)
KDMBB16 (SIMD Signed Saturating Double Multiply B16 x B16)
Definition: core_feature_dsp.h:14153
__RV_CLRS32
__STATIC_FORCEINLINE unsigned long __RV_CLRS32(unsigned long a)
CLRS32 (SIMD 32-bit Count Leading Redundant Sign)
Definition: core_feature_dsp.h:870
__RV_KMMAWT2_U
__STATIC_FORCEINLINE long __RV_KMMAWT2_U(long t, unsigned long a, unsigned long b)
KMMAWT2.u (SIMD Saturating MSW Signed Multiply Word and Top Half & 2 and Add with Rounding)
Definition: core_feature_dsp.h:3987
__RV_UKSTAS32
__STATIC_FORCEINLINE unsigned long __RV_UKSTAS32(unsigned long a, unsigned long b)
UKSTAS32 (SIMD 32-bit Unsigned Saturating Straight Addition & Subtraction)
Definition: core_feature_dsp.h:17431
__RV_DMSR17
__STATIC_FORCEINLINE unsigned long long __RV_DMSR17(unsigned long a, unsigned long b)
DMSR17 (Signed Multiply Halfs with Right Shift 17-bit and Cross Multiply Halfs with Right Shift 17-bit)
Definition: core_feature_dsp.h:19427
__RV_RSUB8
__STATIC_FORCEINLINE unsigned long __RV_RSUB8(unsigned long a, unsigned long b)
RSUB8 (SIMD 8-bit Signed Halving Subtraction)
Definition: core_feature_dsp.h:6719
__RV_DKADD32
__STATIC_FORCEINLINE unsigned long long __RV_DKADD32(unsigned long long a, unsigned long long b)
DKADD32 (64-bit SIMD 32-bit Signed Saturating Addition)
Definition: core_feature_dsp.h:19137
__RV_KMAXDA32
__STATIC_FORCEINLINE long __RV_KMAXDA32(long t, unsigned long a, unsigned long b)
KMAXDA32 (Saturating Signed Crossed Multiply Two Words and Two Adds)
Definition: core_feature_dsp.h:14910
__RV_CRAS16
__STATIC_FORCEINLINE unsigned long __RV_CRAS16(unsigned long a, unsigned long b)
CRAS16 (SIMD 16-bit Cross Addition & Subtraction)
Definition: core_feature_dsp.h:1282
__RV_KHMBT
__STATIC_FORCEINLINE long __RV_KHMBT(unsigned int a, unsigned int b)
KHMBT (Signed Saturating Half Multiply B16 x T16)
Definition: core_feature_dsp.h:2588
__RV_KSTSA16
__STATIC_FORCEINLINE unsigned long __RV_KSTSA16(unsigned long a, unsigned long b)
KSTSA16 (SIMD 16-bit Signed Saturating Straight Subtraction & Addition)
Definition: core_feature_dsp.h:5335
__RV_RSUB32
__STATIC_FORCEINLINE unsigned long __RV_RSUB32(unsigned long a, unsigned long b)
RSUB32 (SIMD 32-bit Signed Halving Subtraction)
Definition: core_feature_dsp.h:16172
__RV_DPKBB32
__STATIC_FORCEINLINE unsigned long long __RV_DPKBB32(unsigned long long a, unsigned long long b)
DPKBB32 (Pack Two 32-bit Data from Both Bottom Half)
Definition: core_feature_dsp.h:20200
__RV_DKMMSB_U
__STATIC_FORCEINLINE unsigned long long __RV_DKMMSB_U(unsigned long long t, unsigned long long a, unsigned long long b)
DKMMSBU (64-bit MSW 32x32 Unsigned Multiply and Saturating Sub)
Definition: core_feature_dsp.h:21762
__RV_SMAQA_SU
__STATIC_FORCEINLINE long __RV_SMAQA_SU(long t, unsigned long a, unsigned long b)
SMAQA.SU (Signed and Unsigned Multiply Four Bytes with 32-bit Adds)
Definition: core_feature_dsp.h:8226
__RV_SLL8
__STATIC_FORCEINLINE unsigned long __RV_SLL8(unsigned long a, unsigned int b)
SLL8 (SIMD 8-bit Shift Left Logical)
Definition: core_feature_dsp.h:7214
__RV_DPKBT16
__STATIC_FORCEINLINE unsigned long long __RV_DPKBT16(unsigned long long a, unsigned long long b)
DPKBT16 (Pack Two 16-bit Data from Bottom and Top Half)
Definition: core_feature_dsp.h:20425
__RV_ZUNPKD832
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD832(unsigned long a)
ZUNPKD832 (Unsigned Unpacking Bytes 3 & 2)
Definition: core_feature_dsp.h:13767
__RV_CLO16
__STATIC_FORCEINLINE unsigned long __RV_CLO16(unsigned long a)
CLO16 (SIMD 16-bit Count Leading One)
Definition: core_feature_dsp.h:964
__RV_URSTAS16
__STATIC_FORCEINLINE unsigned long __RV_URSTAS16(unsigned long a, unsigned long b)
URSTAS16 (SIMD 16-bit Unsigned Halving Straight Addition & Subtraction)
Definition: core_feature_dsp.h:13188
__RV_DSMAQA
__STATIC_FORCEINLINE unsigned long long __RV_DSMAQA(unsigned long long t, unsigned long long a, unsigned long long b)
DSMAQA (Four Signed 8x8 with 32-bit Signed Add)
Definition: core_feature_dsp.h:22136
__RV_UKSUB32
__STATIC_FORCEINLINE unsigned long __RV_UKSUB32(unsigned long a, unsigned long b)
UKSUB32 (SIMD 32-bit Unsigned Saturating Subtraction)
Definition: core_feature_dsp.h:17527
__RV_KMABT
__STATIC_FORCEINLINE long __RV_KMABT(long t, unsigned long a, unsigned long b)
KMABT (SIMD Saturating Signed Multiply Bottom & Top Halfs & Add)
Definition: core_feature_dsp.h:2768
__RV_DSMBT32
__STATIC_FORCEINLINE long long __RV_DSMBT32(unsigned long long a, unsigned long long b)
DSMBT32 (Signed Multiply Bottom Word & Top Word)
Definition: core_feature_dsp.h:19963
__RV_KSUB8
__STATIC_FORCEINLINE unsigned long __RV_KSUB8(unsigned long a, unsigned long b)
KSUB8 (SIMD 8-bit Signed Saturating Subtraction)
Definition: core_feature_dsp.h:5382
__RV_UCMPLE16
__STATIC_FORCEINLINE unsigned long __RV_UCMPLE16(unsigned long a, unsigned long b)
UCMPLE16 (SIMD 16-bit Unsigned Compare Less Than & Equal)
Definition: core_feature_dsp.h:11294
__RV_KDMABT
__STATIC_FORCEINLINE long __RV_KDMABT(long t, unsigned int a, unsigned int b)
KDMABT (Signed Saturating Double Multiply Addition B16 x T16)
Definition: core_feature_dsp.h:2169
__RV_CLZ32
__STATIC_FORCEINLINE unsigned long __RV_CLZ32(unsigned long a)
CLZ32 (SIMD 32-bit Count Leading Zero)
Definition: core_feature_dsp.h:1152
__RV_DPKTT32
__STATIC_FORCEINLINE unsigned long long __RV_DPKTT32(unsigned long long a, unsigned long long b)
DPKTT32 (Pack Two 32-bit Data from Both Top Half)
Definition: core_feature_dsp.h:20274
__RV_SUB64
__STATIC_FORCEINLINE unsigned long long __RV_SUB64(unsigned long long a, unsigned long long b)
SUB64 (64-bit Subtraction)
Definition: core_feature_dsp.h:10763
__RV_KCRAS16
__STATIC_FORCEINLINE unsigned long __RV_KCRAS16(unsigned long a, unsigned long b)
KCRAS16 (SIMD 16-bit Signed Saturating Cross Addition & Subtraction)
Definition: core_feature_dsp.h:1818
__RV_URADDW
__STATIC_FORCEINLINE unsigned long __RV_URADDW(unsigned int a, unsigned int b)
URADDW (32-bit Unsigned Halving Addition)
Definition: core_feature_dsp.h:13041
__RV_DKMMAC_U
__STATIC_FORCEINLINE unsigned long long __RV_DKMMAC_U(unsigned long long t, unsigned long long a, unsigned long long b)
DKMMACU (64-bit MSW 32x32 Unsigned Multiply and Saturating Add)
Definition: core_feature_dsp.h:21668
__RV_UMSR64
__STATIC_FORCEINLINE unsigned long long __RV_UMSR64(unsigned long long t, unsigned long a, unsigned long b)
UMSR64 (Unsigned Multiply and Subtract from 64-Bit Data)
Definition: core_feature_dsp.h:12510
__RV_DMADA32
__STATIC_FORCEINLINE long __RV_DMADA32(long long t, unsigned long long a, unsigned long long b)
DMADA32 (Two Cross Signed 32x32 with 64-bit Add and Clip to 32-bit)
Definition: core_feature_dsp.h:23555
__RV_UMIN16
__STATIC_FORCEINLINE unsigned long __RV_UMIN16(unsigned long a, unsigned long b)
UMIN16 (SIMD 16-bit Unsigned Minimum)
Definition: core_feature_dsp.h:12456
__RV_KSUB64
__STATIC_FORCEINLINE long long __RV_KSUB64(long long a, long long b)
KSUB64 (64-bit Signed Saturating Subtraction)
Definition: core_feature_dsp.h:5496
__RV_SMAX16
__STATIC_FORCEINLINE unsigned long __RV_SMAX16(unsigned long a, unsigned long b)
SMAX16 (SIMD 16-bit Signed Maximum)
Definition: core_feature_dsp.h:8303
__RV_SUNPKD830
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD830(unsigned long a)
SUNPKD830 (Signed Unpacking Bytes 3 & 0)
Definition: core_feature_dsp.h:10901
__RV_KMMWT2
__STATIC_FORCEINLINE long __RV_KMMWT2(long a, unsigned long b)
KMMWT2 (SIMD Saturating MSW Signed Multiply Word and Top Half & 2)
Definition: core_feature_dsp.h:4274
__RV_DSMALTT
__STATIC_FORCEINLINE long long __RV_DSMALTT(long long t, unsigned long long a, unsigned long long b)
DSMALTT (Signed Multiply Top Half & Add 64-bit)
Definition: core_feature_dsp.h:23682
__RV_DRCRAS32
__STATIC_FORCEINLINE unsigned long long __RV_DRCRAS32(unsigned long long a, unsigned long long b)
DRCRAS32 (32-bit Signed Cross Addition & Subtraction)
Definition: core_feature_dsp.h:20873
__RV_SCMPLT8
__STATIC_FORCEINLINE unsigned long __RV_SCMPLT8(unsigned long a, unsigned long b)
SCMPLT8 (SIMD 8-bit Signed Compare Less Than)
Definition: core_feature_dsp.h:7134
__RV_KMMAWT
__STATIC_FORCEINLINE long __RV_KMMAWT(long t, unsigned long a, unsigned long b)
KMMAWT (SIMD Saturating MSW Signed Multiply Word and Top Half and Add)
Definition: core_feature_dsp.h:3789
__RV_KSLRA16_U
__STATIC_FORCEINLINE unsigned long __RV_KSLRA16_U(unsigned long a, int b)
KSLRA16.u (SIMD 16-bit Shift Left Logical with Saturation or Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:5098
__RV_UKSUBW
__STATIC_FORCEINLINE unsigned long __RV_UKSUBW(unsigned int a, unsigned int b)
UKSUBW (Unsigned Subtraction with U32 Saturation)
Definition: core_feature_dsp.h:12203
__RV_SMDS
__STATIC_FORCEINLINE long __RV_SMDS(unsigned long a, unsigned long b)
SMDS (SIMD Signed Multiply Two Halfs and Subtract)
Definition: core_feature_dsp.h:8518
__RV_URADD8
__STATIC_FORCEINLINE unsigned long __RV_URADD8(unsigned long a, unsigned long b)
URADD8 (SIMD 8-bit Unsigned Halving Addition)
Definition: core_feature_dsp.h:12894
__RV_STAS16
__STATIC_FORCEINLINE unsigned long __RV_STAS16(unsigned long a, unsigned long b)
STAS16 (SIMD 16-bit Straight Addition & Subtraction)
Definition: core_feature_dsp.h:10581
__RV_KADD8
__STATIC_FORCEINLINE unsigned long __RV_KADD8(unsigned long a, unsigned long b)
KADD8 (SIMD 8-bit Signed Saturating Addition)
Definition: core_feature_dsp.h:1553
__RV_ADD8
__STATIC_FORCEINLINE unsigned long __RV_ADD8(unsigned long a, unsigned long b)
ADD8 (SIMD 8-bit Addition)
Definition: core_feature_dsp.h:449
__RV_DREDSA16
__STATIC_FORCEINLINE unsigned long __RV_DREDSA16(unsigned long long a)
DREDSA16 (Reduced Subtraction and Reduced Addition)
Definition: core_feature_dsp.h:19583
__RV_PKTB16
__STATIC_FORCEINLINE unsigned long __RV_PKTB16(unsigned long a, unsigned long b)
PKTB16 (Pack Two 16-bit Data from Top and Bottom Half)
Definition: core_feature_dsp.h:6258
__RV_SRA32
__STATIC_FORCEINLINE unsigned long __RV_SRA32(unsigned long a, unsigned int b)
SRA32 (SIMD 32-bit Shift Right Arithmetic)
Definition: core_feature_dsp.h:16687
__RV_DSMDRS32
__STATIC_FORCEINLINE long long __RV_DSMDRS32(unsigned long long a, unsigned long long b)
DSMDRS32 (Two Signed 32x32 with 64-bit Reversed Sub)
Definition: core_feature_dsp.h:22707
__RV_DSUB16
__STATIC_FORCEINLINE unsigned long long __RV_DSUB16(unsigned long long a, unsigned long long b)
DSUB16 (64-bit SIMD 16-bit Halving Signed Subtraction)
Definition: core_feature_dsp.h:19260
__RV_RADD32
__STATIC_FORCEINLINE unsigned long __RV_RADD32(unsigned long a, unsigned long b)
RADD32 (SIMD 32-bit Signed Halving Addition)
Definition: core_feature_dsp.h:15939
__RV_DKMADS
__STATIC_FORCEINLINE unsigned long long __RV_DKMADS(unsigned long long t, unsigned long long a, unsigned long long b)
DKMADS (Two 16x16 with 32-bit Signed Add and Sub)
Definition: core_feature_dsp.h:21902
__RV_DKCRSA16
__STATIC_FORCEINLINE unsigned long long __RV_DKCRSA16(unsigned long long a, unsigned long long b)
DKCRSA16 (16-bit Signed Saturating Cross Subtraction & Addition)
Definition: core_feature_dsp.h:20983
__RV_DSUB32
__STATIC_FORCEINLINE unsigned long long __RV_DSUB32(unsigned long long a, unsigned long long b)
DSUB32 (64-bit SIMD 32-bit Halving Signed Subtraction)
Definition: core_feature_dsp.h:19336
__RV_MAXW
__STATIC_FORCEINLINE long __RV_MAXW(int a, int b)
MAXW (32-bit Signed Word Maximum)
Definition: core_feature_dsp.h:5788
__RV_SRA_U
__STATIC_FORCEINLINE long __RV_SRA_U(long a, unsigned int b)
SRA.u (Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:9623
__RV_DSMADA16
__STATIC_FORCEINLINE long __RV_DSMADA16(long long t, unsigned long long a, unsigned long long b)
DSMADA16 (Signed Multiply Two Halfs and Two Adds 32-bit)
Definition: core_feature_dsp.h:23433
__RV_SMMWT
__STATIC_FORCEINLINE long __RV_SMMWT(long a, unsigned long b)
SMMWT (SIMD MSW Signed Multiply Word and Top Half)
Definition: core_feature_dsp.h:8966
__RV_SMIN16
__STATIC_FORCEINLINE unsigned long __RV_SMIN16(unsigned long a, unsigned long b)
SMIN16 (SIMD 16-bit Signed Minimum)
Definition: core_feature_dsp.h:8714
__RV_KMMAWT2
__STATIC_FORCEINLINE long __RV_KMMAWT2(long t, unsigned long a, unsigned long b)
KMMAWT2 (SIMD Saturating MSW Signed Multiply Word and Top Half & 2 and Add)
Definition: core_feature_dsp.h:3919
__RV_DKMXDA32
__STATIC_FORCEINLINE long long __RV_DKMXDA32(unsigned long long a, unsigned long long b)
DKMXDA32 (Two Cross Signed 32x32 with 64-bit Saturation Add)
Definition: core_feature_dsp.h:22317
__RV_DKSTSA16
__STATIC_FORCEINLINE unsigned long long __RV_DKSTSA16(unsigned long long a, unsigned long long b)
DKSTSA16 (16-bit Signed Saturating Straight Subtraction & Addition)
Definition: core_feature_dsp.h:21336
__RV_KMSDA32
__STATIC_FORCEINLINE long __RV_KMSDA32(long t, unsigned long a, unsigned long b)
KMSDA32 (Saturating Signed Multiply Two Words & Add & Subtract)
Definition: core_feature_dsp.h:15261
__RV_URCRAS16
__STATIC_FORCEINLINE unsigned long __RV_URCRAS16(unsigned long a, unsigned long b)
URCRAS16 (SIMD 16-bit Unsigned Halving Cross Addition & Subtraction)
Definition: core_feature_dsp.h:13090
__RV_PKBB32
__STATIC_FORCEINLINE unsigned long __RV_PKBB32(unsigned long a, unsigned long b)
PKBB32 (Pack Two 32-bit Data from Both Bottom Half)
Definition: core_feature_dsp.h:15749
__RV_DKMADS32
__STATIC_FORCEINLINE long long __RV_DKMADS32(long long t, unsigned long long a, unsigned long long b)
DKMADS32 (Two Signed 32x32 with 64-bit Saturation Add and Sub)
Definition: core_feature_dsp.h:22447
__RV_DCRAS32
__STATIC_FORCEINLINE unsigned long long __RV_DCRAS32(unsigned long long a, unsigned long long b)
DCRAS32 (32-bit Cross Addition & Subtraction)
Definition: core_feature_dsp.h:21281
__RV_DDSMAQA
__STATIC_FORCEINLINE long long __RV_DDSMAQA(long long t, unsigned long long a, unsigned long long b)
DDSMAQA (Eight Signed 8x8 with 64-bit Add)
Definition: core_feature_dsp.h:23123
__RV_DKADD8
__STATIC_FORCEINLINE unsigned long long __RV_DKADD8(unsigned long long a, unsigned long long b)
DKADD8 (64-bit SIMD 8-bit Signed Saturating Addition)
Definition: core_feature_dsp.h:18560
__RV_UKADD64
__STATIC_FORCEINLINE unsigned long long __RV_UKADD64(unsigned long long a, unsigned long long b)
UKADD64 (64-bit Unsigned Saturating Addition)
Definition: core_feature_dsp.h:11524
__RV_SMUL16
__STATIC_FORCEINLINE unsigned long long __RV_SMUL16(unsigned int a, unsigned int b)
SMUL16 (SIMD Signed 16-bit Multiply)
Definition: core_feature_dsp.h:9484
__RV_SMTT16
__STATIC_FORCEINLINE long __RV_SMTT16(unsigned long a, unsigned long b)
SMTT16 (SIMD Signed Multiply Top Half & Top Half)
Definition: core_feature_dsp.h:8459
__RV_KABS32
__STATIC_FORCEINLINE unsigned long __RV_KABS32(unsigned long a)
KABS32 (Scalar 32-bit Absolute Value with Saturation)
Definition: core_feature_dsp.h:13954
__RV_DSMMUL
__STATIC_FORCEINLINE unsigned long long __RV_DSMMUL(unsigned long long a, unsigned long long b)
DSMMUL (64-bit MSW 32x32 Signed Multiply)
Definition: core_feature_dsp.h:18853
__RV_SMALDS
__STATIC_FORCEINLINE long long __RV_SMALDS(long long t, unsigned long a, unsigned long b)
SMALDS (Signed Multiply Two Halfs & Subtract & Add 64-bit)
Definition: core_feature_dsp.h:7903
__RV_CRSA16
__STATIC_FORCEINLINE unsigned long __RV_CRSA16(unsigned long a, unsigned long b)
CRSA16 (SIMD 16-bit Cross Subtraction & Addition)
Definition: core_feature_dsp.h:1328
__RV_DKMMAC
__STATIC_FORCEINLINE unsigned long long __RV_DKMMAC(unsigned long long t, unsigned long long a, unsigned long long b)
DKMMAC (64-bit MSW 32x32 Signed Multiply and Saturating Add)
Definition: core_feature_dsp.h:21621
__RV_DKABS16
__STATIC_FORCEINLINE unsigned long long __RV_DKABS16(unsigned long long a)
DKABS16 (64-bit SIMD 16-bit Saturating Absolute)
Definition: core_feature_dsp.h:18396
__RV_DKADD16
__STATIC_FORCEINLINE unsigned long long __RV_DKADD16(unsigned long long a, unsigned long long b)
DKADD16 (64-bit SIMD 16-bit Signed Saturating Addition)
Definition: core_feature_dsp.h:18607
__RV_DKSUB32
__STATIC_FORCEINLINE unsigned long long __RV_DKSUB32(unsigned long long a, unsigned long long b)
DKSUB32 (64-bit SIMD 32-bit Signed Saturating Subtraction)
Definition: core_feature_dsp.h:19184
__RV_DMXSR33
__STATIC_FORCEINLINE unsigned long long __RV_DMXSR33(unsigned long long a, unsigned long long b)
DMXSR33 (Signed Multiply with Right Shift 33-bit and Cross Multiply with Right Shift 33-bit)
Definition: core_feature_dsp.h:19509
__RV_KSTAS16
__STATIC_FORCEINLINE unsigned long __RV_KSTAS16(unsigned long a, unsigned long b)
KSTAS16 (SIMD 16-bit Signed Saturating Straight Addition & Subtraction)
Definition: core_feature_dsp.h:5278
__RV_SRL8_U
__STATIC_FORCEINLINE unsigned long __RV_SRL8_U(unsigned long a, unsigned int b)
SRL8.u (SIMD 8-bit Rounding Shift Right Logical)
Definition: core_feature_dsp.h:10219
__RV_UKSUB8
__STATIC_FORCEINLINE unsigned long __RV_UKSUB8(unsigned long a, unsigned long b)
UKSUB8 (SIMD 8-bit Unsigned Saturating Subtraction)
Definition: core_feature_dsp.h:12005
__RV_BPICK
__STATIC_FORCEINLINE unsigned long __RV_BPICK(unsigned long a, unsigned long b, unsigned long c)
BPICK (Bit-wise Pick)
Definition: core_feature_dsp.h:706
__RV_KWMMUL_U
__STATIC_FORCEINLINE long __RV_KWMMUL_U(long a, long b)
KWMMUL.u (SIMD Saturating MSW Signed Multiply Word & Double with Rounding)
Definition: core_feature_dsp.h:5703
__RV_SMALDA
__STATIC_FORCEINLINE long long __RV_SMALDA(long long t, unsigned long a, unsigned long b)
SMALDA (Signed Multiply Two Halfs and Two Adds 64-bit)
Definition: core_feature_dsp.h:7728
__RV_SRL16_U
__STATIC_FORCEINLINE unsigned long __RV_SRL16_U(unsigned long a, unsigned int b)
SRL16.u (SIMD 16-bit Rounding Shift Right Logical)
Definition: core_feature_dsp.h:10429
__RV_SMAX8
__STATIC_FORCEINLINE unsigned long __RV_SMAX8(unsigned long a, unsigned long b)
SMAX8 (SIMD 8-bit Signed Maximum)
Definition: core_feature_dsp.h:8264
__RV_KCRSA16
__STATIC_FORCEINLINE unsigned long __RV_KCRSA16(unsigned long a, unsigned long b)
KCRSA16 (SIMD 16-bit Signed Saturating Cross Subtraction & Addition)
Definition: core_feature_dsp.h:1875
__RV_DSMA32_U
__STATIC_FORCEINLINE long __RV_DSMA32_U(unsigned long long a, unsigned long long b)
DSMA32.u (64-bit SIMD 32-bit Signed Multiply Addition With Rounding and Clip)
Definition: core_feature_dsp.h:23265
__RV_ADD64
__STATIC_FORCEINLINE unsigned long long __RV_ADD64(unsigned long long a, unsigned long long b)
ADD64 (64-bit Addition)
Definition: core_feature_dsp.h:543
__RV_DKMADA
__STATIC_FORCEINLINE unsigned long long __RV_DKMADA(unsigned long long t, unsigned long long a, unsigned long long b)
DKMADA (Saturating Signed Multiply Two Halfs and Two Adds)
Definition: core_feature_dsp.h:21809
__RV_SUB32
__STATIC_FORCEINLINE unsigned long __RV_SUB32(unsigned long a, unsigned long b)
SUB32 (SIMD 32-bit Subtraction)
Definition: core_feature_dsp.h:17229
__RV_RCRSA32
__STATIC_FORCEINLINE unsigned long __RV_RCRSA32(unsigned long a, unsigned long b)
RCRSA32 (SIMD 32-bit Signed Halving Cross Subtraction & Addition)
Definition: core_feature_dsp.h:16033
__RV_DKSMS32_U
__STATIC_FORCEINLINE unsigned long long __RV_DKSMS32_U(unsigned long long t, unsigned long long a, unsigned long long b)
DKSMS32.u (Two Signed Multiply Shift-clip and Saturation with Rounding)
Definition: core_feature_dsp.h:23516
__RV_KMSDA
__STATIC_FORCEINLINE long __RV_KMSDA(long t, unsigned long a, unsigned long b)
KMSDA (SIMD Saturating Signed Multiply Two Halfs & Add & Subtract)
Definition: core_feature_dsp.h:4393
__RV_KADDW
__STATIC_FORCEINLINE long __RV_KADDW(int a, int b)
KADDW (Signed Addition with Q31 Saturation)
Definition: core_feature_dsp.h:1761
__RV_CLO8
__STATIC_FORCEINLINE unsigned long __RV_CLO8(unsigned long a)
CLO8 (SIMD 8-bit Count Leading One)
Definition: core_feature_dsp.h:917
__RV_DKSLRA16
__STATIC_FORCEINLINE unsigned long long __RV_DKSLRA16(unsigned long long a, int b)
DKSLRA16 (64-bit SIMD 16-bit Shift Left Logical with Saturation or Shift Right Arithmetic)
Definition: core_feature_dsp.h:18513
__RV_URSTSA32
__STATIC_FORCEINLINE unsigned long __RV_URSTSA32(unsigned long a, unsigned long b)
URSTSA32 (SIMD 32-bit Unsigned Halving Straight Subtraction & Addition)
Definition: core_feature_dsp.h:17837
__RV_PBSAD
__STATIC_FORCEINLINE unsigned long __RV_PBSAD(unsigned long a, unsigned long b)
PBSAD (Parallel Byte Sum of Absolute Difference)
Definition: core_feature_dsp.h:6013
__RV_SLL16
__STATIC_FORCEINLINE unsigned long __RV_SLL16(unsigned long a, unsigned int b)
SLL16 (SIMD 16-bit Shift Left Logical)
Definition: core_feature_dsp.h:7296
__RV_CMPEQ16
__STATIC_FORCEINLINE unsigned long __RV_CMPEQ16(unsigned long a, unsigned long b)
CMPEQ16 (SIMD 16-bit Integer Compare Equal)
Definition: core_feature_dsp.h:1236
__RV_KABSW
__STATIC_FORCEINLINE unsigned long __RV_KABSW(signed long a)
KABSW (Scalar 32-bit Absolute Value with Saturation)
Definition: core_feature_dsp.h:1506
__RV_DSMSLXDA
__STATIC_FORCEINLINE long long __RV_DSMSLXDA(long long t, unsigned long long a, unsigned long long b)
DSMSLXDA (Four Cross Signed 16x16 with 64-bit Sub)
Definition: core_feature_dsp.h:23072
__RV_DSTAS32
__STATIC_FORCEINLINE unsigned long long __RV_DSTAS32(unsigned long long a, unsigned long long b)
DSTAS32 (SIMD 32-bit Straight Addition & Subtraction)
Definition: core_feature_dsp.h:21101
__RV_SMALBT
__STATIC_FORCEINLINE long long __RV_SMALBT(long long t, unsigned long a, unsigned long b)
SMALBT (Signed Multiply Bottom Half & Top Half & Add 64-bit)
Definition: core_feature_dsp.h:7562
__RV_ZUNPKD810
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD810(unsigned long a)
ZUNPKD810 (Unsigned Unpacking Bytes 1 & 0)
Definition: core_feature_dsp.h:13583
__RV_URADD32
__STATIC_FORCEINLINE unsigned long __RV_URADD32(unsigned long a, unsigned long b)
URADD32 (SIMD 32-bit Unsigned Halving Addition)
Definition: core_feature_dsp.h:17649
__RV_RADD64
__STATIC_FORCEINLINE long long __RV_RADD64(long long a, long long b)
RADD64 (64-bit Signed Halving Addition)
Definition: core_feature_dsp.h:6401
__RV_DRCRSA16
__STATIC_FORCEINLINE unsigned long long __RV_DRCRSA16(unsigned long long a, unsigned long long b)
DRCRSA16 (16-bit Signed Halving Cross Subtraction & Addition)
Definition: core_feature_dsp.h:20749
__RV_KMSR64
__STATIC_FORCEINLINE long long __RV_KMSR64(long long t, long a, long b)
KMSR64 (Signed Multiply and Saturating Subtract from 64-Bit Data)
Definition: core_feature_dsp.h:4523
__RV_KDMTT16
__STATIC_FORCEINLINE unsigned long __RV_KDMTT16(unsigned long a, unsigned long b)
KDMTT16 (SIMD Signed Saturating Double Multiply T16 x T16)
Definition: core_feature_dsp.h:14257
__RV_KMMAWT_U
__STATIC_FORCEINLINE long __RV_KMMAWT_U(long t, unsigned long a, unsigned long b)
KMMAWT.u (SIMD Saturating MSW Signed Multiply Word and Top Half and Add with Rounding)
Definition: core_feature_dsp.h:3851
__RV_DSMALBB
__STATIC_FORCEINLINE long long __RV_DSMALBB(long long t, unsigned long long a, unsigned long long b)
DSMALBB (Signed Multiply Bottom Halfs & Add 64-bit)
Definition: core_feature_dsp.h:23596
__RV_SMMWT_U
__STATIC_FORCEINLINE long __RV_SMMWT_U(long a, unsigned long b)
SMMWT.u (SIMD MSW Signed Multiply Word and Top Half with Rounding)
Definition: core_feature_dsp.h:9016
__RV_UMAX32
__STATIC_FORCEINLINE unsigned long __RV_UMAX32(unsigned long a, unsigned long b)
UMAX32 (SIMD 32-bit Unsigned Maximum)
Definition: core_feature_dsp.h:17565
__RV_SUNPKD810
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD810(unsigned long a)
SUNPKD810 (Signed Unpacking Bytes 1 & 0)
Definition: core_feature_dsp.h:10809
__RV_STSA32
__STATIC_FORCEINLINE unsigned long __RV_STSA32(unsigned long a, unsigned long b)
STSA32 (SIMD 32-bit Straight Subtraction & Addition)
Definition: core_feature_dsp.h:17189
__RV_KHM8
__STATIC_FORCEINLINE unsigned long __RV_KHM8(unsigned long a, unsigned long b)
KHM8 (SIMD Signed Saturating Q7 Multiply)
Definition: core_feature_dsp.h:2294
__RV_DSMALXDA
__STATIC_FORCEINLINE long long __RV_DSMALXDA(long long t, unsigned long long a, unsigned long long b)
DSMALXDA (Four Signed 16x16 with 64-bit Add)
Definition: core_feature_dsp.h:22844
__RV_DKSUB16
__STATIC_FORCEINLINE unsigned long long __RV_DKSUB16(unsigned long long a, unsigned long long b)
DKSUB16 (64-bit SIMD 16-bit Signed Saturating Subtraction)
Definition: core_feature_dsp.h:18702
__RV_DKCRAS32
__STATIC_FORCEINLINE unsigned long long __RV_DKCRAS32(unsigned long long a, unsigned long long b)
DKCRAS32 (32-bit Signed Saturating Cross Addition & Subtraction)
Definition: core_feature_dsp.h:21201
__RV_DKMABT32
__STATIC_FORCEINLINE long long __RV_DKMABT32(long long t, unsigned long long a, unsigned long long b)
DKMABT32 (Saturating Signed Multiply Bottom & Top Words & Add)
Definition: core_feature_dsp.h:23778
__RV_DSMAQA_SU
__STATIC_FORCEINLINE unsigned long long __RV_DSMAQA_SU(unsigned long long t, unsigned long long a, unsigned long long b)
DSMAQASU (Four Signed 8 x Unsigned 8 with 32-bit Signed Add)
Definition: core_feature_dsp.h:22185
__RV_UMIN8
__STATIC_FORCEINLINE unsigned long __RV_UMIN8(unsigned long a, unsigned long b)
UMIN8 (SIMD 8-bit Unsigned Minimum)
Definition: core_feature_dsp.h:12417
__RV_DSMS32_U
__STATIC_FORCEINLINE long __RV_DSMS32_U(unsigned long long a, unsigned long long b)
DSMS32.u (64-bit SIMD 32-bit Signed Multiply Subtraction with Rounding and Clip)
Definition: core_feature_dsp.h:23388
__RV_DSMTT16
__STATIC_FORCEINLINE unsigned long long __RV_DSMTT16(unsigned long long a, unsigned long long b)
DSMTT16 (Signed Multiply Top Half & Top Half)
Definition: core_feature_dsp.h:20707
__RV_STSA16
__STATIC_FORCEINLINE unsigned long __RV_STSA16(unsigned long a, unsigned long b)
STSA16 (SIMD 16-bit Straight Subtraction & Addition)
Definition: core_feature_dsp.h:10627
__RV_KSLL16
__STATIC_FORCEINLINE unsigned long __RV_KSLL16(unsigned long a, unsigned int b)
KSLL16 (SIMD 16-bit Saturating Shift Left Logical)
Definition: core_feature_dsp.h:4777
__RV_UMAQA
__STATIC_FORCEINLINE unsigned long __RV_UMAQA(unsigned long t, unsigned long a, unsigned long b)
UMAQA (Unsigned Multiply Four Bytes with 32-bit Adds)
Definition: core_feature_dsp.h:12301
__RV_KDMABT16
__STATIC_FORCEINLINE unsigned long __RV_KDMABT16(unsigned long t, unsigned long a, unsigned long b)
KDMABT16 (SIMD Signed Saturating Double Multiply Addition B16 x T16)
Definition: core_feature_dsp.h:14388
__RV_SUB16
__STATIC_FORCEINLINE unsigned long __RV_SUB16(unsigned long a, unsigned long b)
SUB16 (SIMD 16-bit Subtraction)
Definition: core_feature_dsp.h:10709
__RV_UKADDH
__STATIC_FORCEINLINE unsigned long __RV_UKADDH(unsigned int a, unsigned int b)
UKADDH (Unsigned Addition with U16 Saturation)
Definition: core_feature_dsp.h:11566
__RV_UKSUBH
__STATIC_FORCEINLINE unsigned long __RV_UKSUBH(unsigned int a, unsigned int b)
UKSUBH (Unsigned Subtraction with U16 Saturation)
Definition: core_feature_dsp.h:12159
__RV_DSMXA32_U
__STATIC_FORCEINLINE long __RV_DSMXA32_U(unsigned long long a, unsigned long long b)
DSMXA32.u (64-bit SIMD 32-bit Signed Cross Multiply Addition with Rounding and Clip)
Definition: core_feature_dsp.h:23347
__RV_DSMBB32_SRA14
__STATIC_FORCEINLINE long long __RV_DSMBB32_SRA14(unsigned long long a, unsigned long long b)
DSMBB32.sra14 (Signed Multiply Bottom Word & Bottom Word with Right Shift 14-bit)
Definition: core_feature_dsp.h:19883
__STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE
Define a static function that should be always inlined by the compiler.
Definition: nmsis_gcc.h:70
__RV_DKHM16
__STATIC_FORCEINLINE unsigned long long __RV_DKHM16(unsigned long long a, unsigned long long b)
DKHM16 (64-bit SIMD Signed Saturating Q15 Multiply)
Definition: core_feature_dsp.h:18306
__RV_DPKBB16
__STATIC_FORCEINLINE unsigned long long __RV_DPKBB16(unsigned long long a, unsigned long long b)
DPKBB16 (Pack Two 16-bit Data from Both Bottom Half)
Definition: core_feature_dsp.h:20387
__RV_DSMBB16
__STATIC_FORCEINLINE unsigned long long __RV_DSMBB16(unsigned long long a, unsigned long long b)
DSMBB16 (Signed Multiply Bottom Half & Bottom Half)
Definition: core_feature_dsp.h:20625
__RV_DSMXDS32
__STATIC_FORCEINLINE long long __RV_DSMXDS32(unsigned long long a, unsigned long long b)
DSMXDS32 (Two Cross Signed 32x32 with 64-bit Sub)
Definition: core_feature_dsp.h:22751
__RV_DKMDA32
__STATIC_FORCEINLINE long long __RV_DKMDA32(unsigned long long a, unsigned long long b)
DKMDA32 (Two Signed 32x32 with 64-bit Saturation Add)
Definition: core_feature_dsp.h:22275
__RV_SMBT32
__STATIC_FORCEINLINE long __RV_SMBT32(unsigned long a, unsigned long b)
SMBT32 (Signed Multiply Bottom Word & Top Word)
Definition: core_feature_dsp.h:16388
__RV_DSMDS32
__STATIC_FORCEINLINE long long __RV_DSMDS32(unsigned long long a, unsigned long long b)
DSMDS32 (Two Signed 32x32 with 64-bit Sub)
Definition: core_feature_dsp.h:22664
__RV_RSUB64
__STATIC_FORCEINLINE long long __RV_RSUB64(long long a, long long b)
RSUB64 (64-bit Signed Halving Subtraction)
Definition: core_feature_dsp.h:6820
__RV_RDOV
__STATIC_FORCEINLINE unsigned long __RV_RDOV(void)
RDOV (Read OV flag)
Definition: core_feature_dsp.h:6574
__RV_DKMSDA
__STATIC_FORCEINLINE unsigned long long __RV_DKMSDA(unsigned long long t, unsigned long long a, unsigned long long b)
DKMSDA (Two 16x16 with 32-bit Signed Double Sub)
Definition: core_feature_dsp.h:22041
__RV_DSMALDS
__STATIC_FORCEINLINE long long __RV_DSMALDS(long long t, unsigned long long a, unsigned long long b)
DSMALDS (Four Signed 16x16 with 64-bit Add and Sub)
Definition: core_feature_dsp.h:22890
__RV_SMAQA
__STATIC_FORCEINLINE long __RV_SMAQA(long t, unsigned long a, unsigned long b)
SMAQA (Signed Multiply Four Bytes with 32-bit Adds)
Definition: core_feature_dsp.h:8181
__RV_SMSLXDA
__STATIC_FORCEINLINE long long __RV_SMSLXDA(long long t, unsigned long a, unsigned long b)
SMSLXDA (Signed Crossed Multiply Two Halfs & Add & Subtract 64-bit)
Definition: core_feature_dsp.h:9181
__RV_DKMSDA32
__STATIC_FORCEINLINE long long __RV_DKMSDA32(long long t, unsigned long long a, unsigned long long b)
DKMSDA32 (Two Signed 32x32 with 64-bit Saturation Sub)
Definition: core_feature_dsp.h:22578
__RV_DRADD16
__STATIC_FORCEINLINE unsigned long long __RV_DRADD16(unsigned long long a, unsigned long long b)
DRADD16 (64-bit SIMD 16-bit Halving Signed Addition)
Definition: core_feature_dsp.h:19222
__RV_KHMBB16
__STATIC_FORCEINLINE unsigned long __RV_KHMBB16(unsigned long a, unsigned long b)
KHMBB16 (SIMD Signed Saturating Half Multiply B16 x B16)
Definition: core_feature_dsp.h:14505
__RV_DKHMX16
__STATIC_FORCEINLINE unsigned long long __RV_DKHMX16(unsigned long long a, unsigned long long b)
DKHMX16 (64-bit SIMD Signed Crossed Saturating Q15 Multiply)
Definition: core_feature_dsp.h:18808
__RV_DRSUB16
__STATIC_FORCEINLINE unsigned long long __RV_DRSUB16(unsigned long long a, unsigned long long b)
DRSUB16 (16-bit Signed Halving Subtraction)
Definition: core_feature_dsp.h:21021
__RV_DKMAXDA32
__STATIC_FORCEINLINE long long __RV_DKMAXDA32(long long t, unsigned long long a, unsigned long long b)
DKMAXDA32 (Two Cross Signed 32x32 with 64-bit Saturation Add)
Definition: core_feature_dsp.h:22403
__RV_KADD16
__STATIC_FORCEINLINE unsigned long __RV_KADD16(unsigned long a, unsigned long b)
KADD16 (SIMD 16-bit Signed Saturating Addition)
Definition: core_feature_dsp.h:1600
__RV_SUB8
__STATIC_FORCEINLINE unsigned long __RV_SUB8(unsigned long a, unsigned long b)
SUB8 (SIMD 8-bit Subtraction)
Definition: core_feature_dsp.h:10668
__RV_CRSA32
__STATIC_FORCEINLINE unsigned long __RV_CRSA32(unsigned long a, unsigned long b)
CRSA32 (SIMD 32-bit Cross Subtraction & Addition)
Definition: core_feature_dsp.h:13894
__RV_UMUL16
__STATIC_FORCEINLINE unsigned long long __RV_UMUL16(unsigned int a, unsigned int b)
UMUL16 (SIMD Unsigned 16-bit Multiply)
Definition: core_feature_dsp.h:12762
__RV_DMSR33
__STATIC_FORCEINLINE unsigned long long __RV_DMSR33(unsigned long long a, unsigned long long b)
DMSR33 (Signed Multiply with Right Shift 33-bit and Cross Multiply with Right Shift 33-bit)
Definition: core_feature_dsp.h:19468
__RV_DKMAXDS32
__STATIC_FORCEINLINE long long __RV_DKMAXDS32(long long t, unsigned long long a, unsigned long long b)
DKMAXDS32 (Two Cross Signed 32x32 with 64-bit Saturation Add and Sub)
Definition: core_feature_dsp.h:22535
__RV_DPKBT32
__STATIC_FORCEINLINE unsigned long long __RV_DPKBT32(unsigned long long a, unsigned long long b)
DPKBT32 (Pack Two 32-bit Data from Bottom and Top Half)
Definition: core_feature_dsp.h:20237
__RV_DSMALBT
__STATIC_FORCEINLINE long long __RV_DSMALBT(long long t, unsigned long long a, unsigned long long b)
DSMALBT (Signed Multiply Bottom Half & Top Half & Add 64-bit)
Definition: core_feature_dsp.h:23639
__RV_DKMATT32
__STATIC_FORCEINLINE long long __RV_DKMATT32(long long t, unsigned long long a, unsigned long long b)
DKMATT32 (Saturating Signed Multiply Top Words & Add)
Definition: core_feature_dsp.h:23826
__RV_KMATT32
__STATIC_FORCEINLINE long __RV_KMATT32(long t, unsigned long a, unsigned long b)
KMATT32 (Saturating Signed Multiply Top Words & Add)
Definition: core_feature_dsp.h:14798
__RV_DSMTT32_SRA32
__STATIC_FORCEINLINE long long __RV_DSMTT32_SRA32(unsigned long long a, unsigned long long b)
DSMTT32.sra32 (Signed Multiply Top Word & Top Word with Right Shift 32-bit)
Definition: core_feature_dsp.h:20163
__RV_KHMBB
__STATIC_FORCEINLINE long __RV_KHMBB(unsigned int a, unsigned int b)
KHMBB (Signed Saturating Half Multiply B16 x B16)
Definition: core_feature_dsp.h:2535
__RV_SRA32_U
__STATIC_FORCEINLINE unsigned long __RV_SRA32_U(unsigned long a, unsigned int b)
SRA32.u (SIMD 32-bit Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:16740
__RV_SUNPKD820
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD820(unsigned long a)
SUNPKD820 (Signed Unpacking Bytes 2 & 0)
Definition: core_feature_dsp.h:10855
__RV_KSLRAW
__STATIC_FORCEINLINE long __RV_KSLRAW(int a, int b)
KSLRAW (Shift Left Logical with Q31 Saturation or Shift Right Arithmetic)
Definition: core_feature_dsp.h:5158
__RV_EXPD81
__STATIC_FORCEINLINE unsigned long __RV_EXPD81(unsigned long a)
EXPD81 (Expand and Copy Byte 1 to 32bit(rv32) or 64bit(when rv64))
Definition: core_feature_dsp.h:17963
__RV_KMXDA
__STATIC_FORCEINLINE long __RV_KMXDA(unsigned long a, unsigned long b)
KMXDA (SIMD Signed Crossed Multiply Two Halfs and Add)
Definition: core_feature_dsp.h:3344
__RV_DDUMAQA
__STATIC_FORCEINLINE long long __RV_DDUMAQA(long long t, unsigned long long a, unsigned long long b)
DDUMAQA (Eight Unsigned 8x8 with 64-bit Unsigned Add)
Definition: core_feature_dsp.h:23225
__RV_KMADA32
__STATIC_FORCEINLINE long __RV_KMADA32(long t, unsigned long a, unsigned long b)
KMADA32 (Saturating Signed Multiply Two Words and Two Adds)
Definition: core_feature_dsp.h:14854
__RV_KSLRA8
__STATIC_FORCEINLINE unsigned long __RV_KSLRA8(unsigned long a, int b)
KSLRA8 (SIMD 8-bit Shift Left Logical with Saturation or Shift Right Arithmetic)
Definition: core_feature_dsp.h:4897
__RV_UKMAR64
__STATIC_FORCEINLINE unsigned long long __RV_UKMAR64(unsigned long long t, unsigned long a, unsigned long b)
UKMAR64 (Unsigned Multiply and Saturating Add to 64-Bit Data)
Definition: core_feature_dsp.h:11785
__RV_SMALTT
__STATIC_FORCEINLINE long long __RV_SMALTT(long long t, unsigned long a, unsigned long b)
SMALTT (Signed Multiply Top Halfs & Add 64-bit)
Definition: core_feature_dsp.h:7644
__RV_KHM16
__STATIC_FORCEINLINE unsigned long __RV_KHM16(unsigned long a, unsigned long b)
KHM16 (SIMD Signed Saturating Q15 Multiply)
Definition: core_feature_dsp.h:2419
__RV_DKMAXDS
__STATIC_FORCEINLINE unsigned long long __RV_DKMAXDS(unsigned long long t, unsigned long long a, unsigned long long b)
DKMAXDS (Saturating Signed Crossed Multiply Two Halfs & Subtract & Add)
Definition: core_feature_dsp.h:21995
__RV_KMADA
__STATIC_FORCEINLINE long __RV_KMADA(long t, unsigned long a, unsigned long b)
KMADA (SIMD Saturating Signed Multiply Two Halfs and Two Adds)
Definition: core_feature_dsp.h:2895
__RV_UCMPLT16
__STATIC_FORCEINLINE unsigned long __RV_UCMPLT16(unsigned long a, unsigned long b)
UCMPLT16 (SIMD 16-bit Unsigned Compare Less Than)
Definition: core_feature_dsp.h:11372
__RV_DKCRAS16
__STATIC_FORCEINLINE unsigned long long __RV_DKCRAS16(unsigned long long a, unsigned long long b)
DKCRAS16 (16-bit Signed Saturating Cross Addition & Subtraction)
Definition: core_feature_dsp.h:20928
__RV_CRAS32
__STATIC_FORCEINLINE unsigned long __RV_CRAS32(unsigned long a, unsigned long b)
CRAS32 (SIMD 32-bit Cross Addition & Subtraction)
Definition: core_feature_dsp.h:13852
__RV_KMADS
__STATIC_FORCEINLINE long __RV_KMADS(long t, unsigned long a, unsigned long b)
KMADS (SIMD Saturating Signed Multiply Two Halfs & Subtract & Add)
Definition: core_feature_dsp.h:3031
__RV_DKMXDA
__STATIC_FORCEINLINE unsigned long long __RV_DKMXDA(unsigned long long a, unsigned long long b)
DKMXDA (Signed Crossed Multiply Two Halfs and Add)
Definition: core_feature_dsp.h:19724
__RV_DADD32
__STATIC_FORCEINLINE unsigned long long __RV_DADD32(unsigned long long a, unsigned long long b)
DADD32 (32-bit Addition)
Definition: core_feature_dsp.h:20584
__RV_CLRS16
__STATIC_FORCEINLINE unsigned long __RV_CLRS16(unsigned long a)
CLRS16 (SIMD 16-bit Count Leading Redundant Sign)
Definition: core_feature_dsp.h:823
__RV_KABS16
__STATIC_FORCEINLINE unsigned long __RV_KABS16(unsigned long a)
KABS16 (SIMD 16-bit Saturating Absolute)
Definition: core_feature_dsp.h:1459
__RV_RSTSA32
__STATIC_FORCEINLINE unsigned long __RV_RSTSA32(unsigned long a, unsigned long b)
RSTSA32 (SIMD 32-bit Signed Halving Straight Subtraction & Addition)
Definition: core_feature_dsp.h:16126
__RV_SCMPLE8
__STATIC_FORCEINLINE unsigned long __RV_SCMPLE8(unsigned long a, unsigned long b)
SCMPLE8 (SIMD 8-bit Signed Compare Less Than & Equal)
Definition: core_feature_dsp.h:7055
__RV_SMDRS
__STATIC_FORCEINLINE long __RV_SMDRS(unsigned long a, unsigned long b)
SMDRS (SIMD Signed Multiply Two Halfs and Reverse Subtract)
Definition: core_feature_dsp.h:8577
__RV_KMMAWB2_U
__STATIC_FORCEINLINE long __RV_KMMAWB2_U(long t, unsigned long a, unsigned long b)
KMMAWB2.u (SIMD Saturating MSW Signed Multiply Word and Bottom Half & 2 and Add with Rounding)
Definition: core_feature_dsp.h:3727
__RV_KMMAC
__STATIC_FORCEINLINE long __RV_KMMAC(long t, long a, long b)
KMMAC (SIMD Saturating MSW Signed Multiply Word and Add)
Definition: core_feature_dsp.h:3406
__RV_SMULX16
__STATIC_FORCEINLINE unsigned long long __RV_SMULX16(unsigned int a, unsigned int b)
SMULX16 (SIMD Signed Crossed 16-bit Multiply)
Definition: core_feature_dsp.h:9569
__RV_SUNPKD832
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD832(unsigned long a)
SUNPKD832 (Signed Unpacking Bytes 3 & 2)
Definition: core_feature_dsp.h:10993
__RV_KSLLW
__STATIC_FORCEINLINE long __RV_KSLLW(long a, unsigned int b)
KSLLW (Saturating Shift Left Logical for Word)
Definition: core_feature_dsp.h:4570
__RV_DSMALDA
__STATIC_FORCEINLINE long long __RV_DSMALDA(long long t, unsigned long long a, unsigned long long b)
DSMALDA (Four Signed 16x16 with 64-bit Add)
Definition: core_feature_dsp.h:22798
__RV_MADDR32
__STATIC_FORCEINLINE unsigned long __RV_MADDR32(unsigned long t, unsigned long a, unsigned long b)
MADDR32 (Multiply and Add to 32-Bit Word)
Definition: core_feature_dsp.h:5749
__RV_DKMSXDA
__STATIC_FORCEINLINE unsigned long long __RV_DKMSXDA(unsigned long long t, unsigned long long a, unsigned long long b)
DKMSXDA (Two Cross 16x16 with 32-bit Signed Double Sub)
Definition: core_feature_dsp.h:22087
__RV_ZUNPKD830
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD830(unsigned long a)
ZUNPKD830 (Unsigned Unpacking Bytes 3 & 0)
Definition: core_feature_dsp.h:13675
__RV_KADD32
__STATIC_FORCEINLINE unsigned long __RV_KADD32(unsigned long a, unsigned long b)
KADD32 (SIMD 32-bit Signed Saturating Addition)
Definition: core_feature_dsp.h:14000
__RV_URADD64
__STATIC_FORCEINLINE unsigned long long __RV_URADD64(unsigned long long a, unsigned long long b)
URADD64 (64-bit Unsigned Halving Addition)
Definition: core_feature_dsp.h:12993
__RV_URSTSA16
__STATIC_FORCEINLINE unsigned long __RV_URSTSA16(unsigned long a, unsigned long b)
URSTSA16 (SIMD 16-bit Unsigned Halving Straight Subtraction & Addition)
Definition: core_feature_dsp.h:13237
__RV_KDMBT16
__STATIC_FORCEINLINE unsigned long __RV_KDMBT16(unsigned long a, unsigned long b)
KDMBT16 (SIMD Signed Saturating Double Multiply B16 x T16)
Definition: core_feature_dsp.h:14205
__RV_URCRAS32
__STATIC_FORCEINLINE unsigned long __RV_URCRAS32(unsigned long a, unsigned long b)
URCRAS32 (SIMD 32-bit Unsigned Halving Cross Addition & Subtraction)
Definition: core_feature_dsp.h:17696
__RV_KDMTT
__STATIC_FORCEINLINE long __RV_KDMTT(unsigned int a, unsigned int b)
KDMTT (Signed Saturating Double Multiply T16 x T16)
Definition: core_feature_dsp.h:2040
__RV_SCMPLE16
__STATIC_FORCEINLINE unsigned long __RV_SCMPLE16(unsigned long a, unsigned long b)
SCMPLE16 (SIMD 16-bit Signed Compare Less Than & Equal)
Definition: core_feature_dsp.h:7095
__RV_KMMAWB
__STATIC_FORCEINLINE long __RV_KMMAWB(long t, unsigned long a, unsigned long b)
KMMAWB (SIMD Saturating MSW Signed Multiply Word and Bottom Half and Add)
Definition: core_feature_dsp.h:3529
__RV_UCMPLT8
__STATIC_FORCEINLINE unsigned long __RV_UCMPLT8(unsigned long a, unsigned long b)
UCMPLT8 (SIMD 8-bit Unsigned Compare Less Than)
Definition: core_feature_dsp.h:11333
__RV_DSMALDRS
__STATIC_FORCEINLINE long long __RV_DSMALDRS(long long t, unsigned long long a, unsigned long long b)
DSMALDRS (Four Signed 16x16 with 64-bit Add and Reversed Sub)
Definition: core_feature_dsp.h:22936
__RV_DKMADRS32
__STATIC_FORCEINLINE long long __RV_DKMADRS32(long long t, unsigned long long a, unsigned long long b)
DKMADRS32 (Two Signed 32x32 with 64-bit Saturation Reversed Add and Sub)
Definition: core_feature_dsp.h:22491
__RV_RSUBW
__STATIC_FORCEINLINE long __RV_RSUBW(int a, int b)
RSUBW (32-bit Signed Halving Subtraction)
Definition: core_feature_dsp.h:6868
__RV_ADD16
__STATIC_FORCEINLINE unsigned long __RV_ADD16(unsigned long a, unsigned long b)
ADD16 (SIMD 16-bit Addition)
Definition: core_feature_dsp.h:490
__RV_STAS32
__STATIC_FORCEINLINE unsigned long __RV_STAS32(unsigned long a, unsigned long b)
STAS32 (SIMD 32-bit Straight Addition & Subtraction)
Definition: core_feature_dsp.h:17147
__RV_SMMUL
__STATIC_FORCEINLINE long __RV_SMMUL(long a, long b)
SMMUL (SIMD MSW Signed Multiply Word)
Definition: core_feature_dsp.h:8765
__RV_SMALXDS
__STATIC_FORCEINLINE long long __RV_SMALXDS(long long t, unsigned long a, unsigned long b)
SMALXDS (Signed Crossed Multiply Two Halfs & Subtract & Add 64-bit)
Definition: core_feature_dsp.h:8085
__RV_SRA8
__STATIC_FORCEINLINE unsigned long __RV_SRA8(unsigned long a, unsigned int b)
SRA8 (SIMD 8-bit Shift Right Arithmetic)
Definition: core_feature_dsp.h:9733
__RV_PKTB32
__STATIC_FORCEINLINE unsigned long __RV_PKTB32(unsigned long a, unsigned long b)
PKTB32 (Pack Two 32-bit Data from Top and Bottom Half)
Definition: core_feature_dsp.h:15893
__RV_DSMBB32
__STATIC_FORCEINLINE long long __RV_DSMBB32(unsigned long long a, unsigned long long b)
DSMBB32 (Signed Multiply Bottom Word & Bottom Word)
Definition: core_feature_dsp.h:19844
__RV_ZUNPKD831
__STATIC_FORCEINLINE unsigned long __RV_ZUNPKD831(unsigned long a)
ZUNPKD831 (Unsigned Unpacking Bytes 3 & 1)
Definition: core_feature_dsp.h:13721
__RV_URSUB16
__STATIC_FORCEINLINE unsigned long __RV_URSUB16(unsigned long a, unsigned long b)
URSUB16 (SIMD 16-bit Unsigned Halving Subtraction)
Definition: core_feature_dsp.h:13331
__RV_SMSLDA
__STATIC_FORCEINLINE long long __RV_SMSLDA(long long t, unsigned long a, unsigned long b)
SMSLDA (Signed Multiply Two Halfs & Add & Subtract 64-bit)
Definition: core_feature_dsp.h:9099
__RV_DREDAS16
__STATIC_FORCEINLINE unsigned long __RV_DREDAS16(unsigned long long a)
DREDAS16 (Reduced Addition and Reduced Subtraction)
Definition: core_feature_dsp.h:19547
__RV_KDMBB
__STATIC_FORCEINLINE long __RV_KDMBB(unsigned int a, unsigned int b)
KDMBB (Signed Saturating Double Multiply B16 x B16)
Definition: core_feature_dsp.h:1930
__RV_DRSUB32
__STATIC_FORCEINLINE unsigned long long __RV_DRSUB32(unsigned long long a, unsigned long long b)
DRSUB32 (32-bit Signed Halving Subtraction)
Definition: core_feature_dsp.h:21573
__RV_SMAR64
__STATIC_FORCEINLINE long long __RV_SMAR64(long long t, long a, long b)
SMAR64 (Signed Multiply and Add to 64-Bit Data)
Definition: core_feature_dsp.h:8137
__RV_SMMWB_U
__STATIC_FORCEINLINE long __RV_SMMWB_U(long a, unsigned long b)
SMMWB.u (SIMD MSW Signed Multiply Word and Bottom Half with Rounding)
Definition: core_feature_dsp.h:8916
__RV_DKCLIP64
__STATIC_FORCEINLINE int16_t __RV_DKCLIP64(unsigned long long a)
DKCLIP64 (64-bit Clipped to 16-bit Saturation Value)
Definition: core_feature_dsp.h:19630
__RV_UKCRAS32
__STATIC_FORCEINLINE unsigned long __RV_UKCRAS32(unsigned long a, unsigned long b)
UKCRAS32 (SIMD 32-bit Unsigned Saturating Cross Addition & Subtraction)
Definition: core_feature_dsp.h:17326
__RV_DKCRSA32
__STATIC_FORCEINLINE unsigned long long __RV_DKCRSA32(unsigned long long a, unsigned long long b)
DKCRSA32 (32-bit Signed Saturating Cross Subtraction & Addition)
Definition: core_feature_dsp.h:21151
__RV_UMUL8
__STATIC_FORCEINLINE unsigned long long __RV_UMUL8(unsigned int a, unsigned int b)
UMUL8 (SIMD Unsigned 8-bit Multiply)
Definition: core_feature_dsp.h:12593
__RV_SMBB32
__STATIC_FORCEINLINE long __RV_SMBB32(unsigned long a, unsigned long b)
SMBB32 (Signed Multiply Bottom Word & Bottom Word)
Definition: core_feature_dsp.h:16339
__RV_DKSTAS16
__STATIC_FORCEINLINE unsigned long long __RV_DKSTAS16(unsigned long long a, unsigned long long b)
DKSTAS16 (16-bit Signed Saturating Straight Addition & Subtraction)
Definition: core_feature_dsp.h:21391
__RV_UMAR64
__STATIC_FORCEINLINE unsigned long long __RV_UMAR64(unsigned long long t, unsigned long a, unsigned long b)
UMAR64 (Unsigned Multiply and Add to 64-Bit Data)
Definition: core_feature_dsp.h:12257
__RV_SMDRS32
__STATIC_FORCEINLINE long __RV_SMDRS32(unsigned long a, unsigned long b)
SMDRS32 (Signed Multiply Two Words and Reverse Subtract)
Definition: core_feature_dsp.h:16543
__RV_UKADD32
__STATIC_FORCEINLINE unsigned long __RV_UKADD32(unsigned long a, unsigned long b)
UKADD32 (SIMD 32-bit Unsigned Saturating Addition)
Definition: core_feature_dsp.h:17273
__RV_SUNPKD831
__STATIC_FORCEINLINE unsigned long __RV_SUNPKD831(unsigned long a)
SUNPKD831 (Signed Unpacking Bytes 3 & 1)
Definition: core_feature_dsp.h:10947
__RV_DKSUB8
__STATIC_FORCEINLINE unsigned long long __RV_DKSUB8(unsigned long long a, unsigned long long b)
DKSUB8 (64-bit SIMD 8-bit Signed Saturating Subtraction)
Definition: core_feature_dsp.h:18654
__RV_MULR64
__STATIC_FORCEINLINE unsigned long long __RV_MULR64(unsigned long a, unsigned long b)
MULR64 (Multiply Word Unsigned to 64-bit Data)
Definition: core_feature_dsp.h:5922
__RV_MINW
__STATIC_FORCEINLINE long __RV_MINW(int a, int b)
MINW (32-bit Signed Word Minimum)
Definition: core_feature_dsp.h:5824
__RV_UKCRAS16
__STATIC_FORCEINLINE unsigned long __RV_UKCRAS16(unsigned long a, unsigned long b)
UKCRAS16 (SIMD 16-bit Unsigned Saturating Cross Addition & Subtraction)
Definition: core_feature_dsp.h:11665
__RV_KDMATT16
__STATIC_FORCEINLINE unsigned long __RV_KDMATT16(unsigned long t, unsigned long a, unsigned long b)
KDMATT16 (SIMD Signed Saturating Double Multiply Addition T16 x T16)
Definition: core_feature_dsp.h:14453
__RV_DDSMAQASU
__STATIC_FORCEINLINE long long __RV_DDSMAQASU(long long t, unsigned long long a, unsigned long long b)
DDSMAQASU (Eight Signed 8 x Unsigned 8 with 64-bit Add)
Definition: core_feature_dsp.h:23174
__RV_DSMAXDA16
__STATIC_FORCEINLINE long __RV_DSMAXDA16(long long t, unsigned long long a, unsigned long long b)
DSMAXDA16 (Signed Crossed Multiply Two Halfs and Two Adds 32-bit)
Definition: core_feature_dsp.h:23477
__RV_DRCRSA32
__STATIC_FORCEINLINE unsigned long long __RV_DRCRSA32(unsigned long long a, unsigned long long b)
DRCRSA32 (32-bit Signed Halving Cross Subtraction & Addition)
Definition: core_feature_dsp.h:20790
__RV_URCRSA16
__STATIC_FORCEINLINE unsigned long __RV_URCRSA16(unsigned long a, unsigned long b)
URCRSA16 (SIMD 16-bit Unsigned Halving Cross Subtraction & Addition)
Definition: core_feature_dsp.h:13139
__RV_DMSR16
__STATIC_FORCEINLINE unsigned long long __RV_DMSR16(unsigned long a, unsigned long b)
DMSR16 (Signed Multiply Halfs with Right Shift 16-bit and Cross Multiply Halfs with Right Shift 16-bi...
Definition: core_feature_dsp.h:19381
__RV_DUMAQA
__STATIC_FORCEINLINE unsigned long long __RV_DUMAQA(unsigned long long t, unsigned long long a, unsigned long long b)
DUMAQA (Four Unsigned 8x8 with 32-bit Unsigned Add)
Definition: core_feature_dsp.h:22234
__RV_KSLL32
__STATIC_FORCEINLINE unsigned long __RV_KSLL32(unsigned long a, unsigned int b)
KSLL32 (SIMD 32-bit Saturating Shift Left Logical)
Definition: core_feature_dsp.h:15367
__RV_KSLRA16
__STATIC_FORCEINLINE unsigned long __RV_KSLRA16(unsigned long a, int b)
KSLRA16 (SIMD 16-bit Shift Left Logical with Saturation or Shift Right Arithmetic)
Definition: core_feature_dsp.h:5031
__RV_URCRSA32
__STATIC_FORCEINLINE unsigned long __RV_URCRSA32(unsigned long a, unsigned long b)
URCRSA32 (SIMD 32-bit Unsigned Halving Cross Subtraction & Addition)
Definition: core_feature_dsp.h:17743
__RV_KSLRAW_U
__STATIC_FORCEINLINE long __RV_KSLRAW_U(int a, int b)
KSLRAW.u (Shift Left Logical with Q31 Saturation or Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:5221
__RV_DSMBB32_SRA32
__STATIC_FORCEINLINE long long __RV_DSMBB32_SRA32(unsigned long long a, unsigned long long b)
DSMBB32.sra32 (Signed Crossed Multiply Two Halfs and Subtract with Right Shift 32)
Definition: core_feature_dsp.h:19923
__RV_SRA16
__STATIC_FORCEINLINE unsigned long __RV_SRA16(unsigned long a, unsigned long b)
SRA16 (SIMD 16-bit Shift Right Arithmetic)
Definition: core_feature_dsp.h:9949
__RV_URSUB8
__STATIC_FORCEINLINE unsigned long __RV_URSUB8(unsigned long a, unsigned long b)
URSUB8 (SIMD 8-bit Unsigned Halving Subtraction)
Definition: core_feature_dsp.h:13284
__RV_KDMABB16
__STATIC_FORCEINLINE unsigned long __RV_KDMABB16(unsigned long t, unsigned long a, unsigned long b)
KDMABB16 (SIMD Signed Saturating Double Multiply Addition B16 x B16)
Definition: core_feature_dsp.h:14323
__RV_KWMMUL
__STATIC_FORCEINLINE long __RV_KWMMUL(long a, long b)
KWMMUL (SIMD Saturating MSW Signed Multiply Word & Double)
Definition: core_feature_dsp.h:5647
__RV_RADD8
__STATIC_FORCEINLINE unsigned long __RV_RADD8(unsigned long a, unsigned long b)
RADD8 (SIMD 8-bit Signed Halving Addition)
Definition: core_feature_dsp.h:6303
__RV_DPKTB16
__STATIC_FORCEINLINE unsigned long long __RV_DPKTB16(unsigned long long a, unsigned long long b)
DPKTB16 (Pack Two 32-bit Data from Top and Bottom Half)
Definition: core_feature_dsp.h:20349
__RV_UKADD16
__STATIC_FORCEINLINE unsigned long __RV_UKADD16(unsigned long a, unsigned long b)
UKADD16 (SIMD 16-bit Unsigned Saturating Addition)
Definition: core_feature_dsp.h:11462
__RV_UMAX16
__STATIC_FORCEINLINE unsigned long __RV_UMAX16(unsigned long a, unsigned long b)
UMAX16 (SIMD 16-bit Unsigned Maximum)
Definition: core_feature_dsp.h:12378
__RV_DKWMMUL_U
__STATIC_FORCEINLINE unsigned long long __RV_DKWMMUL_U(unsigned long long a, unsigned long long b)
DKWMMUL.u (64-bit MSW 32x32 Unsigned Multiply & Double)
Definition: core_feature_dsp.h:18992
__RV_DSMSLDA
__STATIC_FORCEINLINE long long __RV_DSMSLDA(long long t, unsigned long long a, unsigned long long b)
DSMSLDA (Four Signed 16x16 with 64-bit Sub)
Definition: core_feature_dsp.h:23027
__RV_SMALDRS
__STATIC_FORCEINLINE long long __RV_SMALDRS(long long t, unsigned long a, unsigned long b)
SMALDRS (Signed Multiply Two Halfs & Reverse Subtract & Add 64-bit)
Definition: core_feature_dsp.h:7994
__RV_UKADDW
__STATIC_FORCEINLINE unsigned long __RV_UKADDW(unsigned int a, unsigned int b)
UKADDW (Unsigned Addition with U32 Saturation)
Definition: core_feature_dsp.h:11609
__RV_KSLRA32_U
__STATIC_FORCEINLINE unsigned long __RV_KSLRA32_U(unsigned long a, int b)
KSLRA32.u (SIMD 32-bit Shift Left Logical with Saturation or Rounding Shift Right Arithmetic)
Definition: core_feature_dsp.h:15551
__RV_DKMSXDA32
__STATIC_FORCEINLINE long long __RV_DKMSXDA32(long long t, unsigned long long a, unsigned long long b)
DKMSXDA32 (Two Cross Signed 32x32 with 64-bit Saturation Sub)
Definition: core_feature_dsp.h:22621
__RV_UKADD8
__STATIC_FORCEINLINE unsigned long __RV_UKADD8(unsigned long a, unsigned long b)
UKADD8 (SIMD 8-bit Unsigned Saturating Addition)
Definition: core_feature_dsp.h:11417
__RV_KSTAS32
__STATIC_FORCEINLINE unsigned long __RV_KSTAS32(unsigned long a, unsigned long b)
KSTAS32 (SIMD 32-bit Signed Saturating Straight Addition & Subtraction)
Definition: core_feature_dsp.h:15603
__RV_DKWMMUL
__STATIC_FORCEINLINE unsigned long long __RV_DKWMMUL(unsigned long long a, unsigned long long b)
DKWMMUL (64-bit MSW 32x32 Signed Multiply & Double)
Definition: core_feature_dsp.h:18945
__EXPD_BYTE
#define __EXPD_BYTE(x)
Expand byte to unsigned long value.
Definition: core_compatiable.h:254
__RV_DSMXDS
__STATIC_FORCEINLINE unsigned long long __RV_DSMXDS(unsigned long long a, unsigned long long b)
DSMXDS (Signed Crossed Multiply Two Halfs and Subtract)
Definition: core_feature_dsp.h:19806
__RV_KMAXDA
__STATIC_FORCEINLINE long __RV_KMAXDA(long t, unsigned long a, unsigned long b)
KMAXDA (SIMD Saturating Signed Crossed Multiply Two Halfs and Two Adds)
Definition: core_feature_dsp.h:2959
__RV_SMBT16
__STATIC_FORCEINLINE long __RV_SMBT16(unsigned long a, unsigned long b)
SMBT16 (SIMD Signed Multiply Bottom Half & Top Half)
Definition: core_feature_dsp.h:8407
__RV_EXPD80
__STATIC_FORCEINLINE unsigned long __RV_EXPD80(unsigned long a)
EXPD80 (Expand and Copy Byte 0 to 32bit(when rv32) or 64bit(when rv64))
Definition: core_feature_dsp.h:17928
__RV_URSTAS32
__STATIC_FORCEINLINE unsigned long __RV_URSTAS32(unsigned long a, unsigned long b)
URSTAS32 (SIMD 32-bit Unsigned Halving Straight Addition & Subtraction)
Definition: core_feature_dsp.h:17790
__RV_SWAP8
__STATIC_FORCEINLINE unsigned long __RV_SWAP8(unsigned long a)
SWAP8 (Swap Byte within Halfword)
Definition: core_feature_dsp.h:11030
__RV_KMMAWB_U
__STATIC_FORCEINLINE long __RV_KMMAWB_U(long t, unsigned long a, unsigned long b)
KMMAWB.u (SIMD Saturating MSW Signed Multiply Word and Bottom Half and Add with Rounding)
Definition: core_feature_dsp.h:3591
__RV_KCRAS32
__STATIC_FORCEINLINE unsigned long __RV_KCRAS32(unsigned long a, unsigned long b)
KCRAS32 (SIMD 32-bit Signed Saturating Cross Addition & Subtraction)
Definition: core_feature_dsp.h:14051
__RV_KMAR64
__STATIC_FORCEINLINE long long __RV_KMAR64(long long t, long a, long b)
KMAR64 (Signed Multiply and Saturating Add to 64-Bit Data)
Definition: core_feature_dsp.h:3243
__ASM
#define __ASM
Pass information from the compiler to the assembler.
Definition: nmsis_gcc.h:55
__RV_DPKTT16
__STATIC_FORCEINLINE unsigned long long __RV_DPKTT16(unsigned long long a, unsigned long long b)
DPKTT16 (Pack Two 16-bit Data from Both Top Half)
Definition: core_feature_dsp.h:20463
__RV_DKSLRA8
__STATIC_FORCEINLINE unsigned long long __RV_DKSLRA8(unsigned long long a, int b)
DKSLRA8 (64-bit SIMD 8-bit Shift Left Logical with Saturation or Shift Right Arithmetic)
Definition: core_feature_dsp.h:18454
__RV_PKTT32
__STATIC_FORCEINLINE unsigned long __RV_PKTT32(unsigned long a, unsigned long b)
PKTT32 (Pack Two 32-bit Data from Both Top Half)
Definition: core_feature_dsp.h:15845
__RV_UMAX8
__STATIC_FORCEINLINE unsigned long __RV_UMAX8(unsigned long a, unsigned long b)
UMAX8 (SIMD 8-bit Unsigned Maximum)
Definition: core_feature_dsp.h:12339
__RV_KMMWT2_U
__STATIC_FORCEINLINE long __RV_KMMWT2_U(long a, unsigned long b)
KMMWT2.u (SIMD Saturating MSW Signed Multiply Word and Top Half & 2 with Rounding)
Definition: core_feature_dsp.h:4330
__RV_RCRSA16
__STATIC_FORCEINLINE unsigned long __RV_RCRSA16(unsigned long a, unsigned long b)
RCRSA16 (SIMD 16-bit Signed Halving Cross Subtraction & Addition)
Definition: core_feature_dsp.h:6547
__RV_SRL32
__STATIC_FORCEINLINE unsigned long __RV_SRL32(unsigned long a, unsigned int b)
SRL32 (SIMD 32-bit Shift Right Logical)
Definition: core_feature_dsp.h:16947
__RV_KSUB32
__STATIC_FORCEINLINE unsigned long __RV_KSUB32(unsigned long a, unsigned long b)
KSUB32 (SIMD 32-bit Signed Saturating Subtraction)
Definition: core_feature_dsp.h:15701
__RV_UMIN32
__STATIC_FORCEINLINE unsigned long __RV_UMIN32(unsigned long a, unsigned long b)
UMIN32 (SIMD 32-bit Unsigned Minimum)
Definition: core_feature_dsp.h:17603
__RV_DKABS8
__STATIC_FORCEINLINE unsigned long long __RV_DKABS8(unsigned long long a)
DKABS8 (64-bit SIMD 8-bit Saturating Absolute)
Definition: core_feature_dsp.h:18351
__RV_KSUBW
__STATIC_FORCEINLINE long __RV_KSUBW(int a, int b)
KSUBW (Signed Subtraction with Q31 Saturation)
Definition: core_feature_dsp.h:5591
__RV_DCRSA32
__STATIC_FORCEINLINE unsigned long long __RV_DCRSA32(unsigned long long a, unsigned long long b)
DCRSA32 (32-bit Cross Subtraction & Addition)
Definition: core_feature_dsp.h:21241
__RV_KABS8
__STATIC_FORCEINLINE unsigned long __RV_KABS8(unsigned long a)
KABS8 (SIMD 8-bit Saturating Absolute)
Definition: core_feature_dsp.h:1414
__RV_KADDH
__STATIC_FORCEINLINE long __RV_KADDH(int a, int b)
KADDH (Signed Addition with Q15 Saturation)
Definition: core_feature_dsp.h:1713
__RV_DSMALXDS
__STATIC_FORCEINLINE long long __RV_DSMALXDS(long long t, unsigned long long a, unsigned long long b)
DSMALXDS (Four Cross Signed 16x16 with 64-bit Add and Sub)
Definition: core_feature_dsp.h:22982
__RV_KSTSA32
__STATIC_FORCEINLINE unsigned long __RV_KSTSA32(unsigned long a, unsigned long b)
KSTSA32 (SIMD 32-bit Signed Saturating Straight Subtraction & Addition)
Definition: core_feature_dsp.h:15654
__RV_SCMPLT16
__STATIC_FORCEINLINE unsigned long __RV_SCMPLT16(unsigned long a, unsigned long b)
SCMPLT16 (SIMD 16-bit Signed Compare Less Than)
Definition: core_feature_dsp.h:7173
__RV_KCRSA32
__STATIC_FORCEINLINE unsigned long __RV_KCRSA32(unsigned long a, unsigned long b)
KCRSA32 (SIMD 32-bit Signed Saturating Cross Subtraction & Addition)
Definition: core_feature_dsp.h:14101
__RV_SMAL
__STATIC_FORCEINLINE long long __RV_SMAL(long long a, unsigned long b)
SMAL (Signed Multiply Halfs & Add 64-bit)
Definition: core_feature_dsp.h:7397
__RV_URADD16
__STATIC_FORCEINLINE unsigned long __RV_URADD16(unsigned long a, unsigned long b)
URADD16 (SIMD 16-bit Unsigned Halving Addition)
Definition: core_feature_dsp.h:12941
__RV_SMDS32
__STATIC_FORCEINLINE long __RV_SMDS32(unsigned long a, unsigned long b)
SMDS32 (Signed Multiply Two Words and Subtract)
Definition: core_feature_dsp.h:16490
__RV_ADD32
__STATIC_FORCEINLINE unsigned long __RV_ADD32(unsigned long a, unsigned long b)
ADD32 (SIMD 32-bit Addition)
Definition: core_feature_dsp.h:13809
__RV_SMIN8
__STATIC_FORCEINLINE unsigned long __RV_SMIN8(unsigned long a, unsigned long b)
SMIN8 (SIMD 8-bit Signed Minimum)
Definition: core_feature_dsp.h:8675
__RV_KMADRS32
__STATIC_FORCEINLINE long __RV_KMADRS32(long t, unsigned long a, unsigned long b)
KMADRS32 (Saturating Signed Multiply Two Words & Reverse Subtract & Add)
Definition: core_feature_dsp.h:15142
__RV_DSTSA32
__STATIC_FORCEINLINE unsigned long long __RV_DSTSA32(unsigned long long a, unsigned long long b)
DSTSA32 (32-bit Straight Subtraction & Addition)
Definition: core_feature_dsp.h:21061
__RV_UKSUB64
__STATIC_FORCEINLINE unsigned long long __RV_UKSUB64(unsigned long long a, unsigned long long b)
UKSUB64 (64-bit Unsigned Saturating Subtraction)
Definition: core_feature_dsp.h:12113
__RV_DKSLRA32
__STATIC_FORCEINLINE unsigned long long __RV_DKSLRA32(unsigned long long a, int b)
DKSLRA32 (64-bit SIMD 32-bit Shift Left Logical with Saturation or Shift Right Arithmetic)
Definition: core_feature_dsp.h:19090
__RV_RSTSA16
__STATIC_FORCEINLINE unsigned long __RV_RSTSA16(unsigned long a, unsigned long b)
RSTSA16 (SIMD 16-bit Signed Halving Straight Subtraction & Addition)
Definition: core_feature_dsp.h:6672
__RV_KMSXDA
__STATIC_FORCEINLINE long __RV_KMSXDA(long t, unsigned long a, unsigned long b)
KMSXDA (SIMD Saturating Signed Crossed Multiply Two Halfs & Add & Subtract)
Definition: core_feature_dsp.h:4455
__RV_KMMSB_U
__STATIC_FORCEINLINE long __RV_KMMSB_U(long t, long a, long b)
KMMSB.u (SIMD Saturating MSW Signed Multiply Word and Subtraction with Rounding)
Definition: core_feature_dsp.h:4107