#ifndef __CORE_COMPATIABLE_H__
#define __CORE_COMPATIABLE_H__

#include "core_feature_base.h"
#define __ISB()                 __RWMB()
#define __DSB()                 __RWMB()
#define __DMB()                 __RWMB()
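/* Illustrative use in code ported from an Arm Cortex-M project
 * (shared_buf, data and flag are made-up names):
 *     shared_buf[0] = data;   // write the payload
 *     __DMB();                // becomes a full read/write fence (__RWMB) on this core
 *     flag = 1U;              // then publish the ready flag
 */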
#define __LDRBT(ptr)            __LB((ptr))
#define __LDRHT(ptr)            __LH((ptr))
#define __LDRT(ptr)             __LW((ptr))
#define __STRBT(val, ptr)       __SB((ptr), (val))
#define __STRHT(val, ptr)       __SH((ptr), (val))
#define __STRT(val, ptr)        __SW((ptr), (val))
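/* Illustrative use, assuming a memory-mapped register at a made-up address;
 * RISC-V has no unprivileged-access variants, so these simply become the plain
 * load/store intrinsics:
 *     volatile uint32_t *reg = (volatile uint32_t *)0x40000000UL;
 *     uint32_t v = __LDRT(reg);    // plain 32-bit load (__LW)
 *     __STRT(v | 0x1U, reg);       // plain 32-bit store (__SW)
 */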
#if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1)
#define __SSAT(val, sat)        __RV_SCLIP32((val), (sat-1))
#else
__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
    if ((sat >= 1U) && (sat <= 32U)) {
        const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
        const int32_t min = -1 - max;
        if (val > max) {
            return max;
        } else if (val < min) {
            return min;
        }
    }
    return val;
}
#endif
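/* Illustrative results (the SCLIP32 form and the C fallback agree):
 *     __SSAT(300, 8)  -> 127     (clamped to the 8-bit signed maximum)
 *     __SSAT(-300, 8) -> -128    (clamped to the 8-bit signed minimum)
 *     __SSAT(100, 8)  -> 100     (already in range, returned unchanged)
 */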
#if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1)
#define __USAT(val, sat)        __RV_UCLIP32((val), (sat))
#else
__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
{
    if (sat <= 31U) {
        const uint32_t max = ((1U << sat) - 1U);
        if (val > (int32_t)max) {
            return max;
        } else if (val < 0) {
            return 0U;
        }
    }
    return (uint32_t)val;
}
#endif
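/* Illustrative results:
 *     __USAT(300, 8) -> 255      (clamped to the 8-bit unsigned maximum)
 *     __USAT(-5, 8)  -> 0        (negative values clamp to zero)
 *     __USAT(100, 8) -> 100      (already in range, returned unchanged)
 */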
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
    uint32_t result;

    result = ((value & 0xff000000) >> 24)
           | ((value & 0x00ff0000) >> 8)
           | ((value & 0x0000ff00) << 8)
           | ((value & 0x000000ff) << 24);
    return result;
}
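/* Illustrative result: __REV(0x12345678U) -> 0x78563412U (all four bytes reversed). */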
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
    uint32_t result;

    result = ((value & 0xff000000) >> 8)
           | ((value & 0x00ff0000) << 8)
           | ((value & 0x0000ff00) >> 8)
           | ((value & 0x000000ff) << 8);
    return result;
}
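/* Illustrative result: __REV16(0x12345678U) -> 0x34127856U
 * (bytes are swapped within each 16-bit halfword, not across the whole word). */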
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
    return (int16_t)(((value & 0xff00) >> 8) | ((value & 0x00ff) << 8));
}
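/* Illustrative result: __REVSH((int16_t)0x8001) -> (int16_t)0x0180
 * (the two bytes are swapped and the result is returned as a signed 16-bit value). */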
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
    return (op1 >> op2) | (op1 << (32U - op2));
}
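/* Illustrative result, assuming 1 <= op2 <= 31 (a rotate amount of 0 would
 * shift by 32 bits, which C leaves undefined):
 *     __ROR(0x80000001U, 1U) -> 0xC0000000U
 */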
__STATIC_FORCEINLINE uint64_t __ROR64(uint64_t op1, uint32_t op2)
{
    uint32_t tmp1 = (uint32_t)op1;
    uint32_t tmp2 = (uint32_t)(op1 >> 32);
    return (uint64_t)((tmp1 >> op2) | (tmp1 << (32U - op2)))
         | ((uint64_t)((tmp2 >> op2) | (tmp2 << (32U - op2))) << 32);
}
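/* Illustrative result (again assuming 1 <= op2 <= 31): each 32-bit half is
 * rotated independently, so
 *     __ROR64(0x12345678ABCDEF01ULL, 4U) -> 0x812345671ABCDEF0ULL
 */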
#if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1)
#define __RBIT(value)           __RV_BITREVI((value), 31)

    uint32_t s = (4U * 8U) - 1U;
    for (value >>= 1U; value != 0U; value >>= 1U) {
        result |= value & 1U;
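/* Illustrative results (whichever variant is selected):
 *     __RBIT(0x00000001U) -> 0x80000000U
 *     __RBIT(0x12345678U) -> 0x1E6A2C48U
 */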
#if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1)
#define __CLZ(data)             __RV_CLZ32(data)

    uint32_t temp = ~data;
    while (temp & 0x80000000) {
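/* Illustrative results:
 *     __CLZ(0x00010000U) -> 15    (bit 16 is the highest set bit)
 *     __CLZ(0xFFFFFFFFU) -> 0
 */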
    unsigned long ret = 0;
    while (!(data & 1UL)) {
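/* Illustrative results for non-zero inputs:
 *     __CTZ(0x00010000UL) -> 16
 *     __CTZ(8UL)          -> 3
 */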
#if __RISCV_XLEN == 32
#define __EXPD_BYTE(x)          ((unsigned long)(((unsigned long)(x) << 0)  | \
                                                 ((unsigned long)(x) << 8)  | \
                                                 ((unsigned long)(x) << 16) | \
                                                 ((unsigned long)(x) << 24)))
#elif __RISCV_XLEN == 64
#define __EXPD_BYTE(x)          ((unsigned long)(((unsigned long)(x) << 0)  | \
                                                 ((unsigned long)(x) << 8)  | \
                                                 ((unsigned long)(x) << 16) | \
                                                 ((unsigned long)(x) << 24) | \
                                                 ((unsigned long)(x) << 32) | \
                                                 ((unsigned long)(x) << 40) | \
                                                 ((unsigned long)(x) << 48) | \
                                                 ((unsigned long)(x) << 56)))
#endif
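/* Illustrative result: __EXPD_BYTE(0xABU) replicates the byte across the native
 * word, giving 0xABABABABUL on RV32 and 0xABABABABABABABABUL on RV64. */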
#define __RBIT(value)
    Reverse the bit order of value.
__STATIC_FORCEINLINE unsigned long __CTZ(unsigned long data)
    Count trailing zeros.
#define __SSAT(val, sat)
    Signed Saturate.
__STATIC_FORCEINLINE uint64_t __ROR64(uint64_t op1, uint32_t op2)
    Rotate Right in uint32x2 value (64 bit).
#define __USAT(val, sat)
    Unsigned Saturate.
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
    Reverse byte order (16 bit).
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
    Rotate Right in unsigned value (32 bit).
#define __CLZ(data)
    Count leading zeros.
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
    Reverse byte order (16 bit) in a signed short value.
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
    Reverse byte order (32 bit).
#define __STATIC_FORCEINLINE
    Define a static function that should always be inlined by the compiler.