NMSIS-Core  Version 1.5.0
NMSIS-Core support for Nuclei processor-based devices
core_feature_base.h
1 /*
2  * Copyright (c) 2019 Nuclei Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 #ifndef __CORE_FEATURE_BASE__
20 #define __CORE_FEATURE_BASE__
/*
 * Core Base Feature Configuration Macros:
 * 1. __HARTID_OFFSET: Optional. Define this macro when the hartid of the
 *    first hart in your CPU system differs from its hart index.
 *    e.g. if the first hart has hartid 2 but hart index 0, set this macro to 2.
 */
31 #include <stdint.h>
32 
33 #ifdef __cplusplus
34  extern "C" {
35 #endif
36 
37 #include "nmsis_compiler.h"
38 
/* Determine XLEN: prefer the compiler-provided __riscv_xlen; default to 32
 * when the toolchain does not define it. */
#ifndef __RISCV_XLEN
#ifndef __riscv_xlen
#define __RISCV_XLEN 32
#else
#define __RISCV_XLEN __riscv_xlen
#endif
#endif /* __RISCV_XLEN */
53 
/** Type for CSR values: XLEN-wide unsigned integer (unsigned long on both RV32 and RV64). */
typedef unsigned long rv_csr_t;

/* 100-series cores expose 32-bit counters only; all other series use 64-bit counters. */
#if defined(CPU_SERIES) && CPU_SERIES == 100
typedef uint32_t rv_counter_t;
#else
typedef uint64_t rv_counter_t;
#endif
63  /* End of Doxygen Group NMSIS_Core_Registers */
/**
 * \brief Union to access the MISA CSR: one ISA-extension presence bit per
 *        letter (A..W visible here) via \c b, or the whole register via \c d.
 * NOTE(review): bits x..z and any MXL field appear to be elided in this chunk
 * of the file — confirm against the full header.
 */
typedef union {
    struct {
        rv_csr_t a:1;      /*!< bit 0: Atomic extension present */
        rv_csr_t b:1;
        rv_csr_t c:1;      /*!< bit 2: Compressed extension present */
        rv_csr_t d:1;      /*!< bit 3: Double-precision FP present */
        rv_csr_t e:1;
        rv_csr_t f:1;      /*!< bit 5: Single-precision FP present */
        rv_csr_t g:1;
        rv_csr_t h:1;
        rv_csr_t i:1;      /*!< bit 8: Base integer ISA */
        rv_csr_t j:1;
        rv_csr_t k:1;
        rv_csr_t l:1;
        rv_csr_t m:1;      /*!< bit 12: Integer multiply/divide present */
        rv_csr_t n:1;
        rv_csr_t o:1;
        rv_csr_t p:1;
        rv_csr_t q:1;
        rv_csr_t r:1;
        rv_csr_t s:1;      /*!< bit 18: Supervisor mode implemented */
        rv_csr_t t:1;
        rv_csr_t u:1;      /*!< bit 20: User mode implemented */
        rv_csr_t v:1;
        rv_csr_t w:1;
    } b;                   /*!< Bit-field view */
    rv_csr_t d;            /*!< Whole-register view */
} CSR_MISA_Type;
108 
112 typedef union {
113  struct {
135 #if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
136  rv_csr_t _reserved3:7;
137  rv_csr_t uxl:2;
138  rv_csr_t sxl:2;
139  rv_csr_t sbe:1;
140  rv_csr_t mbe:1;
141  rv_csr_t gva:1;
142  rv_csr_t mpv:1;
143  rv_csr_t _reserved4:1;
144  rv_csr_t mpelp:1;
145  rv_csr_t mdt:1;
146  rv_csr_t _reserved5:20;
147  rv_csr_t sd:1;
148 #else
151 #endif
152  } b;
155 
156 #if defined(__RISCV_XLEN) && __RISCV_XLEN == 32
160 typedef union {
161  struct {
171  } b;
174 #endif
175 
179 typedef union {
180  struct {
183  } b;
186 
190 typedef union {
191  struct {
199 #if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
200  rv_csr_t _reserved2:__RISCV_XLEN-32;
201 #endif
203  } b;
206 
210 typedef union {
211  struct {
216  } b;
219 
223 typedef union {
224  struct {
239  } b;
242 
246 typedef union {
247  struct {
250  } b;
253 
257 typedef union {
258  struct {
279  } b;
282 
285 
289 typedef union {
290  struct {
299  } b;
302 
306 typedef union {
307  struct {
329  } b;
332 
334 
338 typedef union {
339  struct {
349  } b;
352 
356 typedef union {
357  struct {
367  } b;
370 
372 
376 typedef union {
377  struct {
387  } b;
390 
392 
396 typedef union {
397  struct {
424  } b;
427 
429 
433 typedef union {
434  struct {
445  } b;
448 
450 
454 typedef union {
455  struct {
464  } b;
467 
469 
477 typedef union {
478  struct {
489  } b;
490  struct {
491  rv_csr_t set:4;
492  rv_csr_t way:3;
493  rv_csr_t lsize:3;
494  rv_csr_t ecc:1;
495  rv_csr_t napot:1;
496  rv_csr_t i_size:7;
497  rv_csr_t d_size:8;
499  rv_csr_t mapping:1;
500  } nb;
503 
505 
509 typedef union {
510  struct {
516  } b;
519 
521 
525 typedef union {
526  struct {
531  } b;
534 
536 
540 typedef union {
541  struct {
544  } b;
547 
549 
553 typedef union {
554  struct {
562 #if __RISCV_XLEN == 64
563  rv_csr_t _reserved3:__RISCV_XLEN-32;
564 #endif
565  } b;
568 
570 
574 typedef union {
575  struct {
588 #if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
589  rv_csr_t _reserved1:__RISCV_XLEN-32;
590 #endif
591  } b;
594 
598 typedef union {
599  struct {
611  } b;
614 
618 typedef union {
619  struct {
624  } b;
627 
631 typedef union {
632  struct {
637  } b;
640 
644 typedef union {
645  struct {
654  } b;
657 
658  /* End of Doxygen Group NMSIS_Core_Base_Registers */
660 
661 /* ########################### Core Function Access ########################### */
675 #ifndef __ASSEMBLER__
676 
677 #ifndef __ICCRISCV__
678 
/**
 * \brief Atomically swap a CSR with a new value (csrrw).
 * \param csr  CSR name or number token; stringified into the instruction
 * \param val  New value to write into the CSR
 * \return Previous value of the CSR
 */
#define __RV_CSR_SWAP(csr, val)                                 \
    ({                                                          \
        /* Fix: cast via rv_csr_t for consistency with the      \
         * other __RV_CSR_* macros (was a bare unsigned long,   \
         * which would diverge if rv_csr_t ever changes). */    \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrrw %0, " STRINGIFY(csr) ", %1"       \
                     : "=r"(__v)                                \
                     : "rK"(__v)                                \
                     : "memory");                               \
        __v;                                                    \
    })
698 
/**
 * \brief Read the given CSR (csrr) and return its current value.
 * \param csr  CSR name or number token; stringified into the instruction
 * \return Current CSR value as rv_csr_t
 */
#define __RV_CSR_READ(csr)                                      \
    ({                                                          \
        rv_csr_t __v;                                           \
        __ASM volatile("csrr %0, " STRINGIFY(csr)               \
                     : "=r"(__v)                                \
                     :                                          \
                     : "memory");                               \
        __v;                                                    \
    })
716 
/**
 * \brief Write a value to the given CSR (csrw); the old value is discarded.
 * \param csr  CSR name or number token; stringified into the instruction
 * \param val  Value to write
 */
#define __RV_CSR_WRITE(csr, val)                                \
    ({                                                          \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrw " STRINGIFY(csr) ", %0"            \
                     :                                          \
                     : "rK"(__v)                                \
                     : "memory");                               \
    })
733 
/**
 * \brief Read a CSR, then set the bits given in val (csrrs).
 * \param csr  CSR name or number token; stringified into the instruction
 * \param val  Bit mask of bits to set
 * \return CSR value before the bits were set
 */
#define __RV_CSR_READ_SET(csr, val)                             \
    ({                                                          \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrrs %0, " STRINGIFY(csr) ", %1"       \
                     : "=r"(__v)                                \
                     : "rK"(__v)                                \
                     : "memory");                               \
        __v;                                                    \
    })
753 
/**
 * \brief Set bits in the given CSR (csrs); old value is not returned.
 * \param csr  CSR name or number token; stringified into the instruction
 * \param val  Bit mask of bits to set
 */
#define __RV_CSR_SET(csr, val)                                  \
    ({                                                          \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrs " STRINGIFY(csr) ", %0"            \
                     :                                          \
                     : "rK"(__v)                                \
                     : "memory");                               \
    })
770 
/**
 * \brief Read a CSR, then clear the bits given in val (csrrc).
 * \param csr  CSR name or number token; stringified into the instruction
 * \param val  Bit mask of bits to clear
 * \return CSR value before the bits were cleared
 */
#define __RV_CSR_READ_CLEAR(csr, val)                           \
    ({                                                          \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrrc %0, " STRINGIFY(csr) ", %1"       \
                     : "=r"(__v)                                \
                     : "rK"(__v)                                \
                     : "memory");                               \
        __v;                                                    \
    })
790 
/**
 * \brief Clear bits in the given CSR (csrc); old value is not returned.
 * \param csr  CSR name or number token; stringified into the instruction
 * \param val  Bit mask of bits to clear
 */
#define __RV_CSR_CLEAR(csr, val)                                \
    ({                                                          \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrc " STRINGIFY(csr) ", %0"            \
                     :                                          \
                     : "rK"(__v)                                \
                     : "memory");                               \
    })
807 #else
808 
809 #include <intrinsics.h>
810 
811 #define __RV_CSR_SWAP __write_csr
812 #define __RV_CSR_READ __read_csr
813 #define __RV_CSR_WRITE __write_csr
814 #define __RV_CSR_READ_SET __set_bits_csr
815 #define __RV_CSR_SET __set_bits_csr
816 #define __RV_CSR_READ_CLEAR __clear_bits_csr
817 #define __RV_CSR_CLEAR __clear_bits_csr
818 
819 #endif /* __ICCRISCV__ */
820 
821 #endif /* __ASSEMBLER__ */
822 
833 #define __FENCE(p, s) __ASM volatile ("fence " #p "," #s : : : "memory")
834 
{ // NOTE(review): body of an instruction-fence helper (signature not visible in this chunk)
#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* 100-series cores: no fence.i emitted — presumably unnecessary there; confirm */
#else
    __ASM volatile("fence.i");
#endif
}
848 
/** Full read/write memory & IO barrier (fence iorw,iorw). */
#define __RWMB() __FENCE(iorw,iorw)

/** Read memory & IO barrier (fence ir,ir). */
#define __RMB() __FENCE(ir,ir)

/** Write memory & IO barrier (fence ow,ow). */
#define __WMB() __FENCE(ow,ow)

/** SMP read/write memory barrier, memory accesses only (fence rw,rw). */
#define __SMP_RWMB() __FENCE(rw,rw)

/** SMP read memory barrier (fence r,r). */
#define __SMP_RMB() __FENCE(r,r)

/** SMP write memory barrier (fence w,w). */
#define __SMP_WMB() __FENCE(w,w)

/** Compiler-only barrier for spin loops; emits no instruction. */
#define __CPU_RELAX() __ASM volatile ("" : : : "memory")
869 
878 __STATIC_INLINE void __switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
879 {
880  unsigned long val = 0;
881 
882  /* Set MPP to the requested privilege mode */
883  val = __RV_CSR_READ(CSR_MSTATUS);
884  val = __RV_INSERT_FIELD(val, MSTATUS_MPP, mode);
885 
886  /* Set previous MIE disabled */
887  val = __RV_INSERT_FIELD(val, MSTATUS_MPIE, 0);
888 
890 
891  /* Set the entry point in MEPC */
892  __RV_CSR_WRITE(CSR_MEPC, (unsigned long)entry_point);
893 
894  /* Set the register file */
895  __ASM volatile("mv sp, %0" ::"r"(stack));
896 
897  __ASM volatile("mret");
898 }
899 
908 __STATIC_INLINE void __s_switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
909 {
910  unsigned long val = 0;
911 
912  /* Set SPP to the requested privilege mode */
913  val = __RV_CSR_READ(CSR_SSTATUS);
914  val = __RV_INSERT_FIELD(val, SSTATUS_SPP, mode);
915 
916  /* Set previous SIE disabled */
917  val = __RV_INSERT_FIELD(val, SSTATUS_SPIE, 0);
918 
920 
921  /* Set the entry point in SEPC */
922  __RV_CSR_WRITE(CSR_SEPC, (unsigned long)entry_point);
923 
924  /* Set the register file */
925  __ASM volatile("mv sp, %0" ::"r"(stack));
926 
927  __ASM volatile("sret");
928 }
929 
937 {
939 }
940 
948 {
950 }
951 
959 {
961 }
962 
970 {
972 }
973 
981 {
983 }
984 
992 {
994 }
995 
1003 {
1005 }
1006 
1014 {
1016 }
1017 
{ // NOTE(review): disables machine interrupt `irq` by clearing its MIE bit (signature not visible in this chunk)
    __RV_CSR_CLEAR(CSR_MIE, 1UL << irq);
}

{ // NOTE(review): enables machine interrupt `irq` by setting its MIE bit (signature not visible)
    __RV_CSR_SET(CSR_MIE, 1UL << irq);
}

{ // NOTE(review): returns 1 if machine interrupt `irq` is pending in MIP, else 0 (signature not visible)
    return ((__RV_CSR_READ(CSR_MIP) >> irq) & 0x1);
}

{ // NOTE(review): clears the pending bit of machine interrupt `irq` in MIP (signature not visible)
    __RV_CSR_CLEAR(CSR_MIP, 1UL << irq);
}
1061 
1069 {
1071 }
1072 
1080 {
1082 }
1083 
1091 {
1093 }
1094 
1102 {
1104 }
1105 
1113 {
1115 }
1116 
1124 {
1126 }
1127 
1135 {
1137 }
1138 
1146 {
1148 }
1149 
{ // NOTE(review): disables supervisor interrupt `irq` by clearing its SIE bit (signature not visible in this chunk)
    __RV_CSR_CLEAR(CSR_SIE, 1UL << irq);
}

{ // NOTE(review): enables supervisor interrupt `irq` by setting its SIE bit (signature not visible)
    __RV_CSR_SET(CSR_SIE, 1UL << irq);
}

{ // NOTE(review): returns 1 if supervisor interrupt `irq` is pending in SIP, else 0 (signature not visible)
    return ((__RV_CSR_READ(CSR_SIP) >> irq) & 0x1);
}

{ // NOTE(review): clears the pending bit of supervisor interrupt `irq` in SIP (signature not visible)
    __RV_CSR_CLEAR(CSR_SIP, 1UL << irq);
}
1193 
{ // NOTE(review): body of the 64-bit machine cycle-counter read helper (signature not visible in this chunk)
    __RWMB(); // Make sure previous memory and io operation finished
#if __RISCV_XLEN == 32

#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* 100-series: counters are 32-bit, a single CSR read suffices */
    return __RV_CSR_READ(CSR_MCYCLE);
#else
    volatile uint32_t high0, low, high;
    uint64_t full;

    /* Read high, low, high: if the high word changed in between, the low
     * word wrapped during the sequence, so re-read low for a consistent pair. */
    high0 = __RV_CSR_READ(CSR_MCYCLEH);
    low = __RV_CSR_READ(CSR_MCYCLE);
    high = __RV_CSR_READ(CSR_MCYCLEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MCYCLE);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#endif

#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#endif
}
1227 
1234 {
1235 #if __RISCV_XLEN == 32
1236 #if defined(CPU_SERIES) && CPU_SERIES == 100
1237  __RV_CSR_WRITE(CSR_MCYCLE, (uint32_t)(cycle));
1238 #else
1239  __RV_CSR_WRITE(CSR_MCYCLE, 0); // prevent carry
1240  __RV_CSR_WRITE(CSR_MCYCLEH, (uint32_t)(cycle >> 32));
1241  __RV_CSR_WRITE(CSR_MCYCLE, (uint32_t)(cycle));
1242 #endif
1243 #elif __RISCV_XLEN == 64
1244  __RV_CSR_WRITE(CSR_MCYCLE, cycle);
1245 #else // TODO Need cover for XLEN=128 case in future
1246 #endif
1247 }
1248 
{ // NOTE(review): body of the 64-bit retired-instruction counter read helper (signature not visible in this chunk)
    __RWMB(); // Make sure previous memory and io operation finished
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* 100-series: counters are 32-bit, a single CSR read suffices */
    return __RV_CSR_READ(CSR_MINSTRET);
#else
    volatile uint32_t high0, low, high;
    uint64_t full;

    /* Read high, low, high: re-read low if the high word changed mid-sequence */
    high0 = __RV_CSR_READ(CSR_MINSTRETH);
    low = __RV_CSR_READ(CSR_MINSTRET);
    high = __RV_CSR_READ(CSR_MINSTRETH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MINSTRET);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#endif
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#endif
}
1280 
{ // NOTE(review): body of the retired-instruction counter write helper taking `instret` (signature not visible in this chunk)
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    __RV_CSR_WRITE(CSR_MINSTRET, (uint32_t)(instret));
#else
    /* Zero the low half first so an increment cannot carry into MINSTRETH
     * between the two halves being written. */
    __RV_CSR_WRITE(CSR_MINSTRET, 0); // prevent carry
    __RV_CSR_WRITE(CSR_MINSTRETH, (uint32_t)(instret >> 32));
    __RV_CSR_WRITE(CSR_MINSTRET, (uint32_t)(instret));
#endif
#elif __RISCV_XLEN == 64
    __RV_CSR_WRITE(CSR_MINSTRET, instret);
#else // TODO Need cover for XLEN=128 case in future
#endif
}
1301 
{ // NOTE(review): body of the 64-bit system timer read helper (signature not visible in this chunk)
    __RWMB(); // Make sure previous memory and io operation finished
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    // NOTE: when CSR_MIRGB_INFO CSR exist and not zero, it means eclic and systimer present
    if (__RV_CSR_READ(CSR_MIRGB_INFO) == 0) {
        return __RV_CSR_READ(CSR_MTIME);
    }
#if defined(__SYSTIMER_PRESENT) && (__SYSTIMER_PRESENT == 1)
    /* Read the memory-mapped system timer directly */
    return *(uint32_t *) (__SYSTIMER_BASEADDR);
#else
    return 0;
#endif
#else
    volatile uint32_t high0, low, high;
    uint64_t full;

    /* Read high, low, high: re-read low if the high word changed mid-sequence */
    high0 = __RV_CSR_READ(CSR_TIMEH);
    low = __RV_CSR_READ(CSR_TIME);
    high = __RV_CSR_READ(CSR_TIMEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_TIME);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#endif
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#endif
}
1342 
{ // NOTE(review): reads the unprivileged CYCLE CSR (signature not visible in this chunk)
    __RWMB(); // Make sure previous memory and io operation finished
    return __RV_CSR_READ(CSR_CYCLE);
}

{ // NOTE(review): reads the unprivileged INSTRET CSR (signature not visible)
    __RWMB(); // Make sure previous memory and io operation finished
    return __RV_CSR_READ(CSR_INSTRET);
}

{ // NOTE(review): reads the unprivileged TIME CSR (signature not visible)
    __RWMB(); // Make sure previous memory and io operation finished
    return __RV_CSR_READ(CSR_TIME);
}
1384 
{ // NOTE(review): extracts a cluster/group id from MHARTID (signature not visible in this chunk)
    unsigned long id;

    /* Bits [15:8] of mhartid — presumably the cluster id on Nuclei
     * multi-cluster systems; confirm against the CSR layout docs. */
    id = (__RV_CSR_READ(CSR_MHARTID) >> 8) & 0xFF;
    return id;
}
1399 
{ // NOTE(review): returns the zero-based hart index (signature not visible in this chunk)
    unsigned long id;
#ifdef __HARTID_OFFSET
    /* The first hart's hartid may be non-zero; subtract the offset so the
     * result is a zero-based hart index (see __HARTID_OFFSET note at file top). */
    id = __RV_CSR_READ(CSR_MHARTID) - __HARTID_OFFSET;
#else
    id = __RV_CSR_READ(CSR_MHARTID);
#endif
    return id;
}
1418 
{ // NOTE(review): returns the raw hartid from CSR_MHARTID (signature not visible in this chunk)
    unsigned long id;
    id = __RV_CSR_READ(CSR_MHARTID);
    return id;
}
1434 
1435 
{ // NOTE(review): S-mode variant — extracts a cluster/group id from SHARTID (signature not visible in this chunk)
    unsigned long id;

    /* Bits [15:8] of shartid — presumably the cluster id; confirm against docs. */
    id = (__RV_CSR_READ(CSR_SHARTID) >> 8) & 0xFF;
    return id;
}
1451 
{ // NOTE(review): S-mode variant — returns the zero-based hart index from SHARTID (signature not visible in this chunk)
    unsigned long id;
#ifdef __HARTID_OFFSET
    /* Subtract the configured offset so the result is zero-based */
    id = __RV_CSR_READ(CSR_SHARTID) - __HARTID_OFFSET;
#else
    id = __RV_CSR_READ(CSR_SHARTID);
#endif
    return id;
}
1471 
{ // NOTE(review): returns the raw hartid from CSR_SHARTID (signature not visible in this chunk)
    unsigned long id;
    id = __RV_CSR_READ(CSR_SHARTID);
    return id;
}
1488  /* End of Doxygen Group NMSIS_Core_CSR_Register_Access */
1490 
1491 /* ########################### CPU Intrinsic Functions ########################### */
{ // NOTE(review): executes a NOP instruction (signature not visible in this chunk)
    __ASM volatile("nop");
}

{ // NOTE(review): waits for interrupt via WFI (signature not visible)
    __ASM volatile("wfi");
}

{ // NOTE(review): also emits WFI — presumably the wait-for-event variant, with its setup lines elided in this chunk; confirm
    __ASM volatile("wfi");
}

{ // NOTE(review): triggers a breakpoint exception (signature not visible)
    __ASM volatile("ebreak");
}

{ // NOTE(review): triggers an environment-call exception (signature not visible)
    __ASM volatile("ecall");
}
1564 
1568 typedef enum WFI_SleepMode {
1570  WFI_DEEP_SLEEP = 1
1572 
1581 {
1583 }
1584 
{ // NOTE(review): sends a TX event by setting bit 0 of CSR_TXEVT (signature not visible in this chunk)
    __RV_CSR_SET(CSR_TXEVT, 0x1);
}
1595 
1602 {
1604 }
1605 
1612 {
1614 }
1615 
1622 {
1624 }
1625 
1632 {
1634 }
1635 
{ // NOTE(review): starts counter `idx` by clearing its bit in MCOUNTINHIBIT (signature not visible in this chunk)
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, (1UL << idx));
}

{ // NOTE(review): stops counter `idx` by setting its bit in MCOUNTINHIBIT (signature not visible)
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, (1UL << idx));
}
1657 
1666 {
1668 }
1669 
1678 {
1680 }
1681 
{ // NOTE(review): starts all hardware counters by clearing every MCOUNTINHIBIT bit (signature not visible in this chunk)
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
}

{ // NOTE(review): stops all hardware counters by setting every MCOUNTINHIBIT bit (signature not visible)
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
}
1703 
/**
 * \brief Program the event selector of machine HPM counter \p idx.
 * \param idx    HPM counter index; valid range 3..31, other values are silently ignored
 * \param event  Event selector value written to the matching CSR_MHPMEVENTx
 * \details CSR addresses must be instruction immediates, so a per-index
 *          switch is required instead of computed addressing.
 */
__STATIC_INLINE void __set_hpm_event(unsigned long idx, unsigned long event)
{
    switch (idx) {
        case 3: __RV_CSR_WRITE(CSR_MHPMEVENT3, event); break;
        case 4: __RV_CSR_WRITE(CSR_MHPMEVENT4, event); break;
        case 5: __RV_CSR_WRITE(CSR_MHPMEVENT5, event); break;
        case 6: __RV_CSR_WRITE(CSR_MHPMEVENT6, event); break;
        case 7: __RV_CSR_WRITE(CSR_MHPMEVENT7, event); break;
        case 8: __RV_CSR_WRITE(CSR_MHPMEVENT8, event); break;
        case 9: __RV_CSR_WRITE(CSR_MHPMEVENT9, event); break;
        case 10: __RV_CSR_WRITE(CSR_MHPMEVENT10, event); break;
        case 11: __RV_CSR_WRITE(CSR_MHPMEVENT11, event); break;
        case 12: __RV_CSR_WRITE(CSR_MHPMEVENT12, event); break;
        case 13: __RV_CSR_WRITE(CSR_MHPMEVENT13, event); break;
        case 14: __RV_CSR_WRITE(CSR_MHPMEVENT14, event); break;
        case 15: __RV_CSR_WRITE(CSR_MHPMEVENT15, event); break;
        case 16: __RV_CSR_WRITE(CSR_MHPMEVENT16, event); break;
        case 17: __RV_CSR_WRITE(CSR_MHPMEVENT17, event); break;
        case 18: __RV_CSR_WRITE(CSR_MHPMEVENT18, event); break;
        case 19: __RV_CSR_WRITE(CSR_MHPMEVENT19, event); break;
        case 20: __RV_CSR_WRITE(CSR_MHPMEVENT20, event); break;
        case 21: __RV_CSR_WRITE(CSR_MHPMEVENT21, event); break;
        case 22: __RV_CSR_WRITE(CSR_MHPMEVENT22, event); break;
        case 23: __RV_CSR_WRITE(CSR_MHPMEVENT23, event); break;
        case 24: __RV_CSR_WRITE(CSR_MHPMEVENT24, event); break;
        case 25: __RV_CSR_WRITE(CSR_MHPMEVENT25, event); break;
        case 26: __RV_CSR_WRITE(CSR_MHPMEVENT26, event); break;
        case 27: __RV_CSR_WRITE(CSR_MHPMEVENT27, event); break;
        case 28: __RV_CSR_WRITE(CSR_MHPMEVENT28, event); break;
        case 29: __RV_CSR_WRITE(CSR_MHPMEVENT29, event); break;
        case 30: __RV_CSR_WRITE(CSR_MHPMEVENT30, event); break;
        case 31: __RV_CSR_WRITE(CSR_MHPMEVENT31, event); break;
        default: break;
    }
}
1746 
/**
 * \brief Read the event selector of machine HPM counter \p idx.
 * \param idx  HPM counter index; valid range 3..31
 * \return Current CSR_MHPMEVENTx value, or 0 for an out-of-range index
 * \details CSR addresses must be instruction immediates, hence the switch.
 */
__STATIC_INLINE unsigned long __get_hpm_event(unsigned long idx)
{
    switch (idx) {
        case 3: return __RV_CSR_READ(CSR_MHPMEVENT3);
        case 4: return __RV_CSR_READ(CSR_MHPMEVENT4);
        case 5: return __RV_CSR_READ(CSR_MHPMEVENT5);
        case 6: return __RV_CSR_READ(CSR_MHPMEVENT6);
        case 7: return __RV_CSR_READ(CSR_MHPMEVENT7);
        case 8: return __RV_CSR_READ(CSR_MHPMEVENT8);
        case 9: return __RV_CSR_READ(CSR_MHPMEVENT9);
        case 10: return __RV_CSR_READ(CSR_MHPMEVENT10);
        case 11: return __RV_CSR_READ(CSR_MHPMEVENT11);
        case 12: return __RV_CSR_READ(CSR_MHPMEVENT12);
        case 13: return __RV_CSR_READ(CSR_MHPMEVENT13);
        case 14: return __RV_CSR_READ(CSR_MHPMEVENT14);
        case 15: return __RV_CSR_READ(CSR_MHPMEVENT15);
        case 16: return __RV_CSR_READ(CSR_MHPMEVENT16);
        case 17: return __RV_CSR_READ(CSR_MHPMEVENT17);
        case 18: return __RV_CSR_READ(CSR_MHPMEVENT18);
        case 19: return __RV_CSR_READ(CSR_MHPMEVENT19);
        case 20: return __RV_CSR_READ(CSR_MHPMEVENT20);
        case 21: return __RV_CSR_READ(CSR_MHPMEVENT21);
        case 22: return __RV_CSR_READ(CSR_MHPMEVENT22);
        case 23: return __RV_CSR_READ(CSR_MHPMEVENT23);
        case 24: return __RV_CSR_READ(CSR_MHPMEVENT24);
        case 25: return __RV_CSR_READ(CSR_MHPMEVENT25);
        case 26: return __RV_CSR_READ(CSR_MHPMEVENT26);
        case 27: return __RV_CSR_READ(CSR_MHPMEVENT27);
        case 28: return __RV_CSR_READ(CSR_MHPMEVENT28);
        case 29: return __RV_CSR_READ(CSR_MHPMEVENT29);
        case 30: return __RV_CSR_READ(CSR_MHPMEVENT30);
        case 31: return __RV_CSR_READ(CSR_MHPMEVENT31);
        default: return 0;
    }
}
1790 
/**
 * \brief Set machine HPM counter \p idx to a 64-bit value.
 * \param idx    HPM counter index; valid range 3..31, other values are silently ignored
 * \param value  64-bit counter value to program
 * \details On RV32 each counter is split across MHPMCOUNTERx / MHPMCOUNTERxH;
 *          the low half is zeroed first so an increment cannot carry into the
 *          high half between the two writes. CSR addresses must be instruction
 *          immediates, hence the per-index switch.
 */
__STATIC_INLINE void __set_hpm_counter(unsigned long idx, uint64_t value)
{
    switch (idx) {
#if __RISCV_XLEN == 32
        case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER3H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (uint32_t)(value)); break;
        case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER4H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (uint32_t)(value)); break;
        case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER5H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (uint32_t)(value)); break;
        case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER6H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (uint32_t)(value)); break;
        case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER7H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (uint32_t)(value)); break;
        case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER8H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (uint32_t)(value)); break;
        case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER9H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (uint32_t)(value)); break;
        case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER10H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (uint32_t)(value)); break;
        case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER11H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (uint32_t)(value)); break;
        case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER12H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (uint32_t)(value)); break;
        case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER13H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (uint32_t)(value)); break;
        case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER14H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (uint32_t)(value)); break;
        case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER15H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (uint32_t)(value)); break;
        case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER16H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (uint32_t)(value)); break;
        case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER17H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (uint32_t)(value)); break;
        case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER18H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (uint32_t)(value)); break;
        case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER19H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (uint32_t)(value)); break;
        case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER20H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (uint32_t)(value)); break;
        case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER21H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (uint32_t)(value)); break;
        case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER22H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (uint32_t)(value)); break;
        case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER23H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (uint32_t)(value)); break;
        case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER24H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (uint32_t)(value)); break;
        case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER25H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (uint32_t)(value)); break;
        case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER26H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (uint32_t)(value)); break;
        case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER27H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (uint32_t)(value)); break;
        case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER28H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (uint32_t)(value)); break;
        case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER29H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (uint32_t)(value)); break;
        case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER30H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (uint32_t)(value)); break;
        case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER31H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (uint32_t)(value)); break;

#elif __RISCV_XLEN == 64
        /* RV64: the full 64-bit value fits in one CSR write */
        case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (value)); break;
        case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (value)); break;
        case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (value)); break;
        case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (value)); break;
        case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (value)); break;
        case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (value)); break;
        case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (value)); break;
        case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (value)); break;
        case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (value)); break;
        case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (value)); break;
        case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (value)); break;
        case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (value)); break;
        case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (value)); break;
        case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (value)); break;
        case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (value)); break;
        case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (value)); break;
        case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (value)); break;
        case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (value)); break;
        case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (value)); break;
        case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (value)); break;
        case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (value)); break;
        case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (value)); break;
        case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (value)); break;
        case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (value)); break;
        case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (value)); break;
        case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (value)); break;
        case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (value)); break;
        case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (value)); break;
        case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (value)); break;

#else
#endif
        default: break;
    }
}
1926 
1934 __STATIC_INLINE uint64_t __get_hpm_counter(unsigned long idx)
1935 {
1936  __RWMB(); // Make sure previous memory and io operation finished
1937 #if __RISCV_XLEN == 32
1938  volatile uint32_t high0, low, high;
1939  uint64_t full;
1940 
1941  switch (idx) {
1942  case 0: return __get_rv_cycle();
1943  case 2: return __get_rv_instret();
1944  case 3: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER3H);
1947  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER3); }
1948  full = (((uint64_t)high) << 32) | low; return full;
1949  case 4: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER4H);
1952  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER4); }
1953  full = (((uint64_t)high) << 32) | low; return full;
1954  case 5: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER5H);
1957  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER5); }
1958  full = (((uint64_t)high) << 32) | low; return full;
1959  case 6: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER6H);
1962  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER6); }
1963  full = (((uint64_t)high) << 32) | low; return full;
1964  case 7: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER7H);
1967  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER7); }
1968  full = (((uint64_t)high) << 32) | low; return full;
1969  case 8: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER8H);
1972  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER8); }
1973  full = (((uint64_t)high) << 32) | low; return full;
1974  case 9: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER9H);
1977  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER9); }
1978  full = (((uint64_t)high) << 32) | low; return full;
1979  case 10: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER10H);
1982  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER10); }
1983  full = (((uint64_t)high) << 32) | low; return full;
1984  case 11: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER11H);
1987  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER11); }
1988  full = (((uint64_t)high) << 32) | low; return full;
1989  case 12: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER12H);
1992  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER12); }
1993  full = (((uint64_t)high) << 32) | low; return full;
1994  case 13: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER13H);
1997  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER13); }
1998  full = (((uint64_t)high) << 32) | low; return full;
1999  case 14: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER14H);
2002  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER14); }
2003  full = (((uint64_t)high) << 32) | low; return full;
2004  case 15: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER15H);
2007  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER15); }
2008  full = (((uint64_t)high) << 32) | low; return full;
2009  case 16: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER16H);
2012  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER16); }
2013  full = (((uint64_t)high) << 32) | low; return full;
2014  case 17: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER17H);
2017  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER17); }
2018  full = (((uint64_t)high) << 32) | low; return full;
2019  case 18: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER18H);
2022  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER18); }
2023  full = (((uint64_t)high) << 32) | low; return full;
2024  case 19: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER19H);
2027  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER19); }
2028  full = (((uint64_t)high) << 32) | low; return full;
2029  case 20: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER20H);
2032  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER20); }
2033  full = (((uint64_t)high) << 32) | low; return full;
2034  case 21: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER21H);
2037  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER21); }
2038  full = (((uint64_t)high) << 32) | low; return full;
2039  case 22: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER22H);
2042  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER22); }
2043  full = (((uint64_t)high) << 32) | low; return full;
2044  case 23: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER23H);
2047  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER23); }
2048  full = (((uint64_t)high) << 32) | low; return full;
2049  case 24: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER24H);
2052  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER24); }
2053  full = (((uint64_t)high) << 32) | low; return full;
2054  case 25: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER25H);
2057  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER25); }
2058  full = (((uint64_t)high) << 32) | low; return full;
2059  case 26: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER26H);
2062  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER26); }
2063  full = (((uint64_t)high) << 32) | low; return full;
2064  case 27: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER27H);
2067  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER27); }
2068  full = (((uint64_t)high) << 32) | low; return full;
2069  case 28: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER28H);
2072  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER28); }
2073  full = (((uint64_t)high) << 32) | low; return full;
2074  case 29: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER29H);
2077  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER29); }
2078  full = (((uint64_t)high) << 32) | low; return full;
2079  case 30: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER30H);
2082  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER30); }
2083  full = (((uint64_t)high) << 32) | low; return full;
2084  case 31: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER31H);
2087  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER31); }
2088  full = (((uint64_t)high) << 32) | low; return full;
2089 
2090 #elif __RISCV_XLEN == 64
2091  switch (idx) {
2092  case 0: return __get_rv_cycle();
2093  case 2: return __get_rv_instret();
2094  case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
2095  case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
2096  case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
2097  case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
2098  case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
2099  case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
2100  case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
2101  case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
2102  case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
2103  case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
2104  case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
2105  case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
2106  case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
2107  case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
2108  case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
2109  case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
2110  case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
2111  case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
2112  case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
2113  case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
2114  case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
2115  case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
2116  case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
2117  case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
2118  case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
2119  case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
2120  case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
2121  case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
2122  case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
2123 
2124 #else
2125  switch (idx) {
2126 #endif
2127  default: return 0;
2128  }
2129 }
2130 
2139 __STATIC_INLINE unsigned long __read_hpm_counter(unsigned long idx)
2140 {
2141  switch (idx) {
2142  case 0: return __read_cycle_csr();
2143  case 2: return __read_instret_csr();
2144  case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
2145  case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
2146  case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
2147  case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
2148  case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
2149  case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
2150  case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
2151  case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
2152  case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
2153  case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
2154  case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
2155  case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
2156  case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
2157  case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
2158  case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
2159  case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
2160  case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
2161  case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
2162  case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
2163  case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
2164  case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
2165  case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
2166  case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
2167  case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
2168  case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
2169  case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
2170  case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
2171  case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
2172  case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
2173  default: return 0;
2174  }
2175 }
2176 
2184 __STATIC_FORCEINLINE void __set_medeleg(unsigned long mask)
2185 {
2186  __RV_CSR_WRITE(CSR_MEDELEG, mask);
2187 }
2188 
2196 __STATIC_FORCEINLINE void __set_mideleg(unsigned long mask)
2197 {
2198  __RV_CSR_WRITE(CSR_MIDELEG, mask);
2199 }
2200 
2201 /* ===== Load/Store Operations ===== */
2208 __STATIC_FORCEINLINE uint8_t __LB(volatile void *addr)
2209 {
2210  uint8_t result;
2211 
2212  __ASM volatile ("lb %0, 0(%1)" : "=r" (result) : "r" (addr));
2213  return result;
2214 }
2215 
2222 __STATIC_FORCEINLINE uint16_t __LH(volatile void *addr)
2223 {
2224  uint16_t result;
2225 
2226  __ASM volatile ("lh %0, 0(%1)" : "=r" (result) : "r" (addr));
2227  return result;
2228 }
2229 
2236 __STATIC_FORCEINLINE uint32_t __LW(volatile void *addr)
2237 {
2238  uint32_t result;
2239 
2240  __ASM volatile ("lw %0, 0(%1)" : "=r" (result) : "r" (addr));
2241  return result;
2242 }
2243 
2244 #if (__RISCV_XLEN != 32) || defined(__riscv_zilsd)
2252 __STATIC_FORCEINLINE uint64_t __LD(volatile void *addr)
2253 {
2254  uint64_t result;
2255  __ASM volatile ("ld %0, 0(%1)" : "=r" (result) : "r" (addr));
2256  return result;
2257 }
2258 #endif
2259 
2266 __STATIC_FORCEINLINE void __SB(volatile void *addr, uint8_t val)
2267 {
2268  __ASM volatile ("sb %0, 0(%1)" : : "r" (val), "r" (addr));
2269 }
2270 
2277 __STATIC_FORCEINLINE void __SH(volatile void *addr, uint16_t val)
2278 {
2279  __ASM volatile ("sh %0, 0(%1)" : : "r" (val), "r" (addr));
2280 }
2281 
2288 __STATIC_FORCEINLINE void __SW(volatile void *addr, uint32_t val)
2289 {
2290  __ASM volatile ("sw %0, 0(%1)" : : "r" (val), "r" (addr));
2291 }
2292 
2293 #if (__RISCV_XLEN != 32) || defined(__riscv_zilsd)
2300 __STATIC_FORCEINLINE void __SD(volatile void *addr, uint64_t val)
2301 {
2302  __ASM volatile ("sd %0, 0(%1)" : : "r" (val), "r" (addr));
2303 }
2304 #endif
2305 
2317 __STATIC_INLINE uint32_t __CAS_W(volatile uint32_t *addr, uint32_t oldval, uint32_t newval)
2318 {
2319  uint32_t result;
2320  uint32_t rc;
2321 
2322  __ASM volatile ( \
2323  "0: lr.w %0, %2 \n" \
2324  " bne %0, %z3, 1f \n" \
2325  " sc.w %1, %z4, %2 \n" \
2326  " bnez %1, 0b \n" \
2327  "1:\n" \
2328  : "=&r"(result), "=&r"(rc), "+A"(*addr) \
2329  : "r"(oldval), "r"(newval) \
2330  : "memory");
2331  return result;
2332 }
2333 
2341 __STATIC_FORCEINLINE uint32_t __AMOSWAP_W(volatile uint32_t *addr, uint32_t newval)
2342 {
2343  uint32_t result;
2344 
2345  __ASM volatile ("amoswap.w %0, %2, %1" : \
2346  "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
2347  return result;
2348 }
2349 
2357 __STATIC_FORCEINLINE int32_t __AMOADD_W(volatile int32_t *addr, int32_t value)
2358 {
2359  int32_t result;
2360 
2361  __ASM volatile ("amoadd.w %0, %2, %1" : \
2362  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2363  return *addr;
2364 }
2365 
2373 __STATIC_FORCEINLINE int32_t __AMOAND_W(volatile int32_t *addr, int32_t value)
2374 {
2375  int32_t result;
2376 
2377  __ASM volatile ("amoand.w %0, %2, %1" : \
2378  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2379  return *addr;
2380 }
2381 
2389 __STATIC_FORCEINLINE int32_t __AMOOR_W(volatile int32_t *addr, int32_t value)
2390 {
2391  int32_t result;
2392 
2393  __ASM volatile ("amoor.w %0, %2, %1" : \
2394  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2395  return *addr;
2396 }
2397 
2405 __STATIC_FORCEINLINE int32_t __AMOXOR_W(volatile int32_t *addr, int32_t value)
2406 {
2407  int32_t result;
2408 
2409  __ASM volatile ("amoxor.w %0, %2, %1" : \
2410  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2411  return *addr;
2412 }
2413 
2421 __STATIC_FORCEINLINE uint32_t __AMOMAXU_W(volatile uint32_t *addr, uint32_t value)
2422 {
2423  uint32_t result;
2424 
2425  __ASM volatile ("amomaxu.w %0, %2, %1" : \
2426  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2427  return *addr;
2428 }
2429 
2437 __STATIC_FORCEINLINE int32_t __AMOMAX_W(volatile int32_t *addr, int32_t value)
2438 {
2439  int32_t result;
2440 
2441  __ASM volatile ("amomax.w %0, %2, %1" : \
2442  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2443  return *addr;
2444 }
2445 
2453 __STATIC_FORCEINLINE uint32_t __AMOMINU_W(volatile uint32_t *addr, uint32_t value)
2454 {
2455  uint32_t result;
2456 
2457  __ASM volatile ("amominu.w %0, %2, %1" : \
2458  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2459  return *addr;
2460 }
2461 
2469 __STATIC_FORCEINLINE int32_t __AMOMIN_W(volatile int32_t *addr, int32_t value)
2470 {
2471  int32_t result;
2472 
2473  __ASM volatile ("amomin.w %0, %2, %1" : \
2474  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2475  return *addr;
2476 }
2477 
2478 #if __RISCV_XLEN == 64
2490 __STATIC_INLINE uint64_t __CAS_D(volatile uint64_t *addr, uint64_t oldval, uint64_t newval)
2491 {
2492  uint64_t result;
2493  uint64_t rc;
2494 
2495  __ASM volatile ( \
2496  "0: lr.d %0, %2 \n" \
2497  " bne %0, %z3, 1f \n" \
2498  " sc.d %1, %z4, %2 \n" \
2499  " bnez %1, 0b \n" \
2500  "1:\n" \
2501  : "=&r"(result), "=&r"(rc), "+A"(*addr) \
2502  : "r"(oldval), "r"(newval) \
2503  : "memory");
2504  return result;
2505 }
2506 
2514 __STATIC_FORCEINLINE uint64_t __AMOSWAP_D(volatile uint64_t *addr, uint64_t newval)
2515 {
2516  uint64_t result;
2517 
2518  __ASM volatile ("amoswap.d %0, %2, %1" : \
2519  "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
2520  return result;
2521 }
2522 
2530 __STATIC_FORCEINLINE int64_t __AMOADD_D(volatile int64_t *addr, int64_t value)
2531 {
2532  int64_t result;
2533 
2534  __ASM volatile ("amoadd.d %0, %2, %1" : \
2535  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2536  return *addr;
2537 }
2538 
2546 __STATIC_FORCEINLINE int64_t __AMOAND_D(volatile int64_t *addr, int64_t value)
2547 {
2548  int64_t result;
2549 
2550  __ASM volatile ("amoand.d %0, %2, %1" : \
2551  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2552  return *addr;
2553 }
2554 
2562 __STATIC_FORCEINLINE int64_t __AMOOR_D(volatile int64_t *addr, int64_t value)
2563 {
2564  int64_t result;
2565 
2566  __ASM volatile ("amoor.d %0, %2, %1" : \
2567  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2568  return *addr;
2569 }
2570 
2578 __STATIC_FORCEINLINE int64_t __AMOXOR_D(volatile int64_t *addr, int64_t value)
2579 {
2580  int64_t result;
2581 
2582  __ASM volatile ("amoxor.d %0, %2, %1" : \
2583  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2584  return *addr;
2585 }
2586 
2594 __STATIC_FORCEINLINE uint64_t __AMOMAXU_D(volatile uint64_t *addr, uint64_t value)
2595 {
2596  uint64_t result;
2597 
2598  __ASM volatile ("amomaxu.d %0, %2, %1" : \
2599  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2600  return *addr;
2601 }
2602 
2610 __STATIC_FORCEINLINE int64_t __AMOMAX_D(volatile int64_t *addr, int64_t value)
2611 {
2612  int64_t result;
2613 
2614  __ASM volatile ("amomax.d %0, %2, %1" : \
2615  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2616  return *addr;
2617 }
2618 
2626 __STATIC_FORCEINLINE uint64_t __AMOMINU_D(volatile uint64_t *addr, uint64_t value)
2627 {
2628  uint64_t result;
2629 
2630  __ASM volatile ("amominu.d %0, %2, %1" : \
2631  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2632  return *addr;
2633 }
2634 
2642 __STATIC_FORCEINLINE int64_t __AMOMIN_D(volatile int64_t *addr, int64_t value)
2643 {
2644  int64_t result;
2645 
2646  __ASM volatile ("amomin.d %0, %2, %1" : \
2647  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2648  return *addr;
2649 }
2650 #endif /* __RISCV_XLEN == 64 */
2651 
2658 {
2660 }
2661 
2668 {
2670 }
2671 
2678 {
2680 }
2681 
2688 {
2690 }
2691 
2698 {
2700 }
2701 
2708 {
2710 }
2711 
2732 {
2733  __ASM volatile ("prefetch.i 0(%0)" : : "r" (addr) : "memory");
2734 }
2735 
2756 {
2757  __ASM volatile ("prefetch.r 0(%0)" : : "r" (addr) : "memory");
2758 }
2759 
2780 {
2781  __ASM volatile ("prefetch.w 0(%0)" : : "r" (addr) : "memory");
2782 }
2783  /* End of Doxygen Group NMSIS_Core_CPU_Intrinsic */
2785 
2786 #ifdef __cplusplus
2787 }
2788 #endif
2789 #endif /* __CORE_FEATURE_BASE__ */
CSR_MCACHECTL_Type CSR_MCACHE_CTL_Type
CSR_MPPICFGINFO_Type CSR_MPPICFG_INFO_Type
CSR_MECCCODE_Type CSR_MECC_CODE_Type
CSR_MDLMCTL_Type CSR_DILM_CTL_Type
CSR_MTLBCFGINFO_Type CSR_MTLBCFG_INFO_Type
CSR_MMISCCTRL_Type CSR_MMISCCTL_Type
CSR_MECCLOCK_Type CSR_MECC_LOCK_Type
CSR_MCFGINFO_Type CSR_MCFG_INFO_Type
CSR_MICFGINFO_Type CSR_MICFG_INFO_Type
CSR_MDCFGINFO_Type CSR_MDCFG_INFO_Type
CSR_MILMCTL_Type CSR_MILM_CTL_Type
CSR_MMISCCTRL_Type CSR_MMISC_CTL_Type
CSR_MFIOCFGINFO_Type CSR_MFIOCFG_INFO_Type
__STATIC_FORCEINLINE uint16_t __LH(volatile void *addr)
Load 16bit value from address (16 bit)
__STATIC_FORCEINLINE void __SH(volatile void *addr, uint16_t val)
Write 16bit value to address (16 bit)
__STATIC_FORCEINLINE int32_t __AMOMAX_W(volatile int32_t *addr, int32_t value)
Atomic signed MAX with 32bit value.
__STATIC_FORCEINLINE void __disable_all_counter(void)
Disable all MCYCLE & MINSTRET & MHPMCOUNTER counter.
__STATIC_FORCEINLINE void __set_wfi_sleepmode(WFI_SleepMode_Type mode)
Set Sleep mode of WFI.
__STATIC_FORCEINLINE void __enable_all_counter(void)
Enable all MCYCLE & MINSTRET & MHPMCOUNTER counter.
__STATIC_INLINE void __set_hpm_event(unsigned long idx, unsigned long event)
Set event for selected high performance monitor event.
__STATIC_FORCEINLINE void __EBREAK(void)
Breakpoint Instruction.
__STATIC_FORCEINLINE void __enable_ic_prefetch(void)
Enable ICache prefetch.
__STATIC_FORCEINLINE void __NOP(void)
NOP Instruction.
__STATIC_FORCEINLINE void __disable_mhpm_counters(unsigned long mask)
Disable hardware performance counters with mask.
__STATIC_INLINE void __set_hpm_counter(unsigned long idx, uint64_t value)
Set value for selected high performance monitor counter.
__STATIC_FORCEINLINE void __enable_ic_cmo_prefetch(void)
Enable ICache CMO prefetch.
__STATIC_FORCEINLINE void __enable_mhpm_counters(unsigned long mask)
Enable hardware performance counters with mask.
__STATIC_FORCEINLINE void __set_mideleg(unsigned long mask)
Set interrupt delegation to S mode.
__STATIC_FORCEINLINE void __disable_mhpm_counter(unsigned long idx)
Disable selected hardware performance monitor counter.
__STATIC_FORCEINLINE void __disable_ic_prefetch(void)
Disable ICache prefetch.
__STATIC_FORCEINLINE void __enable_mhpm_counter(unsigned long idx)
Enable selected hardware performance monitor counter.
WFI_SleepMode_Type
WFI Sleep Mode enumeration.
__STATIC_FORCEINLINE void __enable_dc_cmo_prefetch(void)
Enable DCache CMO prefetch.
__STATIC_FORCEINLINE void __ECALL(void)
Environment Call Instruction.
__STATIC_FORCEINLINE uint32_t __AMOSWAP_W(volatile uint32_t *addr, uint32_t newval)
Atomic Swap 32bit value into memory.
__STATIC_FORCEINLINE int32_t __AMOXOR_W(volatile int32_t *addr, int32_t value)
Atomic XOR with 32bit value.
__STATIC_FORCEINLINE void __disable_dc_cmo_prefetch(void)
Disable DCache CMO prefetch.
__STATIC_INLINE unsigned long __read_hpm_counter(unsigned long idx)
Get value of selected high performance monitor counter.
__STATIC_FORCEINLINE uint32_t __AMOMINU_W(volatile uint32_t *addr, uint32_t value)
Atomic unsigned MIN with 32bit value.
__STATIC_FORCEINLINE uint32_t __AMOMAXU_W(volatile uint32_t *addr, uint32_t value)
Atomic unsigned MAX with 32bit value.
__STATIC_FORCEINLINE uint8_t __LB(volatile void *addr)
Load 8bit value from address (8 bit)
__STATIC_FORCEINLINE void __SB(volatile void *addr, uint8_t val)
Write 8bit value to address (8 bit)
__STATIC_FORCEINLINE void __cmo_prefetch_i(const void *addr)
Instruction prefetch operation.
__STATIC_FORCEINLINE void __WFI(void)
Wait For Interrupt.
__STATIC_INLINE unsigned long __get_hpm_event(unsigned long idx)
Get event for selected high performance monitor event.
__STATIC_FORCEINLINE void __set_medeleg(unsigned long mask)
Set exceptions delegation to S mode.
__STATIC_INLINE uint32_t __CAS_W(volatile uint32_t *addr, uint32_t oldval, uint32_t newval)
Compare and Swap 32bit value using LR and SC.
__STATIC_FORCEINLINE int32_t __AMOAND_W(volatile int32_t *addr, int32_t value)
Atomic And with 32bit value.
__STATIC_FORCEINLINE void __SW(volatile void *addr, uint32_t val)
Write 32bit value to address (32 bit)
__STATIC_FORCEINLINE void __TXEVT(void)
Send TX Event.
__STATIC_FORCEINLINE int32_t __AMOOR_W(volatile int32_t *addr, int32_t value)
Atomic OR with 32bit value.
__STATIC_INLINE uint64_t __get_hpm_counter(unsigned long idx)
Get value of selected high performance monitor counter.
__STATIC_FORCEINLINE int32_t __AMOADD_W(volatile int32_t *addr, int32_t value)
Atomic Add with 32bit value.
__STATIC_FORCEINLINE void __WFE(void)
Wait For Event.
__STATIC_FORCEINLINE void __cmo_prefetch_w(const void *addr)
Write prefetch operation.
__STATIC_FORCEINLINE void __enable_mcycle_counter(void)
Enable MCYCLE counter.
__STATIC_FORCEINLINE int32_t __AMOMIN_W(volatile int32_t *addr, int32_t value)
Atomic signed MIN with 32bit value.
__STATIC_FORCEINLINE void __disable_minstret_counter(void)
Disable MINSTRET counter.
__STATIC_FORCEINLINE void __cmo_prefetch_r(const void *addr)
Read prefetch operation.
__STATIC_FORCEINLINE void __enable_minstret_counter(void)
Enable MINSTRET counter.
__STATIC_FORCEINLINE void __disable_ic_cmo_prefetch(void)
Disable ICache CMO prefetch.
__STATIC_FORCEINLINE uint32_t __LW(volatile void *addr)
Load 32bit value from address (32 bit)
__STATIC_FORCEINLINE void __disable_mcycle_counter(void)
Disable MCYCLE counter.
@ WFI_DEEP_SLEEP
Deep sleep mode, the core_clk and core_ano_clk will poweroff.
@ WFI_SHALLOW_SLEEP
Shallow sleep mode, the core_clk will poweroff.
#define SIE_SSIE
#define MSTATUS_MPIE
#define SSTATUS_SIE
#define WFE_WFE
#define MSTATUS_MIE
#define MSTATUS_MPP
#define MCACHE_CTL_DC_CMO_PF_EN
#define SSTATUS_SPIE
#define SSTATUS_SPP
#define MCOUNTINHIBIT_CY
#define MCACHE_CTL_IC_CMO_PF_EN
#define SIE_SEIE
#define SIE_STIE
#define MIE_MTIE
#define MIE_MEIE
#define MIE_MSIE
#define MCOUNTINHIBIT_IR
#define MCACHE_CTL_IC_PF_EN
__STATIC_FORCEINLINE unsigned long __read_time_csr(void)
Read the TIME register.
__STATIC_FORCEINLINE void __disable_core_irq(uint32_t irq)
Disable Core IRQ Interrupt.
__STATIC_FORCEINLINE void __disable_irq_s(void)
Disable IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE void __enable_ext_irq_s(void)
Enable External IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE void __disable_timer_irq(void)
Disable Timer IRQ Interrupts.
#define __RV_CSR_CLEAR(csr, val)
CSR operation Macro for csrc instruction.
__STATIC_FORCEINLINE unsigned long __get_cluster_id(void)
Get cluster id of current cluster.
__STATIC_FORCEINLINE void __clear_core_irq_pending(uint32_t irq)
Clear Core IRQ Interrupt Pending status.
__STATIC_FORCEINLINE void __disable_irq(void)
Disable IRQ Interrupts.
__STATIC_FORCEINLINE void __enable_ext_irq(void)
Enable External IRQ Interrupts.
__STATIC_FORCEINLINE uint32_t __get_core_irq_pending_s(uint32_t irq)
Get Core IRQ Interrupt Pending status in supervisor mode.
__STATIC_FORCEINLINE void __clear_core_irq_pending_s(uint32_t irq)
Clear Core IRQ Interrupt Pending status in supervisor mode.
#define __RV_CSR_READ(csr)
CSR operation Macro for csrr instruction.
__STATIC_FORCEINLINE unsigned long __read_instret_csr(void)
Read the INSTRET register.
__STATIC_FORCEINLINE void __disable_sw_irq(void)
Disable software IRQ Interrupts.
__STATIC_FORCEINLINE void __disable_ext_irq(void)
Disable External IRQ Interrupts.
__STATIC_FORCEINLINE unsigned long __get_cluster_id_s(void)
Get cluster id of current cluster in supervisor mode.
__STATIC_FORCEINLINE void __set_rv_cycle(rv_counter_t cycle)
Set whole 64 bits value of mcycle counter.
__STATIC_FORCEINLINE unsigned long __get_hart_index_s(void)
Get hart index of current cluster in supervisor mode.
__STATIC_FORCEINLINE void __disable_timer_irq_s(void)
Disable Timer IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE void __enable_timer_irq_s(void)
Enable Timer IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE unsigned long __get_hart_id(void)
Get hart id of current cluster.
__STATIC_FORCEINLINE unsigned long __get_hart_id_s(void)
Get hart id of current cluster in supervisor mode.
__STATIC_FORCEINLINE void __FENCE_I(void)
Fence.i Instruction.
#define __RWMB()
Read & Write Memory barrier.
__STATIC_FORCEINLINE unsigned long __read_cycle_csr(void)
Read the CYCLE register.
__STATIC_FORCEINLINE void __disable_ext_irq_s(void)
Disable External IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE void __set_rv_instret(rv_counter_t instret)
Set whole 64 bits value of machine instruction-retired counter.
__STATIC_INLINE rv_counter_t __get_rv_time(void)
Read whole 64 bits value of real-time clock.
__STATIC_FORCEINLINE void __disable_sw_irq_s(void)
Disable software IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE unsigned long __get_hart_index(void)
Get hart index of current cluster.
__STATIC_INLINE void __switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
switch privilege from machine mode to others.
#define __RV_CSR_WRITE(csr, val)
CSR operation Macro for csrw instruction.
__STATIC_FORCEINLINE void __enable_sw_irq(void)
Enable software IRQ Interrupts.
__STATIC_INLINE rv_counter_t __get_rv_cycle(void)
Read whole 64 bits value of mcycle counter.
__STATIC_FORCEINLINE void __disable_core_irq_s(uint32_t irq)
Disable Core IRQ Interrupt in supervisor mode.
__STATIC_FORCEINLINE void __enable_timer_irq(void)
Enable Timer IRQ Interrupts.
__STATIC_INLINE void __s_switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
switch privilege from supervisor mode to others.
__STATIC_FORCEINLINE void __enable_irq_s(void)
Enable IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE uint32_t __get_core_irq_pending(uint32_t irq)
Get Core IRQ Interrupt Pending status.
__STATIC_FORCEINLINE void __enable_core_irq_s(uint32_t irq)
Enable Core IRQ Interrupt in supervisor mode.
__STATIC_INLINE rv_counter_t __get_rv_instret(void)
Read whole 64 bits value of machine instruction-retired counter.
__STATIC_FORCEINLINE void __enable_core_irq(uint32_t irq)
Enable Core IRQ Interrupt.
__STATIC_FORCEINLINE void __enable_irq(void)
Enable IRQ Interrupts.
__STATIC_FORCEINLINE void __enable_sw_irq_s(void)
Enable software IRQ Interrupts in supervisor mode.
#define __RV_CSR_SET(csr, val)
CSR operation Macro for csrs instruction.
#define CSR_MHPMEVENT6
#define CSR_MHPMEVENT29
#define CSR_MHPMEVENT18
#define CSR_INSTRET
#define CSR_MHPMCOUNTER17H
#define CSR_MHPMEVENT31
#define CSR_MHPMEVENT17
#define CSR_MHPMCOUNTER16
#define CSR_MHPMCOUNTER7H
#define CSR_MHPMEVENT20
#define CSR_MHPMCOUNTER27H
#define CSR_MHPMEVENT5
#define CSR_MHPMCOUNTER25
#define CSR_MHPMCOUNTER20
#define CSR_MHPMEVENT9
#define CSR_MHPMCOUNTER28
#define CSR_MHPMCOUNTER31
#define CSR_MHPMEVENT13
#define CSR_MHPMCOUNTER18H
#define CSR_MHPMCOUNTER21H
#define CSR_MINSTRET
#define CSR_MHPMEVENT16
#define CSR_MHPMEVENT24
#define CSR_TIMEH
#define CSR_MHPMCOUNTER28H
#define CSR_MHPMCOUNTER21
#define CSR_MHPMCOUNTER9H
#define CSR_MHPMCOUNTER7
#define CSR_MIP
#define CSR_SEPC
#define CSR_MHPMCOUNTER29H
#define CSR_MHPMCOUNTER26
#define CSR_MHPMCOUNTER14
#define CSR_MHPMCOUNTER10H
#define CSR_MHPMCOUNTER6
#define CSR_MHPMCOUNTER5
#define CSR_MHPMCOUNTER11H
#define CSR_MHPMEVENT26
#define CSR_TIME
#define CSR_MHPMCOUNTER12H
#define CSR_MHPMEVENT21
#define CSR_MHARTID
#define CSR_MEPC
#define CSR_MHPMCOUNTER25H
#define CSR_SHARTID
#define CSR_MCYCLE
#define CSR_MHPMEVENT3
#define CSR_MHPMCOUNTER26H
#define CSR_MHPMCOUNTER5H
#define CSR_MHPMEVENT14
#define CSR_SSTATUS
#define CSR_MHPMEVENT27
#define CSR_MHPMCOUNTER24H
#define CSR_MCACHE_CTL
#define CSR_MSTATUS
#define CSR_MHPMCOUNTER8H
#define CSR_MHPMCOUNTER3H
#define CSR_TXEVT
#define CSR_MHPMCOUNTER12
#define CSR_MHPMCOUNTER20H
#define CSR_MHPMEVENT19
#define CSR_SLEEPVALUE
#define CSR_MHPMEVENT15
#define CSR_MHPMEVENT11
#define CSR_MHPMEVENT4
#define CSR_MHPMCOUNTER31H
#define CSR_MHPMCOUNTER8
#define CSR_MCOUNTINHIBIT
#define CSR_MHPMCOUNTER9
#define CSR_WFE
#define CSR_MHPMCOUNTER6H
#define CSR_CYCLE
#define CSR_MIRGB_INFO
#define CSR_MHPMCOUNTER23H
#define CSR_MINSTRETH
#define CSR_MHPMEVENT8
#define CSR_MHPMCOUNTER17
#define CSR_MHPMCOUNTER11
#define CSR_MHPMEVENT7
#define CSR_MHPMCOUNTER15H
#define CSR_MEDELEG
#define CSR_MHPMCOUNTER3
#define CSR_MHPMCOUNTER10
#define CSR_MHPMCOUNTER14H
#define CSR_MHPMCOUNTER29
#define CSR_MHPMCOUNTER4
#define CSR_MHPMEVENT12
#define CSR_SIP
#define CSR_MHPMCOUNTER27
#define CSR_MHPMCOUNTER18
#define CSR_MHPMEVENT25
#define CSR_MHPMCOUNTER13H
#define CSR_MHPMCOUNTER4H
#define CSR_MHPMCOUNTER23
#define CSR_MHPMCOUNTER24
#define CSR_MHPMCOUNTER15
#define CSR_MHPMCOUNTER22
#define CSR_MCYCLEH
#define CSR_MHPMCOUNTER13
#define CSR_MIDELEG
#define CSR_MIE
#define CSR_MHPMCOUNTER30
#define CSR_MHPMEVENT22
#define CSR_MHPMEVENT30
#define CSR_MHPMCOUNTER22H
#define CSR_MHPMEVENT10
#define CSR_MHPMCOUNTER19H
#define CSR_MHPMCOUNTER16H
#define CSR_MHPMCOUNTER30H
#define CSR_MHPMCOUNTER19
#define CSR_MHPMEVENT23
#define CSR_SIE
#define CSR_MTIME
#define CSR_MHPMEVENT28
#define __ASM
Pass information from the compiler to the assembler.
Definition: nmsis_gcc.h:55
#define __STATIC_FORCEINLINE
Define a static function that should be always inlined by the compiler.
Definition: nmsis_gcc.h:70
#define __STATIC_INLINE
Define a static function that may be inlined by the compiler.
Definition: nmsis_gcc.h:65
unsigned long rv_csr_t
Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V.
#define __RISCV_XLEN
Refer to the width of an integer register in bits(either 32 or 64)
uint64_t rv_counter_t
Type of RISC-V Counter such as cycle, instret, time, depends on the XLEN defined in RISC-V,...
Union type to access MCACHE_CTL CSR register.
rv_csr_t dc_burst_type
bit: 23 D-Cache Burst type control
rv_csr_t dc_rwdecc
bit: 20 Control D-Cache Data Ram ECC code injection
rv_csr_t ic_burst_type
bit: 10 I-Cache Burst type control
rv_csr_t ic_scpd_mod
bit: 1 Scratchpad mode, 0: Scratchpad as ICache Data RAM, 1: Scratchpad as ILM SRAM
rv_csr_t ic_pf_en
bit: 6 I-Cache prefetch enable
rv_csr_t dc_rwtecc
bit: 19 Control D-Cache Tag Ram ECC code injection
rv_csr_t ic_rwtecc
bit: 4 Control I-Cache Tag Ram ECC code injection
rv_csr_t dc_prefetch_en
bit: 22 D-Cache CMO prefetch enable control
rv_csr_t ic_rwdecc
bit: 5 Control I-Cache Data Ram ECC code injection
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved0
bit: 11..15 Reserved
rv_csr_t ic_ecc_chk_en
bit: 8 I-Cache check ECC codes enable
rv_csr_t ic_en
bit: 0 I-Cache enable
rv_csr_t _reserved1
bit: 24..XLEN-1 Reserved
rv_csr_t dc_ecc_excp_en
bit: 18 D-Cache 2bit ECC error exception enable
rv_csr_t ic_cancel_en
bit: 7 I-Cache change flow canceling enable control
rv_csr_t ic_ecc_en
bit: 2 I-Cache ECC enable
rv_csr_t ic_prefetch_en
bit: 9 I-Cache CMO prefetch enable control
rv_csr_t dc_ecc_en
bit: 17 D-Cache ECC enable
rv_csr_t ic_ecc_excp_en
bit: 3 I-Cache 2bit ECC error exception enable
rv_csr_t dc_en
bit: 16 DCache enable
rv_csr_t dc_ecc_chk_en
bit: 21 D-Cache check ECC codes enable
Union type to access MCAUSE CSR register.
rv_csr_t mpp
bit: 28..29 Privilede mode flag before enter interrupt
rv_csr_t mpil
bit: 16..23 Previous interrupt level
rv_csr_t exccode
bit: 0..11 exception or interrupt code
rv_csr_t _reserved0
bit: 12..15 Reserved
rv_csr_t mpie
bit: 27 Interrupt enable flag before enter interrupt
rv_csr_t _reserved1
bit: 24..26 Reserved
rv_csr_t minhv
bit: 30 Machine interrupt vector table
rv_csr_t d
Type used for csr data access.
rv_csr_t interrupt
bit: XLEN-1 trap type.
Union type to access MCFG_INFO CSR register.
rv_csr_t sec_mode
bit: 19 Smwg extension present
rv_csr_t tee
bit: 0 TEE present
rv_csr_t icache
bit: 9 ICache present
rv_csr_t etrace
bit: 20 Etrace present
rv_csr_t dsp_n1
bit: 12 DSP N1 present
rv_csr_t d
Type used for csr data access.
rv_csr_t ecc
bit: 1 ECC present
rv_csr_t plic
bit: 3 PLIC present
rv_csr_t dsp_n2
bit: 13 DSP N2 present
rv_csr_t ppi
bit: 5 PPI present
rv_csr_t clic
bit: 2 CLIC present
rv_csr_t nice
bit: 6 NICE present
rv_csr_t dsp_n3
bit: 14 DSP N3 present
rv_csr_t _reserved1
bit: 27..XLEN-1 Reserved
rv_csr_t sstc
bit: 26 SSTC extension present
rv_csr_t ilm
bit: 7 ILM present
rv_csr_t vnice
bit: 23 VNICE present
rv_csr_t xlcz
bit: 24 XLCZ extension present
rv_csr_t dcache
bit: 10 DCache present
rv_csr_t zc_xlcz
bit: 15 Zc and xlcz extension present
rv_csr_t safety_mecha
bit: 21..22 Indicate Core's safety mechanism
rv_csr_t dlm
bit: 8 DLM present
rv_csr_t smp
bit: 11 SMP present
rv_csr_t vpu_degree
bit: 17..18 Indicate the VPU degree of parallel
rv_csr_t iregion
bit: 16 IREGION present
rv_csr_t zilsd
bit: 25 Zilsd/Zclsd extension present
rv_csr_t fio
bit: 4 FIO present
Union type to access MCOUNTINHIBIT CSR register.
rv_csr_t cy
bit: 0 1 means disable mcycle counter
rv_csr_t ir
bit: 2 1 means disable minstret counter
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved1
bit: 3..XLEN-1 Reserved
rv_csr_t _reserved0
bit: 1 Reserved
Union type to access MDCAUSE CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t mdcause
bit: 0..2 More detailed exception information as MCAUSE supplement
rv_csr_t _reserved0
bit: 3..XLEN-1 Reserved
Union type to access MDCFG_INFO CSR register.
rv_csr_t set
bit: 0..3 D-Cache sets per way
rv_csr_t lm_ecc
bit: 21 DLM ECC present
rv_csr_t _reserved0
bit: 11..15 Reserved
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved1
bit: 22..XLEN-1 Reserved
rv_csr_t lsize
bit: 7..9 D-Cache line size
rv_csr_t lm_size
bit: 16..20 DLM size, need to be 2^n size
rv_csr_t way
bit: 4..6 D-Cache way
rv_csr_t ecc
bit: 10 D-Cache ECC support
Union type to access MDLM_CTL CSR register.
rv_csr_t _reserved0
bit: 7..9 Reserved
rv_csr_t dlm_en
bit: 0 DLM enable
rv_csr_t dlm_bpa
bit: 10..XLEN-1 DLM base address
rv_csr_t dlm_rwecc
bit: 3 Control mecc_code write to dlm, simulate error injection
rv_csr_t dlm_va_en
bit: 5 Using virtual address to judge DLM access
rv_csr_t dlm_ecc_en
bit: 1 DLM ECC eanble
rv_csr_t dlm_ecc_chk_en
bit: 4 DLM check ECC codes enable
rv_csr_t dis_lsu_dlm
bit: 6 Disable LSU access DLM
rv_csr_t d
Type used for csr data access.
rv_csr_t dlm_ecc_excp_en
bit: 2 DLM ECC exception enable
Union type to access MECC_CODE CSR register.
rv_csr_t _reserved1
bit: 21..23 Reserved 0
rv_csr_t ramid
bit: 16..20 The ID of RAM that has 2bit ECC error, software can clear these bits
rv_csr_t ecc_inj_mode
bit: 31 ECC injection mode
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved2
bit: 29..30 Reserved 0
rv_csr_t code
bit: 0..8 Used to inject ECC check code
rv_csr_t _reserved0
bit: 9..15 Reserved 0
rv_csr_t sramid
bit: 24..28 The ID of RAM that has 1bit ECC error, software can clear these bits
Union type to access MECC_LOCK CSR register.
rv_csr_t ecc_lock
bit: 0 RW permission, ECC Lock configure
rv_csr_t _reserved0
bit: 1..XLEN-1 Reserved
rv_csr_t d
Type used for csr data access.
Union type to access MECC_CTL CSR register.
rv_csr_t dlm_ext_msk
bit: 6 Write 1 to disable aggregate DLM external access ECC fatal error to safety_error output
rv_csr_t ilm_acc_msk
bit: 1 Write 1 to disable aggregate ILM load/store access ECC fatal error to safety_error output
rv_csr_t dc_ccm_msk
bit: 8 Write 1 to disable aggregate DCache CCM ECC fatal error to safety_error output
rv_csr_t _reserved0
bit: 10..30 Reserved 0
rv_csr_t ic_fch_msk
bit: 3 Write 1 to disable aggregate ICache fetch ECC fatal error to safety_error output
rv_csr_t dc_acc_msk
bit: 4 Write 1 to disable aggregate DCache access ECC fatal error to safety_error output
rv_csr_t d
Type used for csr data access.
rv_csr_t io_prot_chk_en
bit: 31 Controls to check the IO interface
rv_csr_t dlm_acc_msk
bit: 2 Write 1 to disable aggregate DLM access ECC fatal error to safety_error output
rv_csr_t dc_cpbk_msk
bit: 9 Write 1 to disable aggregate DCache CPBK ECC fatal error to safety_error output
rv_csr_t ic_ccm_msk
bit: 7 Write 1 to disable aggregate ICache CCM ECC fatal error to safety_error output
rv_csr_t ilm_fch_msk
bit: 0 Write 1 to disable aggregate ILM fetch ECC fatal error to safety_error output
rv_csr_t ilm_ext_msk
bit: 5 Write 1 to disable aggregate ILM external access ECC fatal error to safety_error output
Union type to access MECC_STATUS CSR register.
rv_csr_t ic_ccm_err
bit: 7 ICache CCM ECC fatal error has occurred
rv_csr_t dc_cpbk_err
bit: 9 DCache CPBK ECC fatal error has occurred
rv_csr_t dlm_acc_err
bit: 2 DLM access ECC fatal error has occurred
rv_csr_t ilm_acc_err
bit: 1 ILM load/store access ECC fatal error has occurred
rv_csr_t dlm_ext_err
bit: 6 DLM external access ECC fatal error has occurred
rv_csr_t _reserved0
bit: 10..XLEN-1 Reserved 0
rv_csr_t ilm_fch_err
bit: 0 ILM fetch ECC fatal error has occurred
rv_csr_t dc_ccm_err
bit: 8 DCache CCM ECC fatal error has occurred
rv_csr_t dc_acc_err
bit: 4 DCache access ECC fatal error has occurred
rv_csr_t ilm_ext_err
bit: 5 ILM external access ECC fatal error has occurred
rv_csr_t d
Type used for csr data access.
rv_csr_t ic_fch_err
bit: 3 ICache fetch ECC fatal error has occurred
Union type to access MFIOCFG_INFO CSR register.
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t fio_size
bit: 1..5 FIO size, need to be 2^n size
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved1
bit: 6..9 Reserved
rv_csr_t fio_bpa
bit: 10..XLEN-1 FIO base address
Union type to access MICFG_INFO CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t lm_size
bit: 16..20 ILM size, need to be 2^n size
rv_csr_t set
bit: 0..3 I-Cache sets per way
rv_csr_t lm_ecc
bit: 22 ILM ECC support
rv_csr_t ecc
bit: 10 I-Cache ECC support
rv_csr_t _reserved0
bit: 11..15 Reserved
rv_csr_t _reserved1
bit: 24..XLEN-1 Reserved
rv_csr_t lsize
bit: 7..9 I-Cache line size
rv_csr_t i_share_dlm
bit: 23 Support IFU fetch instructions from DLM
rv_csr_t lm_xonly
bit: 21 ILM Execute only permission or Reserved
rv_csr_t way
bit: 4..6 I-Cache way
Union type to access MILM_CTL CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved0
bit: 7..9 Reserved
rv_csr_t ilm_ecc_chk_en
bit: 4 ILM check ECC codes enable
rv_csr_t dis_lsu_ilm
bit: 6 Disable lsu access ILM
rv_csr_t ilm_ecc_excp_en
bit: 2 ILM ECC exception enable
rv_csr_t ilm_rwecc
bit: 3 Control mecc_code write to ilm, simulate error injection
rv_csr_t ilm_ecc_en
bit: 1 ILM ECC enable
rv_csr_t ilm_va_en
bit: 5 Using virtual address to judge ILM access
rv_csr_t ilm_en
bit: 0 ILM enable
rv_csr_t ilm_bpa
bit: 10..XLEN-1 ILM base physical address
Union type to access MIRGB_INFO CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t iregion_base
bit: 10..PA_SIZE IREGION Base Address
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t iregion_size
bit: 1..5 Indicates the size of IREGION and it should be power of 2
rv_csr_t _reserved1
bit: 6..9 Reserved
Union type to access MISA CSR register.
rv_csr_t n
bit: 13 Tentatively reserved for User-Level Interrupts extension
rv_csr_t y
bit: 24 Reserved
rv_csr_t p
bit: 15 Tentatively reserved for Packed-SIMD extension
rv_csr_t v
bit: 21 Vector extension
rv_csr_t d
bit: 3 Double-precision floating-point extension
rv_csr_t q
bit: 16 Quad-precision floating-point extension
rv_csr_t i
bit: 8 RV32I/64I/128I base ISA
rv_csr_t w
bit: 22 Reserved
rv_csr_t f
bit: 5 Single-precision floating-point extension
rv_csr_t j
bit: 9 Reserved
rv_csr_t r
bit: 17 Reserved
rv_csr_t g
bit: 6 Reserved
rv_csr_t z
bit: 25 Reserved
rv_csr_t mxl
bit: XLEN-2..XLEN-1 Machine XLEN
rv_csr_t _reserved0
bit: 26..XLEN-3 Reserved
rv_csr_t e
bit: 4 RV32E/64E base ISA
rv_csr_t u
bit: 20 User mode implemented
rv_csr_t s
bit: 18 Supervisor mode implemented
rv_csr_t a
bit: 0 Atomic extension
rv_csr_t t
bit: 19 Reserved
rv_csr_t b
bit: 1 B extension
rv_csr_t o
bit: 14 Reserved
rv_csr_t k
bit: 10 Reserved
rv_csr_t x
bit: 23 Non-standard extensions present
rv_csr_t l
bit: 11 Reserved
rv_csr_t h
bit: 7 Hypervisor extension
rv_csr_t m
bit: 12 Integer Multiply/Divide extension
rv_csr_t c
bit: 2 Compressed extension
Union type to access MMISC_CTRL CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved3
bit: 13 Reserved
rv_csr_t _reserved1
bit: 2 Reserved
rv_csr_t sijump_en
bit: 11 SIJUMP mode of trace
rv_csr_t hw_auto_context
bit: 21 Hardware auto context saving and restoring enable
rv_csr_t _reserved6
bit: 22..XLEN-1 Reserved
rv_csr_t core_buserr
bit: 8 core bus error exception or interrupt
rv_csr_t _reserved5
bit: 18..19 Reserved
rv_csr_t _reserved4
bit: 15..16 Reserved
rv_csr_t _reserved2
bit: 4..5 Reserved
rv_csr_t csr_excl_enable
bit: 17 Exclusive instruction(lr,sc) on Non-cacheable/Device memory can send exclusive flag in memory...
rv_csr_t imreturn_en
bit: 10 IMRETURN mode of trace
rv_csr_t zclsd_en
bit: 1 Control the Zclsd will uses the Zcf extension encoding or not
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t dbg_sec
bit: 14 debug access mode, removed in latest releases
rv_csr_t bpu
bit: 3 dynamic prediction enable flag
rv_csr_t nmi_cause
bit: 9 mnvec control and nmi mcause exccode
rv_csr_t misalign
bit: 6 misaligned access support flag
rv_csr_t ldspec_en
bit: 12 enable load speculative goes to mem interface
rv_csr_t zcmt_zcmp
bit: 7 Zc Ext uses the cfdsp of D Ext's encoding or not
rv_csr_t lsu_allow_diff_en
bit: 20 LSU access allows the next operation can outstanding transactions when the current transactio...
Union type to access MMISC_CTL1 CSR register.
rv_csr_t vlsu_ooo_4k_mode
bit: 1 Control the size of address check region for vlsu ooo
rv_csr_t vlsu_cof_en
bit: 4 Control the enable of vlsu check-only first feature
rv_csr_t vlsu_ooo_force_va_4k
bit: 2 Control the size of virtual address check region for vlsu ooo
rv_csr_t vlsu_ooo_en
bit: 3 Control the enable of vlsu ooo feature
rv_csr_t _reserved0
bit: 7..XLEN-1 Reserved
rv_csr_t rvv_v1_0_cmpt
bit: 6 Control some vpu instruction behaviour is compatible with rvv1.0
rv_csr_t vlm_path_en
bit: 5 Control vlm dedicated path enable
rv_csr_t d
Type used for csr data access.
rv_csr_t fp16mode
bit: 0 16 bit float precision mode
Union type to access MPPICFG_INFO CSR register.
rv_csr_t ppi_bpa
bit: 10..XLEN-1 PPI base address
rv_csr_t _reserved0
bit: 0 Reserved 1
rv_csr_t _reserved1
bit: 6..8 Reserved 0
rv_csr_t ppi_size
bit: 1..5 PPI size, need to be 2^n size
rv_csr_t d
Type used for csr data access.
rv_csr_t ppi_en
bit: 9 PPI Enable.
Union type to access MSAVESTATUS CSR register.
rv_csr_t w
Type used for csr data access.
rv_csr_t mpie2
bit: 8 interrupt enable flag of second level NMI/exception nesting
rv_csr_t mpp2
bit: 9..10 privilege mode of second level NMI/exception nesting
rv_csr_t mpp1
bit: 1..2 privilege mode of first level NMI/exception nesting
rv_csr_t _reserved1
bit: 11..13 Reserved
rv_csr_t ptyp1
bit: 6..7 NMI/exception type before first nesting
rv_csr_t _reserved2
bit: 16..XLEN-1 Reserved
rv_csr_t ptyp2
bit: 14..15 NMI/exception type before second nesting
rv_csr_t _reserved0
bit: 3..5 Reserved
rv_csr_t mpie1
bit: 0 interrupt enable flag of first level NMI/exception nesting
Union type to access MSTACK_CTL CSR register.
rv_csr_t _reserved0
bit: 3..XLEN-1 Reserved
rv_csr_t ovf_track_en
bit: 0 Stack overflow check or track enable
rv_csr_t mode
bit: 2 Mode of stack checking
rv_csr_t d
Type used for csr data access.
rv_csr_t udf_en
bit: 1 Stack underflow check enable
Union type to access MSTATUSH CSR register.
rv_csr_t mpelp
bit: 9 Machine mode Previous Expected Landing Pad (ELP) State
rv_csr_t _reserved0
bit: 0..3 Reserved
rv_csr_t mbe
bit: 5 M-mode non-instruction-fetch memory access big-endian enable flag
rv_csr_t sbe
bit: 4 S-mode non-instruction-fetch memory access big-endian enable flag
rv_csr_t gva
bit: 6 Guest Virtual Address
rv_csr_t mpv
bit: 7 Machine Previous Virtualization Mode
rv_csr_t _reserved5
bit: 11..31 Reserved
rv_csr_t mdt
bit: 10 M-mode-disable-trap
rv_csr_t _reserved1
bit: 8 Reserved
rv_csr_t d
Type used for csr data access.
Union type to access MSTATUS CSR register.
rv_csr_t sd
bit: 31 Dirty status for XS or FS
rv_csr_t tvm
bit: 20 Trap Virtual Memory
rv_csr_t fs
bit: 13..14 FS status flag
rv_csr_t mpie
bit: 7 machine mode previous interrupt enable flag
rv_csr_t spie
bit: 5 supervisor mode interrupt enable flag
rv_csr_t sie
bit: 1 supervisor interrupt enable flag
rv_csr_t _reserved3
bit: 25..30 Reserved
rv_csr_t _reserved1
bit: 2 Reserved
rv_csr_t mxr
bit: 19 Make eXecutable Readable
rv_csr_t mprv
bit: 17 Modify PRiVilege
rv_csr_t mpp
bit: 11..12 machine previous privilege mode
rv_csr_t tw
bit: 21 Timeout Wait
rv_csr_t _reserved2
bit: 4 Reserved
rv_csr_t spp
bit: 8 supervisor previous privilege mode
rv_csr_t ube
bit: 6 U-mode non-instruction-fetch memory access big-endian enable flag
rv_csr_t mie
bit: 3 machine mode interrupt enable flag
rv_csr_t vs
bit: 9..10 vector status flag
rv_csr_t sum
bit: 18 Supervisor Mode load and store protection
rv_csr_t xs
bit: 15..16 XS status flag
rv_csr_t d
Type used for csr data access.
rv_csr_t tsr
bit: 22 Trap SRET
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t sdt
bit: 24 S-mode-disable-trap
rv_csr_t spelp
bit: 23 Supervisor mode Previous Expected Landing Pad (ELP) State
Union type to access MSUBM CSR register.
rv_csr_t typ
bit: 6..7 Current sub-mode: 0: Normal Machine Mode; 1: Interrupt Handling Mode; 2: Exception Handling ...
rv_csr_t _reserved1
bit: 20..XLEN-1 Reserved 0
rv_csr_t _reserved0
bit: 0..5 Reserved 0
rv_csr_t ptyp
bit: 8..9 sub-mode before entering the trap: 0: Normal Machine Mode; 1: Interrupt Handling Mode; 2: E...
rv_csr_t d
Type used for csr data access.
rv_csr_t gpridx
bit: 10..14 Current Register Group Select
rv_csr_t pgpridx
bit: 15..19 Previous Register Group Select
Union type to access MTLBCFG_INFO CSR register.
rv_csr_t i_size
bit: 16..18 ITLB size
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved2
bit: 22..XLEN-2 Reserved 0
rv_csr_t lsize
bit: 7..9 Main TLB line size or Reserved
rv_csr_t _reserved1
bit: 12..15 Reserved 0
rv_csr_t set
bit: 0..3 Main TLB entry per way
rv_csr_t d_size
bit: 19..21 DTLB size
rv_csr_t _reserved0
bit: 27..XLEN-2 Reserved 0
rv_csr_t napot
bit: 11 TLB supports Svnapot or not
rv_csr_t way
bit: 4..6 Main TLB ways
rv_csr_t mapping
bit: XLEN-1 mapping type
rv_csr_t ecc
bit: 10 Main TLB supports ECC or not
Union type to access MTLB_CTL CSR register.
rv_csr_t tlb_ecc_chk_en
bit: 6 Controls to check the ECC when core access to MTLB
rv_csr_t d
Type used for csr data access.
rv_csr_t tlb_ecc_en
bit: 0 MTLB ECC enable
rv_csr_t tlb_dram_ecc_inj_en
bit: 3 Controls to inject the ECC Code in CSR mecc_code to MTLB data rams
rv_csr_t tlb_ecc_excp_en
bit: 1 MTLB double bit ECC exception enable control
rv_csr_t tlb_tram_ecc_inj_en
bit: 2 Controls to inject the ECC Code in CSR mecc_code to MTLB tag rams
rv_csr_t _reserved0
bit: 4..5 Reserved
rv_csr_t napot_en
bit: 7 NAPOT page enable
rv_csr_t _reserved1
bit: 8..XLEN-1 Reserved
Union type to access MTVEC CSR register.
rv_csr_t addr
bit: 6..XLEN-1 mtvec address
rv_csr_t d
Type used for csr data access.
rv_csr_t mode
bit: 0..5 interrupt mode control