NMSIS-Core Version 1.4.0
NMSIS-Core support for Nuclei processor-based devices
core_feature_base.h
1 /*
2  * Copyright (c) 2019 Nuclei Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 #ifndef __CORE_FEATURE_BASE__
20 #define __CORE_FEATURE_BASE__
25 /*
26  * Core Base Feature Configuration Macro:
27  * 1. __HARTID_OFFSET: Optional. Define this macro when the hartid of the first hart in your CPU system differs from its hart index.
28  *    e.g. if the first hart's hartid is 2 while its hart index is 0, set this macro to 2.
29  *
30  */
31 #include <stdint.h>
32 
33 #ifdef __cplusplus
34  extern "C" {
35 #endif
36 
37 #include "nmsis_compiler.h"
38 
45 #ifndef __RISCV_XLEN
47  #ifndef __riscv_xlen
48  #define __RISCV_XLEN 32
49  #else
50  #define __RISCV_XLEN __riscv_xlen
51  #endif
52 #endif /* __RISCV_XLEN */
53 
55 typedef unsigned long rv_csr_t;
56  /* End of Doxygen Group NMSIS_Core_Registers */
68 typedef union {
69  struct {
70  rv_csr_t a:1;
71  rv_csr_t b:1;
72  rv_csr_t c:1;
73  rv_csr_t d:1;
74  rv_csr_t e:1;
75  rv_csr_t f:1;
76  rv_csr_t g:1;
77  rv_csr_t h:1;
78  rv_csr_t i:1;
79  rv_csr_t j:1;
80  rv_csr_t k:1;
81  rv_csr_t l:1;
82  rv_csr_t m:1;
83  rv_csr_t n:1;
84  rv_csr_t o:1;
85  rv_csr_t p:1;
86  rv_csr_t q:1;
87  rv_csr_t r:1;
88  rv_csr_t s:1;
89  rv_csr_t t:1;
90  rv_csr_t u:1;
91  rv_csr_t v:1;
92  rv_csr_t w:1;
93  rv_csr_t x:1;
94  rv_csr_t y:1;
95  rv_csr_t z:1;
98  } b;
99  rv_csr_t d;
100 } CSR_MISA_Type;
101 
105 typedef union {
106  struct {
128 #if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
129  rv_csr_t _reserved3:7;
130  rv_csr_t uxl:2;
131  rv_csr_t sxl:2;
132  rv_csr_t sbe:1;
133  rv_csr_t mbe:1;
134  rv_csr_t gva:1;
135  rv_csr_t mpv:1;
136  rv_csr_t _reserved4:1;
137  rv_csr_t mpelp:1;
138  rv_csr_t mdt:1;
139  rv_csr_t _reserved5:20;
140  rv_csr_t sd:1;
141 #else
144 #endif
145  } b;
148 
149 #if defined(__RISCV_XLEN) && __RISCV_XLEN == 32
153 typedef union {
154  struct {
164  } b;
167 #endif
168 
172 typedef union {
173  struct {
176  } b;
179 
183 typedef union {
184  struct {
192 #if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
193  rv_csr_t _reserved2:__RISCV_XLEN-32;
194 #endif
196  } b;
199 
203 typedef union {
204  struct {
209  } b;
212 
216 typedef union {
217  struct {
222  } b;
225 
229 typedef union {
230  struct {
233  } b;
236 
240 typedef union {
241  struct {
259  } b;
262 
265 
269 typedef union {
270  struct {
292  } b;
295 
297 
301 typedef union {
302  struct {
312  } b;
315 
319 typedef union {
320  struct {
329  } b;
332 
334 
338 typedef union {
339  struct {
347  } b;
350 
352 
356 typedef union {
357  struct {
384  } b;
387 
389 
393 typedef union {
394  struct {
404  } b;
407 
409 
413 typedef union {
414  struct {
423  } b;
426 
428 
432 typedef union {
433  struct {
443  } b;
446 
448 
452 typedef union {
453  struct {
459  } b;
462 
464 
468 typedef union {
469  struct {
474  } b;
477 
479 
483 typedef union {
484  struct {
487  } b;
490 
492 
496 typedef union {
497  struct {
504  } b;
507 
509 
513 typedef union {
514  struct {
526  } b;
529 
533 typedef union {
534  struct {
546  } b;
549 
553 typedef union {
554  struct {
559  } b;
562 
566 typedef union {
567  struct {
572  } b;
575 
579 typedef union {
580  struct {
589  } b;
592 
593  /* End of Doxygen Group NMSIS_Core_Base_Registers */
595 
596 /* ########################### Core Function Access ########################### */
610 #ifndef __ASSEMBLY__
611 
612 #ifndef __ICCRISCV__
613 
624 #define __RV_CSR_SWAP(csr, val) \
625  ({ \
626  rv_csr_t __v = (unsigned long)(val); \
627  __ASM volatile("csrrw %0, " STRINGIFY(csr) ", %1" \
628  : "=r"(__v) \
629  : "rK"(__v) \
630  : "memory"); \
631  __v; \
632  })
633 
642 #define __RV_CSR_READ(csr) \
643  ({ \
644  rv_csr_t __v; \
645  __ASM volatile("csrr %0, " STRINGIFY(csr) \
646  : "=r"(__v) \
647  : \
648  : "memory"); \
649  __v; \
650  })
651 
660 #define __RV_CSR_WRITE(csr, val) \
661  ({ \
662  rv_csr_t __v = (rv_csr_t)(val); \
663  __ASM volatile("csrw " STRINGIFY(csr) ", %0" \
664  : \
665  : "rK"(__v) \
666  : "memory"); \
667  })
668 
679 #define __RV_CSR_READ_SET(csr, val) \
680  ({ \
681  rv_csr_t __v = (rv_csr_t)(val); \
682  __ASM volatile("csrrs %0, " STRINGIFY(csr) ", %1" \
683  : "=r"(__v) \
684  : "rK"(__v) \
685  : "memory"); \
686  __v; \
687  })
688 
697 #define __RV_CSR_SET(csr, val) \
698  ({ \
699  rv_csr_t __v = (rv_csr_t)(val); \
700  __ASM volatile("csrs " STRINGIFY(csr) ", %0" \
701  : \
702  : "rK"(__v) \
703  : "memory"); \
704  })
705 
716 #define __RV_CSR_READ_CLEAR(csr, val) \
717  ({ \
718  rv_csr_t __v = (rv_csr_t)(val); \
719  __ASM volatile("csrrc %0, " STRINGIFY(csr) ", %1" \
720  : "=r"(__v) \
721  : "rK"(__v) \
722  : "memory"); \
723  __v; \
724  })
725 
734 #define __RV_CSR_CLEAR(csr, val) \
735  ({ \
736  rv_csr_t __v = (rv_csr_t)(val); \
737  __ASM volatile("csrc " STRINGIFY(csr) ", %0" \
738  : \
739  : "rK"(__v) \
740  : "memory"); \
741  })
742 #else
743 
744 #include <intrinsics.h>
745 
746 #define __RV_CSR_SWAP __write_csr
747 #define __RV_CSR_READ __read_csr
748 #define __RV_CSR_WRITE __write_csr
749 #define __RV_CSR_READ_SET __set_bits_csr
750 #define __RV_CSR_SET __set_bits_csr
751 #define __RV_CSR_READ_CLEAR __clear_bits_csr
752 #define __RV_CSR_CLEAR __clear_bits_csr
753 
754 #endif /* __ICCRISCV__ */
755 
756 #endif /* __ASSEMBLY__ */
757 
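/*
 * Usage sketch for the CSR access macros above (illustrative, not part of
 * core_feature_base.h). It assumes CSR_MSTATUS and MSTATUS_MIE are provided
 * by the riscv_encoding.h header used alongside NMSIS-Core, as in a typical
 * project setup.
 */
static void csr_macro_example(void)
{
    rv_csr_t mstatus = __RV_CSR_READ(CSR_MSTATUS);        /* plain csrr read */
    __RV_CSR_CLEAR(CSR_MSTATUS, MSTATUS_MIE);             /* csrc: globally mask M-mode interrupts */
    rv_csr_t old = __RV_CSR_READ_SET(CSR_MSTATUS, MSTATUS_MIE); /* csrrs: re-enable, return old value */
    (void)mstatus; (void)old;
}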
766 __STATIC_FORCEINLINE void __switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
767 {
768  unsigned long val = 0;
769 
770  /* Set MPP to the requested privilege mode */
771  val = __RV_CSR_READ(CSR_MSTATUS);
772  val = __RV_INSERT_FIELD(val, MSTATUS_MPP, mode);
773 
774  /* Set previous MIE disabled */
775  val = __RV_INSERT_FIELD(val, MSTATUS_MPIE, 0);
776 
777  __RV_CSR_WRITE(CSR_MSTATUS, val);
778 
779  /* Set the entry point in MEPC */
780  __RV_CSR_WRITE(CSR_MEPC, (unsigned long)entry_point);
781 
782  /* Set the register file */
783  __ASM volatile("mv sp, %0" ::"r"(stack));
784 
785  __ASM volatile("mret");
786 }
787 
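/*
 * Sketch of dropping from M-mode to U-mode with __switch_mode() (illustrative
 * only; user_entry and user_stack are hypothetical). The mode value 0 is the
 * architectural mstatus.MPP encoding for user mode.
 */
extern void user_entry(void);
static uint8_t user_stack[1024];

static void drop_to_user_mode(void)
{
    /* The stack grows downwards, so pass the top of the array as the stack pointer. */
    __switch_mode(0, (uintptr_t)(user_stack + sizeof(user_stack)), user_entry);
}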
794 __STATIC_FORCEINLINE void __enable_irq(void)
795 {
796  __RV_CSR_SET(CSR_MSTATUS, MSTATUS_MIE);
797 }
798 
805 __STATIC_FORCEINLINE void __disable_irq(void)
806 {
807  __RV_CSR_CLEAR(CSR_MSTATUS, MSTATUS_MIE);
808 }
809 
816 __STATIC_FORCEINLINE void __enable_ext_irq(void)
817 {
818  __RV_CSR_SET(CSR_MIE, MIE_MEIE);
819 }
820 
827 __STATIC_FORCEINLINE void __disable_ext_irq(void)
828 {
829  __RV_CSR_CLEAR(CSR_MIE, MIE_MEIE);
830 }
831 
838 __STATIC_FORCEINLINE void __enable_timer_irq(void)
839 {
840  __RV_CSR_SET(CSR_MIE, MIE_MTIE);
841 }
842 
849 __STATIC_FORCEINLINE void __disable_timer_irq(void)
850 {
851  __RV_CSR_CLEAR(CSR_MIE, MIE_MTIE);
852 }
853 
860 __STATIC_FORCEINLINE void __enable_sw_irq(void)
861 {
862  __RV_CSR_SET(CSR_MIE, MIE_MSIE);
863 }
864 
871 __STATIC_FORCEINLINE void __disable_sw_irq(void)
872 {
873  __RV_CSR_CLEAR(CSR_MIE, MIE_MSIE);
874 }
875 
882 __STATIC_FORCEINLINE void __disable_core_irq(uint32_t irq)
883 {
884  __RV_CSR_CLEAR(CSR_MIE, 1UL << irq);
885 }
886 
893 __STATIC_FORCEINLINE void __enable_core_irq(uint32_t irq)
894 {
895  __RV_CSR_SET(CSR_MIE, 1UL << irq);
896 }
897 
904 __STATIC_FORCEINLINE uint32_t __get_core_irq_pending(uint32_t irq)
905 {
906  return ((__RV_CSR_READ(CSR_MIP) >> irq) & 0x1);
907 }
908 
915 __STATIC_FORCEINLINE void __clear_core_irq_pending(uint32_t irq)
916 {
917  __RV_CSR_CLEAR(CSR_MIP, 1UL << irq);
918 }
919 
926 __STATIC_FORCEINLINE void __enable_irq_s(void)
927 {
928  __RV_CSR_SET(CSR_SSTATUS, SSTATUS_SIE);
929 }
930 
937 __STATIC_FORCEINLINE void __disable_irq_s(void)
938 {
939  __RV_CSR_CLEAR(CSR_SSTATUS, SSTATUS_SIE);
940 }
941 
948 __STATIC_FORCEINLINE void __enable_ext_irq_s(void)
949 {
950  __RV_CSR_SET(CSR_SIE, MIE_SEIE);
951 }
952 
959 __STATIC_FORCEINLINE void __disable_ext_irq_s(void)
960 {
961  __RV_CSR_CLEAR(CSR_SIE, MIE_SEIE);
962 }
963 
970 __STATIC_FORCEINLINE void __enable_timer_irq_s(void)
971 {
972  __RV_CSR_SET(CSR_SIE, MIE_STIE);
973 }
974 
981 __STATIC_FORCEINLINE void __disable_timer_irq_s(void)
982 {
983  __RV_CSR_CLEAR(CSR_SIE, MIE_STIE);
984 }
985 
992 __STATIC_FORCEINLINE void __enable_sw_irq_s(void)
993 {
994  __RV_CSR_SET(CSR_SIE, MIE_SSIE);
995 }
996 
1003 __STATIC_FORCEINLINE void __disable_sw_irq_s(void)
1004 {
1005  __RV_CSR_CLEAR(CSR_SIE, MIE_SSIE);
1006 }
1007 
1014 __STATIC_FORCEINLINE void __disable_core_irq_s(uint32_t irq)
1015 {
1016  __RV_CSR_CLEAR(CSR_SIE, 1UL << irq);
1017 }
1018 
1025 __STATIC_FORCEINLINE void __enable_core_irq_s(uint32_t irq)
1026 {
1027  __RV_CSR_SET(CSR_SIE, 1UL << irq);
1028 }
1029 
1036 __STATIC_FORCEINLINE uint32_t __get_core_irq_pending_s(uint32_t irq)
1037 {
1038  return ((__RV_CSR_READ(CSR_SIP) >> irq) & 0x1);
1039 }
1040 
1047 __STATIC_FORCEINLINE void __clear_core_irq_pending_s(uint32_t irq)
1048 {
1049  __RV_CSR_CLEAR(CSR_SIP, 1UL << irq);
1050 }
1051 
1058 __STATIC_FORCEINLINE uint64_t __get_rv_cycle(void)
1059 {
1060 #if __RISCV_XLEN == 32
1061  volatile uint32_t high0, low, high;
1062  uint64_t full;
1063 
1064  high0 = __RV_CSR_READ(CSR_MCYCLEH);
1065  low = __RV_CSR_READ(CSR_MCYCLE);
1066  high = __RV_CSR_READ(CSR_MCYCLEH);
1067  if (high0 != high) {
1068  low = __RV_CSR_READ(CSR_MCYCLE);
1069  }
1070  full = (((uint64_t)high) << 32) | low;
1071  return full;
1072 #elif __RISCV_XLEN == 64
1073  return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
1074 #else // TODO Need cover for XLEN=128 case in future
1075  return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
1076 #endif
1077 }
1078 
1084 __STATIC_FORCEINLINE void __set_rv_cycle(uint64_t cycle)
1085 {
1086 #if __RISCV_XLEN == 32
1087  __RV_CSR_WRITE(CSR_MCYCLE, 0); // prevent carry
1088  __RV_CSR_WRITE(CSR_MCYCLEH, (uint32_t)(cycle >> 32));
1089  __RV_CSR_WRITE(CSR_MCYCLE, (uint32_t)(cycle));
1090 #elif __RISCV_XLEN == 64
1091  __RV_CSR_WRITE(CSR_MCYCLE, cycle);
1092 #else // TODO Need cover for XLEN=128 case in future
1093 #endif
1094 }
1095 
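/*
 * Cycle-measurement sketch using the mcycle accessors above (illustrative
 * only; do_work is a hypothetical function being profiled).
 */
static uint64_t measure_cycles(void (*do_work)(void))
{
    uint64_t start = __get_rv_cycle();
    do_work();
    return __get_rv_cycle() - start;      /* elapsed core clock cycles */
}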
1102 __STATIC_FORCEINLINE uint64_t __get_rv_instret(void)
1103 {
1104 #if __RISCV_XLEN == 32
1105  volatile uint32_t high0, low, high;
1106  uint64_t full;
1107 
1108  high0 = __RV_CSR_READ(CSR_MINSTRETH);
1109  low = __RV_CSR_READ(CSR_MINSTRET);
1110  high = __RV_CSR_READ(CSR_MINSTRETH);
1111  if (high0 != high) {
1112  low = __RV_CSR_READ(CSR_MINSTRET);
1113  }
1114  full = (((uint64_t)high) << 32) | low;
1115  return full;
1116 #elif __RISCV_XLEN == 64
1117  return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
1118 #else // TODO Need cover for XLEN=128 case in future
1119  return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
1120 #endif
1121 }
1122 
1128 __STATIC_FORCEINLINE void __set_rv_instret(uint64_t instret)
1129 {
1130 #if __RISCV_XLEN == 32
1131  __RV_CSR_WRITE(CSR_MINSTRET, 0); // prevent carry
1132  __RV_CSR_WRITE(CSR_MINSTRETH, (uint32_t)(instret >> 32));
1133  __RV_CSR_WRITE(CSR_MINSTRET, (uint32_t)(instret));
1134 #elif __RISCV_XLEN == 64
1135  __RV_CSR_WRITE(CSR_MINSTRET, instret);
1136 #else // TODO Need cover for XLEN=128 case in future
1137 #endif
1138 }
1139 
1147 __STATIC_FORCEINLINE uint64_t __get_rv_time(void)
1148 {
1149 #if __RISCV_XLEN == 32
1150  volatile uint32_t high0, low, high;
1151  uint64_t full;
1152 
1153  high0 = __RV_CSR_READ(CSR_TIMEH);
1154  low = __RV_CSR_READ(CSR_TIME);
1155  high = __RV_CSR_READ(CSR_TIMEH);
1156  if (high0 != high) {
1157  low = __RV_CSR_READ(CSR_TIME);
1158  }
1159  full = (((uint64_t)high) << 32) | low;
1160  return full;
1161 #elif __RISCV_XLEN == 64
1162  return (uint64_t)__RV_CSR_READ(CSR_TIME);
1163 #else // TODO Need cover for XLEN=128 case in future
1164  return (uint64_t)__RV_CSR_READ(CSR_TIME);
1165 #endif
1166 }
1167 
1176 __STATIC_FORCEINLINE unsigned long __read_cycle_csr()
1177 {
1178  return __RV_CSR_READ(CSR_CYCLE);
1179 }
1180 
1189 __STATIC_FORCEINLINE unsigned long __read_instret_csr()
1190 {
1191  return __RV_CSR_READ(CSR_INSTRET);
1192 }
1193 
1202 __STATIC_FORCEINLINE unsigned long __read_time_csr()
1203 {
1204  return __RV_CSR_READ(CSR_TIME);
1205 }
1206 
1214 __STATIC_FORCEINLINE unsigned long __get_cluster_id(void)
1215 {
1216  unsigned long id;
1217 
1218  id = (__RV_CSR_READ(CSR_MHARTID) >> 8) & 0xFF;
1219  return id;
1220 }
1221 
1230 __STATIC_FORCEINLINE unsigned long __get_hart_index(void)
1231 {
1232  unsigned long id;
1233 #ifdef __HARTID_OFFSET
1234  id = __RV_CSR_READ(CSR_MHARTID) - __HARTID_OFFSET;
1235 #else
1236  id = __RV_CSR_READ(CSR_MHARTID);
1237 #endif
1238  return id;
1239 }
1240 
1250 __STATIC_FORCEINLINE unsigned long __get_hart_id(void)
1251 {
1252  unsigned long id;
1253  id = __RV_CSR_READ(CSR_MHARTID);
1254  return id;
1255 }
1256 
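/*
 * Sketch of hart id vs. hart index (illustrative only; MAX_HARTS is a
 * hypothetical hart count). With __HARTID_OFFSET defined as 2 on a system
 * whose first hart has mhartid 2, __get_hart_id() returns 2 for that hart
 * while __get_hart_index() returns 0, so the index is safe for array access.
 */
#define MAX_HARTS   4
static volatile uint32_t hart_ready[MAX_HARTS];

static void mark_current_hart_ready(void)
{
    hart_ready[__get_hart_index()] = 1;   /* zero-based regardless of hartid */
}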
1257 
1266 __STATIC_FORCEINLINE unsigned long __get_cluster_id_s(void)
1267 {
1268  unsigned long id;
1269 
1270  id = (__RV_CSR_READ(CSR_SHARTID) >> 8) & 0xFF;
1271  return id;
1272 }
1273 
1283 __STATIC_FORCEINLINE unsigned long __get_hart_index_s(void)
1284 {
1285  unsigned long id;
1286 #ifdef __HARTID_OFFSET
1287  id = __RV_CSR_READ(CSR_SHARTID) - __HARTID_OFFSET;
1288 #else
1289  id = __RV_CSR_READ(CSR_SHARTID);
1290 #endif
1291  return id;
1292 }
1293 
1304 __STATIC_FORCEINLINE unsigned long __get_hart_id_s(void)
1305 {
1306  unsigned long id;
1307  id = __RV_CSR_READ(CSR_SHARTID);
1308  return id;
1309 }
1310  /* End of Doxygen Group NMSIS_Core_CSR_Register_Access */
1312 
1313 /* ########################### CPU Intrinsic Functions ########################### */
1330 __STATIC_FORCEINLINE void __NOP(void)
1331 {
1332  __ASM volatile("nop");
1333 }
1334 
1344 __STATIC_FORCEINLINE void __WFI(void)
1345 {
1347  __ASM volatile("wfi");
1348 }
1349 
1357 __STATIC_FORCEINLINE void __WFE(void)
1358 {
1360  __ASM volatile("wfi");
1362 }
1363 
1371 __STATIC_FORCEINLINE void __EBREAK(void)
1372 {
1373  __ASM volatile("ebreak");
1374 }
1375 
1382 __STATIC_FORCEINLINE void __ECALL(void)
1383 {
1384  __ASM volatile("ecall");
1385 }
1386 
1390 typedef enum WFI_SleepMode {
1391  WFI_SHALLOW_SLEEP = 0,             /*!< Shallow sleep mode, the core_clk will poweroff */
1392  WFI_DEEP_SLEEP = 1                 /*!< Deep sleep mode, the core_clk and core_ano_clk will poweroff */
1393 } WFI_SleepMode_Type;
1394 
1402 __STATIC_FORCEINLINE void __set_wfi_sleepmode(WFI_SleepMode_Type mode)
1403 {
1404  __RV_CSR_WRITE(CSR_SLEEPVALUE, mode);
1405 }
1406 
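/*
 * Low-power sketch (illustrative only): select the deep sleep mode before
 * executing WFI; the hart resumes when an enabled interrupt arrives.
 */
static void enter_deep_sleep(void)
{
    __set_wfi_sleepmode(WFI_DEEP_SLEEP);
    __WFI();
}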
1413 __STATIC_FORCEINLINE void __TXEVT(void)
1414 {
1415  __RV_CSR_SET(CSR_TXEVT, 0x1);
1416 }
1417 
1423 __STATIC_FORCEINLINE void __enable_mcycle_counter(void)
1424 {
1425  __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
1426 }
1427 
1433 __STATIC_FORCEINLINE void __disable_mcycle_counter(void)
1434 {
1435  __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
1436 }
1437 
1443 __STATIC_FORCEINLINE void __enable_minstret_counter(void)
1444 {
1445  __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
1446 }
1447 
1453 __STATIC_FORCEINLINE void __disable_minstret_counter(void)
1454 {
1455  __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
1456 }
1457 
1464 __STATIC_FORCEINLINE void __enable_mhpm_counter(unsigned long idx)
1465 {
1466  __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, (1UL << idx));
1467 }
1468 
1475 __STATIC_FORCEINLINE void __disable_mhpm_counter(unsigned long idx)
1476 {
1477  __RV_CSR_SET(CSR_MCOUNTINHIBIT, (1UL << idx));
1478 }
1479 
1487 __STATIC_FORCEINLINE void __enable_mhpm_counters(unsigned long mask)
1488 {
1489  __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, mask);
1490 }
1491 
1499 __STATIC_FORCEINLINE void __disable_mhpm_counters(unsigned long mask)
1500 {
1501  __RV_CSR_SET(CSR_MCOUNTINHIBIT, mask);
1502 }
1503 
1510 __STATIC_FORCEINLINE void __enable_all_counter(void)
1511 {
1512  __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
1513 }
1514 
1521 __STATIC_FORCEINLINE void __disable_all_counter(void)
1522 {
1523  __RV_CSR_SET(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
1524 }
1525 
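/*
 * Profiling-setup sketch (illustrative only): release all counters from
 * mcountinhibit and restart the cycle/instret counts from zero.
 */
static void profiling_init(void)
{
    __enable_all_counter();
    __set_rv_cycle(0);
    __set_rv_instret(0);
}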
1533 __STATIC_FORCEINLINE void __set_hpm_event(unsigned long idx, unsigned long event)
1534 {
1535  switch (idx) {
1536  case 3: __RV_CSR_WRITE(CSR_MHPMEVENT3, event); break;
1537  case 4: __RV_CSR_WRITE(CSR_MHPMEVENT4, event); break;
1538  case 5: __RV_CSR_WRITE(CSR_MHPMEVENT5, event); break;
1539  case 6: __RV_CSR_WRITE(CSR_MHPMEVENT6, event); break;
1540  case 7: __RV_CSR_WRITE(CSR_MHPMEVENT7, event); break;
1541  case 8: __RV_CSR_WRITE(CSR_MHPMEVENT8, event); break;
1542  case 9: __RV_CSR_WRITE(CSR_MHPMEVENT9, event); break;
1543  case 10: __RV_CSR_WRITE(CSR_MHPMEVENT10, event); break;
1544  case 11: __RV_CSR_WRITE(CSR_MHPMEVENT11, event); break;
1545  case 12: __RV_CSR_WRITE(CSR_MHPMEVENT12, event); break;
1546  case 13: __RV_CSR_WRITE(CSR_MHPMEVENT13, event); break;
1547  case 14: __RV_CSR_WRITE(CSR_MHPMEVENT14, event); break;
1548  case 15: __RV_CSR_WRITE(CSR_MHPMEVENT15, event); break;
1549  case 16: __RV_CSR_WRITE(CSR_MHPMEVENT16, event); break;
1550  case 17: __RV_CSR_WRITE(CSR_MHPMEVENT17, event); break;
1551  case 18: __RV_CSR_WRITE(CSR_MHPMEVENT18, event); break;
1552  case 19: __RV_CSR_WRITE(CSR_MHPMEVENT19, event); break;
1553  case 20: __RV_CSR_WRITE(CSR_MHPMEVENT20, event); break;
1554  case 21: __RV_CSR_WRITE(CSR_MHPMEVENT21, event); break;
1555  case 22: __RV_CSR_WRITE(CSR_MHPMEVENT22, event); break;
1556  case 23: __RV_CSR_WRITE(CSR_MHPMEVENT23, event); break;
1557  case 24: __RV_CSR_WRITE(CSR_MHPMEVENT24, event); break;
1558  case 25: __RV_CSR_WRITE(CSR_MHPMEVENT25, event); break;
1559  case 26: __RV_CSR_WRITE(CSR_MHPMEVENT26, event); break;
1560  case 27: __RV_CSR_WRITE(CSR_MHPMEVENT27, event); break;
1561  case 28: __RV_CSR_WRITE(CSR_MHPMEVENT28, event); break;
1562  case 29: __RV_CSR_WRITE(CSR_MHPMEVENT29, event); break;
1563  case 30: __RV_CSR_WRITE(CSR_MHPMEVENT30, event); break;
1564  case 31: __RV_CSR_WRITE(CSR_MHPMEVENT31, event); break;
1565  default: break;
1566  }
1567 }
1568 
1577 __STATIC_FORCEINLINE unsigned long __get_hpm_event(unsigned long idx)
1578 {
1579  switch (idx) {
1580  case 3: return __RV_CSR_READ(CSR_MHPMEVENT3);
1581  case 4: return __RV_CSR_READ(CSR_MHPMEVENT4);
1582  case 5: return __RV_CSR_READ(CSR_MHPMEVENT5);
1583  case 6: return __RV_CSR_READ(CSR_MHPMEVENT6);
1584  case 7: return __RV_CSR_READ(CSR_MHPMEVENT7);
1585  case 8: return __RV_CSR_READ(CSR_MHPMEVENT8);
1586  case 9: return __RV_CSR_READ(CSR_MHPMEVENT9);
1587  case 10: return __RV_CSR_READ(CSR_MHPMEVENT10);
1588  case 11: return __RV_CSR_READ(CSR_MHPMEVENT11);
1589  case 12: return __RV_CSR_READ(CSR_MHPMEVENT12);
1590  case 13: return __RV_CSR_READ(CSR_MHPMEVENT13);
1591  case 14: return __RV_CSR_READ(CSR_MHPMEVENT14);
1592  case 15: return __RV_CSR_READ(CSR_MHPMEVENT15);
1593  case 16: return __RV_CSR_READ(CSR_MHPMEVENT16);
1594  case 17: return __RV_CSR_READ(CSR_MHPMEVENT17);
1595  case 18: return __RV_CSR_READ(CSR_MHPMEVENT18);
1596  case 19: return __RV_CSR_READ(CSR_MHPMEVENT19);
1597  case 20: return __RV_CSR_READ(CSR_MHPMEVENT20);
1598  case 21: return __RV_CSR_READ(CSR_MHPMEVENT21);
1599  case 22: return __RV_CSR_READ(CSR_MHPMEVENT22);
1600  case 23: return __RV_CSR_READ(CSR_MHPMEVENT23);
1601  case 24: return __RV_CSR_READ(CSR_MHPMEVENT24);
1602  case 25: return __RV_CSR_READ(CSR_MHPMEVENT25);
1603  case 26: return __RV_CSR_READ(CSR_MHPMEVENT26);
1604  case 27: return __RV_CSR_READ(CSR_MHPMEVENT27);
1605  case 28: return __RV_CSR_READ(CSR_MHPMEVENT28);
1606  case 29: return __RV_CSR_READ(CSR_MHPMEVENT29);
1607  case 30: return __RV_CSR_READ(CSR_MHPMEVENT30);
1608  case 31: return __RV_CSR_READ(CSR_MHPMEVENT31);
1609  default: return 0;
1610  }
1611 }
1612 
1620 __STATIC_FORCEINLINE void __set_hpm_counter(unsigned long idx, uint64_t value)
1621 {
1622  switch (idx) {
1623 #if __RISCV_XLEN == 32
1624  case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, 0); // prevent carry
1625  __RV_CSR_WRITE(CSR_MHPMCOUNTER3H, (uint32_t)(value >> 32));
1626  __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (uint32_t)(value)); break;
1627  case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, 0); // prevent carry
1628  __RV_CSR_WRITE(CSR_MHPMCOUNTER4H, (uint32_t)(value >> 32));
1629  __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (uint32_t)(value)); break;
1630  case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, 0); // prevent carry
1631  __RV_CSR_WRITE(CSR_MHPMCOUNTER5H, (uint32_t)(value >> 32));
1632  __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (uint32_t)(value)); break;
1633  case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, 0); // prevent carry
1634  __RV_CSR_WRITE(CSR_MHPMCOUNTER6H, (uint32_t)(value >> 32));
1635  __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (uint32_t)(value)); break;
1636  case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, 0); // prevent carry
1637  __RV_CSR_WRITE(CSR_MHPMCOUNTER7H, (uint32_t)(value >> 32));
1638  __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (uint32_t)(value)); break;
1639  case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, 0); // prevent carry
1640  __RV_CSR_WRITE(CSR_MHPMCOUNTER8H, (uint32_t)(value >> 32));
1641  __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (uint32_t)(value)); break;
1642  case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, 0); // prevent carry
1643  __RV_CSR_WRITE(CSR_MHPMCOUNTER9H, (uint32_t)(value >> 32));
1644  __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (uint32_t)(value)); break;
1645  case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, 0); // prevent carry
1646  __RV_CSR_WRITE(CSR_MHPMCOUNTER10H, (uint32_t)(value >> 32));
1647  __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (uint32_t)(value)); break;
1648  case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, 0); // prevent carry
1649  __RV_CSR_WRITE(CSR_MHPMCOUNTER11H, (uint32_t)(value >> 32));
1650  __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (uint32_t)(value)); break;
1651  case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, 0); // prevent carry
1652  __RV_CSR_WRITE(CSR_MHPMCOUNTER12H, (uint32_t)(value >> 32));
1653  __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (uint32_t)(value)); break;
1654  case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, 0); // prevent carry
1655  __RV_CSR_WRITE(CSR_MHPMCOUNTER13H, (uint32_t)(value >> 32));
1656  __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (uint32_t)(value)); break;
1657  case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, 0); // prevent carry
1658  __RV_CSR_WRITE(CSR_MHPMCOUNTER14H, (uint32_t)(value >> 32));
1659  __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (uint32_t)(value)); break;
1660  case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, 0); // prevent carry
1661  __RV_CSR_WRITE(CSR_MHPMCOUNTER15H, (uint32_t)(value >> 32));
1662  __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (uint32_t)(value)); break;
1663  case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, 0); // prevent carry
1664  __RV_CSR_WRITE(CSR_MHPMCOUNTER16H, (uint32_t)(value >> 32));
1665  __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (uint32_t)(value)); break;
1666  case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, 0); // prevent carry
1667  __RV_CSR_WRITE(CSR_MHPMCOUNTER17H, (uint32_t)(value >> 32));
1668  __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (uint32_t)(value)); break;
1669  case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, 0); // prevent carry
1670  __RV_CSR_WRITE(CSR_MHPMCOUNTER18H, (uint32_t)(value >> 32));
1671  __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (uint32_t)(value)); break;
1672  case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, 0); // prevent carry
1673  __RV_CSR_WRITE(CSR_MHPMCOUNTER19H, (uint32_t)(value >> 32));
1674  __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (uint32_t)(value)); break;
1675  case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, 0); // prevent carry
1676  __RV_CSR_WRITE(CSR_MHPMCOUNTER20H, (uint32_t)(value >> 32));
1677  __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (uint32_t)(value)); break;
1678  case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, 0); // prevent carry
1679  __RV_CSR_WRITE(CSR_MHPMCOUNTER21H, (uint32_t)(value >> 32));
1680  __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (uint32_t)(value)); break;
1681  case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, 0); // prevent carry
1682  __RV_CSR_WRITE(CSR_MHPMCOUNTER22H, (uint32_t)(value >> 32));
1683  __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (uint32_t)(value)); break;
1684  case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, 0); // prevent carry
1685  __RV_CSR_WRITE(CSR_MHPMCOUNTER23H, (uint32_t)(value >> 32));
1686  __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (uint32_t)(value)); break;
1687  case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, 0); // prevent carry
1688  __RV_CSR_WRITE(CSR_MHPMCOUNTER24H, (uint32_t)(value >> 32));
1689  __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (uint32_t)(value)); break;
1690  case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, 0); // prevent carry
1691  __RV_CSR_WRITE(CSR_MHPMCOUNTER25H, (uint32_t)(value >> 32));
1692  __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (uint32_t)(value)); break;
1693  case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, 0); // prevent carry
1694  __RV_CSR_WRITE(CSR_MHPMCOUNTER26H, (uint32_t)(value >> 32));
1695  __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (uint32_t)(value)); break;
1696  case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, 0); // prevent carry
1697  __RV_CSR_WRITE(CSR_MHPMCOUNTER27H, (uint32_t)(value >> 32));
1698  __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (uint32_t)(value)); break;
1699  case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, 0); // prevent carry
1700  __RV_CSR_WRITE(CSR_MHPMCOUNTER28H, (uint32_t)(value >> 32));
1701  __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (uint32_t)(value)); break;
1702  case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, 0); // prevent carry
1703  __RV_CSR_WRITE(CSR_MHPMCOUNTER29H, (uint32_t)(value >> 32));
1704  __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (uint32_t)(value)); break;
1705  case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, 0); // prevent carry
1706  __RV_CSR_WRITE(CSR_MHPMCOUNTER30H, (uint32_t)(value >> 32));
1707  __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (uint32_t)(value)); break;
1708  case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, 0); // prevent carry
1709  __RV_CSR_WRITE(CSR_MHPMCOUNTER31H, (uint32_t)(value >> 32));
1710  __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (uint32_t)(value)); break;
1711 
1712 #elif __RISCV_XLEN == 64
1713  case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (value)); break;
1714  case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (value)); break;
1715  case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (value)); break;
1716  case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (value)); break;
1717  case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (value)); break;
1718  case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (value)); break;
1719  case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (value)); break;
1720  case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (value)); break;
1721  case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (value)); break;
1722  case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (value)); break;
1723  case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (value)); break;
1724  case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (value)); break;
1725  case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (value)); break;
1726  case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (value)); break;
1727  case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (value)); break;
1728  case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (value)); break;
1729  case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (value)); break;
1730  case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (value)); break;
1731  case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (value)); break;
1732  case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (value)); break;
1733  case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (value)); break;
1734  case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (value)); break;
1735  case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (value)); break;
1736  case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (value)); break;
1737  case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (value)); break;
1738  case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (value)); break;
1739  case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (value)); break;
1740  case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (value)); break;
1741  case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (value)); break;
1742 
1743 #else
1744 #endif
1745  default: break;
1746  }
1747 }
1748 
1756 __STATIC_FORCEINLINE uint64_t __get_hpm_counter(unsigned long idx)
1757 {
1758 #if __RISCV_XLEN == 32
1759  volatile uint32_t high0, low, high;
1760  uint64_t full;
1761 
1762  switch (idx) {
1763  case 0: return __get_rv_cycle();
1764  case 2: return __get_rv_instret();
1765  case 3: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER3H);
1768  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER3); }
1769  full = (((uint64_t)high) << 32) | low; return full;
1770  case 4: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER4H);
1773  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER4); }
1774  full = (((uint64_t)high) << 32) | low; return full;
1775  case 5: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER5H);
1778  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER5); }
1779  full = (((uint64_t)high) << 32) | low; return full;
1780  case 6: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER6H);
1783  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER6); }
1784  full = (((uint64_t)high) << 32) | low; return full;
1785  case 7: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER7H);
1788  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER7); }
1789  full = (((uint64_t)high) << 32) | low; return full;
1790  case 8: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER8H);
1793  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER8); }
1794  full = (((uint64_t)high) << 32) | low; return full;
1795  case 9: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER9H);
1798  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER9); }
1799  full = (((uint64_t)high) << 32) | low; return full;
1800  case 10: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER10H);
1803  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER10); }
1804  full = (((uint64_t)high) << 32) | low; return full;
1805  case 11: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER11H);
1808  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER11); }
1809  full = (((uint64_t)high) << 32) | low; return full;
1810  case 12: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER12H);
1813  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER12); }
1814  full = (((uint64_t)high) << 32) | low; return full;
1815  case 13: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER13H);
1818  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER13); }
1819  full = (((uint64_t)high) << 32) | low; return full;
1820  case 14: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER14H);
1823  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER14); }
1824  full = (((uint64_t)high) << 32) | low; return full;
1825  case 15: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER15H);
1828  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER15); }
1829  full = (((uint64_t)high) << 32) | low; return full;
1830  case 16: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER16H);
1833  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER16); }
1834  full = (((uint64_t)high) << 32) | low; return full;
1835  case 17: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER17H);
1838  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER17); }
1839  full = (((uint64_t)high) << 32) | low; return full;
1840  case 18: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER18H);
1843  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER18); }
1844  full = (((uint64_t)high) << 32) | low; return full;
1845  case 19: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER19H);
1848  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER19); }
1849  full = (((uint64_t)high) << 32) | low; return full;
1850  case 20: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER20H);
1853  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER20); }
1854  full = (((uint64_t)high) << 32) | low; return full;
1855  case 21: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER21H);
1858  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER21); }
1859  full = (((uint64_t)high) << 32) | low; return full;
1860  case 22: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER22H);
1863  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER22); }
1864  full = (((uint64_t)high) << 32) | low; return full;
1865  case 23: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER23H);
1868  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER23); }
1869  full = (((uint64_t)high) << 32) | low; return full;
1870  case 24: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER24H);
1873  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER24); }
1874  full = (((uint64_t)high) << 32) | low; return full;
1875  case 25: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER25H);
1878  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER25); }
1879  full = (((uint64_t)high) << 32) | low; return full;
1880  case 26: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER26H);
1883  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER26); }
1884  full = (((uint64_t)high) << 32) | low; return full;
1885  case 27: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER27H);
1888  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER27); }
1889  full = (((uint64_t)high) << 32) | low; return full;
1890  case 28: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER28H);
1893  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER28); }
1894  full = (((uint64_t)high) << 32) | low; return full;
1895  case 29: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER29H);
1898  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER29); }
1899  full = (((uint64_t)high) << 32) | low; return full;
1900  case 30: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER30H);
1903  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER30); }
1904  full = (((uint64_t)high) << 32) | low; return full;
1905  case 31: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER31H);
1908  if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER31); }
1909  full = (((uint64_t)high) << 32) | low; return full;
1910 
1911 #elif __RISCV_XLEN == 64
1912  switch (idx) {
1913  case 0: return __get_rv_cycle();
1914  case 2: return __get_rv_instret();
1915  case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
1916  case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
1917  case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
1918  case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
1919  case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
1920  case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
1921  case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
1922  case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
1923  case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
1924  case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
1925  case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
1926  case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
1927  case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
1928  case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
1929  case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
1930  case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
1931  case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
1932  case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
1933  case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
1934  case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
1935  case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
1936  case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
1937  case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
1938  case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
1939  case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
1940  case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
1941  case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
1942  case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
1943  case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
1944 
1945 #else
1946  switch (idx) {
1947 #endif
1948  default: return 0;
1949  }
1950 }
1951 
1960 __STATIC_FORCEINLINE unsigned long __read_hpm_counter(unsigned long idx)
1961 {
1962  switch (idx) {
1963  case 0: return __read_cycle_csr();
1964  case 2: return __read_instret_csr();
1965  case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
1966  case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
1967  case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
1968  case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
1969  case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
1970  case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
1971  case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
1972  case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
1973  case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
1974  case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
1975  case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
1976  case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
1977  case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
1978  case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
1979  case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
1980  case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
1981  case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
1982  case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
1983  case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
1984  case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
1985  case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
1986  case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
1987  case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
1988  case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
1989  case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
1990  case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
1991  case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
1992  case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
1993  case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
1994  default: return 0;
1995  }
1996 }
1997 
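/*
 * HPM sketch (illustrative only): program event counter 3, run a workload,
 * then read the count. HPM_EVENT_EXAMPLE is a placeholder; real event
 * encodings are core-specific and come from the Nuclei databook.
 */
#define HPM_EVENT_EXAMPLE   0x01    /* hypothetical event selector */

static uint64_t count_example_event(void (*workload)(void))
{
    __set_hpm_event(3, HPM_EVENT_EXAMPLE);  /* choose what mhpmcounter3 counts */
    __set_hpm_counter(3, 0);                /* clear the counter */
    __enable_mhpm_counter(3);               /* un-inhibit it */
    workload();
    return __get_hpm_counter(3);
}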
2005 __STATIC_FORCEINLINE void __set_medeleg(unsigned long mask)
2006 {
2007  __RV_CSR_WRITE(CSR_MEDELEG, mask);
2008 }
2009 
2017 __STATIC_FORCEINLINE void __set_mideleg(unsigned long mask)
2018 {
2019  __RV_CSR_WRITE(CSR_MIDELEG, mask);
2020 }
2021 
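/*
 * Delegation sketch (illustrative only): hand the supervisor software, timer
 * and external interrupts down to S-mode. Bits 1, 5 and 9 are the
 * architectural SSIP/STIP/SEIP positions used by mideleg.
 */
static void delegate_s_mode_interrupts(void)
{
    __set_mideleg((1UL << 1) | (1UL << 5) | (1UL << 9));
}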
2032 #define __FENCE(p, s) __ASM volatile ("fence " #p "," #s : : : "memory")
2033 
2040 __STATIC_FORCEINLINE void __FENCE_I(void)
2041 {
2042  __ASM volatile("fence.i");
2043 }
2044 
2046 #define __RWMB() __FENCE(iorw,iorw)
2047 
2049 #define __RMB() __FENCE(ir,ir)
2050 
2052 #define __WMB() __FENCE(ow,ow)
2053 
2055 #define __SMP_RWMB() __FENCE(rw,rw)
2056 
2058 #define __SMP_RMB() __FENCE(r,r)
2059 
2061 #define __SMP_WMB() __FENCE(w,w)
2062 
2064 #define __CPU_RELAX() __ASM volatile ("" : : : "memory")
2065 
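/*
 * Ordering sketch for the barrier macros above (illustrative only): the
 * producer publishes data before raising a flag, the consumer spins on the
 * flag and then reads the data.
 */
static volatile uint32_t shared_data, shared_flag;

static void producer(uint32_t value)
{
    shared_data = value;
    __SMP_WMB();                 /* make the data visible before the flag */
    shared_flag = 1;
}

static uint32_t consumer(void)
{
    while (shared_flag == 0) {
        __CPU_RELAX();           /* compiler barrier: forces the flag to be re-read */
    }
    __SMP_RMB();                 /* order the flag read before the data read */
    return shared_data;
}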
2066 
2067 /* ===== Load/Store Operations ===== */
2074 __STATIC_FORCEINLINE uint8_t __LB(volatile void *addr)
2075 {
2076  uint8_t result;
2077 
2078  __ASM volatile ("lb %0, 0(%1)" : "=r" (result) : "r" (addr));
2079  return result;
2080 }
2081 
2088 __STATIC_FORCEINLINE uint16_t __LH(volatile void *addr)
2089 {
2090  uint16_t result;
2091 
2092  __ASM volatile ("lh %0, 0(%1)" : "=r" (result) : "r" (addr));
2093  return result;
2094 }
2095 
2102 __STATIC_FORCEINLINE uint32_t __LW(volatile void *addr)
2103 {
2104  uint32_t result;
2105 
2106  __ASM volatile ("lw %0, 0(%1)" : "=r" (result) : "r" (addr));
2107  return result;
2108 }
2109 
2110 #if __RISCV_XLEN != 32
2118 __STATIC_FORCEINLINE uint64_t __LD(volatile void *addr)
2119 {
2120  uint64_t result;
2121  __ASM volatile ("ld %0, 0(%1)" : "=r" (result) : "r" (addr));
2122  return result;
2123 }
2124 #endif
2125 
2132 __STATIC_FORCEINLINE void __SB(volatile void *addr, uint8_t val)
2133 {
2134  __ASM volatile ("sb %0, 0(%1)" : : "r" (val), "r" (addr));
2135 }
2136 
2143 __STATIC_FORCEINLINE void __SH(volatile void *addr, uint16_t val)
2144 {
2145  __ASM volatile ("sh %0, 0(%1)" : : "r" (val), "r" (addr));
2146 }
2147 
2154 __STATIC_FORCEINLINE void __SW(volatile void *addr, uint32_t val)
2155 {
2156  __ASM volatile ("sw %0, 0(%1)" : : "r" (val), "r" (addr));
2157 }
2158 
2159 #if __RISCV_XLEN != 32
2166 __STATIC_FORCEINLINE void __SD(volatile void *addr, uint64_t val)
2167 {
2168  __ASM volatile ("sd %0, 0(%1)" : : "r" (val), "r" (addr));
2169 }
2170 #endif
2171 
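/*
 * MMIO sketch for the load/store helpers above (illustrative only;
 * DEMO_REG_ADDR is a hypothetical device register address).
 */
#define DEMO_REG_ADDR   ((volatile void *)0x10010000UL)

static void mmio_example(void)
{
    uint32_t status = __LW(DEMO_REG_ADDR);   /* 32-bit register read */
    __SW(DEMO_REG_ADDR, status | 0x1U);      /* write back with bit 0 set */
}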
2183 __STATIC_FORCEINLINE uint32_t __CAS_W(volatile uint32_t *addr, uint32_t oldval, uint32_t newval)
2184 {
2185  uint32_t result;
2186  uint32_t rc;
2187 
2188  __ASM volatile ( \
2189  "0: lr.w %0, %2 \n" \
2190  " bne %0, %z3, 1f \n" \
2191  " sc.w %1, %z4, %2 \n" \
2192  " bnez %1, 0b \n" \
2193  "1:\n" \
2194  : "=&r"(result), "=&r"(rc), "+A"(*addr) \
2195  : "r"(oldval), "r"(newval) \
2196  : "memory");
2197  return result;
2198 }
2199 
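/*
 * Spinlock sketch built on __CAS_W (illustrative only): 0 means unlocked and
 * 1 means locked; __CAS_W returns the previous value, so a non-zero return
 * means the lock was already held and the caller retries.
 */
static volatile uint32_t demo_lock = 0;

static void demo_lock_acquire(void)
{
    while (__CAS_W(&demo_lock, 0, 1) != 0) {
        __CPU_RELAX();
    }
    __SMP_RWMB();        /* keep the critical section after the acquisition */
}

static void demo_lock_release(void)
{
    __SMP_RWMB();        /* flush the critical section before unlocking */
    demo_lock = 0;
}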
2207 __STATIC_FORCEINLINE uint32_t __AMOSWAP_W(volatile uint32_t *addr, uint32_t newval)
2208 {
2209  uint32_t result;
2210 
2211  __ASM volatile ("amoswap.w %0, %2, %1" : \
2212  "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
2213  return result;
2214 }
2215 
2223 __STATIC_FORCEINLINE int32_t __AMOADD_W(volatile int32_t *addr, int32_t value)
2224 {
2225  int32_t result;
2226 
2227  __ASM volatile ("amoadd.w %0, %2, %1" : \
2228  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2229  return *addr;
2230 }
2231 
2239 __STATIC_FORCEINLINE int32_t __AMOAND_W(volatile int32_t *addr, int32_t value)
2240 {
2241  int32_t result;
2242 
2243  __ASM volatile ("amoand.w %0, %2, %1" : \
2244  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2245  return *addr;
2246 }
2247 
2255 __STATIC_FORCEINLINE int32_t __AMOOR_W(volatile int32_t *addr, int32_t value)
2256 {
2257  int32_t result;
2258 
2259  __ASM volatile ("amoor.w %0, %2, %1" : \
2260  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2261  return *addr;
2262 }
2263 
2271 __STATIC_FORCEINLINE int32_t __AMOXOR_W(volatile int32_t *addr, int32_t value)
2272 {
2273  int32_t result;
2274 
2275  __ASM volatile ("amoxor.w %0, %2, %1" : \
2276  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2277  return *addr;
2278 }
2279 
2287 __STATIC_FORCEINLINE uint32_t __AMOMAXU_W(volatile uint32_t *addr, uint32_t value)
2288 {
2289  uint32_t result;
2290 
2291  __ASM volatile ("amomaxu.w %0, %2, %1" : \
2292  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2293  return *addr;
2294 }
2295 
2303 __STATIC_FORCEINLINE int32_t __AMOMAX_W(volatile int32_t *addr, int32_t value)
2304 {
2305  int32_t result;
2306 
2307  __ASM volatile ("amomax.w %0, %2, %1" : \
2308  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2309  return *addr;
2310 }
2311 
2319 __STATIC_FORCEINLINE uint32_t __AMOMINU_W(volatile uint32_t *addr, uint32_t value)
2320 {
2321  uint32_t result;
2322 
2323  __ASM volatile ("amominu.w %0, %2, %1" : \
2324  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2325  return *addr;
2326 }
2327 
2335 __STATIC_FORCEINLINE int32_t __AMOMIN_W(volatile int32_t *addr, int32_t value)
2336 {
2337  int32_t result;
2338 
2339  __ASM volatile ("amomin.w %0, %2, %1" : \
2340  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2341  return *addr;
2342 }
2343 
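/*
 * Shared-counter sketch for the 32-bit AMO helpers (illustrative only). Note
 * that, as implemented above, __AMOADD_W and __AMOMAXU_W return the memory
 * value after the operation rather than the old value.
 */
static volatile int32_t event_count = 0;
static volatile uint32_t high_water = 0;

static void record_event(uint32_t level)
{
    (void)__AMOADD_W(&event_count, 1);       /* atomically increment the counter */
    (void)__AMOMAXU_W(&high_water, level);   /* keep the largest level observed */
}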
2344 #if __RISCV_XLEN == 64
2356 __STATIC_FORCEINLINE uint64_t __CAS_D(volatile uint64_t *addr, uint64_t oldval, uint64_t newval)
2357 {
2358  uint64_t result;
2359  uint64_t rc;
2360 
2361  __ASM volatile ( \
2362  "0: lr.d %0, %2 \n" \
2363  " bne %0, %z3, 1f \n" \
2364  " sc.d %1, %z4, %2 \n" \
2365  " bnez %1, 0b \n" \
2366  "1:\n" \
2367  : "=&r"(result), "=&r"(rc), "+A"(*addr) \
2368  : "r"(oldval), "r"(newval) \
2369  : "memory");
2370  return result;
2371 }
2372 
2380 __STATIC_FORCEINLINE uint64_t __AMOSWAP_D(volatile uint64_t *addr, uint64_t newval)
2381 {
2382  uint64_t result;
2383 
2384  __ASM volatile ("amoswap.d %0, %2, %1" : \
2385  "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
2386  return result;
2387 }
2388 
2396 __STATIC_FORCEINLINE int64_t __AMOADD_D(volatile int64_t *addr, int64_t value)
2397 {
2398  int64_t result;
2399 
2400  __ASM volatile ("amoadd.d %0, %2, %1" : \
2401  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2402  return *addr;
2403 }
2404 
2412 __STATIC_FORCEINLINE int64_t __AMOAND_D(volatile int64_t *addr, int64_t value)
2413 {
2414  int64_t result;
2415 
2416  __ASM volatile ("amoand.d %0, %2, %1" : \
2417  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2418  return *addr;
2419 }
2420 
2428 __STATIC_FORCEINLINE int64_t __AMOOR_D(volatile int64_t *addr, int64_t value)
2429 {
2430  int64_t result;
2431 
2432  __ASM volatile ("amoor.d %0, %2, %1" : \
2433  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2434  return *addr;
2435 }
2436 
2444 __STATIC_FORCEINLINE int64_t __AMOXOR_D(volatile int64_t *addr, int64_t value)
2445 {
2446  int64_t result;
2447 
2448  __ASM volatile ("amoxor.d %0, %2, %1" : \
2449  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2450  return *addr;
2451 }
2452 
2460 __STATIC_FORCEINLINE uint64_t __AMOMAXU_D(volatile uint64_t *addr, uint64_t value)
2461 {
2462  uint64_t result;
2463 
2464  __ASM volatile ("amomaxu.d %0, %2, %1" : \
2465  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2466  return *addr;
2467 }
2468 
2476 __STATIC_FORCEINLINE int64_t __AMOMAX_D(volatile int64_t *addr, int64_t value)
2477 {
2478  int64_t result;
2479 
2480  __ASM volatile ("amomax.d %0, %2, %1" : \
2481  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2482  return *addr;
2483 }
2484 
2492 __STATIC_FORCEINLINE uint64_t __AMOMINU_D(volatile uint64_t *addr, uint64_t value)
2493 {
2494  uint64_t result;
2495 
2496  __ASM volatile ("amominu.d %0, %2, %1" : \
2497  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2498  return *addr;
2499 }
2500 
2508 __STATIC_FORCEINLINE int64_t __AMOMIN_D(volatile int64_t *addr, int64_t value)
2509 {
2510  int64_t result;
2511 
2512  __ASM volatile ("amomin.d %0, %2, %1" : \
2513  "=r"(result), "+A"(*addr) : "r"(value) : "memory");
2514  return *addr;
2515 }
2516 #endif /* __RISCV_XLEN == 64 */
2517  /* End of Doxygen Group NMSIS_Core_CPU_Intrinsic */
2519 
2520 #ifdef __cplusplus
2521 }
2522 #endif
2523 #endif /* __CORE_FEATURE_BASE__ */
CSR_MCACHECTL_Type CSR_MCACHE_CTL_Type
CSR_MPPICFGINFO_Type CSR_MPPICFG_INFO_Type
CSR_MECCCODE_Type CSR_MECC_CODE_Type
CSR_MDLMCTL_Type CSR_DILM_CTL_Type
CSR_MTLBCFGINFO_Type CSR_MTLBCFG_INFO_Type
CSR_MMISCCTRL_Type CSR_MMISCCTL_Type
CSR_MECCLOCK_Type CSR_MECC_LOCK_Type
CSR_MCFGINFO_Type CSR_MCFG_INFO_Type
CSR_MICFGINFO_Type CSR_MICFG_INFO_Type
CSR_MDCFGINFO_Type CSR_MDCFG_INFO_Type
CSR_MILMCTL_Type CSR_MILM_CTL_Type
CSR_MMISCCTRL_Type CSR_MMISC_CTL_Type
CSR_MFIOCFGINFO_Type CSR_MFIOCFG_INFO_Type
__STATIC_FORCEINLINE uint16_t __LH(volatile void *addr)
Load 16bit value from address (16 bit)
__STATIC_FORCEINLINE void __SH(volatile void *addr, uint16_t val)
Write 16bit value to address (16 bit)
__STATIC_FORCEINLINE int32_t __AMOMAX_W(volatile int32_t *addr, int32_t value)
Atomic signed MAX with 32bit value.
__STATIC_FORCEINLINE void __disable_all_counter(void)
Disable all MCYCLE & MINSTRET & MHPMCOUNTER counter.
__STATIC_FORCEINLINE unsigned long __get_hpm_event(unsigned long idx)
Get event for selected high performance monitor event.
__STATIC_FORCEINLINE void __set_wfi_sleepmode(WFI_SleepMode_Type mode)
Set Sleep mode of WFI.
__STATIC_FORCEINLINE void __enable_all_counter(void)
Enable all MCYCLE & MINSTRET & MHPMCOUNTER counter.
__STATIC_FORCEINLINE uint64_t __get_hpm_counter(unsigned long idx)
Get value of selected high performance monitor counter.
__STATIC_FORCEINLINE void __EBREAK(void)
Breakpoint Instruction.
__STATIC_FORCEINLINE void __NOP(void)
NOP Instruction.
__STATIC_FORCEINLINE void __disable_mhpm_counters(unsigned long mask)
Disable hardware performance counters with mask.
__STATIC_FORCEINLINE void __enable_mhpm_counters(unsigned long mask)
Enable hardware performance counters with mask.
__STATIC_FORCEINLINE void __set_mideleg(unsigned long mask)
Set interrupt delegation to S mode.
__STATIC_FORCEINLINE void __disable_mhpm_counter(unsigned long idx)
Disable selected hardware performance monitor counter.
__STATIC_FORCEINLINE void __enable_mhpm_counter(unsigned long idx)
Enable selected hardware performance monitor counter.
__STATIC_FORCEINLINE void __FENCE_I(void)
Fence.i Instruction.
WFI_SleepMode_Type
WFI Sleep Mode enumeration.
__STATIC_FORCEINLINE void __ECALL(void)
Environment Call Instruction.
__STATIC_FORCEINLINE uint32_t __AMOSWAP_W(volatile uint32_t *addr, uint32_t newval)
Atomic Swap 32bit value into memory.
__STATIC_FORCEINLINE int32_t __AMOXOR_W(volatile int32_t *addr, int32_t value)
Atomic XOR with 32bit value.
__STATIC_FORCEINLINE uint32_t __AMOMINU_W(volatile uint32_t *addr, uint32_t value)
Atomic unsigned MIN with 32bit value.
__STATIC_FORCEINLINE uint32_t __AMOMAXU_W(volatile uint32_t *addr, uint32_t value)
Atomic unsigned MAX with 32bit value.
__STATIC_FORCEINLINE uint8_t __LB(volatile void *addr)
Load 8bit value from address (8 bit)
__STATIC_FORCEINLINE void __SB(volatile void *addr, uint8_t val)
Write 8bit value to address (8 bit)
__STATIC_FORCEINLINE void __set_hpm_event(unsigned long idx, unsigned long event)
Set event for selected high performance monitor event.
__STATIC_FORCEINLINE void __WFI(void)
Wait For Interrupt.
__STATIC_FORCEINLINE void __set_medeleg(unsigned long mask)
Set exceptions delegation to S mode.
__STATIC_FORCEINLINE uint32_t __CAS_W(volatile uint32_t *addr, uint32_t oldval, uint32_t newval)
Compare and Swap 32bit value using LR and SC.
__STATIC_FORCEINLINE int32_t __AMOAND_W(volatile int32_t *addr, int32_t value)
Atomic And with 32bit value.
__STATIC_FORCEINLINE void __SW(volatile void *addr, uint32_t val)
Write 32bit value to address (32 bit)
__STATIC_FORCEINLINE void __TXEVT(void)
Send TX Event.
__STATIC_FORCEINLINE int32_t __AMOOR_W(volatile int32_t *addr, int32_t value)
Atomic OR with 32bit value.
__STATIC_FORCEINLINE unsigned long __read_hpm_counter(unsigned long idx)
Get value of selected high performance monitor counter.
__STATIC_FORCEINLINE int32_t __AMOADD_W(volatile int32_t *addr, int32_t value)
Atomic Add with 32bit value.
__STATIC_FORCEINLINE void __WFE(void)
Wait For Event.
__STATIC_FORCEINLINE void __set_hpm_counter(unsigned long idx, uint64_t value)
Set value for selected high performance monitor counter.
__STATIC_FORCEINLINE void __enable_mcycle_counter(void)
Enable MCYCLE counter.
__STATIC_FORCEINLINE int32_t __AMOMIN_W(volatile int32_t *addr, int32_t value)
Atomic signed MIN with 32bit value.
__STATIC_FORCEINLINE void __disable_minstret_counter(void)
Disable MINSTRET counter.
__STATIC_FORCEINLINE void __enable_minstret_counter(void)
Enable MINSTRET counter.
__STATIC_FORCEINLINE uint32_t __LW(volatile void *addr)
Load 32bit value from address (32 bit)
__STATIC_FORCEINLINE void __disable_mcycle_counter(void)
Disable MCYCLE counter.
@ WFI_DEEP_SLEEP
Deep sleep mode, the core_clk and core_ano_clk will poweroff.
@ WFI_SHALLOW_SLEEP
Shallow sleep mode, the core_clk will poweroff.
#define MSTATUS_MPIE
#define SSTATUS_SIE
#define WFE_WFE
#define MSTATUS_MIE
#define MIE_SEIE
#define MSTATUS_MPP
#define MIE_SSIE
#define MCOUNTINHIBIT_CY
#define MIE_MTIE
#define MIE_MEIE
#define MIE_STIE
#define MIE_MSIE
#define MCOUNTINHIBIT_IR
__STATIC_FORCEINLINE unsigned long __read_time_csr()
Read the TIME register.
__STATIC_FORCEINLINE void __disable_core_irq(uint32_t irq)
Disable Core IRQ Interrupt.
__STATIC_FORCEINLINE void __disable_irq_s(void)
Disable IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE uint64_t __get_rv_instret(void)
Read whole 64 bits value of machine instruction-retired counter.
__STATIC_FORCEINLINE uint64_t __get_rv_cycle(void)
Read whole 64 bits value of mcycle counter.
__STATIC_FORCEINLINE void __enable_ext_irq_s(void)
Enable External IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE void __disable_timer_irq(void)
Disable Timer IRQ Interrupts.
#define __RV_CSR_CLEAR(csr, val)
CSR operation Macro for csrc instruction.
__STATIC_FORCEINLINE unsigned long __get_cluster_id(void)
Get cluster id of current cluster.
__STATIC_FORCEINLINE void __clear_core_irq_pending(uint32_t irq)
Clear Core IRQ Interrupt Pending status.
__STATIC_FORCEINLINE void __disable_irq(void)
Disable IRQ Interrupts.
__STATIC_FORCEINLINE void __enable_ext_irq(void)
Enable External IRQ Interrupts.
__STATIC_FORCEINLINE uint32_t __get_core_irq_pending_s(uint32_t irq)
Get Core IRQ Interrupt Pending status in supervisor mode.
__STATIC_FORCEINLINE void __clear_core_irq_pending_s(uint32_t irq)
Clear Core IRQ Interrupt Pending status in supervisor mode.
#define __RV_CSR_READ(csr)
CSR operation Macro for csrr instruction.
__STATIC_FORCEINLINE void __disable_sw_irq(void)
Disable software IRQ Interrupts.
__STATIC_FORCEINLINE void __disable_ext_irq(void)
Disable External IRQ Interrupts.
__STATIC_FORCEINLINE unsigned long __get_cluster_id_s(void)
Get cluster id of current cluster in supervisor mode.
__STATIC_FORCEINLINE unsigned long __get_hart_index_s(void)
Get hart index of current cluster in supervisor mode.
__STATIC_FORCEINLINE void __disable_timer_irq_s(void)
Disable Timer IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE void __enable_timer_irq_s(void)
Enable Timer IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE unsigned long __get_hart_id(void)
Get hart id of current cluster.
__STATIC_FORCEINLINE unsigned long __get_hart_id_s(void)
Get hart id of current cluster in supervisor mode.
__STATIC_FORCEINLINE uint64_t __get_rv_time(void)
Read whole 64 bits value of real-time clock.
__STATIC_FORCEINLINE void __disable_ext_irq_s(void)
Disable External IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE unsigned long __read_instret_csr()
Read the INSTRET register.
__STATIC_FORCEINLINE void __disable_sw_irq_s(void)
Disable software IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE unsigned long __get_hart_index(void)
Get hart index of current cluster.
#define __RV_CSR_WRITE(csr, val)
CSR operation Macro for csrw instruction.
__STATIC_FORCEINLINE void __enable_sw_irq(void)
Enable software IRQ Interrupts.
__STATIC_FORCEINLINE void __disable_core_irq_s(uint32_t irq)
Disable Core IRQ Interrupt in supervisor mode.
__STATIC_FORCEINLINE void __switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
switch privilege from machine mode to others.
__STATIC_FORCEINLINE void __enable_timer_irq(void)
Enable Timer IRQ Interrupts.
__STATIC_FORCEINLINE void __enable_irq_s(void)
Enable IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE uint32_t __get_core_irq_pending(uint32_t irq)
Get Core IRQ Interrupt Pending status.
__STATIC_FORCEINLINE unsigned long __read_cycle_csr()
Read the CYCLE register.
__STATIC_FORCEINLINE void __enable_core_irq_s(uint32_t irq)
Enable Core IRQ Interrupt in supervisor mode.
__STATIC_FORCEINLINE void __enable_core_irq(uint32_t irq)
Enable Core IRQ Interrupt.
__STATIC_FORCEINLINE void __set_rv_cycle(uint64_t cycle)
Set whole 64 bits value of mcycle counter.
__STATIC_FORCEINLINE void __enable_irq(void)
Enable IRQ Interrupts.
__STATIC_FORCEINLINE void __enable_sw_irq_s(void)
Enable software IRQ Interrupts in supervisor mode.
__STATIC_FORCEINLINE void __set_rv_instret(uint64_t instret)
Set whole 64 bits value of machine instruction-retired counter.
#define __RV_CSR_SET(csr, val)
CSR operation Macro for csrs instruction.
#define CSR_MHPMEVENT6
#define CSR_MHPMEVENT29
#define CSR_MHPMEVENT18
#define CSR_INSTRET
#define CSR_MHPMCOUNTER17H
#define CSR_MHPMEVENT31
#define CSR_MHPMEVENT17
#define CSR_MHPMCOUNTER16
#define CSR_MHPMCOUNTER7H
#define CSR_MHPMEVENT20
#define CSR_MHPMCOUNTER27H
#define CSR_MHPMEVENT5
#define CSR_MHPMCOUNTER25
#define CSR_MHPMCOUNTER20
#define CSR_MHPMEVENT9
#define CSR_MHPMCOUNTER28
#define CSR_MHPMCOUNTER31
#define CSR_MHPMEVENT13
#define CSR_MHPMCOUNTER18H
#define CSR_MHPMCOUNTER21H
#define CSR_MINSTRET
#define CSR_MHPMEVENT16
#define CSR_MHPMEVENT24
#define CSR_TIMEH
#define CSR_MHPMCOUNTER28H
#define CSR_MHPMCOUNTER21
#define CSR_MHPMCOUNTER9H
#define CSR_MHPMCOUNTER7
#define CSR_MIP
#define CSR_MHPMCOUNTER29H
#define CSR_MHPMCOUNTER26
#define CSR_MHPMCOUNTER14
#define CSR_MHPMCOUNTER10H
#define CSR_MHPMCOUNTER6
#define CSR_MHPMCOUNTER5
#define CSR_MHPMCOUNTER11H
#define CSR_MHPMEVENT26
#define CSR_TIME
#define CSR_MHPMCOUNTER12H
#define CSR_MHPMEVENT21
#define CSR_MHARTID
#define CSR_MEPC
#define CSR_MHPMCOUNTER25H
#define CSR_SHARTID
#define CSR_MCYCLE
#define CSR_MHPMEVENT3
#define CSR_MHPMCOUNTER26H
#define CSR_MHPMCOUNTER5H
#define CSR_MHPMEVENT14
#define CSR_SSTATUS
#define CSR_MHPMEVENT27
#define CSR_MHPMCOUNTER24H
#define CSR_MSTATUS
#define CSR_MHPMCOUNTER8H
#define CSR_MHPMCOUNTER3H
#define CSR_TXEVT
#define CSR_MHPMCOUNTER12
#define CSR_MHPMCOUNTER20H
#define CSR_MHPMEVENT19
#define CSR_SLEEPVALUE
#define CSR_MHPMEVENT15
#define CSR_MHPMEVENT11
#define CSR_MHPMEVENT4
#define CSR_MHPMCOUNTER31H
#define CSR_MHPMCOUNTER8
#define CSR_MCOUNTINHIBIT
#define CSR_MHPMCOUNTER9
#define CSR_WFE
#define CSR_MHPMCOUNTER6H
#define CSR_CYCLE
#define CSR_MHPMCOUNTER23H
#define CSR_MINSTRETH
#define CSR_MHPMEVENT8
#define CSR_MHPMCOUNTER17
#define CSR_MHPMCOUNTER11
#define CSR_MHPMEVENT7
#define CSR_MHPMCOUNTER15H
#define CSR_MEDELEG
#define CSR_MHPMCOUNTER3
#define CSR_MHPMCOUNTER10
#define CSR_MHPMCOUNTER14H
#define CSR_MHPMCOUNTER29
#define CSR_MHPMCOUNTER4
#define CSR_MHPMEVENT12
#define CSR_SIP
#define CSR_MHPMCOUNTER27
#define CSR_MHPMCOUNTER18
#define CSR_MHPMEVENT25
#define CSR_MHPMCOUNTER13H
#define CSR_MHPMCOUNTER4H
#define CSR_MHPMCOUNTER23
#define CSR_MHPMCOUNTER24
#define CSR_MHPMCOUNTER15
#define CSR_MHPMCOUNTER22
#define CSR_MCYCLEH
#define CSR_MHPMCOUNTER13
#define CSR_MIDELEG
#define CSR_MIE
#define CSR_MHPMCOUNTER30
#define CSR_MHPMEVENT22
#define CSR_MHPMEVENT30
#define CSR_MHPMCOUNTER22H
#define CSR_MHPMEVENT10
#define CSR_MHPMCOUNTER19H
#define CSR_MHPMCOUNTER16H
#define CSR_MHPMCOUNTER30H
#define CSR_MHPMCOUNTER19
#define CSR_MHPMEVENT23
#define CSR_SIE
#define CSR_MHPMEVENT28
#define __ASM
Pass information from the compiler to the assembler.
Definition: nmsis_gcc.h:55
#define __STATIC_FORCEINLINE
Define a static function that should be always inlined by the compiler.
Definition: nmsis_gcc.h:70
unsigned long rv_csr_t
Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V.
#define __RISCV_XLEN
Refer to the width of an integer register in bits(either 32 or 64)
Union type to access MCACHE_CTL CSR register.
rv_csr_t dc_burst_type
bit: 23 D-Cache Burst type control
rv_csr_t dc_rwdecc
bit: 20 Control D-Cache Data Ram ECC code injection
rv_csr_t ic_burst_type
bit: 10 I-Cache Burst type control
rv_csr_t ic_scpd_mod
bit: 1 Scratchpad mode, 0: Scratchpad as ICache Data RAM, 1: Scratchpad as ILM SRAM
rv_csr_t ic_pf_en
bit: 6 I-Cache prefetch enable
rv_csr_t dc_rwtecc
bit: 19 Control D-Cache Tag Ram ECC code injection
rv_csr_t ic_rwtecc
bit: 4 Control I-Cache Tag Ram ECC code injection
rv_csr_t dc_prefetch_en
bit: 22 D-Cache CMO prefetch enable control
rv_csr_t ic_rwdecc
bit: 5 Control I-Cache Data Ram ECC code injection
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved0
bit: 11..15 Reserved
rv_csr_t ic_ecc_chk_en
bit: 8 I-Cache check ECC codes enable
rv_csr_t ic_en
bit: 0 I-Cache enable
rv_csr_t _reserved1
bit: 24..XLEN-1 Reserved
rv_csr_t dc_ecc_excp_en
bit: 18 D-Cache 2bit ECC error exception enable
rv_csr_t ic_cancel_en
bit: 7 I-Cache change flow canceling enable control
rv_csr_t ic_ecc_en
bit: 2 I-Cache ECC enable
rv_csr_t ic_prefetch_en
bit: 9 I-Cache CMO prefetch enable control
rv_csr_t dc_ecc_en
bit: 17 D-Cache ECC enable
rv_csr_t ic_ecc_excp_en
bit: 3 I-Cache 2bit ECC error exception enable
rv_csr_t dc_en
bit: 16 DCache enable
rv_csr_t dc_ecc_chk_en
bit: 21 D-Cache check ECC codes enable
Union type to access MCAUSE CSR register.
rv_csr_t mpp
bit: 28..29 Privilede mode flag before enter interrupt
rv_csr_t mpil
bit: 16..23 Previous interrupt level
rv_csr_t exccode
bit: 0..11 exception or interrupt code
rv_csr_t _reserved0
bit: 12..15 Reserved
rv_csr_t mpie
bit: 27 Interrupt enable flag before enter interrupt
rv_csr_t _reserved1
bit: 24..26 Reserved
rv_csr_t minhv
bit: 30 Machine interrupt vector table
rv_csr_t d
Type used for csr data access.
rv_csr_t interrupt
bit: XLEN-1 trap type.
Union type to access MCFG_INFO CSR register.
rv_csr_t sec_mode
bit: 19 Smwg extension present
rv_csr_t tee
bit: 0 TEE present
rv_csr_t icache
bit: 9 ICache present
rv_csr_t etrace
bit: 20 Etrace present
rv_csr_t dsp_n1
bit: 12 DSP N1 present
rv_csr_t d
Type used for csr data access.
rv_csr_t ecc
bit: 1 ECC present
rv_csr_t plic
bit: 3 PLIC present
rv_csr_t dsp_n2
bit: 13 DSP N2 present
rv_csr_t ppi
bit: 5 PPI present
rv_csr_t clic
bit: 2 CLIC present
rv_csr_t nice
bit: 6 NICE present
rv_csr_t dsp_n3
bit: 14 DSP N3 present
rv_csr_t _reserved1
bit: 27..XLEN-1 Reserved
rv_csr_t sstc
bit: 26 SSTC extension present
rv_csr_t ilm
bit: 7 ILM present
rv_csr_t vnice
bit: 23 VNICE present
rv_csr_t xlcz
bit: 24 XLCZ extension present
rv_csr_t dcache
bit: 10 DCache present
rv_csr_t zc_xlcz
bit: 15 Zc and xlcz extension present
rv_csr_t safety_mecha
bit: 21..22 Indicate Core's safety mechanism
rv_csr_t dlm
bit: 8 DLM present
rv_csr_t smp
bit: 11 SMP present
rv_csr_t vpu_degree
bit: 17..18 Indicate the VPU degree of parallel
rv_csr_t iregion
bit: 16 IREGION present
rv_csr_t zilsd
bit: 25 Zilsd/Zclsd extension present
rv_csr_t fio
bit: 4 FIO present
Union type to access MCOUNTINHIBIT CSR register.
rv_csr_t cy
bit: 0 1 means disable mcycle counter
rv_csr_t ir
bit: 2 1 means disable minstret counter
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved1
bit: 3..XLEN-1 Reserved
rv_csr_t _reserved0
bit: 1 Reserved
Union type to access MDCAUSE CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t mdcause
bit: 0..2 More detailed exception information as MCAUSE supplement
rv_csr_t _reserved0
bit: 3..XLEN-1 Reserved
Union type to access MDCFG_INFO CSR register.
rv_csr_t set
bit: 0..3 D-Cache sets per way
rv_csr_t lm_ecc
bit: 21 DLM ECC present
rv_csr_t _reserved0
bit: 11..15 Reserved
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved1
bit: 22..XLEN-1 Reserved
rv_csr_t lsize
bit: 7..9 D-Cache line size
rv_csr_t lm_size
bit: 16..20 DLM size, which must be a power of 2
rv_csr_t way
bit: 4..6 D-Cache way
rv_csr_t ecc
bit: 10 D-Cache ECC support
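A minimal sketch of reading the raw D-Cache/DLM geometry fields. The typedef name CSR_MDCFGINFO_Type and the CSR address macro CSR_MDCFG_INFO are assumptions; the fields are encoded values, so refer to the Nuclei ISA spec to convert them to byte sizes:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: read the encoded D-Cache/DLM geometry fields from MDCFG_INFO. */
static void dcfg_read_sketch(void)
{
    CSR_MDCFGINFO_Type mdcfg;                   /* typedef name assumed */

    mdcfg.d = __RV_CSR_READ(CSR_MDCFG_INFO);    /* CSR address macro assumed */
    unsigned long dc_set  = mdcfg.b.set;        /* bits 0..3:   sets per way (encoded) */
    unsigned long dc_way  = mdcfg.b.way;        /* bits 4..6:   ways (encoded) */
    unsigned long dc_line = mdcfg.b.lsize;      /* bits 7..9:   line size (encoded) */
    unsigned long dlm_sz  = mdcfg.b.lm_size;    /* bits 16..20: DLM size (encoded) */
    (void)dc_set; (void)dc_way; (void)dc_line; (void)dlm_sz;
}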
Union type to access MDLM_CTL CSR register.
rv_csr_t _reserved0
bit: 5..9 Reserved
rv_csr_t dlm_en
bit: 0 DLM enable
rv_csr_t dlm_bpa
bit: 10..XLEN-1 DLM base address
rv_csr_t dlm_rwecc
bit: 3 Controls writing mecc_code to the DLM to simulate ECC error injection
rv_csr_t dlm_ecc_en
bit: 1 DLM ECC enable
rv_csr_t dlm_ecc_chk_en
bit: 4 DLM check ECC codes enable
rv_csr_t d
Type used for csr data access.
rv_csr_t dlm_ecc_excp_en
bit: 2 DLM ECC exception enable
Union type to access MECC_CODE CSR register.
rv_csr_t _reserved1
bit: 21..23 Reserved 0
rv_csr_t ramid
bit: 16..20 ID of the RAM with a 2-bit ECC error; software can clear these bits
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved2
bit: 29..XLEN-1 Reserved 0
rv_csr_t code
bit: 0..8 Used to inject ECC check code
rv_csr_t _reserved0
bit: 9..15 Reserved 0
rv_csr_t sramid
bit: 24..28 ID of the RAM with a 1-bit ECC error; software can clear these bits
Union type to access MECC_LOCK CSR register.
rv_csr_t ecc_lock
bit: 0 ECC lock configuration (RW permission)
rv_csr_t _reserved0
bit: 1..XLEN-1 Reserved
rv_csr_t d
Type used for csr data access.
Union type to access MECC_CTL CSR register.
rv_csr_t dlm_ext_msk
bit: 6 Write 1 to disable aggregate DLM external access ECC fatal error to safety_error output
rv_csr_t ilm_acc_msk
bit: 1 Write 1 to disable aggregate ILM load/store access ECC fatal error to safety_error output
rv_csr_t dc_ccm_msk
bit: 8 Write 1 to disable aggregate DCache CCM ECC fatal error to safety_error output
rv_csr_t _reserved0
bit: 10..XLEN-1 Reserved 0
rv_csr_t ic_fch_msk
bit: 3 Write 1 to disable aggregate ICache fetch ECC fatal error to safety_error output
rv_csr_t dc_acc_msk
bit: 4 Write 1 to disable aggregate DCache access ECC fatal error to safety_error output
rv_csr_t d
Type used for csr data access.
rv_csr_t dlm_acc_msk
bit: 2 Write 1 to disable aggregate DLM access ECC fatal error to safety_error output
rv_csr_t dc_cpbk_msk
bit: 9 Write 1 to disable aggregate DCache CPBK ECC fatal error to safety_error output
rv_csr_t ic_ccm_msk
bit: 7 Write 1 to disable aggregate ICache CCM ECC fatal error to safety_error output
rv_csr_t ilm_fch_msk
bit: 0 Write 1 to disable aggregate ILM fetch ECC fatal error to safety_error output
rv_csr_t ilm_ext_msk
bit: 5 Write 1 to disable aggregate ILM external access ECC fatal error to safety_error output
Union type to access MECC_STATUS CSR register.
rv_csr_t ic_ccm_err
bit: 7 ICache CCM ECC fatal error has occurred
rv_csr_t dc_cpbk_err
bit: 9 DCache CPBK ECC fatal error has occurred
rv_csr_t dlm_acc_err
bit: 2 DLM access ECC fatal error has occurred
rv_csr_t ilm_acc_err
bit: 1 ILM load/store access ECC fatal error has occurred
rv_csr_t dlm_ext_err
bit: 6 DLM external access ECC fatal error has occurred
rv_csr_t _reserved0
bit: 10..XLEN-1 Reserved 0
rv_csr_t ilm_fch_err
bit: 0 ILM fetch ECC fatal error has occurred
rv_csr_t dc_ccm_err
bit: 8 DCache CCM ECC fatal error has occurred
rv_csr_t dc_acc_err
bit: 4 DCache access ECC fatal error has occurred
rv_csr_t ilm_ext_err
bit: 5 ILM external access ECC fatal error has occurred
rv_csr_t d
Type used for csr data access.
rv_csr_t ic_fch_err
bit: 3 ICache fetch ECC fatal error has occurred
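A minimal sketch of polling the per-source fatal-error flags, for example from a safety monitor task. The typedef name CSR_MECC_STATUS_Type and the CSR address macro CSR_MECC_STATUS are assumptions here:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: return nonzero if any 2-bit (fatal) ECC error flag is set. */
static int ecc_fatal_error_seen_sketch(void)
{
    CSR_MECC_STATUS_Type st;                     /* typedef name assumed */

    st.d = __RV_CSR_READ(CSR_MECC_STATUS);       /* CSR address macro assumed */
    return st.b.ilm_fch_err | st.b.ilm_acc_err | st.b.dlm_acc_err |
           st.b.ic_fch_err  | st.b.dc_acc_err  | st.b.ilm_ext_err |
           st.b.dlm_ext_err | st.b.ic_ccm_err  | st.b.dc_ccm_err  |
           st.b.dc_cpbk_err;
}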
Union type to access MFIOCFG_INFO CSR register.
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t fio_size
bit: 1..5 FIO size, which must be a power of 2
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved1
bit: 6..9 Reserved
rv_csr_t fio_bpa
bit: 10..XLEN-1 FIO base address
Union type to access MICFG_INFO CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t lm_size
bit: 16..20 ILM size, which must be a power of 2
rv_csr_t set
bit: 0..3 I-Cache sets per way
rv_csr_t lm_ecc
bit: 22 ILM ECC support
rv_csr_t ecc
bit: 10 I-Cache ECC support
rv_csr_t _reserved0
bit: 11..15 Reserved
rv_csr_t _reserved1
bit: 23..XLEN-1 Reserved
rv_csr_t lsize
bit: 7..9 I-Cache line size
rv_csr_t lm_xonly
bit: 21 ILM Execute only permission or Reserved
rv_csr_t way
bit: 4..6 I-Cache way
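A minimal sketch of inspecting the ILM and I-Cache attributes reported here. The typedef name CSR_MICFGINFO_Type and the CSR address macro CSR_MICFG_INFO are assumptions:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: read the encoded I-Cache/ILM attribute fields from MICFG_INFO. */
static void icfg_read_sketch(void)
{
    CSR_MICFGINFO_Type micfg;                   /* typedef name assumed */

    micfg.d = __RV_CSR_READ(CSR_MICFG_INFO);    /* CSR address macro assumed */
    int ilm_has_ecc   = micfg.b.lm_ecc;         /* bit 22: ILM ECC support */
    int ilm_exec_only = micfg.b.lm_xonly;       /* bit 21: ILM execute-only permission */
    unsigned long ic_ways = micfg.b.way;        /* bits 4..6: I-Cache ways (encoded) */
    (void)ilm_has_ecc; (void)ilm_exec_only; (void)ic_ways;
}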
Union type to access MILM_CTL CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved0
bit: 6..9 Reserved
rv_csr_t ilm_ecc_chk_en
bit: 4 ILM check ECC codes enable
rv_csr_t ilm_ecc_excp_en
bit: 2 ILM ECC exception enable
rv_csr_t ilm_rwecc
bit: 3 Controls writing mecc_code to the ILM to simulate ECC error injection
rv_csr_t ilm_ecc_en
bit: 1 ILM ECC enable
rv_csr_t ilm_va_en
bit: 5 Use virtual addresses to determine ILM accesses
rv_csr_t ilm_en
bit: 0 ILM enable
rv_csr_t ilm_bpa
bit: 10..XLEN-1 ILM base address
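A minimal sketch of turning the ILM and its ECC protection on. The typedef name CSR_MILM_CTL_Type and the CSR address macro CSR_MILM_CTL are assumptions:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: enable the ILM with ECC checking and ECC exceptions. */
static void ilm_enable_sketch(void)
{
    CSR_MILM_CTL_Type milm;                     /* typedef name assumed */

    milm.d = __RV_CSR_READ(CSR_MILM_CTL);       /* keep ilm_bpa and other bits unchanged */
    milm.b.ilm_en          = 1;                 /* bit 0: ILM enable */
    milm.b.ilm_ecc_en      = 1;                 /* bit 1: ILM ECC enable */
    milm.b.ilm_ecc_excp_en = 1;                 /* bit 2: raise an exception on ILM ECC errors */
    __RV_CSR_WRITE(CSR_MILM_CTL, milm.d);
}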
Union type to access MIRGB_INFO CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t iregion_base
bit: 10..PA_SIZE IREGION Base Address
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t iregion_size
bit: 1..5 Indicates the size of IREGION, which must be a power of 2
rv_csr_t _reserved1
bit: 6..9 Reserved
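Since bits 0..9 hold the size and reserved fields while bits 10..PA_SIZE hold the 1KB-aligned base, the IREGION byte address can be recovered by masking the low bits. A minimal sketch, with the typedef name CSR_MIRGB_INFO_Type and the CSR address macro CSR_MIRGB_INFO assumed:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: recover the IREGION base address from MIRGB_INFO. */
static unsigned long iregion_base_sketch(void)
{
    CSR_MIRGB_INFO_Type mirgb;                  /* typedef name assumed */

    mirgb.d = __RV_CSR_READ(CSR_MIRGB_INFO);    /* CSR address macro assumed */
    /* clear the low 10 bits (size + reserved) to get the 1KB-aligned byte address */
    return mirgb.d & ~((1UL << 10) - 1);
}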
Union type to access MISA CSR register.
rv_csr_t n
bit: 13 Tentatively reserved for User-Level Interrupts extension
rv_csr_t y
bit: 24 Reserved
rv_csr_t p
bit: 15 Tentatively reserved for Packed-SIMD extension
rv_csr_t v
bit: 21 Vector extension
rv_csr_t d
bit: 3 Double-precision floating-point extension
rv_csr_t q
bit: 16 Quad-precision floating-point extension
rv_csr_t i
bit: 8 RV32I/64I/128I base ISA
rv_csr_t w
bit: 22 Reserved
rv_csr_t f
bit: 5 Single-precision floating-point extension
rv_csr_t j
bit: 9 Reserved
rv_csr_t r
bit: 17 Reserved
rv_csr_t g
bit: 6 Reserved
rv_csr_t z
bit: 25 Reserved
rv_csr_t mxl
bit: XLEN-2..XLEN-1 Machine XLEN
rv_csr_t _reserved0
bit: 26..XLEN-3 Reserved
rv_csr_t e
bit: 4 RV32E/64E base ISA
rv_csr_t u
bit: 20 User mode implemented
rv_csr_t s
bit: 18 Supervisor mode implemented
rv_csr_t a
bit: 0 Atomic extension
rv_csr_t t
bit: 19 Reserved
rv_csr_t b
bit: 1 B extension
rv_csr_t o
bit: 14 Reserved
rv_csr_t k
bit: 10 Reserved
rv_csr_t x
bit: 23 Non-standard extensions present
rv_csr_t l
bit: 11 Reserved
rv_csr_t h
bit: 7 Hypervisor extension
rv_csr_t m
bit: 12 Integer Multiply/Divide extension
rv_csr_t c
bit: 2 Compressed extension
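A minimal sketch of checking which standard extensions the hart reports. CSR_MISA_Type is defined in this header; CSR_MISA is the standard CSR address macro (note that some implementations may report MISA as zero):

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: latch a few MISA extension bits. */
static void misa_probe_sketch(void)
{
    CSR_MISA_Type misa;

    misa.d = __RV_CSR_READ(CSR_MISA);
    int has_atomic   = misa.b.a;   /* bit 0:  Atomic extension */
    int has_compress = misa.b.c;   /* bit 2:  Compressed extension */
    int has_float    = misa.b.f;   /* bit 5:  Single-precision floating-point extension */
    int has_vector   = misa.b.v;   /* bit 21: Vector extension */
    (void)has_atomic; (void)has_compress; (void)has_float; (void)has_vector;
}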
Union type to access MMISC_CTRL CSR register.
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved3
bit: 13 Reserved
rv_csr_t _reserved1
bit: 2 Reserved
rv_csr_t sijump_en
bit: 11 SIJUMP mode of trace
rv_csr_t core_buserr
bit: 8 core bus error exception or interrupt
rv_csr_t _reserved5
bit: 18..XLEN-1 Reserved
rv_csr_t _reserved4
bit: 15..16 Reserved
rv_csr_t _reserved2
bit: 4..5 Reserved
rv_csr_t csr_excl_enable
bit: 17 Exclusive instructions (lr/sc) on non-cacheable/device memory can send the exclusive flag to memory...
rv_csr_t imreturn_en
bit: 10 IMRETURN mode of trace
rv_csr_t zclsd_en
bit: 1 Controls whether Zclsd uses the Zcf extension encoding
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t dbg_sec
bit: 14 Debug access mode (removed in the latest releases)
rv_csr_t bpu
bit: 3 Dynamic branch prediction enable flag
rv_csr_t nmi_cause
bit: 9 mnvec control and NMI mcause exccode
rv_csr_t misalign
bit: 6 misaligned access support flag
rv_csr_t ldspec_en
bit: 12 Enable speculative loads to be issued to the memory interface
rv_csr_t zcmt_zcmp
bit: 7 Controls whether the Zc extension reuses the c.fsdsp encoding of the D extension
Union type to access MPPICFG_INFO CSR register.
rv_csr_t ppi_bpa
bit: 10..XLEN-1 PPI base address
rv_csr_t _reserved0
bit: 0 Reserved 1
rv_csr_t _reserved1
bit: 6..8 Reserved 0
rv_csr_t ppi_size
bit: 1..5 PPI size, which must be a power of 2
rv_csr_t d
Type used for csr data access.
rv_csr_t ppi_en
bit: 9 PPI Enable.
Union type to access MSAVESTATUS CSR register.
rv_csr_t w
Type used for csr data access.
rv_csr_t mpie2
bit: 8 Interrupt enable flag of the second-level NMI/exception nesting
rv_csr_t mpp2
bit: 9..10 Privilege mode of the second-level NMI/exception nesting
rv_csr_t mpp1
bit: 1..2 Privilege mode of the first-level NMI/exception nesting
rv_csr_t _reserved1
bit: 11..13 Reserved
rv_csr_t ptyp1
bit: 6..7 NMI/exception type before the first nesting
rv_csr_t _reserved2
bit: 16..XLEN-1 Reserved
rv_csr_t ptyp2
bit: 14..15 NMI/exception type before the second nesting
rv_csr_t _reserved0
bit: 3..5 Reserved
rv_csr_t mpie1
bit: 0 Interrupt enable flag of the first-level NMI/exception nesting
Union type to access MSTACK_CTL CSR register.
rv_csr_t _reserved0
bit: 3..XLEN-1 Reserved
rv_csr_t ovf_track_en
bit: 0 Stack overflow check or track enable
rv_csr_t mode
bit: 2 Mode of stack checking
rv_csr_t d
Type used for csr data access.
rv_csr_t udf_en
bit: 1 Stack underflow check enable
Union type to access MSTATUSH CSR register.
rv_csr_t mpelp
bit: 9 Machine mode Previous Expected Landing Pad (ELP) State
rv_csr_t _reserved0
bit: 0..3 Reserved
rv_csr_t mbe
bit: 5 M-mode non-instruction-fetch memory access big-endian enable flag
rv_csr_t sbe
bit: 4 S-mode non-instruction-fetch memory access big-endian enable flag
rv_csr_t gva
bit: 6 Guest Virtual Address
rv_csr_t mpv
bit: 7 Machine Previous Virtualization Mode
rv_csr_t _reserved5
bit: 11..31 Reserved
rv_csr_t mdt
bit: 10 M-mode-disable-trap
rv_csr_t _reserved1
bit: 8 Reserved
rv_csr_t d
Type used for csr data access.
Union type to access MSTATUS CSR register.
rv_csr_t sd
bit: 31 Dirty status for XS or FS
rv_csr_t tvm
bit: 20 Trap Virtual Memory
rv_csr_t fs
bit: 13..14 FS status flag
rv_csr_t mpie
bit: 7 machine mode previous interrupt enable flag
rv_csr_t spie
bit: 5 Supervisor mode previous interrupt enable flag
rv_csr_t sie
bit: 1 supervisor interrupt enable flag
rv_csr_t _reserved3
bit: 25..30 Reserved
rv_csr_t _reserved1
bit: 2 Reserved
rv_csr_t mxr
bit: 19 Make eXecutable Readable
rv_csr_t mprv
bit: 17 Modify PRiVilege
rv_csr_t mpp
bit: 11..12 Machine previous privilege mode
rv_csr_t tw
bit: 21 Timeout Wait
rv_csr_t _reserved2
bit: 4 Reserved
rv_csr_t spp
bit: 8 Supervisor previous privilege mode
rv_csr_t ube
bit: 6 U-mode non-instruction-fetch memory access big-endian enable flag
rv_csr_t mie
bit: 3 machine mode interrupt enable flag
rv_csr_t vs
bit: 9..10 vector status flag
rv_csr_t sum
bit: 18 Supervisor Mode load and store protection
rv_csr_t xs
bit: 15..16 XS status flag
rv_csr_t d
Type used for csr data access.
rv_csr_t tsr
bit: 22 Trap SRET
rv_csr_t _reserved0
bit: 0 Reserved
rv_csr_t sdt
bit: 24 S-mode-disable-trap
rv_csr_t spelp
bit: 23 Supervisor mode Previous Expected Landing Pad (ELP) State
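A minimal sketch of enabling the floating-point unit by moving the FS field out of the Off state. The typedef name CSR_MSTATUS_Type is assumed; CSR_MSTATUS is the standard CSR address macro, and the FS encoding 0=Off, 1=Initial, 2=Clean, 3=Dirty follows the RISC-V privileged spec:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: set mstatus.FS to Initial so FPU instructions no longer trap. */
static void fpu_enable_sketch(void)
{
    CSR_MSTATUS_Type mstatus;                   /* typedef name assumed */

    mstatus.d = __RV_CSR_READ(CSR_MSTATUS);
    mstatus.b.fs = 1;                           /* bits 13..14: 0=Off, 1=Initial, 2=Clean, 3=Dirty */
    __RV_CSR_WRITE(CSR_MSTATUS, mstatus.d);
}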
Union type to access MSUBM CSR register.
rv_csr_t typ
bit: 6..7 current trap type
rv_csr_t _reserved1
bit: 10..XLEN-1 Reserved
rv_csr_t _reserved0
bit: 0..5 Reserved
rv_csr_t ptyp
bit: 8..9 previous trap type
rv_csr_t d
Type used for csr data access.
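A minimal sketch of reading the current and previous trap types kept in this register, which is useful when handling nested NMI/exception/interrupt contexts. The typedef name CSR_MSUBM_Type and the CSR address macro CSR_MSUBM are assumptions:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

/* Sketch: read the current/previous trap type fields from MSUBM. */
static void msubm_read_sketch(void)
{
    CSR_MSUBM_Type msubm;                       /* typedef name assumed */

    msubm.d = __RV_CSR_READ(CSR_MSUBM);         /* CSR address macro assumed */
    unsigned long cur_type  = msubm.b.typ;      /* bits 6..7: current trap type */
    unsigned long prev_type = msubm.b.ptyp;     /* bits 8..9: previous trap type */
    (void)cur_type; (void)prev_type;
}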
Union type to access MTLBCFG_INFO CSR register.
rv_csr_t i_size
bit: 16..18 ITLB size
rv_csr_t d
Type used for csr data access.
rv_csr_t _reserved2
bit: 22..XLEN-1 Reserved 0
rv_csr_t lsize
bit: 7..9 Main TLB line size or Reserved
rv_csr_t _reserved1
bit: 12..15 Reserved 0
rv_csr_t set
bit: 0..3 Main TLB entries per way
rv_csr_t d_size
bit: 19..21 DTLB size
rv_csr_t napot
bit: 11 TLB supports Svnapot or not
rv_csr_t way
bit: 4..6 Main TLB ways
rv_csr_t ecc
bit: 10 Main TLB supports ECC or not
Union type to access MTLB_CTL CSR register.
rv_csr_t tlb_ecc_chk_en
bit: 6 Controls ECC checking when the core accesses the MTLB
rv_csr_t d
Type used for csr data access.
rv_csr_t tlb_ecc_en
bit: 0 MTLB ECC enable
rv_csr_t tlb_dram_ecc_inj_en
bit: 3 Controls injection of the ECC code in CSR mecc_code into the MTLB data RAMs
rv_csr_t tlb_ecc_excp_en
bit: 1 MTLB double bit ECC exception enable control
rv_csr_t tlb_tram_ecc_inj_en
bit: 2 Controls injection of the ECC code in CSR mecc_code into the MTLB tag RAMs
rv_csr_t _reserved0
bit: 4..5 Reserved
rv_csr_t napot_en
bit: 7 NAPOT page enable
rv_csr_t _reserved1
bit: 8..XLEN-1 Reserved
Union type to access MTVEC CSR register.
rv_csr_t addr
bit: 6..XLEN-1 mtvec address
rv_csr_t d
Type used for csr data access.
rv_csr_t mode
bit: 0..5 interrupt mode control
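A minimal sketch of programming the trap vector base and mode through this register. The typedef name CSR_MTVEC_Type is assumed, trap_entry is a hypothetical trap entry symbol, and the mode value 0x3 (ECLIC interrupt mode on Nuclei cores) is an assumption to be checked against your core's spec; the handler address must be aligned so its low bits do not collide with the mode field:

#include "nuclei_sdk_soc.h"   /* example device header that pulls in core_feature_base.h */

extern void trap_entry(void);                   /* hypothetical trap entry symbol */

/* Sketch: set the mtvec base address and interrupt mode. */
static void mtvec_setup_sketch(void)
{
    CSR_MTVEC_Type mtvec;                       /* typedef name assumed */

    mtvec.d = (rv_csr_t)(uintptr_t)trap_entry;  /* bits 6..XLEN-1: base address (suitably aligned) */
    mtvec.b.mode = 0x3;                         /* bits 0..5: interrupt mode control (value assumed) */
    __RV_CSR_WRITE(CSR_MTVEC, mtvec.d);
}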