NMSIS-Core  Version 1.5.0
NMSIS-Core support for Nuclei processor-based devices
core_feature_spmp.h
1 /*
2  * Copyright (c) 2019 Nuclei Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 #ifndef __CORE_FEATURE_SPMP_H__
19 #define __CORE_FEATURE_SPMP_H__
24 /*
25  * sPMP Feature Configuration Macro:
26  * 1. __SPMP_PRESENT: Define whether sPMP is present or not
27  * __SMPU_PRESENT: Define whether SMPU is present or not
28  * * 0: Not present
29  * * 1: Present
30  * 2. __SPMP_ENTRY_NUM: Define the number of sPMP entries, only 8 or 16 is configurable
31  * __SMPU_ENTRY_NUM: Define the number of SMPU entries, only 8 or 16 is configurable
32  * __SMPU_ENTRY_NUM is the same as __SPMP_ENTRY_NUM
33  */
34 #ifdef __cplusplus
35 extern "C" {
36 #endif
37 
38 #include "core_feature_base.h"
39 #include "core_compatiable.h"
40 
41 #if defined(__SPMP_PRESENT) && (__SPMP_PRESENT == 1)
42 
43 /* ===== sPMP Operations ===== */
58 #ifndef __SPMP_ENTRY_NUM
59 /* Number of __SPMP_ENTRY_NUM entries should be defined in <Device.h> */
60 #error "__SPMP_ENTRY_NUM is not defined, please check!"
61 #endif
62 
/**
 * \brief Configuration of one sPMP entry.
 * \details sPMP has been upgraded to the S-mode Memory Protection Unit
 *          (SMPU) on newer cores; the same structure is reused there
 *          (aliased as smpu_config elsewhere in this file).
 */
typedef struct SPMP_CONFIG {
 unsigned int protection;   /*!< Permission bits; set using the SPMP_L/SPMP_U/SPMP_R/SPMP_W/SPMP_X (or SMPU_S/SMPU_R/SMPU_W/SMPU_X) macros */
 unsigned long order;       /*!< Size of the memory region as a power of 2; minimum SPMP_SHIFT (2), maximum __RISCV_XLEN */
 unsigned long base_addr;   /*!< Base address of the memory region; must be 2^order aligned */
} spmp_config;
82 
96 {
97  switch (csr_idx) {
98  case 0: return __RV_CSR_READ(CSR_SPMPCFG0);
99  case 1: return __RV_CSR_READ(CSR_SPMPCFG1);
100 #if __SPMP_ENTRY_NUM > 8
101  case 2: return __RV_CSR_READ(CSR_SPMPCFG2);
102  case 3: return __RV_CSR_READ(CSR_SPMPCFG3);
103 #endif
104  default: return 0;
105  }
106 }
107 
120 __STATIC_INLINE void __set_sPMPCFGx(uint32_t csr_idx, rv_csr_t spmpcfg)
121 {
122  switch (csr_idx) {
123  case 0: __RV_CSR_WRITE(CSR_SPMPCFG0, spmpcfg); break;
124  case 1: __RV_CSR_WRITE(CSR_SPMPCFG1, spmpcfg); break;
125 #if __SPMP_ENTRY_NUM > 8
126  case 2: __RV_CSR_WRITE(CSR_SPMPCFG2, spmpcfg); break;
127  case 3: __RV_CSR_WRITE(CSR_SPMPCFG3, spmpcfg); break;
128 #endif
129  default: return;
130  }
131 }
132 
139 __STATIC_INLINE uint8_t __get_sPMPxCFG(uint32_t entry_idx)
140 {
141  rv_csr_t spmpcfgx = 0;
142  uint8_t csr_cfg_num = 0;
143  uint16_t csr_idx = 0;
144  uint16_t cfg_shift = 0;
145 
146  if (entry_idx >= __SPMP_ENTRY_NUM) return 0;
147 
148 #if __RISCV_XLEN == 32
149  csr_cfg_num = 4;
150  csr_idx = entry_idx >> 2;
151 #elif __RISCV_XLEN == 64
152  csr_cfg_num = 8;
153  /* For RV64, spmpcfg0 and spmpcfg2 each hold 8 sPMP entries, align by 2 */
154  csr_idx = (entry_idx >> 2) & ~1;
155 #else
156  // TODO Add RV128 Handling
157  return 0;
158 #endif
159  spmpcfgx = __get_sPMPCFGx(csr_idx);
160  /*
161  * first get specific spmpxcfg's order in one CSR composed of csr_cfg_num spmpxcfgs,
162  * then get spmpxcfg's bit position in one CSR by left shift 3(each spmpxcfg size is one byte)
163  */
164  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
165 
166  /* read specific spmpxcfg register value */
167  return (uint8_t)(__RV_EXTRACT_FIELD(spmpcfgx, 0xFF << cfg_shift));
168 }
169 
179 __STATIC_INLINE void __set_sPMPxCFG(uint32_t entry_idx, uint8_t spmpxcfg)
180 {
181  rv_csr_t spmpcfgx = 0;
182  uint8_t csr_cfg_num = 0;
183  uint16_t csr_idx = 0;
184  uint16_t cfg_shift = 0;
185  if (entry_idx >= __SPMP_ENTRY_NUM) return;
186 
187 #if __RISCV_XLEN == 32
188  csr_cfg_num = 4;
189  csr_idx = entry_idx >> 2;
190 #elif __RISCV_XLEN == 64
191  csr_cfg_num = 8;
192  /* For RV64, spmpcfg0 and spmpcfg2 each hold 8 sPMP entries, align by 2 */
193  csr_idx = (entry_idx >> 2) & ~1;
194 #else
195  // TODO Add RV128 Handling
196  return;
197 #endif
198  /* read specific spmpcfgx register value */
199  spmpcfgx = __get_sPMPCFGx(csr_idx);
200  /*
201  * first get specific spmpxcfg's order in one CSR composed of csr_cfg_num spmpxcfgs,
202  * then get spmpxcfg's bit position in one CSR by left shift 3(each spmpxcfg size is one byte)
203  */
204  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
205 
206  spmpcfgx = __RV_INSERT_FIELD(spmpcfgx, 0xFFUL << cfg_shift, spmpxcfg);
207  __set_sPMPCFGx(csr_idx, spmpcfgx);
208 }
209 
217 {
218  switch (csr_idx) {
219  case 0: return __RV_CSR_READ(CSR_SPMPADDR0);
220  case 1: return __RV_CSR_READ(CSR_SPMPADDR1);
221  case 2: return __RV_CSR_READ(CSR_SPMPADDR2);
222  case 3: return __RV_CSR_READ(CSR_SPMPADDR3);
223  case 4: return __RV_CSR_READ(CSR_SPMPADDR4);
224  case 5: return __RV_CSR_READ(CSR_SPMPADDR5);
225  case 6: return __RV_CSR_READ(CSR_SPMPADDR6);
226  case 7: return __RV_CSR_READ(CSR_SPMPADDR7);
227 #if __SPMP_ENTRY_NUM > 8
228  case 8: return __RV_CSR_READ(CSR_SPMPADDR8);
229  case 9: return __RV_CSR_READ(CSR_SPMPADDR9);
230  case 10: return __RV_CSR_READ(CSR_SPMPADDR10);
231  case 11: return __RV_CSR_READ(CSR_SPMPADDR11);
232  case 12: return __RV_CSR_READ(CSR_SPMPADDR12);
233  case 13: return __RV_CSR_READ(CSR_SPMPADDR13);
234  case 14: return __RV_CSR_READ(CSR_SPMPADDR14);
235  case 15: return __RV_CSR_READ(CSR_SPMPADDR15);
236 #endif
237  default: return 0;
238  }
239 }
240 
247 __STATIC_INLINE void __set_sPMPADDRx(uint32_t csr_idx, rv_csr_t spmpaddr)
248 {
249  switch (csr_idx) {
250  case 0: __RV_CSR_WRITE(CSR_SPMPADDR0, spmpaddr); break;
251  case 1: __RV_CSR_WRITE(CSR_SPMPADDR1, spmpaddr); break;
252  case 2: __RV_CSR_WRITE(CSR_SPMPADDR2, spmpaddr); break;
253  case 3: __RV_CSR_WRITE(CSR_SPMPADDR3, spmpaddr); break;
254  case 4: __RV_CSR_WRITE(CSR_SPMPADDR4, spmpaddr); break;
255  case 5: __RV_CSR_WRITE(CSR_SPMPADDR5, spmpaddr); break;
256  case 6: __RV_CSR_WRITE(CSR_SPMPADDR6, spmpaddr); break;
257  case 7: __RV_CSR_WRITE(CSR_SPMPADDR7, spmpaddr); break;
258 #if __SPMP_ENTRY_NUM > 8
259  case 8: __RV_CSR_WRITE(CSR_SPMPADDR8, spmpaddr); break;
260  case 9: __RV_CSR_WRITE(CSR_SPMPADDR9, spmpaddr); break;
261  case 10: __RV_CSR_WRITE(CSR_SPMPADDR10, spmpaddr); break;
262  case 11: __RV_CSR_WRITE(CSR_SPMPADDR11, spmpaddr); break;
263  case 12: __RV_CSR_WRITE(CSR_SPMPADDR12, spmpaddr); break;
264  case 13: __RV_CSR_WRITE(CSR_SPMPADDR13, spmpaddr); break;
265  case 14: __RV_CSR_WRITE(CSR_SPMPADDR14, spmpaddr); break;
266  case 15: __RV_CSR_WRITE(CSR_SPMPADDR15, spmpaddr); break;
267 #endif
268  default: return;
269  }
270 }
271 
283 __STATIC_INLINE void __set_sPMPENTRYx(uint32_t entry_idx, const spmp_config *spmp_cfg)
284 {
285  unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
286  unsigned long cfgmask, addrmask = 0;
287  unsigned long spmpcfg, spmpaddr = 0;
288  unsigned long protection, csr_cfg_num = 0;
289  /* check parameters */
290  if (entry_idx >= __SPMP_ENTRY_NUM || spmp_cfg->order > __RISCV_XLEN || spmp_cfg->order < SPMP_SHIFT) return;
291 
292  /* calculate sPMP register and offset */
293 #if __RISCV_XLEN == 32
294  csr_cfg_num = 4;
295  cfg_csr_idx = (entry_idx >> 2);
296 #elif __RISCV_XLEN == 64
297  csr_cfg_num = 8;
298  cfg_csr_idx = ((entry_idx >> 2)) & ~1;
299 #else
300  // TODO Add RV128 Handling
301  return;
302 #endif
303  /*
304  * first get specific spmpxcfg's order in one CSR composed of csr_cfg_num spmpxcfgs,
305  * then get spmpxcfg's bit position in one CSR by left shift 3, each spmpxcfg size is one byte
306  */
307  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
308  addr_csr_idx = entry_idx;
309 
310  /* encode sPMP config */
311  protection = (unsigned long)spmp_cfg->protection;
312  protection |= (SPMP_SHIFT == spmp_cfg->order) ? SPMP_A_NA4 : SPMP_A_NAPOT;
313  cfgmask = ~(0xFFUL << cfg_shift);
314  spmpcfg = (__get_sPMPCFGx(cfg_csr_idx) & cfgmask);
315  spmpcfg |= ((protection << cfg_shift) & ~cfgmask);
316 
317  /* encode sPMP address */
318  if (SPMP_SHIFT == spmp_cfg->order) { /* NA4 */
319  spmpaddr = (spmp_cfg->base_addr >> SPMP_SHIFT);
320  } else { /* NAPOT */
321  addrmask = (1UL << (spmp_cfg->order - SPMP_SHIFT)) - 1;
322  spmpaddr = ((spmp_cfg->base_addr >> SPMP_SHIFT) & ~addrmask);
323  spmpaddr |= (addrmask >> 1);
324  }
325  /*
326  * write csrs, update the address first, in case the entry is locked that
327  * we won't be able to modify it after we set the config csr.
328  */
329  __set_sPMPADDRx(addr_csr_idx, spmpaddr);
330  __set_sPMPCFGx(cfg_csr_idx, spmpcfg);
331 }
332 
/**
 * \brief   Get sPMP entry by entry index
 * \details Read back and decode the sPMPADDRx / sPMPCFGx CSRs of the entry
 *          into \p spmp_cfg (protection bits, region base address and
 *          region order).
 * \param [in]  entry_idx  sPMP entry index, 0 .. __SPMP_ENTRY_NUM-1
 * \param [out] spmp_cfg   Decoded entry configuration
 * \return  0 on success, -1 on invalid parameters or unsupported XLEN
 */
__STATIC_INLINE int __get_sPMPENTRYx(unsigned int entry_idx, spmp_config *spmp_cfg)
{
    unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
    unsigned long cfgmask, spmpcfg, prot = 0;
    unsigned long t1, addr, spmpaddr, len = 0;
    uint8_t csr_cfg_num = 0;
    /* check parameters */
    if (entry_idx >= __SPMP_ENTRY_NUM || !spmp_cfg) return -1;

    /* calculate sPMP register and offset */
#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    cfg_csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    /* For RV64, spmpcfg0 and spmpcfg2 each hold 8 entries: even CSR index */
    cfg_csr_idx = (entry_idx >> 2) & ~1;
#else
// TODO Add RV128 Handling
    return -1;
#endif

    /* bit position of this entry's 8-bit cfg within the CSR */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    addr_csr_idx = entry_idx;

    /* decode sPMP config: isolate and right-align the entry's cfg byte */
    cfgmask = (0xFFUL << cfg_shift);
    spmpcfg = (__get_sPMPCFGx(cfg_csr_idx) & cfgmask);
    prot = spmpcfg >> cfg_shift;

    /* decode sPMP address */
    spmpaddr = __get_sPMPADDRx(addr_csr_idx);
    if (SPMP_A_NAPOT == (prot & SPMP_A)) {
        /* NAPOT: trailing 1-bits of spmpaddr encode the region size;
         * t1 = number of trailing ones = order - SPMP_SHIFT - 1 */
        t1 = __CTZ(~spmpaddr);
        /* clear the size-encoding bits to recover the base address */
        addr = (spmpaddr & ~((1UL << t1) - 1)) << SPMP_SHIFT;
        len = (t1 + SPMP_SHIFT + 1);
    } else {
        /* NA4 (or OFF): address is the base >> SPMP_SHIFT, order is minimal */
        addr = spmpaddr << SPMP_SHIFT;
        len = SPMP_SHIFT;
    }

    /* return details */
    spmp_cfg->protection = prot;
    spmp_cfg->base_addr = addr;
    spmp_cfg->order = len;

    return 0;
}
391 
#if defined(__SMPU_PRESENT) && (__SMPU_PRESENT == 1)
/* sPMP has been upgraded to the S-mode Memory Protection Unit (SMPU).
 * The SMPU shares its CSR layout and API with sPMP, so the SMPU accessors
 * are simple aliases of the sPMP functions above. */
#define __get_SMPUCFGx __get_sPMPCFGx
#define __set_SMPUCFGx __set_sPMPCFGx
#define __get_SMPUxCFG __get_sPMPxCFG
#define __set_SMPUxCFG __set_sPMPxCFG
#define __get_SMPUADDRx __get_sPMPADDRx
#define __set_SMPUADDRx __set_sPMPADDRx
#define __set_SMPUENTRYx __set_sPMPENTRYx
#define __get_SMPUENTRYx __get_sPMPENTRYx
405 
416 {
417 #if __RISCV_XLEN == 32
418  __RV_CSR_WRITE(CSR_SMPUSWITCH0, (uint32_t)val);
419  __RV_CSR_WRITE(CSR_SMPUSWITCH1, (uint32_t)(val >> 32));
420 #elif __RISCV_XLEN == 64
422 #else
423  // TODO Add RV128 Handling
424 #endif
425 }
426 
436 {
437 #if __RISCV_XLEN == 32
438  uint32_t lo, hi = 0;
441  return (uint64_t)((((uint64_t)hi) << 32) | lo);
442 #elif __RISCV_XLEN == 64
443  return (uint64_t)__RV_CSR_READ(CSR_SMPUSWITCH0);
444 #else
445  // TODO Add RV128 Handling
446 #endif
447 }
448 
449 #endif
450  /* End of Doxygen Group NMSIS_Core_SPMP_Functions */
452 #endif /* defined(__SPMP_PRESENT) && (__SPMP_PRESENT == 1) */
453 
454 #ifdef __cplusplus
455 }
456 #endif
457 #endif /* __CORE_FEATURE_SPMP_H__ */
__STATIC_INLINE unsigned long __CTZ(unsigned long data)
Count trailing zeros.
#define SPMP_A
#define SPMP_A_NA4
#define SPMP_A_NAPOT
#define SPMP_SHIFT
#define __RV_CSR_READ(csr)
CSR operation Macro for csrr instruction.
#define __RV_CSR_WRITE(csr, val)
CSR operation Macro for csrw instruction.
#define CSR_SPMPCFG3
#define CSR_SPMPADDR10
#define CSR_SPMPADDR9
#define CSR_SPMPADDR0
#define CSR_SPMPADDR11
#define CSR_SPMPADDR14
#define CSR_SMPUSWITCH1
#define CSR_SPMPADDR12
#define CSR_SPMPADDR1
#define CSR_SPMPCFG0
#define CSR_SPMPADDR8
#define CSR_SPMPADDR2
#define CSR_SPMPADDR7
#define CSR_SPMPADDR6
#define CSR_SPMPADDR13
#define CSR_SPMPCFG2
#define CSR_SMPUSWITCH0
#define CSR_SPMPADDR3
#define CSR_SPMPCFG1
#define CSR_SPMPADDR15
#define CSR_SPMPADDR5
#define CSR_SPMPADDR4
#define __STATIC_INLINE
Define a static function that may be inlined by the compiler.
Definition: nmsis_gcc.h:65
unsigned long rv_csr_t
Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V.
#define __RISCV_XLEN
Refer to the width of an integer register in bits(either 32 or 64)
__STATIC_INLINE uint64_t __get_SMPUSWITCHx(void)
Get SMPU each entry's on/off status.
__STATIC_INLINE int __get_sPMPENTRYx(unsigned int entry_idx, spmp_config *spmp_cfg)
Get sPMP entry by entry idx.
__STATIC_INLINE void __set_sPMPADDRx(uint32_t csr_idx, rv_csr_t spmpaddr)
Set sPMPADDRx by CSR index.
__STATIC_INLINE void __set_sPMPCFGx(uint32_t csr_idx, rv_csr_t spmpcfg)
Set sPMPCFGx by csr index.
__STATIC_INLINE void __set_SMPUSWITCHx(uint64_t val)
Set SMPU each entry's on/off status.
__STATIC_INLINE rv_csr_t __get_sPMPADDRx(uint32_t csr_idx)
Get sPMPADDRx Register by CSR index.
spmp_config smpu_config
sPMP has upgraded to S-mode Memory Protection Unit, renamed as SMPU, but still share the apis with sP...
__STATIC_INLINE void __set_sPMPENTRYx(uint32_t entry_idx, const spmp_config *spmp_cfg)
Set sPMP entry by entry idx.
__STATIC_INLINE void __set_sPMPxCFG(uint32_t entry_idx, uint8_t spmpxcfg)
Set 8bit sPMPxCFG by spmp entry index.
__STATIC_INLINE rv_csr_t __get_sPMPCFGx(uint32_t csr_idx)
Get sPMPCFGx Register by csr index.
__STATIC_INLINE uint8_t __get_sPMPxCFG(uint32_t entry_idx)
Get 8bit sPMPxCFG Register by sPMP entry index.
unsigned long base_addr
Base address of memory region It must be 2^order aligned address.
unsigned int protection
Set permissions using macros SMPU_S/ SMPU_R/SMPU_W/ SMPU_X of SMPU; SPMP_L/ SPMP_U/SPMP_R/ SPMP_W/SPM...
unsigned long order
Size of memory region as power of 2, it has to be minimum 2 and maxium __RISCV_XLEN according to the ...