
Commit 13acd10

cortex-r82: Add Non-MPU SMP FVP Example
This example shows how to bring up FreeRTOS-SMP on an Arm Cortex-R82 multiprocessor system using the BaseR AEMv8R Architecture Envelope Model (AEM) Fixed Virtual Platform (FVP).

Signed-off-by: Ahmed Ismail <Ahmed.Ismail@arm.com>
1 parent 27ec9c9 commit 13acd10

17 files changed (+1802 lines, -0 lines)
Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
# Copyright 2023-2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: MIT

cmake_minimum_required(VERSION 3.15)

add_library(bsp INTERFACE)

target_sources(bsp
    INTERFACE
        ${CMAKE_CURRENT_SOURCE_DIR}/Source/port_asm_vectors.S
        ${CMAKE_CURRENT_SOURCE_DIR}/Source/boot.S
        ${CMAKE_CURRENT_SOURCE_DIR}/Source/xil-crt0.S
        ${CMAKE_CURRENT_SOURCE_DIR}/Source/gic.c
)

target_include_directories(bsp
    INTERFACE
        ${CMAKE_CURRENT_SOURCE_DIR}/Include
)

target_link_libraries(bsp
    INTERFACE
        freertos_kernel
)
Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
/* Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
 * SPDX-License-Identifier: MIT
 */

#define GICD_BASE                     ( 0xAF000000UL )                         /* Base of GIC Distributor on BaseR FVP */
#define GICR_BASE_PER_CORE( core )    ( 0xAF100000 + ( 0x20000 * ( core ) ) )  /* Base of GIC Redistributor per core on BaseR FVP */
#define SGI_BASE                      ( 0x10000 )                              /* SGI Base */
#define GICD_CTLR                     ( 0x000 )                                /* Distributor Control Register */
#define GICR_WAKER                    ( 0x14 )                                 /* Redistributor Wake Register */
#define GICR_PWRR                     ( 0x24 )                                 /* Redistributor Power Register */
#define GICR_IGROUPR0                 ( SGI_BASE + 0x80 )                      /* Interrupt Group Registers */
#define GICR_ISENABLER0               ( SGI_BASE + 0x100 )                     /* Interrupt Set-Enable Registers */
#define GICR_IPRIORITYR( n )          ( SGI_BASE + ( 0x400 + ( 4 * n ) ) )     /* Interrupt Priority Registers */
#define GICR_IGRPMODR0                ( SGI_BASE + 0xD00 )                     /* Redistributor Interrupt Group Modifier Register */

#define GICD_CTLR_ENABLEGRP1NS_BIT    ( 1U )                                   /* GICD_CTLR.EnableGrp1NS bit */
#define GICD_CTLR_ENABLEGRP1S_BIT     ( 2U )                                   /* GICD_CTLR.EnableGrp1S bit */
#define GICD_CTLR_ARES_BIT            ( 4U )                                   /* GICD_CTLR.ARE_S bit */
#define GICD_CTLR_DS_BIT              ( 6U )                                   /* GICD_CTLR.DS bit */

#define GICR_PWRR_RDPD_BIT            ( 0U )                                   /* GICR_PWRR.RDPD bit */

#define GICR_WAKER_PS_BIT             ( 1U )                                   /* GICR_WAKER.PS bit */
#define GICR_WAKER_CA_BIT             ( 2U )                                   /* GICR_WAKER.CA bit */

#define GIC_MAX_INTERRUPT_ID          ( 31UL )                                 /* Maximum Interrupt ID for PPIs and SGIs */
#define GIC_WAIT_TIMEOUT              ( 1000000U )                             /* Timeout for waiting on GIC operations */

/**
 * Assigns the specified interrupt to Group 1 and enables it
 * in the Redistributor for the local core.
 */
void vGIC_EnableIRQ( uint32_t ulInterruptID );

/**
 * Enables signaling of Group-1 interrupts at EL1 via ICC_IGRPEN1_EL1.
 */
void vGIC_EnableCPUInterface( void );

/**
 * Initializes the GIC Distributor:
 * - Enables Group-1 Non-Secure and Group-1 Secure interrupts
 * - Enables the Affinity Routing (ARE_S) and Disable Security (DS) bits
 */
void vGIC_InitDist( void );

/**
 * Powers up and wakes the Redistributor for the current core:
 * 1. Clears the Redistributor power-down bit and waits for RDPD=0
 * 2. Clears the Processor-Sleep bit and waits for Children-Asleep=0
 */
void vGIC_PowerUpRedistributor( void );

/**
 * Sets the priority of the specified SGI/PPI (INTID 0-31) in the local
 * Redistributor bank via GICR_IPRIORITYR.
 * For shared peripheral interrupts (SPIs, INTID >= 32) use the GICD_IPRIORITYR path.
 *
 * @param ulInterruptID The ID of the interrupt to set the priority for.
 * @param ulPriority The priority value to set.
 */
void vGIC_SetPriority( uint32_t ulInterruptID, uint32_t ulPriority );

/**
 * Powers up the GIC Redistributor, sets up the priority for SGI0,
 * sets SGI0 to be a Group 1 interrupt, and enables delivery of Group-1 IRQs to EL1.
 */
void vGIC_SetupSgi0( void );
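Taken together, these prototypes cover the whole per-core GIC bring-up: the Distributor is configured once, and every core then wakes its own Redistributor and CPU interface. Below is a minimal sketch of one plausible calling order from C start-up code; the routine name vApplicationGICInit, the assumed header name gic.h, and the tick-interrupt INTID (30, the EL1 physical timer PPI) and its priority are illustrative assumptions, not part of this commit.

/* Illustrative sketch only - assumes the prototypes above live in "gic.h". */
#include <stdint.h>
#include "gic.h"

#define TIMER_PPI_INTID       ( 30UL )   /* EL1 physical timer PPI - assumed tick source */
#define TIMER_PPI_PRIORITY    ( 0xA0UL ) /* Assumed priority value */

/* Hypothetical helper, called once on every core during start-up. */
void vApplicationGICInit( uint32_t ulCoreID )
{
    if( ulCoreID == 0 )
    {
        vGIC_InitDist();                  /* Distributor is shared: groups, ARE_S and DS set once */
    }

    vGIC_SetupSgi0();                     /* Power up this core's Redistributor, make SGI0 a
                                           * Group-1 interrupt, enable Group-1 IRQs at EL1 */

    vGIC_SetPriority( TIMER_PPI_INTID, TIMER_PPI_PRIORITY );
    vGIC_EnableIRQ( TIMER_PPI_INTID );    /* Group 1 + set-enable in the local Redistributor */
}

The only ordering that matters here is Distributor first (once), then the per-core Redistributor and CPU-interface path; which INTIDs get enabled afterwards depends on the application.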
Lines changed: 218 additions & 0 deletions
@@ -0,0 +1,218 @@
/******************************************************************************
 * Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
 * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
 * SPDX-License-Identifier: MIT
 ******************************************************************************/
#if defined(__ARMCC_VERSION)
/* Externs needed by the stack setup code below. These are defined in the
 * Scatter-Loading description file (armclang.sct). */
.set __el1_stack, Image$$ARM_LIB_STACK$$Base
.set _el1_stack_end, Image$$ARM_LIB_HEAP$$Base
#endif

#include "FreeRTOSConfig.h"

.global _prestart
.global _boot

.global __el1_stack
.global _vector_table

.set EL1_stack, __el1_stack

.set EL1_stack_end, _el1_stack_end

.set vector_base, _vector_table

/*
 * N_CPUS_SHIFT must equal log2(configNUMBER_OF_CORES). It represents the
 * number of bits required to index the core that owns a particular slice
 * of the shared EL1 stack pool.
 *
 * To avoid overlapping stack regions, the code assumes
 * configNUMBER_OF_CORES is a power of two. The static check below forces
 * a build-time error if that assumption is broken.
 */
#if ( ( configNUMBER_OF_CORES & ( configNUMBER_OF_CORES - 1 ) ) != 0 )
#error "configNUMBER_OF_CORES must be a power of two"
#endif

/* Compute log2(configNUMBER_OF_CORES). */
#if ( configNUMBER_OF_CORES == 1 )
.set N_CPUS_SHIFT, 0
#elif ( configNUMBER_OF_CORES == 2 )
.set N_CPUS_SHIFT, 1
#elif ( configNUMBER_OF_CORES == 4 )
.set N_CPUS_SHIFT, 2
#else
#error "Unsupported configNUMBER_OF_CORES value - must be a power of two up to 4"
#endif

.section .boot,"ax"

_prestart:
_boot:
start:
    /* Clear all GP registers (x0-x30) for a known initial state */
    mov x0, #0
    mov x1, #0
    mov x2, #0
    mov x3, #0
    mov x4, #0
    mov x5, #0
    mov x6, #0
    mov x7, #0
    mov x8, #0
    mov x9, #0
    mov x10, #0
    mov x11, #0
    mov x12, #0
    mov x13, #0
    mov x14, #0
    mov x15, #0
    mov x16, #0
    mov x17, #0
    mov x18, #0
    mov x19, #0
    mov x20, #0
    mov x21, #0
    mov x22, #0
    mov x23, #0
    mov x24, #0
    mov x25, #0
    mov x26, #0
    mov x27, #0
    mov x28, #0
    mov x29, #0
    mov x30, #0

    mrs x0, currentEL
    cmp x0, #0x4
    beq InitEL1

    b error                  /* Not at EL1 (CurrentEL != 0x4), so hang in the error loop */
InitEL1:
    /* Set vector table base address */
    ldr x1, =vector_base
    msr VBAR_EL1, x1

    mrs x0, CPACR_EL1
    /* Allow FP/SIMD at both EL1 and EL0: CPACR_EL1.FPEN[21:20] = 0b11 */
    orr x0, x0, #(0x3 << 20)
    msr CPACR_EL1, x0        /* Enable FP/SIMD access at EL1 and EL0 */
    isb

    /* Clear FP status flags (FPSR) to avoid spurious exceptions on first use */
    mov x0, 0x0
    msr FPSR, x0

    /* Define stack pointer for current exception level */
#if configNUMBER_OF_CORES > 1
    /* Divide the EL1 stack region equally among all cores, then set SP based on MPIDR_EL1[7:0] */
    /* N_CPUS_SHIFT = log2(#cores) is a build-time constant */
    mov x0, N_CPUS_SHIFT     /* log2(#cores) */
    /* Load overall stack limits */
    ldr x2, =EL1_stack       /* low address of the shared stack pool */
    ldr x3, =EL1_stack_end   /* high address (one past the pool) */
    /* x1 = total size of the pool, x1 >> N_CPUS_SHIFT = size per core */
    sub x1, x3, x2           /* total_stack_size */
    lsr x1, x1, x0           /* slice_size = total / #cores */
    /* x4 = this CPU's index (Aff0 field of MPIDR_EL1) */
    mrs x4, MPIDR_EL1
    and x4, x4, #0xFF        /* core_id in {0 .. N_CPUS-1} */
    cmp x4, #configNUMBER_OF_CORES
    b.hs error
    /* x0 = slice_size * core_id, i.e. how far to step back from the top */
    mul x0, x1, x4
    /* sp = top_of_pool - offset (so core 0 gets the very top) */
    sub x3, x3, x0           /* x3 = initial SP for this core */
    bic x3, x3, #0xF         /* keep the mandated 16-byte alignment */
    mov sp, x3
#else
    ldr x2, =EL1_stack_end
    mov sp, x2
#endif

    /* Enable ICC system-register interface (SRE=1) and disable FIQ/IRQ bypass (DFB/DIB) */
    mov x0, #0x7
    msr ICC_SRE_EL1, x0

    /* Invalidate I and D caches */
    ic IALLU
    bl invalidate_dcaches
    dsb sy
    isb

    /* Unmask SError interrupts (clear DAIF.A bit) */
    mrs x1, DAIF
    bic x1, x1, #(0x1 << 8)
    msr DAIF, x1

    /* Configure SCTLR_EL1:
     * - Enable data cache (C=1)
     * - Allow EL0 to execute WFE/WFI (set nTWE/nTWI so they don't trap)
     */
    mrs x1, SCTLR_EL1
    orr x1, x1, #(1 << 18)   /* nTWE = 1, WFE at EL0 does not trap */
    orr x1, x1, #(1 << 16)   /* nTWI = 1, WFI at EL0 does not trap */
    orr x1, x1, #(1 << 2)    /* C = 1, enable data cache */
    msr SCTLR_EL1, x1
    isb

    /* Branch to C-level startup (zero BSS, init data, etc.) */
    bl _startup

/* If we ever get here, something went wrong; hang forever */
error:
    b error

invalidate_dcaches:

    dmb ISH
    mrs x0, CLIDR_EL1             /* x0 = CLIDR */
    ubfx w2, w0, #24, #3          /* w2 = CLIDR.LoC */
    cmp w2, #0                    /* LoC is 0? */
    b.eq invalidateCaches_end     /* No cleaning required */
    mov w1, #0                    /* w1 = level iterator */

invalidateCaches_flush_level:
    add w3, w1, w1, lsl #1        /* w3 = w1 * 3 (shift amount for this level's cache-type field) */
    lsr w3, w0, w3                /* w3 = w0 >> w3 */
    ubfx w3, w3, #0, #3           /* w3 = cache type of this level */
    cmp w3, #2                    /* No data cache at this level? */
    b.lt invalidateCaches_next_level

    lsl w4, w1, #1
    msr CSSELR_EL1, x4            /* Select current cache level in CSSELR */
    isb                           /* ISB required to reflect the new CCSIDR */
    mrs x4, CCSIDR_EL1            /* x4 = CCSIDR */

    ubfx w3, w4, #0, #3
    add w3, w3, #2                /* w3 = log2(line size) */
    ubfx w5, w4, #13, #15         /* w5 = maximum set number */
    ubfx w4, w4, #3, #10          /* w4 = maximum way number */
    clz w6, w4                    /* w6 = 32 - log2(number of ways) */

invalidateCaches_flush_set:
    mov w8, w4                    /* w8 = way iterator */
invalidateCaches_flush_way:
    lsl w7, w1, #1                /* Fill level field */
    lsl w9, w5, w3
    orr w7, w7, w9                /* Fill set field */
    lsl w9, w8, w6
    orr w7, w7, w9                /* Fill way field */
    dc CISW, x7                   /* Clean and invalidate by set/way */
    subs w8, w8, #1               /* Decrement way */
    b.ge invalidateCaches_flush_way
    subs w5, w5, #1               /* Decrement set */
    b.ge invalidateCaches_flush_set

invalidateCaches_next_level:
    add w1, w1, #1                /* Next level */
    cmp w2, w1
    b.gt invalidateCaches_flush_level

invalidateCaches_end:
    ret

.end
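The per-core stack carve-up in the SMP branch above is easiest to read as plain integer arithmetic. The following C fragment mirrors the assembly purely for illustration (the real computation runs in the boot code before any C exists); the function and parameter names are assumptions.

#include <stdint.h>

/* Illustrative mirror of the SP calculation above: el1_stack / el1_stack_end are the
 * low and one-past-the-top addresses of the shared pool, core_id is MPIDR_EL1.Aff0,
 * and n_cpus_shift is log2(configNUMBER_OF_CORES). */
static uintptr_t prvInitialStackPointer( uintptr_t el1_stack,
                                         uintptr_t el1_stack_end,
                                         uint32_t core_id,
                                         uint32_t n_cpus_shift )
{
    uintptr_t slice = ( el1_stack_end - el1_stack ) >> n_cpus_shift; /* slice_size = total / #cores */
    uintptr_t sp = el1_stack_end - ( slice * core_id );              /* core 0 gets the very top */

    return sp & ~( uintptr_t ) 0xF;                                  /* AArch64 SP must stay 16-byte aligned */
}

For example, with a 64 KiB pool and four cores, each core owns a 16 KiB slice and core 3 starts 48 KiB below the top of the pool; the range check against configNUMBER_OF_CORES in the assembly catches any core whose Aff0 value falls outside the pool.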
