Whether you need to provide a callout that lets the kernel interface with the cache controller depends on the cache controller circuitry in your system.
On the x86 architecture, the cache controller is tightly integrated with the CPU, so the kernel doesn't need to talk to it directly. On the ARM architecture, you need a callout that can tell the cache controllers to invalidate portions of the cache when the kernel performs certain operations.
The callout prototype for cache control is control(). This callout gets passed:

- the base (physical) address of the area to operate on (paddr32_t base)
- the number of cache lines to operate on (unsigned num_lines)
- flags that indicate the operation to perform (int flags)
- a pointer to this cache's cacheattr_entry structure in the system page (struct cacheattr_entry *cache)
- a pointer to the system page itself (volatile struct syspage_entry *)
This callout is responsible for returning the number of cache lines that it affected. The caller (the kernel) can use this count to call the control() callout repeatedly, at a higher level, until the whole range has been covered. A return of 0 (zero) means that the entire cache was affected (e.g., all entries were invalidated).
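To make that calling pattern concrete, here's a minimal caller-side sketch in C. It isn't kernel source: the flush_range() helper, its parameters, and the stand-in typedefs are hypothetical, and only the control() prototype itself comes from the listing below.

#include <stdint.h>

/* Stand-in declarations for illustration; the real definitions come from the system headers. */
typedef uint32_t paddr32_t;
struct cacheattr_entry;                  /* opaque here */
struct syspage_entry;                    /* opaque here */

/* Pointer type matching the control() callout prototype described above. */
typedef unsigned (cache_control_t)(paddr32_t base, unsigned num_lines, int flags,
                                   struct cacheattr_entry *cache,
                                   volatile struct syspage_entry *sysp);

/*
 * Hypothetical caller loop: invoke the callout until the whole range is
 * covered, using the returned line count to advance the base address.
 * Returns the number of lines left unprocessed (0 on success).
 */
static unsigned
flush_range(cache_control_t *control, paddr32_t base, unsigned num_lines, int flags,
            unsigned line_size, struct cacheattr_entry *cache,
            volatile struct syspage_entry *sysp)
{
    while (num_lines > 0) {
        unsigned done = control(base, num_lines, flags, cache, sysp);

        if (done == 0) {
            break;                  /* 0 => the entire cache was affected */
        }
        if (done > num_lines) {
            done = num_lines;       /* defensive clamp */
        }
        base      += done * line_size;
        num_lines -= done;
    }
    return num_lines;
}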
For more information about caches, see cacheattr in the System Page chapter.
The kernel uses the control() callout's flags argument to tell the callout which operation to perform. The flag values can be the same as the MS_* flags used with the msync() function (e.g., MS_ASYNC to make the controller perform asynchronous writes, and MS_INVALIDATE_ICACHE to invalidate only the instruction cache).
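As a rough illustration of the flag handling (mirroring the tst on MS_INVALIDATE in the data-cache callout below), here's a hedged C sketch. The per-line helpers are hypothetical stand-ins for the DCCMVAC/DCCIMVAC instructions; only the MS_* constants come from <sys/mman.h> (MS_INVALIDATE_ICACHE is a QNX extension).

#include <sys/mman.h>   /* MS_ASYNC, MS_SYNC, MS_INVALIDATE */

/* Hypothetical per-line helpers standing in for the cache-maintenance instructions. */
static void clean_line(unsigned long addr)                { (void)addr; /* DCCMVAC on real hardware  */ }
static void clean_and_invalidate_line(unsigned long addr) { (void)addr; /* DCCIMVAC on real hardware */ }

/* Sketch of the per-line decision a data-cache callout makes on its flags argument. */
static void
handle_line(unsigned long addr, int flags)
{
    if (flags & MS_INVALIDATE) {
        clean_and_invalidate_line(addr);   /* write back, then discard the line */
    } else {
        clean_line(addr);                  /* write back only (MS_ASYNC / MS_SYNC) */
    }
}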
/*
 * Cortex A8 specific cache operations
 *
 * unsigned control(paddr32_t base,
 *                  unsigned num_lines,
 *                  int flags,
 *                  struct cacheattr_entry *cache,
 *                  volatile struct syspage_entry * )
 */

#include "callout.ah"

#define MAX_LINES   32
#define LINE_SIZE   64
#define LINE_LIMIT  1024

CALLOUT_START(cache_a8_i, 0, 0)
    /*
     * For large flushes just do the whole cache
     */
    cmp     r1, #LINE_LIMIT
    movhi   r0, #0
    mcrhi   p15, 0, r0, c7, c5, 0       // ICIALLU
    mcrhi   p15, 0, r1, c7, c10, 4      // DSB (deprecated encoding)
    movhi   pc, lr

    /*
     * Trim the address to a cache line boundary, and stop at 32 lines
     * to avoid having to re-issue the whole flush if we get preempted
     */
    bic     r3, r0, #(LINE_SIZE-1)
    cmp     r1, #MAX_LINES
    movhi   r1, #MAX_LINES
    mov     r0, r1

    /*
     * Invalidate lines by address
     */
0:  mcr     p15, 0, r3, c7, c5, 1       // ICIMVAU
    add     r3, r3, #LINE_SIZE
    subs    r1, r1, #1
    bne     0b
    mcr     p15, 0, r1, c7, c5, 6       // BPIALL
    mcr     p15, 0, r1, c7, c10, 4      // DSB (deprecated encoding)
    mov     pc, lr
CALLOUT_END(cache_a8_i)


CALLOUT_START(cache_a8_d, 0, 0)
    /*
     * Trim the address to a cache line boundary, and stop at 32 lines
     * to avoid having to re-issue the whole flush if we get preempted
     */
    bic     r3, r0, #(LINE_SIZE-1)
    cmp     r1, #MAX_LINES
    movhi   r1, #MAX_LINES
    mov     r0, r1

    tst     r2, #MS_INVALIDATE
    bne     1f

    /*
     * Clean lines by address
     */
0:  mcr     p15, 0, r3, c7, c10, 1      // DCCMVAC
    add     r3, r3, #LINE_SIZE
    subs    r1, r1, #1
    bne     0b
    mcr     p15, 0, r1, c7, c10, 4      // DSB (deprecated encoding)
    mov     pc, lr

    /*
     * Clean and invalidate lines by address
     */
1:  mcr     p15, 0, r3, c7, c14, 1      // DCCIMVAC
    add     r3, r3, #LINE_SIZE
    subs    r1, r1, #1
    bne     1b
    mcr     p15, 0, r1, c7, c10, 4      // DSB (deprecated encoding)
    mov     pc, lr
CALLOUT_END(cache_a8_d)
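For context, board startup code normally makes callouts like these reachable through the cacheattr section mentioned earlier. The sketch below is an assumption, not taken from this document: it assumes the startup library's add_cache() helper, the CACHE_FLAG_* constants, and the cpuinfo_entry cache index fields, and the 64-byte line / 512-line geometry is only an example value; check your processor manual and startup library for the real ones.

#include "startup.h"    /* assumed: add_cache(), struct callout_rtn, CACHE_FLAG_* */

/* The callout routines defined in the assembly above. */
extern struct callout_rtn cache_a8_i;
extern struct callout_rtn cache_a8_d;

/*
 * Hypothetical registration sketch: add the Cortex A8 L1 caches to the
 * system page's cacheattr section and attach the control() callouts.
 * The line size and line count are example values only.
 */
static void
register_a8_caches(struct cpuinfo_entry *cpu)
{
    cpu->ins_cache  = add_cache(cpu->ins_cache,  CACHE_FLAG_INSTR, 64, 512, &cache_a8_i);
    cpu->data_cache = add_cache(cpu->data_cache, CACHE_FLAG_DATA,  64, 512, &cache_a8_d);
}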