Skip to content

Commit 6ff5da1

Browse files
philmd
authored and rth7680 committed
exec: Declare tlb_flush*() in 'exec/cputlb.h'
Move CPU TLB related methods to "exec/cputlb.h". Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> Message-ID: <20241114011310.3615-19-philmd@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
1 parent bcde46f commit 6ff5da1

34 files changed

+224
-211
lines changed

accel/tcg/tcg-accel-ops.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
#include "qemu/main-loop.h"
3434
#include "qemu/guest-random.h"
3535
#include "qemu/timer.h"
36-
#include "exec/exec-all.h"
36+
#include "exec/cputlb.h"
3737
#include "exec/hwaddr.h"
3838
#include "exec/tb-flush.h"
3939
#include "exec/translation-block.h"

cpu-target.c

+1
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
#include "exec/tswap.h"
3232
#include "exec/replay-core.h"
3333
#include "exec/cpu-common.h"
34+
#include "exec/cputlb.h"
3435
#include "exec/exec-all.h"
3536
#include "exec/tb-flush.h"
3637
#include "exec/log.h"

hw/intc/armv7m_nvic.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
#include "system/runstate.h"
2323
#include "target/arm/cpu.h"
2424
#include "target/arm/cpu-features.h"
25-
#include "exec/exec-all.h"
25+
#include "exec/cputlb.h"
2626
#include "exec/memop.h"
2727
#include "qemu/log.h"
2828
#include "qemu/module.h"

hw/ppc/spapr_nested.c

+1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#include "qemu/osdep.h"
22
#include "qemu/cutils.h"
33
#include "exec/exec-all.h"
4+
#include "exec/cputlb.h"
45
#include "helper_regs.h"
56
#include "hw/ppc/ppc.h"
67
#include "hw/ppc/spapr.h"

hw/sh4/sh7750.c

+1
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
#include "hw/sh4/sh_intc.h"
3737
#include "hw/timer/tmu012.h"
3838
#include "exec/exec-all.h"
39+
#include "exec/cputlb.h"
3940
#include "trace.h"
4041

4142
typedef struct SH7750State {

include/exec/cputlb.h

+191-9
Original file line numberDiff line numberDiff line change
@@ -25,21 +25,14 @@
2525
#include "exec/memattrs.h"
2626
#include "exec/vaddr.h"
2727

28-
#ifdef CONFIG_TCG
29-
30-
#if !defined(CONFIG_USER_ONLY)
31-
/* cputlb.c */
28+
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
3229
void tlb_protect_code(ram_addr_t ram_addr);
3330
void tlb_unprotect_code(ram_addr_t ram_addr);
3431
#endif
3532

36-
#endif /* CONFIG_TCG */
37-
3833
#ifndef CONFIG_USER_ONLY
39-
4034
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
4135
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
42-
4336
#endif
4437

4538
/**
@@ -101,4 +94,193 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
10194
hwaddr paddr, int prot,
10295
int mmu_idx, vaddr size);
10396

104-
#endif
97+
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
98+
/**
99+
* tlb_flush_page:
100+
* @cpu: CPU whose TLB should be flushed
101+
* @addr: virtual address of page to be flushed
102+
*
103+
* Flush one page from the TLB of the specified CPU, for all
104+
* MMU indexes.
105+
*/
106+
void tlb_flush_page(CPUState *cpu, vaddr addr);
107+
108+
/**
109+
* tlb_flush_page_all_cpus_synced:
110+
* @cpu: src CPU of the flush
111+
* @addr: virtual address of page to be flushed
112+
*
113+
* Flush one page from the TLB of all CPUs, for all
114+
* MMU indexes.
115+
*
116+
* When this function returns, no CPUs will subsequently perform
117+
* translations using the flushed TLBs.
118+
*/
119+
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
120+
121+
/**
122+
* tlb_flush:
123+
* @cpu: CPU whose TLB should be flushed
124+
*
125+
* Flush the entire TLB for the specified CPU. Most CPU architectures
126+
* allow the implementation to drop entries from the TLB at any time
127+
* so this is generally safe. If more selective flushing is required
128+
* use one of the other functions for efficiency.
129+
*/
130+
void tlb_flush(CPUState *cpu);
131+
132+
/**
133+
* tlb_flush_all_cpus_synced:
134+
* @cpu: src CPU of the flush
135+
*
136+
* Flush the entire TLB for all CPUs, for all MMU indexes.
137+
*
138+
* When this function returns, no CPUs will subsequently perform
139+
* translations using the flushed TLBs.
140+
*/
141+
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
142+
143+
/**
144+
* tlb_flush_page_by_mmuidx:
145+
* @cpu: CPU whose TLB should be flushed
146+
* @addr: virtual address of page to be flushed
147+
* @idxmap: bitmap of MMU indexes to flush
148+
*
149+
* Flush one page from the TLB of the specified CPU, for the specified
150+
* MMU indexes.
151+
*/
152+
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
153+
uint16_t idxmap);
154+
155+
/**
156+
* tlb_flush_page_by_mmuidx_all_cpus_synced:
157+
* @cpu: Originating CPU of the flush
158+
* @addr: virtual address of page to be flushed
159+
* @idxmap: bitmap of MMU indexes to flush
160+
*
161+
* Flush one page from the TLB of all CPUs, for the specified
162+
* MMU indexes.
163+
*
164+
* When this function returns, no CPUs will subsequently perform
165+
* translations using the flushed TLBs.
166+
*/
167+
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
168+
uint16_t idxmap);
169+
170+
/**
171+
* tlb_flush_by_mmuidx:
172+
* @cpu: CPU whose TLB should be flushed
173+
* @wait: If true ensure synchronisation by exiting the cpu_loop
174+
* @idxmap: bitmap of MMU indexes to flush
175+
*
176+
* Flush all entries from the TLB of the specified CPU, for the specified
177+
* MMU indexes.
178+
*/
179+
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
180+
181+
/**
182+
* tlb_flush_by_mmuidx_all_cpus_synced:
183+
* @cpu: Originating CPU of the flush
184+
* @idxmap: bitmap of MMU indexes to flush
185+
*
186+
* Flush all entries from the TLB of all CPUs, for the specified
187+
* MMU indexes.
188+
*
189+
* When this function returns, no CPUs will subsequently perform
190+
* translations using the flushed TLBs.
191+
*/
192+
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
193+
194+
/**
195+
* tlb_flush_page_bits_by_mmuidx
196+
* @cpu: CPU whose TLB should be flushed
197+
* @addr: virtual address of page to be flushed
198+
* @idxmap: bitmap of mmu indexes to flush
199+
* @bits: number of significant bits in address
200+
*
201+
* Similar to tlb_flush_page_mask, but with a bitmap of indexes.
202+
*/
203+
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
204+
uint16_t idxmap, unsigned bits);
205+
206+
/* Similarly, with broadcast and syncing. */
207+
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
208+
uint16_t idxmap,
209+
unsigned bits);
210+
211+
/**
212+
* tlb_flush_range_by_mmuidx
213+
* @cpu: CPU whose TLB should be flushed
214+
* @addr: virtual address of the start of the range to be flushed
215+
* @len: length of range to be flushed
216+
* @idxmap: bitmap of mmu indexes to flush
217+
* @bits: number of significant bits in address
218+
*
219+
* For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
220+
* comparing only the low @bits worth of each virtual page.
221+
*/
222+
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
223+
vaddr len, uint16_t idxmap,
224+
unsigned bits);
225+
226+
/* Similarly, with broadcast and syncing. */
227+
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
228+
vaddr addr,
229+
vaddr len,
230+
uint16_t idxmap,
231+
unsigned bits);
232+
#else
233+
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
234+
{
235+
}
236+
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
237+
{
238+
}
239+
static inline void tlb_flush(CPUState *cpu)
240+
{
241+
}
242+
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
243+
{
244+
}
245+
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
246+
vaddr addr, uint16_t idxmap)
247+
{
248+
}
249+
250+
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
251+
{
252+
}
253+
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
254+
vaddr addr,
255+
uint16_t idxmap)
256+
{
257+
}
258+
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
259+
uint16_t idxmap)
260+
{
261+
}
262+
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
263+
vaddr addr,
264+
uint16_t idxmap,
265+
unsigned bits)
266+
{
267+
}
268+
static inline void
269+
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
270+
uint16_t idxmap, unsigned bits)
271+
{
272+
}
273+
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
274+
vaddr len, uint16_t idxmap,
275+
unsigned bits)
276+
{
277+
}
278+
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
279+
vaddr addr,
280+
vaddr len,
281+
uint16_t idxmap,
282+
unsigned bits)
283+
{
284+
}
285+
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
286+
#endif /* CPUTLB_H */

0 commit comments

Comments (0)