@@ 61 @@
 #endif
 
 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
-TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
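The hunk above removes the global virtual-PC hash table (`tb_hash`); TB lookups by guest PC now go through a small per-CPU direct-mapped cache, `env->tb_jmp_cache`, while `tb_phys_hash` remains the authoritative index keyed by physical address. A minimal sketch of the direct-mapped lookup, assuming a power-of-two TB_JMP_CACHE_SIZE and a `tb_find_slow()` fallback (both named here for illustration, not taken from this patch):

    /* Sketch only: a cache hit avoids walking the physical hash chain. */
    static inline TranslationBlock *tb_find_fast(CPUState *env, target_ulong pc)
    {
        TranslationBlock *tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
        if (tb && tb->pc == pc)
            return tb;                 /* hit: reuse the cached translation */
        return tb_find_slow(env, pc);  /* miss: fall back to tb_phys_hash */
    }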
@@ 91 @@
     uint32_t phys_offset;
 } PhysPageDesc;
 
-/* Note: the VirtPage handling is absolete and will be suppressed
-   ASAP */
-typedef struct VirtPageDesc {
-    /* physical address of code page. It is valid only if 'valid_tag'
-       matches 'virt_valid_tag' */
-    target_ulong phys_addr;
-    unsigned int valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-    /* original page access rights. It is valid only if 'valid_tag'
-       matches 'virt_valid_tag' */
-    unsigned int prot;
-#endif
-} VirtPageDesc;
-
 #define L2_BITS 10
 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
 
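For context, `L1_BITS`/`L2_BITS` split a 32-bit virtual page number across a two-level descriptor table. A worked example, assuming 4 KB target pages (`TARGET_PAGE_BITS` = 12, so `L1_BITS` = 32 - 10 - 12 = 10; the lookup shape is an assumption based on the declarations below):

    /* Illustrative two-level index split (4 KB target pages assumed). */
    unsigned int page_index = addr >> TARGET_PAGE_BITS;     /* 20-bit VPN */
    unsigned int i1 = page_index >> L2_BITS;                /* top 10 bits */
    unsigned int i2 = page_index & ((1 << L2_BITS) - 1);    /* low 10 bits */
    PageDesc *pd = l1_map[i1] ? &l1_map[i1][i2] : NULL;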
@@ 108 @@
 static PageDesc *l1_map[L1_SIZE];
 PhysPageDesc **l1_phys_map;
 
-#if !defined(CONFIG_USER_ONLY)
-#if TARGET_LONG_BITS > 32
-#define VIRT_L_BITS 9
-#define VIRT_L_SIZE (1 << VIRT_L_BITS)
-static void *l1_virt_map[VIRT_L_SIZE];
-#else
-static VirtPageDesc *l1_virt_map[L1_SIZE];
-#endif
-static unsigned int virt_valid_tag;
-#endif
-
 /* io memory support */
 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
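The `io_mem_write`/`io_mem_read` tables are indexed by I/O slot and by access width; the second index is most plausibly log2 of the access size (0/1/2 for 1-, 2- and 4-byte accesses, with the fourth slot spare). A hedged sketch of the dispatch, where `io_mem_opaque` is an assumed companion array holding each slot's opaque pointer:

    /* Sketch of table-driven I/O dispatch by access width (assumptions
       noted above; not verbatim from this file). */
    static inline void io_write(unsigned int io_index, target_phys_addr_t addr,
                                uint32_t val, int size_log2)
    {
        io_mem_write[io_index][size_log2](io_mem_opaque[io_index], addr, val);
    }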
@@ 164 @@
     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
         qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
-#if !defined(CONFIG_USER_ONLY)
-    virt_valid_tag = 1;
-#endif
     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
 }
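The loop above derives `qemu_host_page_bits` as ceil(log2(qemu_host_page_size)), and the mask rounds addresses down to a host-page boundary. An illustrative use (the `start`/`end` variables are hypothetical):

    /* Example: qemu_host_page_size = 8192 -> qemu_host_page_bits = 13,
       qemu_host_page_mask = ~8191 = 0xffffe000 on a 32-bit host. */
    start &= qemu_host_page_mask;                                /* round down */
    end = (end + qemu_host_page_size - 1) & qemu_host_page_mask; /* round up */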
@@ 237 @@
                             target_ulong vaddr);
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);
-
-static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
-{
-#if TARGET_LONG_BITS > 32
-    void **p, **lp;
-
-    p = l1_virt_map;
-    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
-#else
-    VirtPageDesc *p, **lp;
-
-    lp = &l1_virt_map[index >> L2_BITS];
-    p = *lp;
-    if (!p) {
-        /* allocate if not found */
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
-        *lp = p;
-    }
-    return p + (index & (L2_SIZE - 1));
-#endif
-}
-
-static inline VirtPageDesc *virt_page_find(target_ulong index)
-{
-    return virt_page_find_alloc(index, 0);
-}
-
-#if TARGET_LONG_BITS > 32
-static void virt_page_flush_internal(void **p, int level)
-{
-    int i;
-    if (level == 0) {
-        VirtPageDesc *q = (VirtPageDesc *)p;
-        for(i = 0; i < VIRT_L_SIZE; i++)
-            q[i].valid_tag = 0;
-    } else {
-        level--;
-        for(i = 0; i < VIRT_L_SIZE; i++) {
-            if (p[i])
-                virt_page_flush_internal(p[i], level);
-        }
-    }
-}
-#endif
-
-static void virt_page_flush(void)
-{
-    virt_valid_tag++;
-
-    if (virt_valid_tag == 0) {
-        virt_valid_tag = 1;
-#if TARGET_LONG_BITS > 32
-        virt_page_flush_internal(l1_virt_map, 5);
-#else
-        {
-            int i, j;
-            VirtPageDesc *p;
-            for(i = 0; i < L1_SIZE; i++) {
-                p = l1_virt_map[i];
-                if (p) {
-                    for(j = 0; j < L2_SIZE; j++)
-                        p[j].valid_tag = 0;
-                }
-            }
-        }
-#endif
-    }
-}
-#else
-static void virt_page_flush(void)
-{
-}
 #endif
 
 void cpu_exec_init(void)
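The deleted `virt_page_find_alloc()` hand-unrolled a five-level radix walk for targets with more than 32 bits of virtual address, consuming `VIRT_L_BITS` = 9 index bits per level. The same walk in loop form, shown only to clarify what the five repeated blocks above did (the helper name is illustrative):

    /* Loop form of the unrolled five-level walk deleted above. */
    static VirtPageDesc *virt_page_walk(target_ulong index, int alloc)
    {
        void **p = l1_virt_map;
        int level;
        for (level = 5; level >= 1; level--) {
            void **lp = p + ((index >> (level * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
            if (!*lp) {
                if (!alloc)
                    return NULL;
                /* the leaf level holds VirtPageDesc entries, not pointers */
                *lp = qemu_mallocz((level == 1 ? sizeof(VirtPageDesc)
                                               : sizeof(void *)) * VIRT_L_SIZE);
            }
            p = *lp;
        }
        return (VirtPageDesc *)p + (index & (VIRT_L_SIZE - 1));
    }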
@@ 286 @@
            nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
 #endif
     nb_tbs = 0;
-    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
-    virt_page_flush();
+    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
 
     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
     page_flush_tb();
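With the jump cache cleared by a plain `memset()`, the generation-tag trick (`virt_valid_tag`) is no longer needed: the cache is small enough to wipe outright on every flush. The hash that indexes it only has to map a PC into `[0, TB_JMP_CACHE_SIZE)`; a hypothetical definition, assuming a power-of-two size (the real function may mix in more bits):

    /* Hypothetical: any cheap, well-distributed mapping works here. */
    static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
    {
        return pc & (TB_JMP_CACHE_SIZE - 1);
    }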
@@ 422 @@
     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
 }
 
-static inline void tb_invalidate(TranslationBlock *tb)
+static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
 {
+    PageDesc *p;
     unsigned int h, n1;
-    TranslationBlock *tb1, *tb2, **ptb;
+    target_ulong phys_pc;
+    TranslationBlock *tb1, *tb2;
 
+    /* remove the TB from the hash list */
+    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
+    h = tb_phys_hash_func(phys_pc);
+    tb_remove(&tb_phys_hash[h], tb,
+              offsetof(TranslationBlock, phys_hash_next));
+
+    /* remove the TB from the page list */
+    if (tb->page_addr[0] != page_addr) {
+        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+        tb_page_remove(&p->first_tb, tb);
+        invalidate_page_bitmap(p);
+    }
+    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
+        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+        tb_page_remove(&p->first_tb, tb);
+        invalidate_page_bitmap(p);
+    }
+
     tb_invalidated_flag = 1;
 
     /* remove the TB from the hash list */
-    h = tb_hash_func(tb->pc);
-    ptb = &tb_hash[h];
-    for(;;) {
-        tb1 = *ptb;
-        /* NOTE: the TB is not necessarily linked in the hash. It
-           indicates that it is not currently used */
-        if (tb1 == NULL)
-            return;
-        if (tb1 == tb) {
-            *ptb = tb1->hash_next;
-            break;
-        }
-        ptb = &tb1->hash_next;
-    }
+    h = tb_jmp_cache_hash_func(tb->pc);
+    cpu_single_env->tb_jmp_cache[h] = NULL;
 
     /* suppress this TB from the two jump lists */
     tb_jmp_remove(tb, 0);
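`tb_remove()` (called above with `offsetof(TranslationBlock, phys_hash_next)`) can unlink from any singly linked chain whose `next` pointer sits at a known byte offset, which is why one helper serves both hash chains. A sketch of such a helper, assuming the list is known to contain `tb`:

    /* Generic unlink by 'next'-field offset (sketch, not verbatim). */
    static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                                 int next_offset)
    {
        for (;;) {
            TranslationBlock *tb1 = *ptb;
            if (tb1 == tb) {
                *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
                break;
            }
            ptb = (TranslationBlock **)((char *)tb1 + next_offset);
        }
    }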
@@ 470 @@
         tb1 = tb2;
     }
     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
-}
-
-static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
-{
-    PageDesc *p;
-    unsigned int h;
-    target_ulong phys_pc;
-
-    /* remove the TB from the hash list */
-    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
-    h = tb_phys_hash_func(phys_pc);
-    tb_remove(&tb_phys_hash[h], tb,
-              offsetof(TranslationBlock, phys_hash_next));
-
-    /* remove the TB from the page list */
-    if (tb->page_addr[0] != page_addr) {
-        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
-        tb_page_remove(&p->first_tb, tb);
-        invalidate_page_bitmap(p);
-    }
-    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
-        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
-        tb_page_remove(&p->first_tb, tb);
-        invalidate_page_bitmap(p);
-    }
 
-    tb_invalidate(tb);
     tb_phys_invalidate_count++;
 }
 
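The `(long)tb | 2` idiom tags the low bits of the list pointer: bits 0-1 of a jump-list link encode which of the two jump slots the link came through, and the value 2 doubles as the fail-safe terminator pointing back at the TB itself. The matching decode appears verbatim in old code removed elsewhere in this patch:

    /* Decode of a tagged jump-list link (copied from the deleted code). */
    n = (long)tb & 3;                          /* slot number, or 2 = end */
    tb = (TranslationBlock *)((long)tb & ~3);  /* strip the tag to get the TB */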
@@ 863 @@
         tb_alloc_page(tb, 1, phys_page2);
     else
         tb->page_addr[1] = -1;
-#ifdef DEBUG_TB_CHECK
-    tb_page_check();
-#endif
-}
-
-/* link the tb with the other TBs */
-void tb_link(TranslationBlock *tb)
-{
-#if !defined(CONFIG_USER_ONLY)
-    {
-        VirtPageDesc *vp;
-        target_ulong addr;
-
-        /* save the code memory mappings (needed to invalidate the code) */
-        addr = tb->pc & TARGET_PAGE_MASK;
-        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
-        if (vp->valid_tag == virt_valid_tag &&
-            vp->phys_addr != tb->page_addr[0]) {
-            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
-                   addr, tb->page_addr[0], vp->phys_addr);
-        }
-#endif
-        vp->phys_addr = tb->page_addr[0];
-        if (vp->valid_tag != virt_valid_tag) {
-            vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-            vp->prot = 0;
-#endif
-        }
-
-        if (tb->page_addr[1] != -1) {
-            addr += TARGET_PAGE_SIZE;
-            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
-            if (vp->valid_tag == virt_valid_tag &&
-                vp->phys_addr != tb->page_addr[1]) {
-                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
-                       addr, tb->page_addr[1], vp->phys_addr);
-            }
-#endif
-            vp->phys_addr = tb->page_addr[1];
-            if (vp->valid_tag != virt_valid_tag) {
-                vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-                vp->prot = 0;
-#endif
-            }
-        }
-    }
-#endif
-
 
     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
     tb->jmp_next[0] = NULL;
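The deleted `tb_link()` bookkeeping recorded, per virtual code page, the physical page a TB was translated from, so that a later write through that virtual page could find and invalidate the TB. After this patch, invalidation is keyed purely by physical page, so a guest write can go straight from the RAM address to the page descriptor. A hypothetical flow (the range-invalidation helper name is an assumption, not shown in this hunk):

    /* Hypothetical: physical page -> TB list, no virtual mapping needed. */
    PageDesc *p = page_find(ram_addr >> TARGET_PAGE_BITS);
    if (p && p->first_tb) {
        /* invalidate every TB overlapping the written byte */
        tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    }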
@@ 878 @@
         tb_reset_jump(tb, 0);
     if (tb->tb_next_offset[1] != 0xffff)
         tb_reset_jump(tb, 1);
 
+#ifdef DEBUG_TB_CHECK
+    tb_page_check();
+#endif
 }
 
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
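The comment that closes this hunk introduces the reverse mapping from a host code pointer back to its TB. Because `tbs[]` entries are handed out in ascending code-buffer order, a binary search over `tc_ptr` suffices; a sketch under that ordering assumption:

    /* Sketch: find the TB whose generated code contains tc_ptr. */
    TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    {
        int m_min = 0, m_max = nb_tbs - 1;
        while (m_min <= m_max) {
            int m = (m_min + m_max) >> 1;
            TranslationBlock *tb = &tbs[m];
            unsigned long v = (unsigned long)tb->tc_ptr;
            if (v == tc_ptr)
                return tb;
            else if (tc_ptr < v)
                m_max = m - 1;
            else
                m_min = m + 1;
        }
        return m_max >= 0 ? &tbs[m_max] : NULL;  /* TB starting at or before tc_ptr */
    }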
@@ 1187 @@
         env->tlb_write[1][i].address = -1;
     }
 
-    virt_page_flush();
-    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
+    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
 
 #if !defined(CONFIG_SOFTMMU)
     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
@@ 1209 @@
 
 void tlb_flush_page(CPUState *env, target_ulong addr)
 {
-    int i, n;
-    VirtPageDesc *vp;
-    PageDesc *p;
+    int i;
     TranslationBlock *tb;
 
 #if defined(DEBUG_TLB)
@@ 1226 @@
     tlb_flush_entry(&env->tlb_read[1][i], addr);
     tlb_flush_entry(&env->tlb_write[1][i], addr);
 
-    /* remove from the virtual pc hash table all the TB at this
-       virtual address */
-
-    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
-    if (vp && vp->valid_tag == virt_valid_tag) {
-        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
-        if (p) {
-            /* we remove all the links to the TBs in this virtual page */
-            tb = p->first_tb;
-            while (tb != NULL) {
-                n = (long)tb & 3;
-                tb = (TranslationBlock *)((long)tb & ~3);
-                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
-                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
-                    tb_invalidate(tb);
-                }
-                tb = tb->page_next[n];
-            }
+    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+        tb = env->tb_jmp_cache[i];
+        if (tb &&
+            ((tb->pc & TARGET_PAGE_MASK) == addr ||
+             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
+            env->tb_jmp_cache[i] = NULL;
         }
-        vp->valid_tag = 0;
     }
 
 #if !defined(CONFIG_SOFTMMU)