@@ -210,7 +210,9 @@
         flags = page_get_flags(addr);
         if (flags & PAGE_VALID) {
             virt_addr = addr & ~0xfff;
+#if !defined(CONFIG_SOFTMMU)
             munmap((void *)virt_addr, 4096);
+#endif
             page_set_flags(virt_addr, virt_addr + 4096, 0);
         }
     }
@@ -221,16 +223,14 @@
    1 = generate PF fault
    2 = soft MMU activation required for this block
 */
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
+                             int is_write, int is_user, int is_softmmu)
 {
     uint8_t *pde_ptr, *pte_ptr;
     uint32_t pde, pte, virt_addr;
-    int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
+    int error_code, is_dirty, prot, page_size, ret;
     unsigned long pd;
 
-    cpl = env->hflags & HF_CPL_MASK;
-    is_user = (cpl == 3);
-
 #ifdef DEBUG_MMU
     printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
            addr, is_write, is_user, env->eip);
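
The substantive change in this hunk is the interface: is_user is now passed in rather than derived from the CPL inside the function, and the return value distinguishes three outcomes (0 = handled, 1 = generate #PF, 2 = the code must run under the soft MMU). A minimal caller sketch; only cpu_x86_handle_mmu_fault() and the HF_CPL_MASK derivation come from the patch itself, while raise_pf() and switch_to_softmmu() are hypothetical stand-ins for the surrounding plumbing:

    /* Hypothetical call site illustrating the new return-value contract. */
    extern void raise_pf(CPUX86State *env);          /* stand-in, not in the patch */
    extern void switch_to_softmmu(CPUX86State *env); /* stand-in, not in the patch */

    static void handle_guest_fault(CPUX86State *env, uint32_t addr, int is_write)
    {
        /* the CPL test the patch moves out of the function */
        int is_user = ((env->hflags & HF_CPL_MASK) == 3);
        int ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 0);

        if (ret == 1)
            raise_pf(env);            /* 1 = generate PF fault */
        else if (ret == 2)
            switch_to_softmmu(env);   /* 2 = soft MMU activation required */
        /* ret == 0: mapping established, resume execution */
    }
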
@@ -252,7 +252,7 @@
 
     /* page directory entry */
     pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
-    pde = ldl(pde_ptr);
+    pde = ldl_raw(pde_ptr);
     if (!(pde & PG_PRESENT_MASK)) {
         error_code = 0;
         goto do_fault;
@@ -274,7 +274,7 @@
             pde |= PG_ACCESSED_MASK;
             if (is_dirty)
                 pde |= PG_DIRTY_MASK;
-            stl(pde_ptr, pde);
+            stl_raw(pde_ptr, pde);
         }
 
         pte = pde & ~0x003ff000; /* align to 4MB */
@@ -283,12 +283,12 @@
     } else {
         if (!(pde & PG_ACCESSED_MASK)) {
             pde |= PG_ACCESSED_MASK;
-            stl(pde_ptr, pde);
+            stl_raw(pde_ptr, pde);
         }
 
         /* page directory entry */
         pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));
-        pte = ldl(pte_ptr);
+        pte = ldl_raw(pte_ptr);
         if (!(pte & PG_PRESENT_MASK)) {
             error_code = 0;
             goto do_fault;
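
The pointer arithmetic in the two walk hunks above fuses the shift and the scale: (addr >> 20) & ~3 is the page-directory index (addr >> 22) multiplied by the 4-byte entry size, and (addr >> 10) & 0xffc is likewise the page-table index ((addr >> 12) & 0x3ff) times 4. A standalone check of that equivalence; the address 0x0804a123 is an arbitrary example (PDE index 0x20, PTE index 0x4a):

    /* Verifies the fused index arithmetic used by pde_ptr and pte_ptr above. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t addr = 0x0804a123;
        /* directory entry byte offset: top 10 bits of addr, times 4 */
        assert(((addr >> 20) & ~3u) == ((addr >> 22) << 2));
        /* table entry byte offset: middle 10 bits of addr, times 4 */
        assert(((addr >> 10) & 0xffc) == (((addr >> 12) & 0x3ff) << 2));
        return 0;
    }
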
@@ -308,7 +308,7 @@
             pte |= PG_ACCESSED_MASK;
             if (is_dirty)
                 pte |= PG_DIRTY_MASK;
-            stl(pte_ptr, pte);
+            stl_raw(pte_ptr, pte);
         }
         page_size = 4096;
         virt_addr = addr & ~0xfff;
@@ -325,7 +325,10 @@
     }
 
 do_mapping:
-    if (env->hflags & HF_SOFTMMU_MASK) {
+#if !defined(CONFIG_SOFTMMU)
+    if (is_softmmu)
+#endif
+    {
         unsigned long paddr, vaddr, address, addend, page_offset;
         int index;
 
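
The block opened here fills one entry of the software TLB (the lines elided between this hunk and the next compute index, address and addend): a direct-mapped table indexed by virtual page number, split into user/kernel and read/write halves, where addend is the value added to a guest virtual address to reach the corresponding host address. A sketch of the fast-path load such an entry serves, based on the soft MMU design of this period; sketch_ldl() and softmmu_slow_ldl() are illustrative names, not the actual QEMU ones:

    /* Illustrative fast path; the tlb_read/tlb_write layout comes from the
       patch, the lookup shape from the soft MMU design of this era. */
    extern uint32_t softmmu_slow_ldl(CPUX86State *env, uint32_t addr,
                                     int is_user); /* hypothetical stand-in */

    static inline uint32_t sketch_ldl(CPUX86State *env, uint32_t addr, int is_user)
    {
        int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

        if (env->tlb_read[is_user][index].address == (addr & TARGET_PAGE_MASK)) {
            /* hit: addend converts the guest virtual address to a host pointer */
            return ldl_raw((void *)(addr + env->tlb_read[is_user][index].addend));
        }
        /* miss: slow path, which ends up in cpu_x86_handle_mmu_fault() */
        return softmmu_slow_ldl(env, addr, is_user);
    }
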
@@ -352,32 +355,39 @@
             env->tlb_write[is_user][index].address = address;
             env->tlb_write[is_user][index].addend = addend;
         }
+        page_set_flags(vaddr, vaddr + TARGET_PAGE_SIZE,
+                       PAGE_VALID | PAGE_EXEC | prot);
+        ret = 0;
     }
-    ret = 0;
-    /* XXX: incorrect for 4MB pages */
-    pd = physpage_find(pte & ~0xfff);
-    if ((pd & 0xfff) != 0) {
-        /* IO access: no mapping is done as it will be handled by the
-           soft MMU */
-        if (!(env->hflags & HF_SOFTMMU_MASK))
-            ret = 2;
-    } else {
-        void *map_addr;
-        map_addr = mmap((void *)virt_addr, page_size, prot,
-                        MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
-        if (map_addr == MAP_FAILED) {
-            fprintf(stderr,
-                    "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
-                    pte & ~0xfff, virt_addr);
-            exit(1);
-        }
+#if !defined(CONFIG_SOFTMMU)
+    else {
+        ret = 0;
+        /* XXX: incorrect for 4MB pages */
+        pd = physpage_find(pte & ~0xfff);
+        if ((pd & 0xfff) != 0) {
+            /* IO access: no mapping is done as it will be handled by the
+               soft MMU */
+            if (!(env->hflags & HF_SOFTMMU_MASK))
+                ret = 2;
+        } else {
+            void *map_addr;
+            map_addr = mmap((void *)virt_addr, page_size, prot,
+                            MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
+            if (map_addr == MAP_FAILED) {
+                fprintf(stderr,
+                        "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
+                        pte & ~0xfff, virt_addr);
+                exit(1);
+            }
 #ifdef DEBUG_MMU
-        printf("mmaping 0x%08x to virt 0x%08x pse=%d\n",
-               pte & ~0xfff, virt_addr, (page_size != 4096));
+            printf("mmaping 0x%08x to virt 0x%08x pse=%d\n",
+                   pte & ~0xfff, virt_addr, (page_size != 4096));
 #endif
-        page_set_flags(virt_addr, virt_addr + page_size,
-                       PAGE_VALID | PAGE_EXEC | prot);
+            page_set_flags(virt_addr, virt_addr + page_size,
+                           PAGE_VALID | PAGE_EXEC | prot);
+        }
     }
+#endif
     return ret;
 do_fault_protect:
     error_code = PG_ERROR_P_MASK;
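
The error_code assembled at these fault labels follows the architectural x86 #PF layout: bit 0 set for a protection violation on a present page, bit 1 for a write access, bit 2 for a user-mode access. A minimal sketch of that composition, assuming the PG_ERROR_*_MASK constants match the architectural bit positions (only PG_ERROR_P_MASK appears in this excerpt):

    /* Sketch: composing an x86 page-fault error code. Assumes
       PG_ERROR_P_MASK = 1, PG_ERROR_W_MASK = 2, PG_ERROR_U_MASK = 4,
       i.e. the architectural bit layout. */
    static int pf_error_code(int page_was_present, int is_write, int is_user)
    {
        int error_code = 0;
        if (page_was_present)          /* protection fault, not "not present" */
            error_code |= PG_ERROR_P_MASK;
        if (is_write)
            error_code |= PG_ERROR_W_MASK;
        if (is_user)
            error_code |= PG_ERROR_U_MASK;
        return error_code;
    }
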