| 444 |
         prot = 0;
         for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
             prot |= page_get_flags(addr);
+#if !defined(CONFIG_SOFTMMU)
         mprotect((void *)host_start, host_page_size,
                  (prot & PAGE_BITS) & ~PAGE_WRITE);
+#endif
+#if !defined(CONFIG_USER_ONLY)
+        /* suppress soft TLB */
+        /* XXX: must flush on all processor with same address space */
+        tlb_flush_page_write(cpu_single_env, host_start);
+#endif
 #ifdef DEBUG_TB_INVALIDATE
         printf("protecting code page: 0x%08lx\n",
                host_start);
 #endif
         p->flags &= ~PAGE_WRITE;
-#ifdef DEBUG_TB_CHECK
-        tb_page_check();
-#endif
     }
 }

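Note: this hunk changes how host pages holding translated code are protected against guest writes. When the build does not use the soft MMU, the host page is write-protected with mprotect(); when it is not a user-only build, the soft-TLB write entries covering the page are flushed as well (the "suppress soft TLB" comment), so the next guest store to that page traps and self-modifying code can be noticed. A minimal sketch of the mprotect() side, assuming a POSIX host; the helper name is invented:

#include <sys/mman.h>
#include <unistd.h>

/* Write-protect the host page backing a guest code page; `code_page`
   must be host-page aligned.  Later stores into the page then fault,
   which is how writes to translated code are detected in the
   user-only build. */
static int protect_code_page(void *code_page)
{
    long page_size = sysconf(_SC_PAGESIZE);
    return mprotect(code_page, (size_t)page_size, PROT_READ | PROT_EXEC);
}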
| 487 |
     if (page_index2 != page_index1) {
         tb_alloc_page(tb, page_index2);
     }
+#ifdef DEBUG_TB_CHECK
+    tb_page_check();
+#endif
     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
     tb->jmp_next[0] = NULL;
     tb->jmp_next[1] = NULL;
| 524 |
     /* if the page was really writable, then we change its
        protection back to writable */
     if (prot & PAGE_WRITE_ORG) {
-        mprotect((void *)host_start, host_page_size,
-                 (prot & PAGE_BITS) | PAGE_WRITE);
         pindex = (address - host_start) >> TARGET_PAGE_BITS;
-        p1[pindex].flags |= PAGE_WRITE;
-        /* and since the content will be modified, we must invalidate
-           the corresponding translated code. */
-        tb_invalidate_page(address);
+        if (!(p1[pindex].flags & PAGE_WRITE)) {
+#if !defined(CONFIG_SOFTMMU)
+            mprotect((void *)host_start, host_page_size,
+                     (prot & PAGE_BITS) | PAGE_WRITE);
+#endif
+            p1[pindex].flags |= PAGE_WRITE;
+            /* and since the content will be modified, we must invalidate
+               the corresponding translated code. */
+            tb_invalidate_page(address);
 #ifdef DEBUG_TB_CHECK
             tb_invalidate_check(address);
 #endif
-        return 1;
-    } else {
-        return 0;
+            return 1;
+        }
     }
+    return 0;
 }

 /* call this function when system calls directly modify a memory area */
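Note: page_unprotect() is reworked so that write access is restored, and the translated code for the page invalidated, only if the page is not already marked PAGE_WRITE; the function now returns 0 through a single exit point when nothing had to be done. The mprotect() call is also compiled out for the softmmu build, where write faults go through the soft TLB rather than the host MMU. The sketch below shows the user-only mechanism this relies on: a store into a write-protected code page raises SIGSEGV, the handler restores write permission (a real emulator would also invalidate the affected translation blocks), and the faulting store is retried on return. Illustrative only, assuming a POSIX host; all names are invented:

#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static long fault_page_size;

static void handle_write_fault(int sig, siginfo_t *info, void *ctx)
{
    uintptr_t page = (uintptr_t)info->si_addr &
                     ~(uintptr_t)(fault_page_size - 1);

    (void)sig; (void)ctx;
    /* restore write access; a real emulator would invalidate the
       translation blocks generated from this page before returning */
    mprotect((void *)page, (size_t)fault_page_size,
             PROT_READ | PROT_WRITE | PROT_EXEC);
    /* returning from the handler retries the faulting store */
}

static void install_fault_handler(void)
{
    struct sigaction act;

    fault_page_size = sysconf(_SC_PAGESIZE);
    memset(&act, 0, sizeof(act));
    act.sa_sigaction = handle_write_fault;
    act.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &act, NULL);
}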
| 744 |
 /* unmap all maped pages and flush all associated code */
 void page_unmap(void)
 {
-    PageDesc *p, *pmap;
-    unsigned long addr;
-    int i, j, ret, j1;
+    PageDesc *pmap;
+    int i;

     for(i = 0; i < L1_SIZE; i++) {
         pmap = l1_map[i];
         if (pmap) {
+#if !defined(CONFIG_SOFTMMU)
+            PageDesc *p;
+            unsigned long addr;
+            int j, ret, j1;
+
             p = pmap;
             for(j = 0;j < L2_SIZE;) {
                 if (p->flags & PAGE_VALID) {
| 777 |
                     j++;
                 }
             }
+#endif
             free(pmap);
             l1_map[i] = NULL;
         }
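Note: the part of page_unmap() that munmap()s guest pages only makes sense when guest pages are real host mappings, so it moves under #if !defined(CONFIG_SOFTMMU) and its locals move with it; in both builds the function still iterates l1_map[] and frees each allocated second-level PageDesc array. A rough sketch of that two-level page-descriptor lookup, with example sizes rather than the real L1_BITS/L2_BITS values:

#include <stddef.h>

#define EX_L1_BITS   10
#define EX_L2_BITS   10
#define EX_L1_SIZE   (1 << EX_L1_BITS)
#define EX_L2_SIZE   (1 << EX_L2_BITS)
#define EX_PAGE_BITS 12

typedef struct { unsigned int flags; } ExPageDesc;

static ExPageDesc *ex_l1_map[EX_L1_SIZE];

/* return the descriptor for a virtual address, or NULL if the second
   level has never been allocated for that region */
static ExPageDesc *ex_page_find(unsigned long addr)
{
    unsigned long index = addr >> EX_PAGE_BITS;
    ExPageDesc *p = ex_l1_map[index >> EX_L2_BITS];

    if (!p)
        return NULL;
    return p + (index & (EX_L2_SIZE - 1));
}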
| 788 |

 void tlb_flush(CPUState *env)
 {
-#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
     int i;
     for(i = 0; i < CPU_TLB_SIZE; i++) {
         env->tlb_read[0][i].address = -1;
| 799 |
 #endif
 }

+static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
+{
+    if (addr == (tlb_entry->address &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
+        tlb_entry->address = -1;
+}

 void tlb_flush_page(CPUState *env, uint32_t addr)
 {
-#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
     int i;

+    addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    env->tlb_read[0][i].address = -1;
-    env->tlb_write[0][i].address = -1;
-    env->tlb_read[1][i].address = -1;
-    env->tlb_write[1][i].address = -1;
+    tlb_flush_entry(&env->tlb_read[0][i], addr);
+    tlb_flush_entry(&env->tlb_write[0][i], addr);
+    tlb_flush_entry(&env->tlb_read[1][i], addr);
+    tlb_flush_entry(&env->tlb_write[1][i], addr);
+#endif
+}

+/* make all write to page 'addr' trigger a TLB exception to detect
+   self modifying code */
+void tlb_flush_page_write(CPUState *env, uint32_t addr)
+{
+#if !defined(CONFIG_USER_ONLY)
+    int i;
+
+    addr &= TARGET_PAGE_MASK;
+    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    tlb_flush_entry(&env->tlb_write[0][i], addr);
+    tlb_flush_entry(&env->tlb_write[1][i], addr);
 #endif
 }

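Note: tlb_flush() and tlb_flush_page() now apply to every build with a soft TLB (#if !defined(CONFIG_USER_ONLY)) instead of being i386-only. The new tlb_flush_entry() helper clears a TLB slot only when its tag matches the page being flushed; because TLB_INVALID_MASK is kept in the mask, an entry already set to -1 can never compare equal to a page-aligned address. tlb_flush_page_write() uses it to drop only the write entries for a page, so the next guest store to that page takes a TLB miss and self-modifying code can be detected. A self-contained illustration of the masked compare, with example constants rather than the real QEMU values:

#include <stdint.h>

#define EX_PAGE_BITS     12                        /* 4 KiB pages */
#define EX_PAGE_MASK     (~((uint32_t)0) << EX_PAGE_BITS)
#define EX_INVALID_MASK  (1u << 3)                 /* a low tag bit */

typedef struct { uint32_t address; } ExTLBEntry;

static void ex_flush_entry(ExTLBEntry *e, uint32_t page_addr)
{
    /* hits only an entry that maps page_addr and is still valid: an
       invalidated entry (address == -1) keeps EX_INVALID_MASK set after
       masking, so it can never equal the page-aligned page_addr */
    if (page_addr == (e->address & (EX_PAGE_MASK | EX_INVALID_MASK)))
        e->address = (uint32_t)-1;
}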
| 937 |
     }
     return io_index << IO_MEM_SHIFT;
 }

+#if !defined(CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _cmmu
+#define GETPC() NULL
+#define env cpu_single_env
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+#undef env
+
+#endif
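Note: the block added at the end of the file generates the code-access memory helpers (suffix _cmmu) by including softmmu_template.h once per access size; SHIFT selects the width (0..3 for 1, 2, 4 and 8 bytes). env is temporarily defined to cpu_single_env because these helpers are called from the translator, outside the normal per-CPU execution loop, and GETPC() is defined to NULL here, presumably because there is no translated-code return address to recover a guest PC from in that context. The repeated-#include idiom itself looks roughly like the sketch below; this is NOT the contents of softmmu_template.h, only an illustration of the mechanism, and mini_template.h is an invented name:

/* ---- mini_template.h (hypothetical) ---- */
#include <stdint.h>

#if SHIFT == 0
#define DATA_TYPE uint8_t
#define SUFFIX    b
#elif SHIFT == 1
#define DATA_TYPE uint16_t
#define SUFFIX    w
#elif SHIFT == 2
#define DATA_TYPE uint32_t
#define SUFFIX    l
#else
#define DATA_TYPE uint64_t
#define SUFFIX    q
#endif

#define PASTE(a, b)  PASTE2(a, b)
#define PASTE2(a, b) a##b

/* emits demo_ldb / demo_ldw / demo_ldl / demo_ldq, one per inclusion */
static DATA_TYPE PASTE(demo_ld, SUFFIX)(const void *p)
{
    return *(const DATA_TYPE *)p;   /* simplified: no TLB lookup here */
}

#undef DATA_TYPE
#undef SUFFIX
#undef PASTE
#undef PASTE2
#undef SHIFT
/* ---- end mini_template.h ---- */

/* In the .c file, each inclusion stamps out one width, mirroring the
   four SHIFT/#include pairs added above:
       #define SHIFT 0
       #include "mini_template.h"
       #define SHIFT 2
       #include "mini_template.h"
*/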