Diff of /qemu/target-i386/helper2.c

revision 1.2 by bellard, Mon Oct 27 21:22:23 2003 UTC
revision 1.3 by bellard, Tue Nov 4 23:34:23 2003 UTC
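
This revision adds A20 gate emulation to the x86 MMU helpers. A new
cpu_x86_set_a20() entry point records the gate state, flushes every cached
mapping (page_unmap() and tlb_flush()), and selects an address mask:
0xffffffff when the gate is open, 0xffefffff (bit 20 forced to zero) when it
is closed. cpu_x86_init_mmu() now starts with the gate enabled, and
cpu_x86_handle_mmu_fault() applies the mask to each physical address it
computes: the page-directory fetch, the page-table fetch, and the final pte.
The open-coded ~0xfff constants on virtual-address masks are also replaced
by TARGET_PAGE_MASK.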

--- qemu/target-i386/helper2.c	2003/10/27 21:22:23	1.2
+++ qemu/target-i386/helper2.c	2003/11/04 23:34:23	1.3
@@ -158,10 +158,29 @@ void cpu_x86_dump_state(CPUX86State *env
 /* called when cr3 or PG bit are modified */
 static int last_pg_state = -1;
 static int last_pe_state = 0;
+static uint32_t a20_mask;
+int a20_enabled;
+
 int phys_ram_size;
 int phys_ram_fd;
 uint8_t *phys_ram_base;
+
+void cpu_x86_set_a20(CPUX86State *env, int a20_state)
+{
+    a20_state = (a20_state != 0);
+    if (a20_state != a20_enabled) {
+        /* when a20 is changed, all the MMU mappings are invalid, so
+           we must flush everything */
+        page_unmap();
+        tlb_flush(env);
+        a20_enabled = a20_state;
+        if (a20_enabled)
+            a20_mask = 0xffffffff;
+        else
+            a20_mask = 0xffefffff;
+    }
+}
 
 void cpu_x86_update_cr0(CPUX86State *env)
 {
     int pg_state, pe_state;
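
The two mask constants are the whole trick: clearing bit 20 (0x00100000)
makes any access just above 1 MB alias back into low memory, the 8086
wraparound that real-mode software (notably the DOS HMA convention) depends
on. A minimal standalone sketch of that arithmetic, using a hypothetical
a20_apply() helper that mirrors the constants chosen above:

#include <stdio.h>
#include <stdint.h>

/* Illustration only, not code from this revision: apply the same masks
   that cpu_x86_set_a20() selects. All address bits pass when the gate
   is open; bit 20 is forced to zero when it is closed. */
static uint32_t a20_apply(uint32_t paddr, int a20_enabled)
{
    uint32_t mask = a20_enabled ? 0xffffffff : 0xffefffff;
    return paddr & mask;
}

int main(void)
{
    /* 0x100000 is the first byte above 1 MB; with A20 closed it
       aliases to physical address 0 */
    printf("A20 on : 0x%08x -> 0x%08x\n", 0x100000, a20_apply(0x100000, 1));
    printf("A20 off: 0x%08x -> 0x%08x\n", 0x100000, a20_apply(0x100000, 0));
    /* 0x10ffef is the top of the HMA (FFFF:FFEF in real mode) */
    printf("A20 off: 0x%08x -> 0x%08x\n", 0x10ffef, a20_apply(0x10ffef, 0));
    return 0;
}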

@@ -195,6 +214,9 @@ void cpu_x86_update_cr3(CPUX86State *env
 
 void cpu_x86_init_mmu(CPUX86State *env)
 {
+    a20_enabled = 1;
+    a20_mask = 0xffffffff;
+
     last_pg_state = -1;
     cpu_x86_update_cr0(env);
 }
@@ -244,14 +266,15 @@ int cpu_x86_handle_mmu_fault(CPUX86State
 
     if (!(env->cr[0] & CR0_PG_MASK)) {
         pte = addr;
-        virt_addr = addr & ~0xfff;
+        virt_addr = addr & TARGET_PAGE_MASK;
         prot = PROT_READ | PROT_WRITE;
         page_size = 4096;
         goto do_mapping;
     }
 
     /* page directory entry */
-    pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
+    pde_ptr = phys_ram_base +
+        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & a20_mask);
     pde = ldl_raw(pde_ptr);
     if (!(pde & PG_PRESENT_MASK)) {
         error_code = 0;
310          }          }
311    
312          /* page directory entry */          /* page directory entry */
313          pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));          pte_ptr = phys_ram_base +
314                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask);
315          pte = ldl_raw(pte_ptr);          pte = ldl_raw(pte_ptr);
316          if (!(pte & PG_PRESENT_MASK)) {          if (!(pte & PG_PRESENT_MASK)) {
317              error_code = 0;              error_code = 0;
# Line 325  int cpu_x86_handle_mmu_fault(CPUX86State Line 349  int cpu_x86_handle_mmu_fault(CPUX86State
349      }      }
350            
351   do_mapping:   do_mapping:
352        pte = pte & a20_mask;
353  #if !defined(CONFIG_SOFTMMU)  #if !defined(CONFIG_SOFTMMU)
354      if (is_softmmu)      if (is_softmmu)
355  #endif  #endif
@@ -334,8 +359,8 @@ int cpu_x86_handle_mmu_fault(CPUX86State
 
         /* software MMU case. Even if 4MB pages, we map only one 4KB
            page in the cache to avoid filling it too fast */
-        page_offset = (addr & ~0xfff) & (page_size - 1);
-        paddr = (pte & ~0xfff) + page_offset;
+        page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
+        paddr = (pte & TARGET_PAGE_MASK) + page_offset;
         vaddr = virt_addr + page_offset;
         index = (addr >> 12) & (CPU_TLB_SIZE - 1);
         pd = physpage_find(paddr);
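
Nothing in this file toggles the gate; cpu_x86_set_a20() is exported for
board-level code. On PC hardware the gate is classically driven through the
keyboard controller output port or the "fast A20" register at I/O port 0x92
(bit 1). A hypothetical handler of that shape, for illustration only
(port92_write and its registration are not part of this revision):

/* Hypothetical "fast A20" handler, illustration only: on PCs, bit 1 of
   I/O port 0x92 controls the A20 gate. Only cpu_x86_set_a20() comes
   from the revision above. */
static void port92_write(CPUX86State *env, uint32_t val)
{
    cpu_x86_set_a20(env, (val >> 1) & 1);
}

Since cpu_x86_set_a20() unconditionally drops every cached mapping, toggling
the gate is expensive, which is acceptable: guests normally switch A20 once
during boot and then leave it alone.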
