    longjmp(env->jmp_env, 1);
}

/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
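
/* Worked example (illustrative, not from the original source): for
   selector 0x2b, bit 2 (TI) is clear so the GDT is used; index =
   0x2b & ~7 = 0x28, and the descriptor is read as two little-endian
   32-bit words, e1 (bytes 0-3) and e2 (bytes 4-7). */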

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
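
/* Example (illustrative): with a limit field of 0xfffff and DESC_G_MASK
   set, the result is (0xfffff << 12) | 0xfff = 0xffffffff, i.e. a 4 GB
   segment; with G clear the same field means 1 MB - 1 bytes. */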

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
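
/* The 32 bit base is scattered across the descriptor: e1 bits 16-31
   hold base 0-15, e2 bits 0-7 hold base 16-23 and e2 bits 24-31 hold
   base 24-31; the expression above simply reassembles the three
   parts. */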

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}
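
/* Example (illustrative): in vm86 mode a selector is just a paragraph
   number, so selector 0xb800 yields base 0xb8000 (the CGA text buffer)
   with a fixed 64 KB limit and no descriptor table lookup. */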

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    /* ... */
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
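
/* Note: every failure above raises EXCP0A_TSS rather than #GP because
   these segment loads happen as part of a task switch, for which the
   architecture specifies the invalid-TSS exception. */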

#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
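
    /* For reference, the TSS layout assumed by the loads above (byte
       offsets, 32 bit format): 0x1c CR3, 0x20 EIP, 0x24 EFLAGS,
       0x28-0x44 the eight general registers, 0x48-0x5c the six segment
       selectors, 0x60 the LDT selector, 0x64 the T bit, 0x66 the I/O
       map base. The 16 bit (286) format packs the same state into
       2-byte fields, hence the minimum limits of 43 vs 103 checked
       above. */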

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, env->eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, env->eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
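
    /* Summary of the bookkeeping above: JMP clears the old busy bit
       and sets the new one; CALL keeps the old TSS busy, sets the new
       busy bit, stores a back link to the old TSS and sets NT so a
       later IRET can return; IRET only clears the old busy bit, since
       the back-linked TSS it returns to is already marked busy. */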

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        env->cr[3] = new_cr3;
        cpu_x86_update_cr3(env);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = FL_UPDATE_CPL0_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    dt = &env->gdt;
    index = new_ldt & ~7;
    if ((index + 7) > dt->limit)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    ptr = dt->base + index;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
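
/* Worked example (illustrative): for a one byte access to port 0x3f9,
   addr = 0x3f9 and size = 1, so the code reads the 16 bit bitmap word
   at io_offset + (0x3f9 >> 3) = io_offset + 0x7f, shifts it right by
   0x3f9 & 7 = 1 and tests mask = 0x01; the access is allowed only if
   that bit is clear. */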

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

/* protected mode interrupt */
/* ... */
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* ... */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
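
        /* Note: the error code is pushed on the stack of the incoming
           task (stack size from its SS B bit, operand size from its CS
           D bit), because switch_tss() has already installed the new
           task's context at this point. */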
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    /* ... */
    }

    shift = type >> 3;
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;

/* ... */

    e2 = ldl_kernel(ptr + 4);
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    if ((e2 & DESC_S_MASK) ||
        (type != 1 && type != 9))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* ... */

/* only works if protected mode and not VM86. Calling load_seg with
   seg_reg == R_CS is discouraged */
/* XXX: add ring level checks */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    uint32_t e1, e2;
    /* ... */
    }

    if (seg_reg == R_SS) {
        if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
    /* ... */

/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    /* ... */
    if (!(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        /* conforming code segment */
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    /* ... */
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
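
/* Note: a far jump through a call gate does not change privilege: the
   target CS is loaded with the current CPL in its RPL bits, and the
   destination EIP comes from the gate descriptor itself (the offset
   supplied by the jmp instruction is ignored). */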

    /* ... */
    if (!(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        /* conforming code segment */
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    /* ... */
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
    /* ... */
        }
        shift = type >> 3;
        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
    /* ... */
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
    uint8_t *ssp;
    int eflags_mask;

    sp = ESP & 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
    /* ... */
        (new_esp & 0xffff);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = FL_UPDATE_MASK32 | IF_MASK | RF_MASK;
    else
        eflags_mask = FL_UPDATE_CPL0_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* ... */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
    /* ... */

void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}
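
/* Note: with NT set, IRET is a task return: the selector of the
   previous task sits in the link field at offset 0 of the current TSS.
   The mask 0x17 drops bit 3 of the type (286 vs 386 TSS) but keeps the
   S and busy bits, so comparing with 3 accepts exactly a busy system
   TSS. */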

void helper_lret_protected(int shift, int addend)