v86/src/rust/codegen.rs

use cpu::cpu::{
tlb_data, FLAG_CARRY, FLAG_OVERFLOW, FLAG_SIGN, FLAG_ZERO, OPSIZE_8, OPSIZE_16, OPSIZE_32,
TLB_GLOBAL, TLB_HAS_CODE, TLB_NO_USER, TLB_READONLY, TLB_VALID,
};
use cpu::global_pointers;
use cpu::memory;
use jit::{Instruction, InstructionOperand, InstructionOperandDest, JitContext};
use modrm;
use modrm::ModrmByte;
use profiler;
use regs;
use wasmgen::wasm_builder::{WasmBuilder, WasmLocal, WasmLocalI64};
pub fn gen_add_cs_offset(ctx: &mut JitContext) {
if !ctx.cpu.has_flat_segmentation() {
ctx.builder
.load_fixed_i32(global_pointers::get_seg_offset(regs::CS));
ctx.builder.add_i32();
}
}
pub fn gen_get_eip(builder: &mut WasmBuilder) {
builder.load_fixed_i32(global_pointers::instruction_pointer as u32);
}
pub fn gen_set_eip_to_after_current_instruction(ctx: &mut JitContext) {
ctx.builder
.const_i32(global_pointers::instruction_pointer as i32);
gen_get_eip(ctx.builder);
ctx.builder.const_i32(!0xFFF);
ctx.builder.and_i32();
ctx.builder.const_i32(ctx.cpu.eip as i32 & 0xFFF);
ctx.builder.or_i32();
ctx.builder.store_aligned_i32(0);
}
pub fn gen_set_previous_eip_offset_from_eip_with_low_bits(
builder: &mut WasmBuilder,
low_bits: i32,
) {
// previous_ip = instruction_pointer & ~0xFFF | low_bits;
dbg_assert!(low_bits & !0xFFF == 0);
builder.const_i32(global_pointers::previous_ip as i32);
gen_get_eip(builder);
builder.const_i32(!0xFFF);
builder.and_i32();
builder.const_i32(low_bits);
builder.or_i32();
builder.store_aligned_i32(0);
}
pub fn gen_set_eip_low_bits(builder: &mut WasmBuilder, low_bits: i32) {
// instruction_pointer = instruction_pointer & ~0xFFF | low_bits;
dbg_assert!(low_bits & !0xFFF == 0);
builder.const_i32(global_pointers::instruction_pointer as i32);
gen_get_eip(builder);
builder.const_i32(!0xFFF);
builder.and_i32();
builder.const_i32(low_bits);
builder.or_i32();
builder.store_aligned_i32(0);
}
pub fn gen_set_eip_low_bits_and_jump_rel32(builder: &mut WasmBuilder, low_bits: i32, n: i32) {
// instruction_pointer = (instruction_pointer & ~0xFFF | low_bits) + n;
dbg_assert!(low_bits & !0xFFF == 0);
builder.const_i32(global_pointers::instruction_pointer as i32);
gen_get_eip(builder);
builder.const_i32(!0xFFF);
builder.and_i32();
builder.const_i32(low_bits);
builder.or_i32();
if n != 0 {
builder.const_i32(n);
builder.add_i32();
}
builder.store_aligned_i32(0);
}
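
// A minimal, self-contained sketch (not part of the emitter) of the arithmetic that the eip
// helpers above generate: instruction_pointer keeps its page-aligned upper bits and only the
// low 12 bits (the offset within the current page) are replaced. The concrete values are
// arbitrary and for illustration only.
#[cfg(test)]
#[test]
fn sketch_eip_low_bits_update() {
    let instruction_pointer: i32 = 0x0040_1ABC; // hypothetical current eip
    let low_bits: i32 = 0x123; // offset of the target instruction within the same page
    assert_eq!(instruction_pointer & !0xFFF | low_bits, 0x0040_1123);
}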
pub fn gen_relative_jump(builder: &mut WasmBuilder, n: i32) {
// add n to instruction_pointer
if n != 0 {
builder.const_i32(global_pointers::instruction_pointer as i32);
gen_get_eip(builder);
builder.const_i32(n);
builder.add_i32();
builder.store_aligned_i32(0);
}
}
pub fn gen_page_switch_check(
ctx: &mut JitContext,
next_block_addr: u32,
last_instruction_addr: u32,
) {
// After switching a page while in jitted code, check if the page mapping still holds
gen_get_eip(ctx.builder);
let address_local = ctx.builder.set_new_local();
gen_get_phys_eip_plus_mem(ctx, &address_local);
ctx.builder.free_local(address_local);
ctx.builder
.const_i32(next_block_addr as i32 + unsafe { memory::mem8 } as i32);
ctx.builder.ne_i32();
if cfg!(debug_assertions) {
ctx.builder.if_void();
gen_profiler_stat_increment(ctx.builder, profiler::stat::FAILED_PAGE_CHANGE);
gen_debug_track_jit_exit(ctx.builder, last_instruction_addr);
ctx.builder.br(ctx.exit_label);
ctx.builder.block_end();
}
else {
ctx.builder.br_if(ctx.exit_label);
}
}
pub fn gen_update_instruction_counter(ctx: &mut JitContext) {
ctx.builder
.const_i32(global_pointers::instruction_counter as i32);
ctx.builder
.load_fixed_i32(global_pointers::instruction_counter as u32);
ctx.builder.get_local(&ctx.instruction_counter);
ctx.builder.add_i32();
ctx.builder.store_aligned_i32(0);
}
pub fn gen_get_reg8(ctx: &mut JitContext, r: u32) {
match r {
regs::AL | regs::CL | regs::DL | regs::BL => {
ctx.builder.get_local(&ctx.register_locals[r as usize]);
ctx.builder.const_i32(0xFF);
ctx.builder.and_i32();
},
regs::AH | regs::CH | regs::DH | regs::BH => {
ctx.builder
.get_local(&ctx.register_locals[(r - 4) as usize]);
ctx.builder.const_i32(8);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(0xFF);
ctx.builder.and_i32();
},
_ => assert!(false),
}
}
/// Return a new local referencing one of the 8 bit registers or a direct reference to one of the
/// register locals. Higher bits might be garbage (suitable for gen_cmp8 etc.). Must be freed with
/// gen_free_reg8_or_alias.
pub fn gen_get_reg8_or_alias_to_reg32(ctx: &mut JitContext, r: u32) -> WasmLocal {
match r {
regs::AL | regs::CL | regs::DL | regs::BL => ctx.register_locals[r as usize].unsafe_clone(),
regs::AH | regs::CH | regs::DH | regs::BH => {
ctx.builder
.get_local(&ctx.register_locals[(r - 4) as usize]);
ctx.builder.const_i32(8);
ctx.builder.shr_u_i32();
ctx.builder.set_new_local()
},
_ => panic!(),
}
}
pub fn gen_free_reg8_or_alias(ctx: &mut JitContext, r: u32, local: WasmLocal) {
match r {
regs::AL | regs::CL | regs::DL | regs::BL => {},
regs::AH | regs::CH | regs::DH | regs::BH => ctx.builder.free_local(local),
_ => panic!(),
}
}
pub fn gen_get_reg16(ctx: &mut JitContext, r: u32) {
ctx.builder.get_local(&ctx.register_locals[r as usize]);
ctx.builder.const_i32(0xFFFF);
ctx.builder.and_i32();
}
pub fn gen_get_reg32(ctx: &mut JitContext, r: u32) {
ctx.builder.get_local(&ctx.register_locals[r as usize]);
}
pub fn gen_set_reg8(ctx: &mut JitContext, r: u32) {
match r {
regs::AL | regs::CL | regs::DL | regs::BL => {
// reg32[r] = stack_value & 0xFF | reg32[r] & ~0xFF
ctx.builder.const_i32(0xFF);
ctx.builder.and_i32();
ctx.builder.get_local(&ctx.register_locals[r as usize]);
ctx.builder.const_i32(!0xFF);
ctx.builder.and_i32();
ctx.builder.or_i32();
ctx.builder.set_local(&ctx.register_locals[r as usize]);
},
regs::AH | regs::CH | regs::DH | regs::BH => {
// reg32[r] = stack_value << 8 & 0xFF00 | reg32[r] & ~0xFF00
ctx.builder.const_i32(8);
ctx.builder.shl_i32();
ctx.builder.const_i32(0xFF00);
ctx.builder.and_i32();
ctx.builder
.get_local(&ctx.register_locals[(r - 4) as usize]);
ctx.builder.const_i32(!0xFF00);
ctx.builder.and_i32();
ctx.builder.or_i32();
ctx.builder
.set_local(&ctx.register_locals[(r - 4) as usize]);
},
_ => assert!(false),
}
}
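
// A minimal, self-contained sketch (not part of the emitter) of the masking arithmetic that
// gen_set_reg8 emits: writing a low-byte register keeps the upper 24 bits of the 32-bit
// register, writing a high-byte register keeps everything except bits 8..16. The concrete
// values are arbitrary examples.
#[cfg(test)]
#[test]
fn sketch_reg8_write_masking() {
    let reg32: i32 = 0x1234_5678;
    let value: i32 = 0xAB;
    // low-byte register (e.g. AL): reg32 = value & 0xFF | reg32 & !0xFF
    assert_eq!(value & 0xFF | reg32 & !0xFF, 0x1234_56AB);
    // high-byte register (e.g. AH): reg32 = value << 8 & 0xFF00 | reg32 & !0xFF00
    assert_eq!(value << 8 & 0xFF00 | reg32 & !0xFF00, 0x1234_AB78);
}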
pub fn gen_set_reg8_unmasked(ctx: &mut JitContext, r: u32) {
if cfg!(debug_assertions) {
let val = ctx.builder.set_new_local();
ctx.builder.get_local(&val);
ctx.builder.const_i32(!0xFF);
ctx.builder.and_i32();
ctx.builder.if_void();
ctx.builder.unreachable();
ctx.builder.block_end();
ctx.builder.get_local(&val);
ctx.builder.free_local(val);
}
match r {
regs::AL | regs::CL | regs::DL | regs::BL => {
// reg32[r] = stack_value | reg32[r] & ~0xFF
ctx.builder.get_local(&ctx.register_locals[r as usize]);
ctx.builder.const_i32(!0xFF);
ctx.builder.and_i32();
ctx.builder.or_i32();
ctx.builder.set_local(&ctx.register_locals[r as usize]);
},
regs::AH | regs::CH | regs::DH | regs::BH => {
// reg32[r] = stack_value << 8 | reg32[r] & ~0xFF00
ctx.builder.const_i32(8);
ctx.builder.shl_i32();
ctx.builder.const_i32(0xFF00);
ctx.builder.and_i32();
ctx.builder
.get_local(&ctx.register_locals[(r - 4) as usize]);
ctx.builder.const_i32(!0xFF00);
ctx.builder.and_i32();
ctx.builder.or_i32();
ctx.builder
.set_local(&ctx.register_locals[(r - 4) as usize]);
},
_ => assert!(false),
}
}
pub fn gen_set_reg16(ctx: &mut JitContext, r: u32) {
gen_set_reg16_local(ctx.builder, &ctx.register_locals[r as usize]);
}
pub fn gen_set_reg16_unmasked(ctx: &mut JitContext, r: u32) {
if cfg!(debug_assertions) {
let val = ctx.builder.set_new_local();
ctx.builder.get_local(&val);
ctx.builder.const_i32(!0xFFFF);
ctx.builder.and_i32();
ctx.builder.if_void();
ctx.builder.unreachable();
ctx.builder.block_end();
ctx.builder.get_local(&val);
ctx.builder.free_local(val);
}
ctx.builder.get_local(&ctx.reg(r));
ctx.builder.const_i32(!0xFFFF);
ctx.builder.and_i32();
ctx.builder.or_i32();
ctx.builder.set_local(&ctx.reg(r));
}
pub fn gen_set_reg16_local(builder: &mut WasmBuilder, local: &WasmLocal) {
// reg32[r] = v & 0xFFFF | reg32[r] & ~0xFFFF
builder.const_i32(0xFFFF);
builder.and_i32();
builder.get_local(local);
builder.const_i32(!0xFFFF);
builder.and_i32();
builder.or_i32();
builder.set_local(local);
}
pub fn gen_set_reg32(ctx: &mut JitContext, r: u32) {
ctx.builder.set_local(&ctx.register_locals[r as usize]);
}
pub fn decr_exc_asize(ctx: &mut JitContext) {
gen_get_reg32(ctx, regs::ECX);
ctx.builder.const_i32(1);
ctx.builder.sub_i32();
if ctx.cpu.asize_32() {
gen_set_reg32(ctx, regs::ECX);
}
else {
gen_set_reg16(ctx, regs::CX);
}
}
pub fn gen_read_reg_xmm128_into_scratch(ctx: &mut JitContext, r: u32) {
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32);
let dest = global_pointers::get_reg_xmm_offset(r);
ctx.builder.const_i32(dest as i32);
ctx.builder.load_aligned_i64(0);
ctx.builder.store_aligned_i64(0);
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32 + 8);
let dest = global_pointers::get_reg_xmm_offset(r) + 8;
ctx.builder.const_i32(dest as i32);
ctx.builder.load_aligned_i64(0);
ctx.builder.store_aligned_i64(0);
}
pub fn gen_get_sreg(ctx: &mut JitContext, r: u32) {
ctx.builder
.load_fixed_u16(global_pointers::get_sreg_offset(r))
}
pub fn gen_get_ss_offset(ctx: &mut JitContext) {
ctx.builder
.load_fixed_i32(global_pointers::get_seg_offset(regs::SS));
}
pub fn gen_get_flags(builder: &mut WasmBuilder) {
builder.load_fixed_i32(global_pointers::flags as u32);
}
fn gen_get_flags_changed(builder: &mut WasmBuilder) {
builder.load_fixed_i32(global_pointers::flags_changed as u32);
}
fn gen_get_last_result(builder: &mut WasmBuilder, previous_instruction: &Instruction) {
match previous_instruction {
Instruction::Add {
dest: InstructionOperandDest::WasmLocal(l),
opsize: OPSIZE_32,
..
}
| Instruction::AdcSbb {
dest: InstructionOperandDest::WasmLocal(l),
opsize: OPSIZE_32,
..
}
| Instruction::Sub {
dest: InstructionOperandDest::WasmLocal(l),
opsize: OPSIZE_32,
..
}
| Instruction::Bitwise {
dest: InstructionOperandDest::WasmLocal(l),
opsize: OPSIZE_32,
}
| Instruction::NonZeroShift {
dest: InstructionOperandDest::WasmLocal(l),
opsize: OPSIZE_32,
} => builder.get_local(&l),
Instruction::Cmp {
dest: InstructionOperandDest::WasmLocal(l),
source,
opsize: OPSIZE_32,
} => {
if source.is_zero() {
builder.get_local(&l)
}
else {
builder.load_fixed_i32(global_pointers::last_result as u32)
}
},
_ => builder.load_fixed_i32(global_pointers::last_result as u32),
}
}
fn gen_get_last_op_size(builder: &mut WasmBuilder) {
builder.load_fixed_i32(global_pointers::last_op_size as u32);
}
fn gen_get_last_op1(builder: &mut WasmBuilder, previous_instruction: &Instruction) {
match previous_instruction {
Instruction::Cmp {
dest: InstructionOperandDest::WasmLocal(l),
source: _,
opsize: OPSIZE_32,
} => builder.get_local(&l),
_ => builder.load_fixed_i32(global_pointers::last_op1 as u32),
}
}
pub fn gen_get_page_fault(builder: &mut WasmBuilder) {
builder.load_fixed_u8(global_pointers::page_fault as u32);
}
/// sign-extend a byte value on the stack and leave it on the stack
pub fn sign_extend_i8(builder: &mut WasmBuilder) {
builder.const_i32(24);
builder.shl_i32();
builder.const_i32(24);
builder.shr_s_i32();
}
/// sign-extend a two byte value on the stack and leave it on the stack
pub fn sign_extend_i16(builder: &mut WasmBuilder) {
builder.const_i32(16);
builder.shl_i32();
builder.const_i32(16);
builder.shr_s_i32();
}
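
// A minimal, self-contained sketch (not part of the emitter) of the shift-based sign
// extension that sign_extend_i8 and sign_extend_i16 generate: shifting left and then
// arithmetic-shifting right replicates the sign bit of the narrow value.
#[cfg(test)]
#[test]
fn sketch_sign_extension_by_shifts() {
    let byte: i32 = 0x80; // -128 when interpreted as a signed byte
    assert_eq!(byte << 24 >> 24, -128);
    let word: i32 = 0x8000; // -32768 when interpreted as a signed 16-bit value
    assert_eq!(word << 16 >> 16, -32768);
}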
pub fn gen_fn0_const(builder: &mut WasmBuilder, name: &str) { builder.call_fn0(name) }
pub fn gen_fn1_const(builder: &mut WasmBuilder, name: &str, arg0: u32) {
builder.const_i32(arg0 as i32);
builder.call_fn1(name);
}
pub fn gen_fn2_const(builder: &mut WasmBuilder, name: &str, arg0: u32, arg1: u32) {
builder.const_i32(arg0 as i32);
builder.const_i32(arg1 as i32);
builder.call_fn2(name);
}
// helper functions for gen/generate_jit.js
pub fn gen_modrm_fn0(builder: &mut WasmBuilder, name: &str) {
// generates: fn( _ )
builder.call_fn1(name);
}
pub fn gen_modrm_fn1(builder: &mut WasmBuilder, name: &str, arg0: u32) {
// generates: fn( _, arg0 )
builder.const_i32(arg0 as i32);
builder.call_fn2(name);
}
pub fn gen_modrm_resolve(ctx: &mut JitContext, modrm_byte: ModrmByte) {
modrm::gen(ctx, modrm_byte)
}
pub fn gen_modrm_resolve_with_local(
ctx: &mut JitContext,
modrm_byte: ModrmByte,
gen: &dyn Fn(&mut JitContext, &WasmLocal),
) {
if let Some(r) = modrm::get_as_reg_index_if_possible(ctx, &modrm_byte) {
gen(ctx, &ctx.reg(r));
}
else {
gen_modrm_resolve(ctx, modrm_byte);
let address = ctx.builder.set_new_local();
gen(ctx, &address);
ctx.builder.free_local(address);
}
}
pub fn gen_set_reg8_r(ctx: &mut JitContext, dest: u32, src: u32) {
// generates: reg8[r_dest] = reg8[r_src]
if src != dest {
gen_get_reg8(ctx, src);
gen_set_reg8_unmasked(ctx, dest);
}
}
pub fn gen_set_reg16_r(ctx: &mut JitContext, dest: u32, src: u32) {
// generates: reg16[r_dest] = reg16[r_src]
if src != dest {
gen_get_reg16(ctx, src);
gen_set_reg16_unmasked(ctx, dest);
}
}
pub fn gen_set_reg32_r(ctx: &mut JitContext, dest: u32, src: u32) {
// generates: reg32[r_dest] = reg32[r_src]
if src != dest {
gen_get_reg32(ctx, src);
gen_set_reg32(ctx, dest);
}
}
pub fn gen_modrm_resolve_safe_read8(ctx: &mut JitContext, modrm_byte: ModrmByte) {
gen_modrm_resolve_with_local(ctx, modrm_byte, &|ctx, addr| gen_safe_read8(ctx, addr));
}
pub fn gen_modrm_resolve_safe_read16(ctx: &mut JitContext, modrm_byte: ModrmByte) {
gen_modrm_resolve_with_local(ctx, modrm_byte, &|ctx, addr| gen_safe_read16(ctx, addr));
}
pub fn gen_modrm_resolve_safe_read32(ctx: &mut JitContext, modrm_byte: ModrmByte) {
gen_modrm_resolve_with_local(ctx, modrm_byte, &|ctx, addr| gen_safe_read32(ctx, addr));
}
pub fn gen_modrm_resolve_safe_read64(ctx: &mut JitContext, modrm_byte: ModrmByte) {
gen_modrm_resolve_with_local(ctx, modrm_byte, &|ctx, addr| gen_safe_read64(ctx, addr));
}
pub fn gen_modrm_resolve_safe_read128(
ctx: &mut JitContext,
modrm_byte: ModrmByte,
where_to_write: u32,
) {
gen_modrm_resolve_with_local(ctx, modrm_byte, &|ctx, addr| {
gen_safe_read128(ctx, addr, where_to_write)
});
}
pub fn gen_safe_read8(ctx: &mut JitContext, address_local: &WasmLocal) {
gen_safe_read(ctx, BitSize::BYTE, address_local, None);
}
pub fn gen_safe_read16(ctx: &mut JitContext, address_local: &WasmLocal) {
gen_safe_read(ctx, BitSize::WORD, address_local, None);
}
pub fn gen_safe_read32(ctx: &mut JitContext, address_local: &WasmLocal) {
gen_safe_read(ctx, BitSize::DWORD, address_local, None);
}
pub fn gen_safe_read64(ctx: &mut JitContext, address_local: &WasmLocal) {
gen_safe_read(ctx, BitSize::QWORD, &address_local, None);
}
pub fn gen_safe_read128(ctx: &mut JitContext, address_local: &WasmLocal, where_to_write: u32) {
gen_safe_read(ctx, BitSize::DQWORD, &address_local, Some(where_to_write));
}
// only used internally for gen_safe_write
enum GenSafeWriteValue<'a> {
I32(&'a WasmLocal),
I64(&'a WasmLocalI64),
TwoI64s(&'a WasmLocalI64, &'a WasmLocalI64),
}
enum GenSafeReadWriteValue {
I32(WasmLocal),
I64(WasmLocalI64),
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum BitSize {
BYTE,
WORD,
DWORD,
QWORD,
DQWORD,
}
impl BitSize {
pub fn bytes(&self) -> u32 {
match self {
BitSize::BYTE => 1,
BitSize::WORD => 2,
BitSize::DWORD => 4,
BitSize::QWORD => 8,
BitSize::DQWORD => 16,
}
}
}
pub fn gen_safe_write8(ctx: &mut JitContext, address_local: &WasmLocal, value_local: &WasmLocal) {
gen_safe_write(
ctx,
BitSize::BYTE,
address_local,
GenSafeWriteValue::I32(value_local),
)
2018-08-22 02:36:44 +02:00
}
pub fn gen_safe_write16(ctx: &mut JitContext, address_local: &WasmLocal, value_local: &WasmLocal) {
gen_safe_write(
ctx,
BitSize::WORD,
address_local,
GenSafeWriteValue::I32(value_local),
)
}
pub fn gen_safe_write32(ctx: &mut JitContext, address_local: &WasmLocal, value_local: &WasmLocal) {
gen_safe_write(
ctx,
BitSize::DWORD,
address_local,
GenSafeWriteValue::I32(value_local),
)
}
pub fn gen_safe_write64(
ctx: &mut JitContext,
address_local: &WasmLocal,
value_local: &WasmLocalI64,
) {
gen_safe_write(
ctx,
BitSize::QWORD,
address_local,
GenSafeWriteValue::I64(value_local),
)
}
pub fn gen_safe_write128(
ctx: &mut JitContext,
address_local: &WasmLocal,
value_local_low: &WasmLocalI64,
value_local_high: &WasmLocalI64,
) {
gen_safe_write(
ctx,
BitSize::DQWORD,
address_local,
GenSafeWriteValue::TwoI64s(value_local_low, value_local_high),
)
}
fn gen_safe_read(
ctx: &mut JitContext,
bits: BitSize,
address_local: &WasmLocal,
where_to_write: Option<u32>,
) {
// Execute a virtual memory read. All slow paths (memory-mapped IO, tlb miss, page fault and
// reads across a page boundary) are handled in safe_read_jit_slow
// entry <- tlb_data[addr >> 12 << 2]
// if entry & MASK == TLB_VALID && (addr & 0xFFF) <= 0x1000 - bytes: goto fast
// entry <- safe_read_jit_slow(addr, instruction_pointer)
// if page_fault: goto exit-with-pagefault
// fast: mem[(entry & ~0xFFF) ^ addr]
let cont = ctx.builder.block_void();
ctx.builder.get_local(&address_local);
ctx.builder.const_i32(12);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(2);
ctx.builder.shl_i32();
ctx.builder
.load_aligned_i32(unsafe { &tlb_data[0] as *const i32 as u32 });
let entry_local = ctx.builder.tee_new_local();
ctx.builder.const_i32(
(0xFFF
& !TLB_READONLY
& !TLB_GLOBAL
& !TLB_HAS_CODE
& !(if ctx.cpu.cpl3() { 0 } else { TLB_NO_USER })) as i32,
);
ctx.builder.and_i32();
ctx.builder.const_i32(TLB_VALID as i32);
ctx.builder.eq_i32();
if bits != BitSize::BYTE {
ctx.builder.get_local(&address_local);
ctx.builder.const_i32(0xFFF);
ctx.builder.and_i32();
ctx.builder.const_i32(0x1000 - bits.bytes() as i32);
ctx.builder.le_i32();
ctx.builder.and_i32();
}
ctx.builder.br_if(cont);
if cfg!(feature = "profiler") {
ctx.builder.get_local(&address_local);
ctx.builder.get_local(&entry_local);
ctx.builder.call_fn2("report_safe_read_jit_slow");
}
ctx.builder.get_local(&address_local);
ctx.builder
.const_i32(ctx.start_of_current_instruction as i32 & 0xFFF);
match bits {
BitSize::BYTE => {
ctx.builder.call_fn2_ret("safe_read8_slow_jit");
},
BitSize::WORD => {
ctx.builder.call_fn2_ret("safe_read16_slow_jit");
},
BitSize::DWORD => {
ctx.builder.call_fn2_ret("safe_read32s_slow_jit");
},
BitSize::QWORD => {
ctx.builder.call_fn2_ret("safe_read64s_slow_jit");
},
BitSize::DQWORD => {
ctx.builder.call_fn2_ret("safe_read128s_slow_jit");
},
}
ctx.builder.tee_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
if cfg!(feature = "profiler") {
ctx.builder.if_void();
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
ctx.builder.block_end();
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
}
ctx.builder.br_if(ctx.exit_with_fault_label);
ctx.builder.block_end();
gen_profiler_stat_increment(ctx.builder, profiler::stat::SAFE_READ_FAST); // XXX: Both fast and slow
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(!0xFFF);
ctx.builder.and_i32();
ctx.builder.get_local(&address_local);
ctx.builder.xor_i32();
// where_to_write is only used by dqword
dbg_assert!((where_to_write != None) == (bits == BitSize::DQWORD));
match bits {
BitSize::BYTE => {
ctx.builder.load_u8(0);
2018-08-22 02:36:44 +02:00
},
BitSize::WORD => {
ctx.builder.load_unaligned_u16(0);
},
BitSize::DWORD => {
ctx.builder.load_unaligned_i32(0);
},
BitSize::QWORD => {
ctx.builder.load_unaligned_i64(0);
},
BitSize::DQWORD => {
let where_to_write = where_to_write.unwrap();
let virt_address_local = ctx.builder.set_new_local();
ctx.builder.const_i32(0);
ctx.builder.get_local(&virt_address_local);
ctx.builder.load_unaligned_i64(0);
ctx.builder.store_unaligned_i64(where_to_write);
ctx.builder.const_i32(0);
ctx.builder.get_local(&virt_address_local);
ctx.builder.load_unaligned_i64(8);
ctx.builder.store_unaligned_i64(where_to_write + 8);
ctx.builder.free_local(virt_address_local);
},
}
2018-07-24 13:54:08 +02:00
ctx.builder.free_local(entry_local);
2018-07-24 13:54:08 +02:00
}
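
// A minimal, self-contained sketch (not part of the emitter) of the fast-path address
// translation generated above. It assumes, as the expression mem[(entry & ~0xFFF) ^ addr]
// implies, that the upper 20 bits of a TLB entry hold the virtual page base XORed with that
// page's base inside the wasm heap, and that the low 12 bits hold TLB_* flags. All concrete
// values below are hypothetical.
#[cfg(test)]
#[test]
fn sketch_tlb_fast_path_translation() {
    let virt_page: i32 = 0x0040_3000; // page-aligned virtual address
    let page_base_in_heap: i32 = 0x1A7C_9000; // page-aligned location of that page in the heap
    let flags: i32 = 0x5; // some flag bits, stored in the low 12 bits of the entry
    let entry = (virt_page ^ page_base_in_heap) | flags;
    let virt_addr = virt_page | 0xABC; // an access at offset 0xABC within the page
    // fast path: (entry & !0xFFF) ^ addr yields the heap location, page offset preserved
    assert_eq!((entry & !0xFFF) ^ virt_addr, page_base_in_heap | 0xABC);
}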
pub fn gen_get_phys_eip_plus_mem(ctx: &mut JitContext, address_local: &WasmLocal) {
// Similar to gen_safe_read, but return the physical eip + memory::mem rather than reading from memory
// In functions that need to use this value, it has to be fixed up by subtracting memory::mem.
// This is done in order to remove one instruction from the fast path of memory accesses (no need to add
// memory::mem anymore).
// We need to account for this in gen_page_switch_check, where we compare with next_block_addr + memory::mem8.
// We cannot do the same while processing an AbsoluteEip flow control change, so there we need to fix the value
// by subtracting memory::mem. Overall, since AbsoluteEip is encountered less often than memory accesses,
// this ends up improving performance.
// Does not (need to) handle mapped memory
// XXX: Currently does not use ctx.start_of_current_instruction, but rather assumes that eip is
// already correct (pointing at the current instruction)
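// Rough sketch of the generated code (same pattern as gen_safe_write below):
// entry <- tlb_data[eip >> 12 << 2]
// if entry & MASK == TLB_VALID: goto fast
// entry <- get_phys_eip_slow_jit(eip)
// if fault: goto exit-with-fault
// fast: (entry & ~0xFFF) ^ eip, i.e. the physical eip plus memory::mem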
let cont = ctx.builder.block_void();
ctx.builder.get_local(&address_local);
ctx.builder.const_i32(12);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(2);
ctx.builder.shl_i32();
ctx.builder
.load_aligned_i32(unsafe { &tlb_data[0] as *const i32 as u32 });
let entry_local = ctx.builder.tee_new_local();
ctx.builder.const_i32(
(0xFFF
& !TLB_READONLY
& !TLB_GLOBAL
& !TLB_HAS_CODE
& !(if ctx.cpu.cpl3() { 0 } else { TLB_NO_USER })) as i32,
);
ctx.builder.and_i32();
ctx.builder.const_i32(TLB_VALID as i32);
ctx.builder.eq_i32();
ctx.builder.br_if(cont);
if cfg!(feature = "profiler") {
ctx.builder.get_local(&address_local);
ctx.builder.get_local(&entry_local);
ctx.builder.call_fn2("report_safe_read_jit_slow");
}
ctx.builder.get_local(&address_local);
ctx.builder.call_fn1_ret("get_phys_eip_slow_jit");
ctx.builder.tee_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
if cfg!(feature = "profiler") {
ctx.builder.if_void();
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction); // XXX
ctx.builder.block_end();
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
}
ctx.builder.br_if(ctx.exit_with_fault_label);
ctx.builder.block_end();
gen_profiler_stat_increment(ctx.builder, profiler::stat::SAFE_READ_FAST); // XXX: Both fast and slow
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(!0xFFF);
ctx.builder.and_i32();
ctx.builder.get_local(&address_local);
ctx.builder.xor_i32();
ctx.builder.free_local(entry_local);
}
fn gen_safe_write(
ctx: &mut JitContext,
bits: BitSize,
address_local: &WasmLocal,
value_local: GenSafeWriteValue,
) {
// Execute a virtual memory write. All slow paths (memory-mapped IO, tlb miss, page fault,
// writes across a page boundary and writes to a page containing jitted code) are handled in safe_write_jit_slow
// entry <- tlb_data[addr >> 12 << 2]
// if entry & MASK == TLB_VALID && (addr & 0xFFF) <= 0x1000 - bytes: goto fast
// entry <- safe_write_jit_slow(addr, value, instruction_pointer)
// if page_fault: goto exit-with-pagefault
// fast: mem[(entry & ~0xFFF) ^ addr] <- value
let cont = ctx.builder.block_void();
ctx.builder.get_local(&address_local);
ctx.builder.const_i32(12);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(2);
ctx.builder.shl_i32();
ctx.builder
.load_aligned_i32(unsafe { &tlb_data[0] as *const i32 as u32 });
let entry_local = ctx.builder.tee_new_local();
ctx.builder
.const_i32((0xFFF & !TLB_GLOBAL & !(if ctx.cpu.cpl3() { 0 } else { TLB_NO_USER })) as i32);
ctx.builder.and_i32();
ctx.builder.const_i32(TLB_VALID as i32);
ctx.builder.eq_i32();
if bits != BitSize::BYTE {
ctx.builder.get_local(&address_local);
ctx.builder.const_i32(0xFFF);
ctx.builder.and_i32();
ctx.builder.const_i32(0x1000 - bits.bytes() as i32);
ctx.builder.le_i32();
ctx.builder.and_i32();
}
}
ctx.builder.br_if(cont);
if cfg!(feature = "profiler") {
ctx.builder.get_local(&address_local);
ctx.builder.get_local(&entry_local);
ctx.builder.call_fn2("report_safe_write_jit_slow");
}
ctx.builder.get_local(&address_local);
match value_local {
GenSafeWriteValue::I32(local) => ctx.builder.get_local(local),
GenSafeWriteValue::I64(local) => ctx.builder.get_local_i64(local),
GenSafeWriteValue::TwoI64s(local1, local2) => {
ctx.builder.get_local_i64(local1);
ctx.builder.get_local_i64(local2)
},
}
ctx.builder
.const_i32(ctx.start_of_current_instruction as i32 & 0xFFF);
match bits {
BitSize::BYTE => {
ctx.builder.call_fn3_ret("safe_write8_slow_jit");
},
BitSize::WORD => {
ctx.builder.call_fn3_ret("safe_write16_slow_jit");
},
BitSize::DWORD => {
ctx.builder.call_fn3_ret("safe_write32_slow_jit");
},
BitSize::QWORD => {
ctx.builder
.call_fn3_i32_i64_i32_ret("safe_write64_slow_jit");
},
BitSize::DQWORD => {
ctx.builder
.call_fn4_i32_i64_i64_i32_ret("safe_write128_slow_jit");
},
}
ctx.builder.tee_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
if cfg!(feature = "profiler") {
ctx.builder.if_void();
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
ctx.builder.block_end();
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
}
ctx.builder.br_if(ctx.exit_with_fault_label);
ctx.builder.block_end();
gen_profiler_stat_increment(ctx.builder, profiler::stat::SAFE_WRITE_FAST); // XXX: Both fast and slow
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(!0xFFF);
ctx.builder.and_i32();
ctx.builder.get_local(&address_local);
ctx.builder.xor_i32();
match value_local {
GenSafeWriteValue::I32(local) => ctx.builder.get_local(local),
GenSafeWriteValue::I64(local) => ctx.builder.get_local_i64(local),
GenSafeWriteValue::TwoI64s(local1, local2) => {
assert!(bits == BitSize::DQWORD);
let virt_address_local = ctx.builder.tee_new_local();
ctx.builder.get_local_i64(local1);
ctx.builder.store_unaligned_i64(0);
ctx.builder.get_local(&virt_address_local);
ctx.builder.get_local_i64(local2);
ctx.builder.store_unaligned_i64(8);
ctx.builder.free_local(virt_address_local);
},
}
match bits {
BitSize::BYTE => {
ctx.builder.store_u8(0);
},
BitSize::WORD => {
ctx.builder.store_unaligned_u16(0);
},
BitSize::DWORD => {
ctx.builder.store_unaligned_i32(0);
},
BitSize::QWORD => {
ctx.builder.store_unaligned_i64(0);
},
BitSize::DQWORD => {}, // handled above
}
ctx.builder.free_local(entry_local);
}
pub fn gen_safe_read_write(
ctx: &mut JitContext,
bits: BitSize,
address_local: &WasmLocal,
f: &dyn Fn(&mut JitContext),
) {
// Execute a virtual memory read+write. All slow paths (memory-mapped IO, tlb miss, page fault,
// writes across a page boundary and writes to a page containing jitted code) are handled in
// safe_read_write_jit_slow
// entry <- tlb_data[addr >> 12 << 2]
// can_use_fast_path <- entry & MASK == TLB_VALID && (addr & 0xFFF) <= 0x1000 - bytes
// if can_use_fast_path: goto fast
// entry <- safe_read_write_jit_slow(addr, instruction_pointer)
// if page_fault: goto exit-with-pagefault
// fast: value <- f(mem[(entry & ~0xFFF) ^ addr])
// if !can_use_fast_path { safe_write_jit_slow(addr, value, instruction_pointer) }
// mem[(entry & ~0xFFF) ^ addr] <- value
let cont = ctx.builder.block_void();
ctx.builder.get_local(address_local);
ctx.builder.const_i32(12);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(2);
ctx.builder.shl_i32();
ctx.builder
.load_aligned_i32(unsafe { &tlb_data[0] as *const i32 as u32 });
let entry_local = ctx.builder.tee_new_local();
ctx.builder
.const_i32((0xFFF & !TLB_GLOBAL & !(if ctx.cpu.cpl3() { 0 } else { TLB_NO_USER })) as i32);
ctx.builder.and_i32();
ctx.builder.const_i32(TLB_VALID as i32);
ctx.builder.eq_i32();
if bits != BitSize::BYTE {
ctx.builder.get_local(&address_local);
ctx.builder.const_i32(0xFFF);
ctx.builder.and_i32();
ctx.builder.const_i32(0x1000 - bits.bytes() as i32);
ctx.builder.le_i32();
ctx.builder.and_i32();
}
let can_use_fast_path_local = ctx.builder.tee_new_local();
ctx.builder.br_if(cont);
if cfg!(feature = "profiler") {
ctx.builder.get_local(&address_local);
ctx.builder.get_local(&entry_local);
ctx.builder.call_fn2("report_safe_read_write_jit_slow");
}
ctx.builder.get_local(&address_local);
ctx.builder
.const_i32(ctx.start_of_current_instruction as i32 & 0xFFF);
match bits {
BitSize::BYTE => {
ctx.builder.call_fn2_ret("safe_read_write8_slow_jit");
},
BitSize::WORD => {
ctx.builder.call_fn2_ret("safe_read_write16_slow_jit");
},
BitSize::DWORD => {
ctx.builder.call_fn2_ret("safe_read_write32s_slow_jit");
},
BitSize::QWORD => {
ctx.builder.call_fn2_ret("safe_read_write64_slow_jit");
},
BitSize::DQWORD => {
dbg_assert!(false);
},
}
ctx.builder.tee_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
if cfg!(feature = "profiler") {
ctx.builder.if_void();
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
ctx.builder.block_end();
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(1);
ctx.builder.and_i32();
}
ctx.builder.br_if(ctx.exit_with_fault_label);
ctx.builder.block_end();
gen_profiler_stat_increment(ctx.builder, profiler::stat::SAFE_READ_WRITE_FAST); // XXX: Also slow
ctx.builder.get_local(&entry_local);
ctx.builder.const_i32(!0xFFF);
ctx.builder.and_i32();
ctx.builder.get_local(&address_local);
ctx.builder.xor_i32();
ctx.builder.free_local(entry_local);
let phys_addr_local = ctx.builder.tee_new_local();
match bits {
BitSize::BYTE => {
ctx.builder.load_u8(0);
},
BitSize::WORD => {
ctx.builder.load_unaligned_u16(0);
},
BitSize::DWORD => {
ctx.builder.load_unaligned_i32(0);
},
BitSize::QWORD => {
ctx.builder.load_unaligned_i64(0);
},
BitSize::DQWORD => assert!(false), // not used
}
// value is now on stack
f(ctx);
// TODO: Could get rid of this local by returning one from f
let value_local = if bits == BitSize::QWORD {
GenSafeReadWriteValue::I64(ctx.builder.set_new_local_i64())
}
else {
GenSafeReadWriteValue::I32(ctx.builder.set_new_local())
};
ctx.builder.get_local(&can_use_fast_path_local);
ctx.builder.eqz_i32();
ctx.builder.if_void();
{
ctx.builder.get_local(&address_local);
match &value_local {
GenSafeReadWriteValue::I32(l) => ctx.builder.get_local(l),
GenSafeReadWriteValue::I64(l) => ctx.builder.get_local_i64(l),
}
ctx.builder
.const_i32(ctx.start_of_current_instruction as i32);
match bits {
BitSize::BYTE => {
ctx.builder.call_fn3_ret("safe_write8_slow_jit");
},
BitSize::WORD => {
ctx.builder.call_fn3_ret("safe_write16_slow_jit");
},
BitSize::DWORD => {
ctx.builder.call_fn3_ret("safe_write32_slow_jit");
},
BitSize::QWORD => {
ctx.builder
.call_fn3_i32_i64_i32_ret("safe_write64_slow_jit");
},
BitSize::DQWORD => {
dbg_assert!(false);
},
}
if cfg!(debug_assertions) {
ctx.builder.const_i32(1);
ctx.builder.and_i32();
ctx.builder.if_void();
{
// handled above
ctx.builder.const_i32(match bits {
BitSize::BYTE => 8,
BitSize::WORD => 16,
BitSize::DWORD => 32,
BitSize::QWORD => 64,
_ => {
dbg_assert!(false);
0
},
});
ctx.builder.get_local(&address_local);
ctx.builder.call_fn2("bug_gen_safe_read_write_page_fault");
}
ctx.builder.block_end();
}
else {
ctx.builder.drop_();
}
}
ctx.builder.block_end();
ctx.builder.get_local(&phys_addr_local);
match &value_local {
GenSafeReadWriteValue::I32(l) => ctx.builder.get_local(l),
GenSafeReadWriteValue::I64(l) => ctx.builder.get_local_i64(l),
}
match bits {
BitSize::BYTE => {
ctx.builder.store_u8(0);
},
BitSize::WORD => {
ctx.builder.store_unaligned_u16(0);
},
BitSize::DWORD => {
ctx.builder.store_unaligned_i32(0);
},
BitSize::QWORD => {
ctx.builder.store_unaligned_i64(0);
},
BitSize::DQWORD => {
dbg_assert!(false);
},
}
match value_local {
GenSafeReadWriteValue::I32(l) => ctx.builder.free_local(l),
GenSafeReadWriteValue::I64(l) => ctx.builder.free_local_i64(l),
}
ctx.builder.free_local(can_use_fast_path_local);
ctx.builder.free_local(phys_addr_local);
}
#[cfg(debug_assertions)]
#[no_mangle]
pub fn bug_gen_safe_read_write_page_fault(bits: i32, addr: u32) {
dbg_log!("bug: gen_safe_read_write_page_fault {} {:x}", bits, addr);
dbg_assert!(false);
}
pub fn gen_jmp_rel16(builder: &mut WasmBuilder, rel16: u16) {
let cs_offset_addr = global_pointers::get_seg_offset(regs::CS);
builder.load_fixed_i32(cs_offset_addr);
let local = builder.set_new_local();
// generate:
// *instruction_pointer = cs_offset + ((*instruction_pointer - cs_offset + rel16) & 0xFFFF);
{
builder.const_i32(global_pointers::instruction_pointer as i32);
gen_get_eip(builder);
builder.get_local(&local);
builder.sub_i32();
builder.const_i32(rel16 as i32);
builder.add_i32();
builder.const_i32(0xFFFF);
builder.and_i32();
builder.get_local(&local);
builder.add_i32();
builder.store_aligned_i32(0);
}
builder.free_local(local);
}
pub fn gen_pop16_ss16(ctx: &mut JitContext) {
// sp = segment_offsets[SS] + reg16[SP] (or just reg16[SP] if has_flat_segmentation)
gen_get_reg16(ctx, regs::SP);
if !ctx.cpu.has_flat_segmentation() {
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
}
// result = safe_read16(sp)
let address_local = ctx.builder.set_new_local();
gen_safe_read16(ctx, &address_local);
ctx.builder.free_local(address_local);
// reg16[SP] += 2;
gen_get_reg16(ctx, regs::SP);
ctx.builder.const_i32(2);
ctx.builder.add_i32();
gen_set_reg16(ctx, regs::SP);
// return value is already on stack
}
pub fn gen_pop16_ss32(ctx: &mut JitContext) {
// esp = segment_offsets[SS] + reg32[ESP] (or just reg32[ESP] if has_flat_segmentation)
gen_get_reg32(ctx, regs::ESP);
if !ctx.cpu.has_flat_segmentation() {
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
}
// result = safe_read16(esp)
let address_local = ctx.builder.set_new_local();
gen_safe_read16(ctx, &address_local);
ctx.builder.free_local(address_local);
// reg32[ESP] += 2;
gen_get_reg32(ctx, regs::ESP);
ctx.builder.const_i32(2);
ctx.builder.add_i32();
gen_set_reg32(ctx, regs::ESP);
// return value is already on stack
}
pub fn gen_pop16(ctx: &mut JitContext) {
if ctx.cpu.ssize_32() {
gen_pop16_ss32(ctx);
}
else {
gen_pop16_ss16(ctx);
}
}
pub fn gen_pop32s_ss16(ctx: &mut JitContext) {
// sp = reg16[SP]
gen_get_reg16(ctx, regs::SP);
// result = safe_read32s(segment_offsets[SS] + sp) (or just sp if has_flat_segmentation)
if !ctx.cpu.has_flat_segmentation() {
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
}
let address_local = ctx.builder.set_new_local();
gen_safe_read32(ctx, &address_local);
ctx.builder.free_local(address_local);
// reg16[SP] = sp + 4;
gen_get_reg16(ctx, regs::SP);
ctx.builder.const_i32(4);
ctx.builder.add_i32();
gen_set_reg16(ctx, regs::SP);
// return value is already on stack
}
pub fn gen_pop32s_ss32(ctx: &mut JitContext) {
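// esp = segment_offsets[SS] + reg32[ESP] (or just reg32[ESP] if has_flat_segmentation)
// result = safe_read32s(esp)
// reg32[ESP] += 4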
if !ctx.cpu.has_flat_segmentation() {
gen_get_reg32(ctx, regs::ESP);
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
let address_local = ctx.builder.set_new_local();
gen_safe_read32(ctx, &address_local);
ctx.builder.free_local(address_local);
}
else {
let reg = ctx.register_locals[regs::ESP as usize].unsafe_clone();
gen_safe_read32(ctx, &reg);
}
gen_get_reg32(ctx, regs::ESP);
ctx.builder.const_i32(4);
ctx.builder.add_i32();
gen_set_reg32(ctx, regs::ESP);
// return value is already on stack
}
pub fn gen_pop32s(ctx: &mut JitContext) {
if ctx.cpu.ssize_32() {
gen_pop32s_ss32(ctx);
}
else {
gen_pop32s_ss16(ctx);
}
}
pub fn gen_adjust_stack_reg(ctx: &mut JitContext, offset: u32) {
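// [e]sp += offset (reg16[SP] for 16-bit stacks, reg32[ESP] for 32-bit stacks)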
if ctx.cpu.ssize_32() {
gen_get_reg32(ctx, regs::ESP);
ctx.builder.const_i32(offset as i32);
ctx.builder.add_i32();
gen_set_reg32(ctx, regs::ESP);
}
else {
gen_get_reg16(ctx, regs::SP);
ctx.builder.const_i32(offset as i32);
ctx.builder.add_i32();
gen_set_reg16(ctx, regs::SP);
}
}
pub fn gen_leave(ctx: &mut JitContext, os32: bool) {
// [e]bp = safe_read{16,32}([e]bp)
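// Sketch:
//   old_vbp <- [e]bp
//   [e]bp   <- safe_read{16,32}(ss_offset + old_vbp)
//   [e]sp   <- old_vbp + (os32 ? 4 : 2)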
if ctx.cpu.ssize_32() {
gen_get_reg32(ctx, regs::EBP);
}
else {
gen_get_reg16(ctx, regs::BP);
}
let old_vbp = ctx.builder.tee_new_local();
if !ctx.cpu.has_flat_segmentation() {
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
}
if os32 {
let address_local = ctx.builder.set_new_local();
gen_safe_read32(ctx, &address_local);
ctx.builder.free_local(address_local);
gen_set_reg32(ctx, regs::EBP);
}
else {
let address_local = ctx.builder.set_new_local();
gen_safe_read16(ctx, &address_local);
ctx.builder.free_local(address_local);
gen_set_reg16(ctx, regs::BP);
}
// [e]sp = [e]bp + (os32 ? 4 : 2)
if ctx.cpu.ssize_32() {
ctx.builder.get_local(&old_vbp);
ctx.builder.const_i32(if os32 { 4 } else { 2 });
ctx.builder.add_i32();
gen_set_reg32(ctx, regs::ESP);
}
else {
ctx.builder.get_local(&old_vbp);
ctx.builder.const_i32(if os32 { 4 } else { 2 });
ctx.builder.add_i32();
gen_set_reg16(ctx, regs::SP);
}
ctx.builder.free_local(old_vbp);
}
pub fn gen_task_switch_test(ctx: &mut JitContext) {
// generate if(cr[0] & (CR0_EM | CR0_TS)) { task_switch_test_jit(); goto exit_with_fault; }
let cr0_offset = global_pointers::get_creg_offset(0);
dbg_assert!(regs::CR0_EM | regs::CR0_TS <= 0xFF);
ctx.builder.load_fixed_u8(cr0_offset);
ctx.builder.const_i32((regs::CR0_EM | regs::CR0_TS) as i32);
ctx.builder.and_i32();
ctx.builder.if_void();
{
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
gen_fn1_const(
ctx.builder,
"task_switch_test_jit",
ctx.start_of_current_instruction,
);
ctx.builder.br(ctx.exit_with_fault_label);
}
ctx.builder.block_end();
}
pub fn gen_task_switch_test_mmx(ctx: &mut JitContext) {
// generate if(cr[0] & (CR0_EM | CR0_TS)) { task_switch_test_mmx_jit(); goto exit_with_fault; }
let cr0_offset = global_pointers::get_creg_offset(0);
dbg_assert!(regs::CR0_EM | regs::CR0_TS <= 0xFF);
ctx.builder.load_fixed_u8(cr0_offset);
ctx.builder.const_i32((regs::CR0_EM | regs::CR0_TS) as i32);
ctx.builder.and_i32();
ctx.builder.if_void();
{
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
gen_fn1_const(
ctx.builder,
"task_switch_test_mmx_jit",
ctx.start_of_current_instruction,
);
ctx.builder.br(ctx.exit_with_fault_label);
}
ctx.builder.block_end();
}
pub fn gen_push16(ctx: &mut JitContext, value_local: &WasmLocal) {
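// Roughly:
//   new_sp <- reg[esp/sp] - 2
//   addr   <- ss_offset + (ssize_32 ? new_sp : new_sp & 0xFFFF)  (ss_offset omitted with flat segmentation)
//   safe_write16(addr, value)
//   reg[esp/sp] <- new_sp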
if ctx.cpu.ssize_32() {
gen_get_reg32(ctx, regs::ESP);
}
else {
gen_get_reg16(ctx, regs::SP);
};
ctx.builder.const_i32(2);
ctx.builder.sub_i32();
let reg_updated_local = if !ctx.cpu.ssize_32() || !ctx.cpu.has_flat_segmentation() {
let reg_updated_local = ctx.builder.tee_new_local();
if !ctx.cpu.ssize_32() {
ctx.builder.const_i32(0xFFFF);
ctx.builder.and_i32();
}
if !ctx.cpu.has_flat_segmentation() {
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
}
let sp_local = ctx.builder.set_new_local();
gen_safe_write16(ctx, &sp_local, &value_local);
ctx.builder.free_local(sp_local);
ctx.builder.get_local(&reg_updated_local);
reg_updated_local
}
else {
// short path: The address written to is equal to ESP/SP minus two
let reg_updated_local = ctx.builder.tee_new_local();
gen_safe_write16(ctx, &reg_updated_local, &value_local);
reg_updated_local
};
2018-08-20 19:18:17 +02:00
if ctx.cpu.ssize_32() {
gen_set_reg32(ctx, regs::ESP);
}
else {
gen_set_reg16(ctx, regs::SP);
};
ctx.builder.free_local(reg_updated_local);
}
pub fn gen_push32(ctx: &mut JitContext, value_local: &WasmLocal) {
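// Same as gen_push16, but with a 4-byte stack decrement and safe_write32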
if ctx.cpu.ssize_32() {
gen_get_reg32(ctx, regs::ESP);
}
else {
gen_get_reg16(ctx, regs::SP);
};
ctx.builder.const_i32(4);
ctx.builder.sub_i32();
let new_sp_local = if !ctx.cpu.ssize_32() || !ctx.cpu.has_flat_segmentation() {
let new_sp_local = ctx.builder.tee_new_local();
if !ctx.cpu.ssize_32() {
ctx.builder.const_i32(0xFFFF);
ctx.builder.and_i32();
}
if !ctx.cpu.has_flat_segmentation() {
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
}
let sp_local = ctx.builder.set_new_local();
gen_safe_write32(ctx, &sp_local, &value_local);
ctx.builder.free_local(sp_local);
ctx.builder.get_local(&new_sp_local);
new_sp_local
}
else {
// short path: The address written to is equal to ESP/SP minus four
let new_sp_local = ctx.builder.tee_new_local();
gen_safe_write32(ctx, &new_sp_local, &value_local);
new_sp_local
};
if ctx.cpu.ssize_32() {
gen_set_reg32(ctx, regs::ESP);
}
else {
gen_set_reg16(ctx, regs::SP);
};
ctx.builder.free_local(new_sp_local);
}
pub fn gen_push32_sreg(ctx: &mut JitContext, reg: u32) {
gen_get_sreg(ctx, reg);
let value_local = ctx.builder.set_new_local();
if ctx.cpu.ssize_32() {
gen_get_reg32(ctx, regs::ESP);
}
else {
gen_get_reg16(ctx, regs::SP);
};
ctx.builder.const_i32(4);
ctx.builder.sub_i32();
let new_sp_local = if !ctx.cpu.ssize_32() || !ctx.cpu.has_flat_segmentation() {
let new_sp_local = ctx.builder.tee_new_local();
if !ctx.cpu.ssize_32() {
ctx.builder.const_i32(0xFFFF);
ctx.builder.and_i32();
}
if !ctx.cpu.has_flat_segmentation() {
gen_get_ss_offset(ctx);
ctx.builder.add_i32();
}
let sp_local = ctx.builder.set_new_local();
gen_safe_write16(ctx, &sp_local, &value_local);
ctx.builder.free_local(sp_local);
ctx.builder.get_local(&new_sp_local);
new_sp_local
}
else {
// short path: The address written to is equal to ESP/SP minus four
let new_sp_local = ctx.builder.tee_new_local();
gen_safe_write16(ctx, &new_sp_local, &value_local);
new_sp_local
};
if ctx.cpu.ssize_32() {
gen_set_reg32(ctx, regs::ESP);
}
else {
gen_set_reg16(ctx, regs::SP);
};
ctx.builder.free_local(new_sp_local);
ctx.builder.free_local(value_local);
}
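// Sketch of how the real EIP is recovered below: only the page part of
// instruction_pointer is kept up to date while inside jitted code, so the
// statically known low 12 bits of the current instruction are OR'd back in,
// and for non-flat code segments the CS base is subtracted to turn the linear
// address back into a segment offset.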
pub fn gen_get_real_eip(ctx: &mut JitContext) {
gen_get_eip(ctx.builder);
ctx.builder.const_i32(!0xFFF);
ctx.builder.and_i32();
ctx.builder.const_i32(ctx.cpu.eip as i32 & 0xFFF);
ctx.builder.or_i32();
if !ctx.cpu.has_flat_segmentation() {
ctx.builder
.load_fixed_i32(global_pointers::get_seg_offset(regs::CS));
ctx.builder.sub_i32();
}
}
pub fn gen_set_last_op1(builder: &mut WasmBuilder, source: &WasmLocal) {
builder.const_i32(global_pointers::last_op1 as i32);
builder.get_local(&source);
builder.store_aligned_i32(0);
}
pub fn gen_set_last_result(builder: &mut WasmBuilder, source: &WasmLocal) {
builder.const_i32(global_pointers::last_result as i32);
builder.get_local(&source);
builder.store_aligned_i32(0);
}
pub fn gen_clear_flags_changed_bits(builder: &mut WasmBuilder, bits_to_clear: i32) {
builder.const_i32(global_pointers::flags_changed as i32);
gen_get_flags_changed(builder);
builder.const_i32(!bits_to_clear);
builder.and_i32();
builder.store_aligned_i32(0);
}
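// last_op_size and flags_changed are adjacent 32-bit globals (asserted below),
// so the helper stores both with one aligned 64-bit write: the low 32 bits hold
// last_op_size and the high 32 bits hold flags_changed (little endian).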
pub fn gen_set_last_op_size_and_flags_changed(
builder: &mut WasmBuilder,
last_op_size: i32,
flags_changed: i32,
) {
dbg_assert!(last_op_size == OPSIZE_8 || last_op_size == OPSIZE_16 || last_op_size == OPSIZE_32);
dbg_assert!(global_pointers::last_op_size as i32 % 8 == 0);
dbg_assert!(global_pointers::last_op_size as i32 + 4 == global_pointers::flags_changed as i32);
builder.const_i32(global_pointers::last_op_size as i32);
builder.const_i64(last_op_size as u32 as i64 | (flags_changed as u32 as i64) << 32);
builder.store_aligned_i64(0);
}
pub fn gen_set_flags_bits(builder: &mut WasmBuilder, bits_to_set: i32) {
builder.const_i32(global_pointers::flags as i32);
2021-01-01 02:14:31 +01:00
gen_get_flags(builder);
2021-01-01 02:14:30 +01:00
builder.const_i32(bits_to_set);
builder.or_i32();
builder.store_aligned_i32(0);
}
pub fn gen_clear_flags_bits(builder: &mut WasmBuilder, bits_to_clear: i32) {
builder.const_i32(global_pointers::flags as i32);
2021-01-01 02:14:31 +01:00
gen_get_flags(builder);
2021-01-01 02:14:30 +01:00
builder.const_i32(!bits_to_clear);
builder.and_i32();
builder.store_aligned_i32(0);
}
#[derive(PartialEq)]
pub enum ConditionNegate {
True,
False,
}
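// Lazy flag evaluation, ZF (informal sketch): when the previous instruction is
// tracked, ZF is recomputed directly from its operands. In the generic case,
// if FLAG_ZERO is still marked in flags_changed, it is reconstructed from
// last_result roughly as
//     zf = ((!last_result & (last_result - 1)) >> last_op_size) & 1
// which is 1 exactly when the bits of last_result up to and including bit
// last_op_size are all zero; otherwise the bit already materialised in `flags`
// is used.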
pub fn gen_getzf(ctx: &mut JitContext, negate: ConditionNegate) {
match &ctx.previous_instruction {
Instruction::Cmp {
dest: InstructionOperandDest::WasmLocal(dest),
source: InstructionOperand::WasmLocal(source),
opsize: OPSIZE_32,
} => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
ctx.builder.get_local(dest);
ctx.builder.get_local(source);
if negate == ConditionNegate::False {
ctx.builder.eq_i32();
}
else {
ctx.builder.ne_i32();
}
},
Instruction::Cmp {
dest: InstructionOperandDest::WasmLocal(dest),
source: InstructionOperand::Immediate(i),
opsize: OPSIZE_32,
} => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
ctx.builder.get_local(dest);
if *i != 0 {
ctx.builder.const_i32(*i);
if negate == ConditionNegate::False {
ctx.builder.eq_i32();
}
else {
ctx.builder.ne_i32();
}
}
else {
if negate == ConditionNegate::False {
ctx.builder.eqz_i32();
}
}
},
Instruction::Cmp { .. }
| Instruction::Sub { .. }
| Instruction::Add { .. }
| Instruction::AdcSbb { .. }
| Instruction::NonZeroShift { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
if negate == ConditionNegate::False {
ctx.builder.eqz_i32();
}
},
Instruction::Bitwise { opsize, .. } => {
let &opsize = opsize;
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
// Note: Necessary because test{8,16} mask neither their last_result nor any of their operands
// TODO: Use local instead of last_result for 8-bit/16-bit
if opsize == OPSIZE_32 {
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
}
else if opsize == OPSIZE_16 {
ctx.builder
.load_fixed_u16(global_pointers::last_result as u32);
}
else if opsize == OPSIZE_8 {
ctx.builder
.load_fixed_u8(global_pointers::last_result as u32);
}
if negate == ConditionNegate::False {
ctx.builder.eqz_i32();
}
},
&Instruction::Other => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
gen_get_flags_changed(ctx.builder);
ctx.builder.const_i32(FLAG_ZERO);
ctx.builder.and_i32();
ctx.builder.if_i32();
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
let last_result = ctx.builder.tee_new_local();
ctx.builder.const_i32(-1);
ctx.builder.xor_i32();
ctx.builder.get_local(&last_result);
ctx.builder.free_local(last_result);
ctx.builder.const_i32(1);
ctx.builder.sub_i32();
ctx.builder.and_i32();
gen_get_last_op_size(ctx.builder);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(1);
ctx.builder.and_i32();
ctx.builder.else_();
gen_get_flags(ctx.builder);
ctx.builder.const_i32(FLAG_ZERO);
ctx.builder.and_i32();
ctx.builder.block_end();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
}
}
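// Lazy flag evaluation, CF (informal sketch): for cmp/sub the carry is an
// unsigned comparison of the first operand against the second (or against
// op1 - result, which is equivalent), and for add it is result < op1. The
// generic fallback XORs last_result and last_op1 with a mask derived from the
// sign bit of flags_changed, which appears to mark subtractions, so a single
// unsigned comparison covers both directions.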
pub fn gen_getcf(ctx: &mut JitContext, negate: ConditionNegate) {
match &ctx.previous_instruction {
Instruction::Cmp { source, opsize, .. }
| Instruction::Sub {
source,
opsize,
is_dec: false,
..
} => {
// Note: x < y and x < x - y can be used interchangeably (see getcf)
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
match (opsize, source) {
(&OPSIZE_32, InstructionOperand::WasmLocal(l)) => ctx.builder.get_local(l),
(_, &InstructionOperand::Immediate(i)) => ctx.builder.const_i32(i),
_ => gen_get_last_result(ctx.builder, &ctx.previous_instruction),
}
if negate == ConditionNegate::True {
ctx.builder.geu_i32();
}
else {
ctx.builder.ltu_i32();
}
},
Instruction::Add {
source,
opsize,
is_inc: false,
..
} => {
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
match (opsize, source) {
(&OPSIZE_32, InstructionOperand::WasmLocal(l)) => ctx.builder.get_local(l),
(_, &InstructionOperand::Immediate(i)) => ctx.builder.const_i32(i),
_ => gen_get_last_op1(ctx.builder, &ctx.previous_instruction),
}
if negate == ConditionNegate::True {
ctx.builder.geu_i32();
}
else {
ctx.builder.ltu_i32();
}
},
Instruction::Add { is_inc: true, .. } | Instruction::Sub { is_dec: true, .. } => {
gen_get_flags(ctx.builder);
ctx.builder.const_i32(FLAG_CARRY);
ctx.builder.and_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
Instruction::Bitwise { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
ctx.builder
.const_i32(if negate == ConditionNegate::True { 1 } else { 0 });
},
Instruction::NonZeroShift { .. } | Instruction::AdcSbb { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_flags(ctx.builder);
ctx.builder.const_i32(FLAG_CARRY);
ctx.builder.and_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
&Instruction::Other => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
gen_get_flags_changed(ctx.builder);
let flags_changed = ctx.builder.tee_new_local();
ctx.builder.const_i32(FLAG_CARRY);
ctx.builder.and_i32();
ctx.builder.if_i32();
ctx.builder.get_local(&flags_changed);
ctx.builder.const_i32(31);
ctx.builder.shr_s_i32();
ctx.builder.free_local(flags_changed);
let sub_mask = ctx.builder.set_new_local();
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.get_local(&sub_mask);
ctx.builder.xor_i32();
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
ctx.builder.get_local(&sub_mask);
ctx.builder.xor_i32();
ctx.builder.ltu_i32();
ctx.builder.else_();
gen_get_flags(ctx.builder);
ctx.builder.const_i32(FLAG_CARRY);
ctx.builder.and_i32();
ctx.builder.block_end();
ctx.builder.free_local(sub_mask);
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
}
}
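// Lazy flag evaluation, SF: the sign flag is the top bit of last_result at the
// relevant operand size (bit 7, 15 or 31), read either via a signed comparison
// against zero (32 bit) or by masking the sign bit (8/16 bit).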
pub fn gen_getsf(ctx: &mut JitContext, negate: ConditionNegate) {
match &ctx.previous_instruction {
Instruction::Cmp { opsize, .. }
| Instruction::Sub { opsize, .. }
| Instruction::Add { opsize, .. }
| Instruction::AdcSbb { opsize, .. }
| Instruction::Bitwise { opsize, .. }
| Instruction::NonZeroShift { opsize, .. } => {
let &opsize = opsize;
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
if opsize == OPSIZE_32 {
ctx.builder.const_i32(0);
if negate == ConditionNegate::True {
ctx.builder.ge_i32();
}
else {
ctx.builder.lt_i32();
}
}
else {
// TODO: use register (see get_last_result)
ctx.builder
.const_i32(if opsize == OPSIZE_16 { 0x8000 } else { 0x80 });
ctx.builder.and_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
}
},
&Instruction::Other => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
gen_get_flags_changed(ctx.builder);
ctx.builder.const_i32(FLAG_SIGN);
ctx.builder.and_i32();
ctx.builder.if_i32();
{
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
gen_get_last_op_size(ctx.builder);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(1);
ctx.builder.and_i32();
}
ctx.builder.else_();
{
gen_get_flags(ctx.builder);
ctx.builder.const_i32(FLAG_SIGN);
ctx.builder.and_i32();
}
ctx.builder.block_end();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
}
}
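// Lazy flag evaluation, OF (informal sketch): for cmp/sub the second operand
// is recovered as op1 - result and overflow is the sign bit of
//     (op1 ^ result) & (op1 ^ (op1 - result))
// the add case mirrors this with op2 = result - op1. Bitwise operations always
// clear OF; shifts and adc/sbb fall back to the bit materialised in `flags`.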
pub fn gen_getof(ctx: &mut JitContext) {
match &ctx.previous_instruction {
Instruction::Cmp { opsize, .. } | Instruction::Sub { opsize, .. } => {
// TODO: a better formula might be possible
let &opsize = opsize;
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.xor_i32();
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
ctx.builder.xor_i32();
ctx.builder.and_i32();
ctx.builder.const_i32(if opsize == OPSIZE_32 {
0x8000_0000u32 as i32
}
else if opsize == OPSIZE_16 {
0x8000
}
else {
0x80
});
ctx.builder.and_i32();
},
Instruction::Add { opsize, .. } => {
// TODO: a better formula might be possible
let &opsize = opsize;
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.xor_i32();
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
ctx.builder.xor_i32();
ctx.builder.and_i32();
ctx.builder.const_i32(if opsize == OPSIZE_32 {
0x8000_0000u32 as i32
}
else if opsize == OPSIZE_16 {
0x8000
}
else {
0x80
});
ctx.builder.and_i32();
},
Instruction::Bitwise { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
ctx.builder.const_i32(0);
},
Instruction::NonZeroShift { .. } | Instruction::AdcSbb { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_flags(ctx.builder);
ctx.builder.const_i32(FLAG_OVERFLOW);
ctx.builder.and_i32();
},
&Instruction::Other => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
gen_get_flags_changed(ctx.builder);
let flags_changed = ctx.builder.tee_new_local();
ctx.builder.const_i32(FLAG_OVERFLOW);
ctx.builder.and_i32();
ctx.builder.if_i32();
{
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
let last_op1 = ctx.builder.tee_new_local();
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
let last_result = ctx.builder.tee_new_local();
ctx.builder.xor_i32();
ctx.builder.get_local(&last_result);
ctx.builder.get_local(&last_op1);
ctx.builder.sub_i32();
gen_get_flags_changed(ctx.builder);
ctx.builder.const_i32(31);
ctx.builder.shr_u_i32();
ctx.builder.sub_i32();
ctx.builder.get_local(&last_result);
ctx.builder.xor_i32();
ctx.builder.and_i32();
gen_get_last_op_size(ctx.builder);
ctx.builder.shr_u_i32();
ctx.builder.const_i32(1);
ctx.builder.and_i32();
ctx.builder.free_local(last_op1);
ctx.builder.free_local(last_result);
}
ctx.builder.else_();
{
gen_get_flags(ctx.builder);
ctx.builder.const_i32(FLAG_OVERFLOW);
ctx.builder.and_i32();
}
ctx.builder.block_end();
ctx.builder.free_local(flags_changed);
},
}
}
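// JBE/JNBE condition (CF | ZF): for a tracked cmp or sub this collapses into a
// single unsigned <= / > comparison of the operands; otherwise the carry and
// zero getters above are combined.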
pub fn gen_test_be(ctx: &mut JitContext, negate: ConditionNegate) {
match &ctx.previous_instruction {
Instruction::Cmp {
dest,
source,
opsize,
} => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
match dest {
InstructionOperandDest::WasmLocal(l) => {
ctx.builder.get_local(l);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 0xFF } else { 0xFFFF });
ctx.builder.and_i32();
}
},
InstructionOperandDest::Other => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
},
}
match source {
InstructionOperand::WasmLocal(l) => {
ctx.builder.get_local(l);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 0xFF } else { 0xFFFF });
ctx.builder.and_i32();
}
},
InstructionOperand::Other => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 0xFF } else { 0xFFFF });
ctx.builder.and_i32();
}
},
&InstructionOperand::Immediate(i) => {
dbg_assert!(*opsize != OPSIZE_8 || i >= 0 && i < 0x100);
dbg_assert!(*opsize != OPSIZE_16 || i >= 0 && i < 0x10000);
ctx.builder.const_i32(i);
},
}
if negate == ConditionNegate::True {
ctx.builder.gtu_i32();
}
else {
ctx.builder.leu_i32();
}
},
Instruction::Sub {
opsize,
source,
is_dec: false,
..
} => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
match (opsize, source) {
(&OPSIZE_32, InstructionOperand::WasmLocal(l)) => ctx.builder.get_local(l),
(_, &InstructionOperand::Immediate(i)) => ctx.builder.const_i32(i),
_ => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 0xFF } else { 0xFFFF });
ctx.builder.and_i32();
}
},
}
if negate == ConditionNegate::True {
ctx.builder.gtu_i32();
}
else {
ctx.builder.leu_i32();
}
},
&Instruction::Bitwise { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_getzf(ctx, negate);
},
&Instruction::Add { .. } | &Instruction::Sub { is_dec: true, .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
// not the best code generation, but reasonable for this fairly uncommon case
gen_getcf(ctx, ConditionNegate::False);
gen_getzf(ctx, ConditionNegate::False);
ctx.builder.or_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
Instruction::Other | Instruction::NonZeroShift { .. } | Instruction::AdcSbb { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
gen_getcf(ctx, ConditionNegate::False);
gen_getzf(ctx, ConditionNegate::False);
ctx.builder.or_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
}
}
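// JL/JNL condition (SF != OF): for a tracked cmp or sub this becomes a signed
// comparison; 8- and 16-bit operands are shifted into the top bits first so
// the 32-bit signed wasm comparison sees the correct sign.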
pub fn gen_test_l(ctx: &mut JitContext, negate: ConditionNegate) {
match &ctx.previous_instruction {
Instruction::Cmp {
dest,
source,
opsize,
} => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
match dest {
InstructionOperandDest::WasmLocal(l) => {
ctx.builder.get_local(l);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
InstructionOperandDest::Other => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
}
match source {
InstructionOperand::WasmLocal(l) => {
ctx.builder.get_local(l);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
InstructionOperand::Other => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
&InstructionOperand::Immediate(i) => {
ctx.builder.const_i32(i);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
}
if negate == ConditionNegate::True {
ctx.builder.ge_i32();
}
else {
ctx.builder.lt_i32();
}
},
Instruction::Sub { opsize, source, .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
match (opsize, source) {
(&OPSIZE_32, InstructionOperand::WasmLocal(l)) => ctx.builder.get_local(l),
(_, &InstructionOperand::Immediate(i)) => ctx.builder.const_i32(
i << if *opsize == OPSIZE_32 {
0
}
else if *opsize == OPSIZE_16 {
16
}
else {
24
},
),
_ => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
}
if negate == ConditionNegate::True {
ctx.builder.ge_i32();
}
else {
ctx.builder.lt_i32();
}
},
&Instruction::Bitwise { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_getsf(ctx, negate);
},
&Instruction::Other
| Instruction::Add { .. }
| Instruction::NonZeroShift { .. }
| Instruction::AdcSbb { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
if let Instruction::Add { .. } = ctx.previous_instruction {
gen_profiler_stat_increment(
ctx.builder,
profiler::stat::CONDITION_UNOPTIMISED_UNHANDLED_L,
);
}
gen_getsf(ctx, ConditionNegate::False);
ctx.builder.eqz_i32();
gen_getof(ctx);
ctx.builder.eqz_i32();
ctx.builder.xor_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
}
}
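// JLE/JNLE condition ((SF != OF) | ZF): for a tracked cmp or sub this becomes
// a single signed <= / > comparison; otherwise it is composed from gen_test_l
// and gen_getzf.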
pub fn gen_test_le(ctx: &mut JitContext, negate: ConditionNegate) {
match &ctx.previous_instruction {
Instruction::Cmp {
dest,
source,
opsize,
} => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
match dest {
InstructionOperandDest::WasmLocal(l) => {
ctx.builder.get_local(l);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
InstructionOperandDest::Other => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
}
match source {
InstructionOperand::WasmLocal(l) => {
ctx.builder.get_local(l);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
InstructionOperand::Other => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
&InstructionOperand::Immediate(i) => {
ctx.builder.const_i32(i);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
}
if negate == ConditionNegate::True {
ctx.builder.gt_i32();
}
else {
ctx.builder.le_i32();
}
},
Instruction::Sub { opsize, source, .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
match (opsize, source) {
(&OPSIZE_32, InstructionOperand::WasmLocal(l)) => ctx.builder.get_local(l),
(_, &InstructionOperand::Immediate(i)) => ctx.builder.const_i32(
i << if *opsize == OPSIZE_32 {
0
}
else if *opsize == OPSIZE_16 {
16
}
else {
24
},
),
_ => {
gen_get_last_op1(ctx.builder, &ctx.previous_instruction);
gen_get_last_result(ctx.builder, &ctx.previous_instruction);
ctx.builder.sub_i32();
if *opsize == OPSIZE_8 || *opsize == OPSIZE_16 {
ctx.builder
.const_i32(if *opsize == OPSIZE_8 { 24 } else { 16 });
ctx.builder.shl_i32();
}
},
}
if negate == ConditionNegate::True {
ctx.builder.gt_i32();
}
else {
ctx.builder.le_i32();
}
},
&Instruction::Bitwise { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_OPTIMISED);
// TODO: Could probably be improved (<= 0)
gen_test_l(ctx, ConditionNegate::False);
gen_getzf(ctx, ConditionNegate::False);
ctx.builder.or_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
Instruction::Other
| Instruction::Add { .. }
| Instruction::NonZeroShift { .. }
| Instruction::AdcSbb { .. } => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
if let Instruction::Add { .. } = ctx.previous_instruction {
gen_profiler_stat_increment(
ctx.builder,
profiler::stat::CONDITION_UNOPTIMISED_UNHANDLED_LE,
);
}
gen_test_l(ctx, ConditionNegate::False);
gen_getzf(ctx, ConditionNegate::False);
ctx.builder.or_i32();
if negate == ConditionNegate::True {
ctx.builder.eqz_i32();
}
},
}
}
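// LOOPNZ/LOOPZ/LOOP/JCXZ conditions: gen_test_loop yields the raw (E)CX value;
// loopnz is (E)CX != 0 && !ZF, loopz is (E)CX != 0 && ZF, and jcxz is
// (E)CX == 0.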
pub fn gen_test_loopnz(ctx: &mut JitContext, is_asize_32: bool) {
gen_test_loop(ctx, is_asize_32);
ctx.builder.eqz_i32();
gen_getzf(ctx, ConditionNegate::False);
ctx.builder.or_i32();
ctx.builder.eqz_i32();
}
pub fn gen_test_loopz(ctx: &mut JitContext, is_asize_32: bool) {
gen_test_loop(ctx, is_asize_32);
ctx.builder.eqz_i32();
gen_getzf(ctx, ConditionNegate::False);
ctx.builder.eqz_i32();
ctx.builder.or_i32();
ctx.builder.eqz_i32();
}
pub fn gen_test_loop(ctx: &mut JitContext, is_asize_32: bool) {
if is_asize_32 {
gen_get_reg32(ctx, regs::ECX);
}
else {
gen_get_reg16(ctx, regs::CX);
}
}
pub fn gen_test_jcxz(ctx: &mut JitContext, is_asize_32: bool) {
if is_asize_32 {
gen_get_reg32(ctx, regs::ECX);
}
else {
gen_get_reg16(ctx, regs::CX);
}
ctx.builder.eqz_i32();
}
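// FPU loads (informal sketch): the called helpers convert the source value to
// an 80-bit float and write it into sse_scratch_register; the ten bytes are
// then read back onto the wasm stack, presumably as the 64-bit mantissa
// followed by the 16-bit sign/exponent word.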
pub fn gen_fpu_get_sti(ctx: &mut JitContext, i: u32) {
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32);
ctx.builder.const_i32(i as i32);
ctx.builder.call_fn2("fpu_get_sti");
ctx.builder
.load_fixed_i64(global_pointers::sse_scratch_register as u32);
ctx.builder
.load_fixed_u16(global_pointers::sse_scratch_register as u32 + 8);
}
pub fn gen_fpu_load_m32(ctx: &mut JitContext, modrm_byte: ModrmByte) {
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32);
gen_modrm_resolve_safe_read32(ctx, modrm_byte);
ctx.builder.call_fn2("f32_to_f80");
ctx.builder
.load_fixed_i64(global_pointers::sse_scratch_register as u32);
ctx.builder
.load_fixed_u16(global_pointers::sse_scratch_register as u32 + 8);
}
pub fn gen_fpu_load_m64(ctx: &mut JitContext, modrm_byte: ModrmByte) {
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32);
gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.call_fn2_i32_i64("f64_to_f80");
ctx.builder
.load_fixed_i64(global_pointers::sse_scratch_register as u32);
ctx.builder
.load_fixed_u16(global_pointers::sse_scratch_register as u32 + 8);
}
pub fn gen_fpu_load_i16(ctx: &mut JitContext, modrm_byte: ModrmByte) {
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32);
gen_modrm_resolve_safe_read16(ctx, modrm_byte);
sign_extend_i16(ctx.builder);
ctx.builder.call_fn2("i32_to_f80");
ctx.builder
.load_fixed_i64(global_pointers::sse_scratch_register as u32);
ctx.builder
.load_fixed_u16(global_pointers::sse_scratch_register as u32 + 8);
}
pub fn gen_fpu_load_i32(ctx: &mut JitContext, modrm_byte: ModrmByte) {
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32);
gen_modrm_resolve_safe_read32(ctx, modrm_byte);
ctx.builder.call_fn2("i32_to_f80");
ctx.builder
.load_fixed_i64(global_pointers::sse_scratch_register as u32);
ctx.builder
.load_fixed_u16(global_pointers::sse_scratch_register as u32 + 8);
}
pub fn gen_fpu_load_i64(ctx: &mut JitContext, modrm_byte: ModrmByte) {
ctx.builder
.const_i32(global_pointers::sse_scratch_register as i32);
gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.call_fn2_i32_i64("i64_to_f80");
ctx.builder
.load_fixed_i64(global_pointers::sse_scratch_register as u32);
ctx.builder
.load_fixed_u16(global_pointers::sse_scratch_register as u32 + 8);
}
pub fn gen_trigger_de(ctx: &mut JitContext) {
gen_fn1_const(
ctx.builder,
"trigger_de_jit",
ctx.start_of_current_instruction,
);
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
ctx.builder.br(ctx.exit_with_fault_label);
}
pub fn gen_trigger_ud(ctx: &mut JitContext) {
gen_fn1_const(
ctx.builder,
"trigger_ud_jit",
ctx.start_of_current_instruction,
);
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
ctx.builder.br(ctx.exit_with_fault_label);
}
pub fn gen_trigger_gp(ctx: &mut JitContext, error_code: u32) {
gen_fn2_const(
ctx.builder,
"trigger_gp_jit",
error_code,
ctx.start_of_current_instruction,
);
gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
ctx.builder.br(ctx.exit_with_fault_label);
}
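// Condition codes follow the x86 jcc encoding: the low nibble selects the
// condition (0 O, 1 NO, 2 B, 3 NB, 4 Z, 5 NZ, 6 BE, 7 NBE, 8 S, 9 NS, A P,
// B NP, C L, D NL, E LE, F NLE), with bit 0 acting as negation, which is why
// gen_condition_fn_negated simply flips it. 0xE0..=0xE3 are the
// loopnz/loopz/loop/jcxz pseudo-conditions.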
pub fn gen_condition_fn_negated(ctx: &mut JitContext, condition: u8) {
gen_condition_fn(ctx, condition ^ 1)
}
pub fn gen_condition_fn(ctx: &mut JitContext, condition: u8) {
if condition & 0xF0 == 0x00 || condition & 0xF0 == 0x70 || condition & 0xF0 == 0x80 {
match condition & 0xF {
0x0 => {
gen_getof(ctx);
},
0x1 => {
gen_getof(ctx);
ctx.builder.eqz_i32();
},
0x2 => {
gen_getcf(ctx, ConditionNegate::False);
},
0x3 => {
gen_getcf(ctx, ConditionNegate::True);
},
0x4 => {
gen_getzf(ctx, ConditionNegate::False);
},
0x5 => {
gen_getzf(ctx, ConditionNegate::True);
},
0x6 => {
gen_test_be(ctx, ConditionNegate::False);
},
0x7 => {
gen_test_be(ctx, ConditionNegate::True);
},
0x8 => {
gen_getsf(ctx, ConditionNegate::False);
},
0x9 => {
gen_getsf(ctx, ConditionNegate::True);
},
0xA => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED_PF);
ctx.builder.call_fn0_ret("test_p");
},
0xB => {
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED);
gen_profiler_stat_increment(ctx.builder, profiler::stat::CONDITION_UNOPTIMISED_PF);
ctx.builder.call_fn0_ret("test_np");
},
0xC => {
gen_test_l(ctx, ConditionNegate::False);
},
0xD => {
gen_test_l(ctx, ConditionNegate::True);
},
0xE => {
gen_test_le(ctx, ConditionNegate::False);
},
0xF => {
gen_test_le(ctx, ConditionNegate::True);
},
_ => {
dbg_assert!(false);
},
}
}
else {
// loop, loopnz, loopz, jcxz
dbg_assert!(condition & !0x3 == 0xE0);
if condition == 0xE0 {
gen_test_loopnz(ctx, ctx.cpu.asize_32());
}
else if condition == 0xE1 {
gen_test_loopz(ctx, ctx.cpu.asize_32());
}
else if condition == 0xE2 {
gen_test_loop(ctx, ctx.cpu.asize_32());
}
else if condition == 0xE3 {
gen_test_jcxz(ctx, ctx.cpu.asize_32());
}
}
}
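// While executing a generated module the eight general purpose registers live
// in wasm locals; the two helpers below flush them to their reg32 memory slots
// and reload them, and are emitted around calls that may read or modify
// registers in memory.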
pub fn gen_move_registers_from_locals_to_memory(ctx: &mut JitContext) {
if cfg!(feature = "profiler") {
let instruction = memory::read32s(ctx.start_of_current_instruction) as u32;
::opstats::gen_opstat_unguarded_register(ctx.builder, instruction);
}
for i in 0..8 {
ctx.builder
.const_i32(global_pointers::get_reg32_offset(i as u32) as i32);
ctx.builder.get_local(&ctx.register_locals[i]);
ctx.builder.store_aligned_i32(0);
}
}
pub fn gen_move_registers_from_memory_to_locals(ctx: &mut JitContext) {
if cfg!(feature = "profiler") {
let instruction = memory::read32s(ctx.start_of_current_instruction) as u32;
::opstats::gen_opstat_unguarded_register(ctx.builder, instruction);
}
for i in 0..8 {
ctx.builder
.const_i32(global_pointers::get_reg32_offset(i as u32) as i32);
ctx.builder.load_aligned_i32(0);
ctx.builder.set_local(&ctx.register_locals[i]);
}
}
pub fn gen_profiler_stat_increment(builder: &mut WasmBuilder, stat: profiler::stat) {
if !cfg!(feature = "profiler") {
return;
}
let addr = unsafe { profiler::stat_array.as_mut_ptr().offset(stat as isize) } as u32;
builder.increment_fixed_i64(addr, 1)
}
pub fn gen_debug_track_jit_exit(builder: &mut WasmBuilder, address: u32) {
if cfg!(feature = "profiler") {
gen_fn1_const(builder, "track_jit_exit", address);
}
}