diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64Assembler.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64Assembler.java index d66ba6b46244..39cb12c32773 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64Assembler.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64Assembler.java @@ -512,7 +512,8 @@ public static class AMD64MROp extends AMD64RROp { // @formatter:off public static final AMD64MROp MOVB = new AMD64MROp("MOVB", 0x88, OpAssertion.ByteAssertion); public static final AMD64MROp MOV = new AMD64MROp("MOV", 0x89, OpAssertion.WordOrLargerAssertion); - // @formatter:on + public static final AMD64MROp TEST = new AMD64MROp("TEST", 0x85, OpAssertion.WordOrLargerAssertion); + // @formatter:on protected AMD64MROp(String opcode, int op, OpAssertion assertion) { this(opcode, 0, 0, op, assertion, null); diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64MacroAssembler.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64MacroAssembler.java index 7650b7e69b88..d9931c91f682 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64MacroAssembler.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/asm/amd64/AMD64MacroAssembler.java @@ -299,7 +299,7 @@ public final void movdbl(AMD64Address dst, Register src) { */ public final void movlong(AMD64Address dst, long src) { if (NumUtil.isInt(src)) { - AMD64MIOp.MOV.emit(this, OperandSize.QWORD, dst, (int) src); + emitAMD64MIOp(AMD64MIOp.MOV, OperandSize.QWORD, dst, (int) src, false); } else { AMD64Address high = new AMD64Address(dst.getBase(), dst.getIndex(), dst.getScale(), dst.getDisplacement() + 4, dst.getDisplacementAnnotation(), dst.instructionStartPosition); movl(dst, (int) (src & 0xFFFFFFFF)); @@ -1439,4 +1439,79 @@ public final void ptestU(AVXKind.AVXSize size, Register dst, AMD64Address src, R public 
boolean isAVX() { return supports(CPUFeature.AVX); } + + public final void moveInt(Register dst, int imm) { + if (imm == 0) { + Register zeroValueRegister = getZeroValueRegister(); + if (!Register.None.equals(zeroValueRegister)) { + movl(dst, zeroValueRegister); + return; + } + } + movl(dst, imm); + } + + public final void moveInt(AMD64Address dst, int imm) { + if (imm == 0) { + Register zeroValueRegister = getZeroValueRegister(); + if (!Register.None.equals(zeroValueRegister)) { + movl(dst, zeroValueRegister); + return; + } + } + movl(dst, imm); + } + + public final void moveIntSignExtend(Register result, int imm) { + if (imm == 0) { + Register zeroValueRegister = getZeroValueRegister(); + if (!Register.None.equals(zeroValueRegister)) { + movl(result, zeroValueRegister); + return; + } + } + movslq(result, imm); + } + + private static AMD64MROp toMR(AMD64MIOp op) { + if (op == AMD64MIOp.MOVB) { + return AMD64MROp.MOVB; + } else if (op == AMD64MIOp.MOV) { + return AMD64MROp.MOV; + } else if (op == AMD64MIOp.TEST) { + return AMD64MROp.TEST; + } + return null; + } + + public final void emitAMD64MIOp(AMD64MIOp opcode, OperandSize size, Register dst, int imm, boolean annotateImm) { + if (imm == 0) { + Register zeroValueRegister = getZeroValueRegister(); + AMD64MROp mrOp = toMR(opcode); + if (!Register.None.equals(zeroValueRegister) && mrOp != null) { + mrOp.emit(this, size, dst, zeroValueRegister); + return; + } + } + opcode.emit(this, size, dst, imm, annotateImm); + } + + public final void emitAMD64MIOp(AMD64MIOp opcode, OperandSize size, AMD64Address dst, int imm, boolean annotateImm) { + if (imm == 0) { + Register zeroValueRegister = getZeroValueRegister(); + AMD64MROp mrOp = toMR(opcode); + if (!Register.None.equals(zeroValueRegister) && mrOp != null) { + mrOp.emit(this, size, dst, zeroValueRegister); + return; + } + } + opcode.emit(this, size, dst, imm, annotateImm); + } + + /** + * Returns a register whose content is always zero. 
+ */ + public Register getZeroValueRegister() { + return Register.None; + } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java index 372dc430c8fb..5fdfa895ef19 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,6 +96,7 @@ import jdk.vm.ci.code.RegisterConfig; import jdk.vm.ci.code.RegisterValue; import jdk.vm.ci.meta.AllocatableValue; +import jdk.vm.ci.meta.Constant; import jdk.vm.ci.meta.DeoptimizationAction; import jdk.vm.ci.meta.DeoptimizationReason; import jdk.vm.ci.meta.JavaConstant; @@ -103,6 +104,7 @@ import jdk.vm.ci.meta.PlatformKind; import jdk.vm.ci.meta.SpeculationLog; import jdk.vm.ci.meta.Value; +import jdk.vm.ci.meta.ValueKind; /** * LIR generator specialized for AMD64 HotSpot. 
@@ -655,4 +657,15 @@ public int getArrayLengthOffset() { public Register getHeapBaseRegister() { return getProviders().getRegisters().getHeapBaseRegister(); } + + @Override + public AllocatableValue emitLoadConstant(ValueKind kind, Constant constant) { + if (((AMD64Kind) kind.getPlatformKind()).isInteger() && constant instanceof JavaConstant && constant.isDefaultForKind()) { + Register zeroValueRegister = getProviders().getRegisters().getZeroValueRegister(config); + if (!Register.None.equals(zeroValueRegister)) { + return zeroValueRegister.asValue(kind); + } + } + return super.emitLoadConstant(kind, constant); + } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotMacroAssembler.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotMacroAssembler.java index b9ffb9e580fb..cb5e2b831943 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotMacroAssembler.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/AMD64HotSpotMacroAssembler.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -171,4 +171,9 @@ protected final int membarOffset() { } return offset; } + + @Override + public Register getZeroValueRegister() { + return providers.getRegisters().getZeroValueRegister(config); + } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXAtomicReadAndWriteOp.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXAtomicReadAndWriteOp.java index 36faa63f96d3..e39b2f3978ae 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXAtomicReadAndWriteOp.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXAtomicReadAndWriteOp.java @@ -54,7 +54,7 @@ public AMD64HotSpotXAtomicReadAndWriteOp(Variable result, AMD64AddressValue load @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { AMD64Move.move(QWORD, crb, masm, result, newValue); - masm.xchgq(asRegister(result), loadAddress.toAddress()); + masm.xchgq(asRegister(result), loadAddress.toAddress(masm)); emitBarrier(crb, masm); } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXBarrieredOp.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXBarrieredOp.java index 91f1caa40581..91986534ce7d 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXBarrieredOp.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/hotspot/amd64/x/AMD64HotSpotXBarrieredOp.java @@ -65,7 +65,7 @@ protected AMD64HotSpotXBarrieredOp(LIRInstructionClass newKind) { return new AMD64AddressValue(newKind, base, index, stride, displacement, displacementAnnotation); } - private static Register toRegister(AllocatableValue value) { - if (value.equals(Value.ILLEGAL)) { - return Register.None; - } else { - RegisterValue reg = 
(RegisterValue) value; - return reg.getRegister(); + /** + * Baseless address encoding forces 4-byte displacement. E.g., + * + *
+     * mov rax, QWORD PTR [rax*8+0x10]       48 8b 04 c5 10 00 00 00
+     * mov rax, QWORD PTR [r12+rax*8+0x10]   49 8b 44 c4 10
+     * 
+     * + * We use r12 as the base register for addresses without base, if the displacement is within + * range of a byte and the value in r12 is always 0. The latter scenario may happen in + * HotSpot with compressed oops where r12 serves as the heap base register, and when the + * offset for heap base is 0. + * + * For a displacement outside the range of a byte, we keep the base register {@link Register#None} + * to avoid a potential additional REX prefix for the extended registers (r8-r15). E.g., + * + * 
+     * mov eax, DWORD PTR [rax*8+0x100]      8b 04 c5 00 01 00 00
+     * mov eax, DWORD PTR [r12+rax*8+0x100]  41 8b 84 c4 00 01 00 00
+     * 
+ */ + private Register getBaseRegisterForBaselessAddress(AMD64MacroAssembler masm) { + if (NumUtil.isByte(displacement)) { + Register reg = masm.getZeroValueRegister(); + if (r12.equals(reg)) { + return r12; + } } + return Register.None; } - public AMD64Address toAddress() { - return new AMD64Address(toRegister(base), toRegister(index), stride, displacement, displacementAnnotation); + public AMD64Address toAddress(AMD64MacroAssembler masm) { + Register baseReg = Value.ILLEGAL.equals(base) ? getBaseRegisterForBaselessAddress(masm) : ((RegisterValue) base).getRegister(); + Register indexReg = Value.ILLEGAL.equals(index) ? Register.None : ((RegisterValue) index).getRegister(); + return new AMD64Address(baseReg, indexReg, stride, displacement, displacementAnnotation); } @Override diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Binary.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Binary.java index 056329464f20..76c2acf2bc51 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Binary.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Binary.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -247,7 +247,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, asRegister(result), y.toAddress()); + opcode.emit(masm, size, asRegister(result), y.toAddress(masm)); } @Override diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64BinaryConsumer.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64BinaryConsumer.java index 82468338a2be..bab4f11b17c2 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64BinaryConsumer.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64BinaryConsumer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,10 +24,10 @@ */ package jdk.graal.compiler.lir.amd64; +import static jdk.graal.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; import static jdk.vm.ci.code.ValueUtil.asRegister; import static jdk.vm.ci.code.ValueUtil.isRegister; import static jdk.vm.ci.code.ValueUtil.isStackSlot; -import static jdk.graal.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; import jdk.graal.compiler.asm.amd64.AMD64Address; import jdk.graal.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; @@ -43,7 +43,6 @@ import jdk.graal.compiler.lir.Opcode; import jdk.graal.compiler.lir.StandardOp; import jdk.graal.compiler.lir.asm.CompilationResultBuilder; - import jdk.vm.ci.code.site.DataSectionReference; import jdk.vm.ci.meta.AllocatableValue; import jdk.vm.ci.meta.Constant; @@ -119,10 +118,10 @@ protected ConstOp(LIRInstructionClass c, AMD64MIOp opcode, Op @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (isRegister(x)) { - opcode.emit(masm, size, asRegister(x), y, shouldAnnotate()); + masm.emitAMD64MIOp(opcode, size, asRegister(x), y, shouldAnnotate()); } else { assert isStackSlot(x); - opcode.emit(masm, size, (AMD64Address) crb.asAddress(x), y, shouldAnnotate()); + masm.emitAMD64MIOp(opcode, size, (AMD64Address) crb.asAddress(x), y, shouldAnnotate()); } } @@ -228,7 +227,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, asRegister(x), y.toAddress()); + opcode.emit(masm, size, asRegister(x), y.toAddress(masm)); } @Override @@ -272,7 +271,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, x.toAddress(), asRegister(y)); + opcode.emit(masm, size, x.toAddress(masm), asRegister(y)); } 
@Override @@ -324,7 +323,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, x.toAddress(), y, shouldAnnotate()); + masm.emitAMD64MIOp(opcode, size, x.toAddress(masm), y, shouldAnnotate()); } protected boolean shouldAnnotate() { diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64CacheWritebackOp.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64CacheWritebackOp.java index e7c8ee97001c..b21689ee358e 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64CacheWritebackOp.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64CacheWritebackOp.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,6 @@ public AMD64CacheWritebackOp(AMD64AddressValue address) { @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { - masm.clflush(address.toAddress()); + masm.clflush(address.toAddress(masm)); } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ControlFlow.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ControlFlow.java index f6b0e76fa7c4..47f39bed188e 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ControlFlow.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ControlFlow.java @@ -221,14 +221,14 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { } else { AMD64AddressValue yAddress = (AMD64AddressValue) y; if (crb.isSuccessorEdge(trueDestination)) { - masm.testAndJcc(size, asRegister(x), yAddress.toAddress(), condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); + masm.testAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); } else if (crb.isSuccessorEdge(falseDestination)) { - masm.testAndJcc(size, asRegister(x), yAddress.toAddress(), condition, trueDestination.label(), false, applyBeforeFusedPair); + masm.testAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition, trueDestination.label(), false, applyBeforeFusedPair); } else if (trueDestinationProbability < 0.5) { - masm.testAndJcc(size, asRegister(x), yAddress.toAddress(), condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); + masm.testAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); masm.jmp(trueDestination.label()); } else { - masm.testAndJcc(size, asRegister(x), yAddress.toAddress(), condition, trueDestination.label(), 
false, applyBeforeFusedPair); + masm.testAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition, trueDestination.label(), false, applyBeforeFusedPair); masm.jmp(falseDestination.label()); } } @@ -301,14 +301,14 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { } else { AMD64AddressValue xAddress = (AMD64AddressValue) x; if (crb.isSuccessorEdge(trueDestination)) { - masm.testAndJcc(size, xAddress.toAddress(), y, condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); + masm.testAndJcc(size, xAddress.toAddress(masm), y, condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); } else if (crb.isSuccessorEdge(falseDestination)) { - masm.testAndJcc(size, xAddress.toAddress(), y, condition, trueDestination.label(), false, applyBeforeFusedPair); + masm.testAndJcc(size, xAddress.toAddress(masm), y, condition, trueDestination.label(), false, applyBeforeFusedPair); } else if (trueDestinationProbability < 0.5) { - masm.testAndJcc(size, xAddress.toAddress(), y, condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); + masm.testAndJcc(size, xAddress.toAddress(masm), y, condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); masm.jmp(trueDestination.label()); } else { - masm.testAndJcc(size, xAddress.toAddress(), y, condition, trueDestination.label(), false, applyBeforeFusedPair); + masm.testAndJcc(size, xAddress.toAddress(masm), y, condition, trueDestination.label(), false, applyBeforeFusedPair); masm.jmp(falseDestination.label()); } } @@ -397,14 +397,14 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { } else { AMD64AddressValue yAddress = (AMD64AddressValue) y; if (crb.isSuccessorEdge(trueDestination)) { - masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(), condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); + masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition.negate(), 
falseDestination.label(), false, applyBeforeFusedPair); } else if (crb.isSuccessorEdge(falseDestination)) { - masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(), condition, trueDestination.label(), false, applyBeforeFusedPair); + masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition, trueDestination.label(), false, applyBeforeFusedPair); } else if (trueDestinationProbability < 0.5) { - masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(), condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); + masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition.negate(), falseDestination.label(), false, applyBeforeFusedPair); masm.jmp(trueDestination.label()); } else { - masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(), condition, trueDestination.label(), false, applyBeforeFusedPair); + masm.cmpAndJcc(size, asRegister(x), yAddress.toAddress(masm), condition, trueDestination.label(), false, applyBeforeFusedPair); masm.jmp(falseDestination.label()); } } @@ -495,14 +495,14 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { } else { AMD64AddressValue xAddress = (AMD64AddressValue) x; if (crb.isSuccessorEdge(trueDestination)) { - masm.cmpAndJcc(size, xAddress.toAddress(), y, condition.negate(), falseDestination.label(), false, inlineDataInCode, applyBeforeFusedPair); + masm.cmpAndJcc(size, xAddress.toAddress(masm), y, condition.negate(), falseDestination.label(), false, inlineDataInCode, applyBeforeFusedPair); } else if (crb.isSuccessorEdge(falseDestination)) { - masm.cmpAndJcc(size, xAddress.toAddress(), y, condition, trueDestination.label(), false, inlineDataInCode, applyBeforeFusedPair); + masm.cmpAndJcc(size, xAddress.toAddress(masm), y, condition, trueDestination.label(), false, inlineDataInCode, applyBeforeFusedPair); } else if (trueDestinationProbability < 0.5) { - masm.cmpAndJcc(size, xAddress.toAddress(), y, condition.negate(), falseDestination.label(), false, 
inlineDataInCode, applyBeforeFusedPair); + masm.cmpAndJcc(size, xAddress.toAddress(masm), y, condition.negate(), falseDestination.label(), false, inlineDataInCode, applyBeforeFusedPair); masm.jmp(trueDestination.label()); } else { - masm.cmpAndJcc(size, xAddress.toAddress(), y, condition, trueDestination.label(), false, inlineDataInCode, applyBeforeFusedPair); + masm.cmpAndJcc(size, xAddress.toAddress(masm), y, condition, trueDestination.label(), false, inlineDataInCode, applyBeforeFusedPair); masm.jmp(falseDestination.label()); } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ConvertFloatToIntegerOp.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ConvertFloatToIntegerOp.java index 00eb931f22f6..80351b592704 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ConvertFloatToIntegerOp.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ConvertFloatToIntegerOp.java @@ -129,7 +129,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { Label isNotNaN = new Label(); UCOMIS.emit(masm, floatSize, src, src); masm.jcc(AMD64Assembler.ConditionFlag.NoParity, isNotNaN, true); - masm.movl(dst, 0); + masm.moveInt(dst, 0); masm.jmp(done); masm.bind(isNotNaN); } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Move.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Move.java index bc77b76915af..2c8d1d060fd4 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Move.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Move.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -272,10 +272,10 @@ public LeaOp(AllocatableValue result, AMD64AddressValue address, OperandSize siz @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (size == OperandSize.QWORD) { - masm.leaq(asRegister(result, AMD64Kind.QWORD), address.toAddress()); + masm.leaq(asRegister(result, AMD64Kind.QWORD), address.toAddress(masm)); } else { assert size == OperandSize.DWORD : size; - masm.lead(asRegister(result, AMD64Kind.DWORD), address.toAddress()); + masm.lead(asRegister(result, AMD64Kind.DWORD), address.toAddress(masm)); } } } @@ -348,7 +348,7 @@ public NullCheckOp(AMD64AddressValue address, LIRFrameState state) { @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { crb.recordImplicitException(masm.position(), state); - masm.nullCheck(address.toAddress()); + masm.nullCheck(address.toAddress(masm)); } @Override @@ -392,16 +392,16 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { } switch (accessKind) { case BYTE: - masm.cmpxchgb(asRegister(newValue), address.toAddress()); + masm.cmpxchgb(asRegister(newValue), address.toAddress(masm)); break; case WORD: - masm.cmpxchgw(asRegister(newValue), address.toAddress()); + masm.cmpxchgw(asRegister(newValue), address.toAddress(masm)); break; case DWORD: - masm.cmpxchgl(asRegister(newValue), address.toAddress()); + masm.cmpxchgl(asRegister(newValue), address.toAddress(masm)); break; case QWORD: - masm.cmpxchgq(asRegister(newValue), address.toAddress()); + masm.cmpxchgq(asRegister(newValue), address.toAddress(masm)); break; default: throw GraalError.shouldNotReachHereUnexpectedValue(accessKind); // ExcludeFromJacocoGeneratedReport @@ -435,16 +435,16 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { } switch (accessKind) { case BYTE: - masm.xaddb(address.toAddress(), asRegister(result)); + 
masm.xaddb(address.toAddress(masm), asRegister(result)); break; case WORD: - masm.xaddw(address.toAddress(), asRegister(result)); + masm.xaddw(address.toAddress(masm), asRegister(result)); break; case DWORD: - masm.xaddl(address.toAddress(), asRegister(result)); + masm.xaddl(address.toAddress(masm), asRegister(result)); break; case QWORD: - masm.xaddq(address.toAddress(), asRegister(result)); + masm.xaddq(address.toAddress(masm), asRegister(result)); break; default: throw GraalError.shouldNotReachHereUnexpectedValue(accessKind); // ExcludeFromJacocoGeneratedReport @@ -475,16 +475,16 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { move(accessKind, crb, masm, result, newValue); switch (accessKind) { case BYTE: - masm.xchgb(asRegister(result), address.toAddress()); + masm.xchgb(asRegister(result), address.toAddress(masm)); break; case WORD: - masm.xchgw(asRegister(result), address.toAddress()); + masm.xchgw(asRegister(result), address.toAddress(masm)); break; case DWORD: - masm.xchgl(asRegister(result), address.toAddress()); + masm.xchgl(asRegister(result), address.toAddress(masm)); break; case QWORD: - masm.xchgq(asRegister(result), address.toAddress()); + masm.xchgq(asRegister(result), address.toAddress(masm)); break; default: throw GraalError.shouldNotReachHereUnexpectedValue(accessKind); // ExcludeFromJacocoGeneratedReport @@ -694,8 +694,7 @@ public static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler m // Do not optimize with an XOR as this instruction may be between // a CMP and a Jcc in which case the XOR will modify the condition // flags and interfere with the Jcc. - masm.movl(result, input.asInt()); - + masm.moveInt(result, input.asInt()); break; case Long: // Do not optimize with an XOR as this instruction may be between @@ -703,7 +702,7 @@ public static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler m // flags and interfere with the Jcc. 
if (input.asLong() == (int) input.asLong()) { // Sign extended to long - masm.movslq(result, (int) input.asLong()); + masm.moveIntSignExtend(result, (int) input.asLong()); } else if ((input.asLong() & 0xFFFFFFFFL) == input.asLong()) { // Zero extended to long masm.movl(result, (int) input.asLong()); @@ -737,7 +736,7 @@ public static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler m masm.movq(result, crb.uncompressedNullRegister); } else { // Upper bits will be zeroed so this also works for narrow oops - masm.movslq(result, 0); + masm.moveIntSignExtend(result, 0); } } else { if (crb.target.inlineObjects) { @@ -819,16 +818,16 @@ public static void const2stack(CompilationResultBuilder crb, AMD64MacroAssembler switch ((AMD64Kind) result.getPlatformKind()) { case BYTE: assert NumUtil.isByte(imm) : "Is not in byte range: " + imm; - AMD64MIOp.MOVB.emit(masm, OperandSize.BYTE, dest, (int) imm); + masm.emitAMD64MIOp(AMD64MIOp.MOVB, OperandSize.BYTE, dest, (int) imm, false); break; case WORD: assert NumUtil.isShort(imm) : "Is not in short range: " + imm; - AMD64MIOp.MOV.emit(masm, OperandSize.WORD, dest, (int) imm); + masm.emitAMD64MIOp(AMD64MIOp.MOV, OperandSize.WORD, dest, (int) imm, false); break; case DWORD: case SINGLE: assert NumUtil.isInt(imm) : "Is not in int range: " + imm; - masm.movl(dest, (int) imm); + masm.moveInt(dest, (int) imm); break; case QWORD: case DOUBLE: diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64PrefetchOp.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64PrefetchOp.java index dd0a2a03b63a..cc88e7e044b7 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64PrefetchOp.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64PrefetchOp.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,16 +46,16 @@ public AMD64PrefetchOp(AMD64AddressValue address, int instr) { public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { switch (instr) { case 0: - masm.prefetchnta(address.toAddress()); + masm.prefetchnta(address.toAddress(masm)); break; case 1: - masm.prefetcht0(address.toAddress()); + masm.prefetcht0(address.toAddress(masm)); break; case 2: - masm.prefetcht2(address.toAddress()); + masm.prefetcht2(address.toAddress(masm)); break; case 3: - masm.prefetchw(address.toAddress()); + masm.prefetchw(address.toAddress(masm)); break; default: throw GraalError.shouldNotReachHere("unspported prefetch op " + instr); // ExcludeFromJacocoGeneratedReport diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Unary.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Unary.java index 42aba2d57d63..fe478ecc7e3a 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Unary.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64Unary.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -170,7 +170,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, asRegister(result), input.toAddress()); + opcode.emit(masm, size, asRegister(result), input.toAddress(masm)); } @Override diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64UnaryConsumer.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64UnaryConsumer.java index 722be7d78079..c09ab8f2c54a 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64UnaryConsumer.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64UnaryConsumer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,7 @@ public MemoryOp(AMD64MOp opcode, OperandSize size, AMD64AddressValue value) { @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { - opcode.emit(masm, size, value.toAddress()); + opcode.emit(masm, size, value.toAddress(masm)); } public AMD64MOp getOpcode() { diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ZeroMemoryOp.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ZeroMemoryOp.java index e32e77831cc7..be6337e63d89 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ZeroMemoryOp.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/AMD64ZeroMemoryOp.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,7 @@ public AMD64ZeroMemoryOp(AMD64AddressValue pointer, RegisterValue length) { @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { assert AMD64.rcx.equals(length.getRegister()); - masm.leaq(AMD64.rdi, pointer.toAddress()); + masm.leaq(AMD64.rdi, pointer.toAddress(masm)); masm.xorq(AMD64.rax, AMD64.rax); masm.repStosb(); } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/g1/AMD64G1PreWriteBarrierOp.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/g1/AMD64G1PreWriteBarrierOp.java index 9fa413c96394..9f818da303c8 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/g1/AMD64G1PreWriteBarrierOp.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/g1/AMD64G1PreWriteBarrierOp.java @@ -84,7 +84,7 @@ public AMD64G1PreWriteBarrierOp(Value address, Value expectedObject, Value temp, @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { - AMD64Address storeAddress = ((AMD64AddressValue) this.address).toAddress(); + AMD64Address storeAddress = ((AMD64AddressValue) this.address).toAddress(masm); Register thread = tool.getThread(masm); Register tmp = asRegister(temp); Register previousValue = expectedObject.equals(Value.ILLEGAL) ? 
asRegister(temp2) : asRegister(expectedObject); diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorBinary.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorBinary.java index 141efeee5b87..ec54f07c78f7 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorBinary.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorBinary.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -154,7 +154,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, asRegister(result), asRegister(x), y.toAddress()); + opcode.emit(masm, size, asRegister(result), asRegister(x), y.toAddress(masm)); } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorMove.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorMove.java index 57490ab19e9f..309c18e2f1d2 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorMove.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorMove.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -293,7 +293,7 @@ public VectorLoadOp(AVXSize size, VexMoveOp op, AllocatableValue result, AMD64Ad @Override public void emitMemAccess(AMD64MacroAssembler masm) { - op.emit(masm, size, asRegister(result), address.toAddress()); + op.emit(masm, size, asRegister(result), address.toAddress(masm)); } } @@ -309,7 +309,7 @@ public VectorStoreOp(AVXSize size, VexMoveOp op, AMD64AddressValue address, Allo @Override public void emitMemAccess(AMD64MacroAssembler masm) { - op.emit(masm, size, address.toAddress(), asRegister(input)); + op.emit(masm, size, address.toAddress(masm), asRegister(input)); } } diff --git a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorUnary.java b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorUnary.java index 2bf1e719f62b..f07dd1c73448 100644 --- a/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorUnary.java +++ b/compiler/src/jdk.graal.compiler/src/jdk/graal/compiler/lir/amd64/vector/AMD64VectorUnary.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -178,7 +178,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, asRegister(result), input.toAddress()); + opcode.emit(masm, size, asRegister(result), input.toAddress(masm)); } } @@ -249,7 +249,7 @@ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { if (state != null) { crb.recordImplicitException(masm.position(), state); } - opcode.emit(masm, size, asRegister(result), asRegister(result), input.toAddress()); + opcode.emit(masm, size, asRegister(result), asRegister(result), input.toAddress(masm)); } }