name (string, lengths 1–473k) | code (string, lengths 7–647k) | asm (string, lengths 4–3.39M) | file (string, lengths 8–196)
---|---|---|---
(anonymous namespace)::RISCVInstructionSelector::replacePtrWithInt(llvm::MachineOperand&, llvm::MachineIRBuilder&, llvm::MachineRegisterInfo&)
|
// Rewrite the pointer-typed operand Op as an XLen-wide integer: insert a
// G_PTRTOINT of the pointer, point Op at its result, and immediately run
// instruction selection on the new G_PTRTOINT.
// Returns the result of selecting that instruction.
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB,
                                                 MachineRegisterInfo &MRI) {
  Register PtrReg = Op.getReg();
  assert(MRI.getType(PtrReg).isPointer() && "Operand is not a pointer!");

  // Scalar type as wide as a GPR on this subtarget (32 or 64 bits).
  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  // The converted value is assigned to the GPR bank.
  MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  // Redirect the operand to the integer copy of the pointer.
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rcx, %r15
movq %rdx, %rax
movq %rsi, %r14
movq %rdi, %rbx
movl 0x4(%rsi), %ecx
movq 0x48(%rdi), %rdx
movzbl 0x1d7(%rdx), %esi
shll $0x8, %esi
addq $0x101, %rsi # imm = 0x101
leaq 0x10(%rsp), %rdx
movq %rsi, (%rdx)
xorl %esi, %esi
movl %esi, 0x8(%rdx)
leaq 0x20(%rsp), %r8
movl %ecx, (%r8)
movl %esi, 0x10(%r8)
movq (%rax), %r10
movq $0x0, (%rsp)
movl $0x1, %ecx
movl $0x1, %r9d
movq %rax, %rdi
movl $0x4c, %esi
callq *0x20(%r10)
movq %rdx, %r12
movq 0x20(%rdx), %rax
movl 0x4(%rax), %esi
movq 0x60(%rbx), %rax
movq 0x8(%rax), %rax
movq 0x8(%rax), %rdx
movq %r15, %rdi
callq 0x1d82b1c
movq 0x20(%r12), %rax
movl 0x4(%rax), %esi
movq %r14, %rdi
callq 0x1d531ce
movq (%rbx), %rax
movq %rbx, %rdi
movq %r12, %rsi
callq *0x58(%rax)
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/Target/RISCV/GISel/RISCVInstructionSelector.cpp
|
llvm::RISCVLegalizerInfo::legalizeIntrinsic(llvm::LegalizerHelper&, llvm::MachineInstr&) const
|
// Custom legalization for target intrinsics. Currently only handles
// Intrinsic::vacopy, lowering it to a load of the source va_list pointer
// followed by a store into the destination va_list.
// Returns true if the intrinsic was legalized (and MI erased), false to let
// generic handling take over.
bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
  switch (IntrinsicID) {
  default:
    return false;
  case Intrinsic::vacopy: {
    // vacopy arguments must be legal because of the intrinsic signature.
    // No need to check here.
    MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
    MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
    MachineFunction &MF = *MI.getMF();
    const DataLayout &DL = MIRBuilder.getDataLayout();
    LLVMContext &Ctx = MF.getFunction().getContext();

    // Operand 1 is the destination va_list pointer; operand 2 the source.
    Register DstLst = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(DstLst);

    // Load the source va_list
    Align Alignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
    MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);

    // Store the result in the destination va_list
    MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xd8, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdx, %rdi
callq 0x1d3dfae
movq 0x20(%rbx), %rcx
movl %eax, %eax
shlq $0x5, %rax
movl 0x10(%rcx,%rax), %eax
cmpl $0x169, %eax # imm = 0x169
jne 0x11c0662
movl %eax, 0x34(%rsp)
movq (%r14), %r15
movq 0x18(%r15), %r14
movq %rbx, %rdi
callq 0x1d3de64
movq %rax, %r12
movq %r15, 0x58(%rsp)
movq 0x8(%r15), %rax
movq (%rax), %rdi
callq 0x2a3b2e4
movq %rax, %r13
movq %r12, 0x50(%rsp)
movq (%r12), %rdi
callq 0x2a3b1a0
movq 0x20(%rbx), %rcx
movl 0x24(%rcx), %ecx
xorl %r15d, %r15d
testl %ecx, %ecx
movq %rcx, 0x48(%rsp)
jns 0x11c04e7
andl $0x7fffffff, %ecx # imm = 0x7FFFFFFF
cmpl %ecx, 0x1d0(%r14)
jbe 0x11c04e7
movq 0x1c8(%r14), %rdx
movq (%rdx,%rcx,8), %r12
jmp 0x11c04ea
xorl %r12d, %r12d
movq %r12, %rdi
movq %rax, %rsi
callq 0x15ea3de
movq %r13, %rdi
movq %rax, %rsi
callq 0x29fec04
xorps %xmm0, %xmm0
movaps %xmm0, 0x80(%rsp)
movq %r15, 0x8d(%rsp)
xorl %ebp, %ebp
leaq 0xa0(%rsp), %r15
movaps %xmm0, 0x10(%r15)
movaps %xmm0, (%r15)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rsp)
movaps 0x80(%rsp), %xmm0
movups %xmm0, (%rsp)
movl %ebp, 0x28(%rsp)
movl %ebp, 0x20(%rsp)
movl $0x1, %ecx
movl %ecx, 0x18(%rsp)
movzbl %al, %ecx
movl %ecx, 0x30(%rsp)
movq 0x50(%rsp), %r13
movq %r13, %rdi
movl $0x1, %esi
movq %r12, %rdx
movq %r15, %r8
xorl %r9d, %r9d
callq 0x1d348c4
leaq 0xc0(%rsp), %rcx
movq %r12, (%rcx)
movl %ebp, 0x8(%rcx)
movq %rcx, %rdx
movq 0x20(%rbx), %rcx
movl 0x44(%rcx), %ecx
movl %ecx, (%r15)
movl %ebp, 0x10(%r15)
leaq 0xa0(%rsp), %r15
movq 0x58(%rsp), %r14
movq %r14, %rdi
movl $0x59, %esi
movq %r15, %rcx
movq %rax, %r8
callq 0x15df116
movq %rax, 0x40(%rsp)
movq %rdx, 0x38(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x60(%rsp)
movq %rbp, 0x6d(%rsp)
movaps %xmm0, 0x10(%r15)
movaps %xmm0, (%r15)
movq 0x70(%rsp), %rax
movq %rax, 0x10(%rsp)
movaps 0x60(%rsp), %xmm0
movups %xmm0, (%rsp)
movl %ebp, 0x28(%rsp)
movl %ebp, 0x20(%rsp)
movl $0x1, %eax
movl %eax, 0x18(%rsp)
leaq 0xa0(%rsp), %r15
movq %r13, %rdi
movl $0x2, %esi
movq %r12, %rdx
movl 0x30(%rsp), %ecx
movq %r15, %r8
xorl %r9d, %r9d
callq 0x1d348c4
movq 0x40(%rsp), %rcx
movq %rcx, (%r15)
movq 0x38(%rsp), %rcx
movq %rcx, 0x8(%r15)
movl $0x1, %ecx
movl %ecx, 0x10(%r15)
movq 0x48(%rsp), %rcx
leaq 0xc0(%rsp), %rdx
movl %ecx, (%rdx)
movl %ebp, 0x10(%rdx)
leaq 0xa0(%rsp), %rsi
movq %r14, %rdi
movq %rax, %rcx
callq 0x15df314
movq %rbx, %rdi
callq 0x1d3deba
movl 0x34(%rsp), %eax
cmpl $0x169, %eax # imm = 0x169
sete %al
addq $0xd8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
|
llvm::Pass* llvm::callDefaultCtor<(anonymous namespace)::RISCVMakeCompressibleOpt, true>()
|
Pass *callDefaultCtor() {
  // Default-construct the pass type; the caller takes ownership of the
  // heap allocation (pass-manager convention).
  auto *NewPass = new PassName();
  return NewPass;
}
|
pushq %rax
movl $0x38, %edi
callq 0x7808d0
xorl %ecx, %ecx
movq %rcx, 0x8(%rax)
leaq 0x475a624(%rip), %rdx # 0x592f924
movq %rdx, 0x10(%rax)
movl $0x2, 0x18(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%rax)
movq %rcx, 0x30(%rax)
leaq 0x45eb433(%rip), %rcx # 0x57c0750
movq %rcx, (%rax)
popq %rcx
retq
|
/llvm/PassSupport.h
|
llvm::createRISCVPreRAExpandPseudoPass()
|
FunctionPass *createRISCVPreRAExpandPseudoPass() { return new RISCVPreRAExpandPseudo(); }
|
pushq %rax
movl $0x48, %edi
callq 0x7808d0
xorl %ecx, %ecx
movq %rcx, 0x8(%rax)
leaq 0x4755ec7(%rip), %rdx # 0x592f939
movq %rdx, 0x10(%rax)
movl $0x2, 0x18(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%rax)
movq %rcx, 0x30(%rax)
leaq 0x45e6f01(%rip), %rcx # 0x57c0990
movq %rcx, (%rax)
popq %rcx
retq
|
/Target/RISCV/RISCVExpandPseudoInsts.cpp
|
(anonymous namespace)::RISCVExpandPseudo::expandVMSET_VMCLR(llvm::MachineBasicBlock&, llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>, unsigned int)
|
// Expand a VMSET/VMCLR pseudo into the real instruction given by Opcode,
// feeding the destination register (marked undef) as both source operands.
// Always succeeds.
bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          unsigned Opcode) {
  const MCInstrDesc &Desc = TII->get(Opcode);
  const Register Dst = MBBI->getOperand(0).getReg();
  DebugLoc Loc = MBBI->getDebugLoc();

  // Both sources are the destination itself, explicitly undef.
  auto NewMI = BuildMI(MBB, MBBI, Loc, Desc, Dst);
  NewMI.addReg(Dst, RegState::Undef);
  NewMI.addReg(Dst, RegState::Undef);

  // The pseudo instruction is gone now.
  MBBI->eraseFromParent();
  return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movl %ecx, %r12d
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movq 0x38(%rdx), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0x11daf6f
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x20(%rbx), %rax
movl 0x4(%rax), %ebp
movq 0x40(%r15), %rax
movq 0x8(%rax), %r15
movl %r12d, %r12d
shlq $0x5, %r12
movq 0x10(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x11dafa3
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
subq %r12, %r15
movq 0x8(%rsp), %rsi
movq %rsi, 0x20(%rsp)
testq %rsi, %rsi
je 0x11dafcf
leaq 0x8(%rsp), %r12
leaq 0x20(%rsp), %rdx
movq %r12, %rdi
callq 0x2a759cc
movq $0x0, (%r12)
xorps %xmm0, %xmm0
leaq 0x20(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movq %r14, %rdi
movq %rbx, 0x18(%rsp)
movq %rbx, %rsi
movq %r15, %rcx
movl %ebp, %r8d
callq 0x90f593
movq %rax, %r14
movq %rdx, %r15
xorl %r12d, %r12d
leaq 0x38(%rsp), %r13
movq %r12, 0x8(%r13)
movl $0x10000000, %ebx # imm = 0x10000000
movl %ebx, (%r13)
movl %ebp, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %rax, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movq %r12, 0x8(%r13)
movl %ebx, (%r13)
movl %ebp, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x1d3c22c
leaq 0x20(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x11db061
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x11db075
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0x18(%rsp), %rdi
callq 0x1d3deba
movq 0x10(%rsp), %rsi
testq %rsi, %rsi
je 0x11db093
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/RISCV/RISCVExpandPseudoInsts.cpp
|
llvm::RISCVFrameLowering::canUseAsEpilogue(llvm::MachineBasicBlock const&) const
|
bool RISCVFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
const MachineFunction *MF = MBB.getParent();
MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
if (!RVFI->useSaveRestoreLibCalls(*MF))
return true;
// Using the __riscv_restore libcalls to restore CSRs requires a tail call.
// This means if we still need to continue executing code within this function
// the restore cannot take place in this basic block.
if (MBB.succ_size() > 1)
return false;
MachineBasicBlock *SuccMBB =
MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
// Doing a tail call should be safe if there are no successors, because either
// we have a returning block or the end of the block is unreachable, so the
// restore will be eliminated regardless.
if (!SuccMBB)
return true;
// The successor can only contain a return, since we would effectively be
// replacing the successor with our own tail return at the end of our block.
return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq 0x20(%rsi), %rsi
movq 0x30(%rsi), %rdi
callq 0x11dfe0e
movb $0x1, %bl
testb %al, %al
je 0x11e0af5
movq 0x58(%r14), %rax
movq 0x60(%r14), %rcx
movq %rcx, %rdx
subq %rax, %rdx
movabsq $0x7fffffff0, %rsi # imm = 0x7FFFFFFF0
testq %rsi, %rdx
je 0x11e0aff
xorl %ebx, %ebx
movl %ebx, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
cmpq %rcx, %rax
je 0x11e0b09
movq (%rax), %r14
jmp 0x11e0b19
movq %r14, %rdi
movl $0x1, %esi
callq 0x1cfd2e4
movq %rax, %r14
testq %r14, %r14
je 0x11e0af5
movq %r14, %rdi
callq 0xbc375c
testb %al, %al
je 0x11e0af3
movq 0x38(%r14), %rax
addq $0x30, %r14
cmpq %r14, %rax
je 0x11e0af3
xorl %ecx, %ecx
movq 0x8(%rax), %rax
incq %rcx
cmpq %r14, %rax
jne 0x11e0b39
cmpl $0x1, %ecx
sete %bl
jmp 0x11e0af5
nop
|
/Target/RISCV/RISCVFrameLowering.cpp
|
llvm::detail::DenseMapPair<llvm::GetElementPtrInst*, std::pair<llvm::Value*, llvm::Value*>>* llvm::DenseMapBase<llvm::DenseMap<llvm::GetElementPtrInst*, std::pair<llvm::Value*, llvm::Value*>, llvm::DenseMapInfo<llvm::GetElementPtrInst*, void>, llvm::detail::DenseMapPair<llvm::GetElementPtrInst*, std::pair<llvm::Value*, llvm::Value*>>>, llvm::GetElementPtrInst*, std::pair<llvm::Value*, llvm::Value*>, llvm::DenseMapInfo<llvm::GetElementPtrInst*, void>, llvm::detail::DenseMapPair<llvm::GetElementPtrInst*, std::pair<llvm::Value*, llvm::Value*>>>::InsertIntoBucketImpl<llvm::GetElementPtrInst*>(llvm::GetElementPtrInst* const&, llvm::GetElementPtrInst* const&, llvm::detail::DenseMapPair<llvm::GetElementPtrInst*, std::pair<llvm::Value*, llvm::Value*>>*)
|
// Prepare \p TheBucket for inserting \p Key: grow/rehash the table when it
// is too full (or clogged with tombstones), re-locating the bucket via
// \p Lookup, then update the entry/tombstone bookkeeping.
// Returns the (possibly relocated) bucket the caller should write into.
BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                              BucketT *TheBucket) {
  incrementEpoch();

  // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
  // the buckets are empty (meaning that many are filled with tombstones),
  // grow the table.
  //
  // The later case is tricky.  For example, if we had one empty bucket with
  // tons of tombstones, failing lookups (e.g. for insertion) would have to
  // probe almost the entire table until it found the empty bucket.  If the
  // table completely filled with tombstones, no lookup would ever succeed,
  // causing infinite loops in lookup.
  unsigned NewNumEntries = getNumEntries() + 1;
  unsigned NumBuckets = getNumBuckets();
  if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
    this->grow(NumBuckets * 2);
    LookupBucketFor(Lookup, TheBucket);
    NumBuckets = getNumBuckets();
  } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                           NumBuckets/8)) {
    this->grow(NumBuckets);
    LookupBucketFor(Lookup, TheBucket);
  }
  assert(TheBucket);

  // Only update the state after we've grown our bucket space appropriately
  // so that when growing buckets we have self-consistent entry count.
  incrementNumEntries();

  // If we are writing over a tombstone, remember this.
  const KeyT EmptyKey = getEmptyKey();
  if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
    decrementNumTombstones();

  return TheBucket;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq %rcx, %rax
movq %rdx, %r14
movq %rdi, %rbx
movl 0x8(%rdi), %ecx
movl 0x10(%rdi), %esi
leal 0x4(,%rcx,4), %edx
leal (%rsi,%rsi,2), %edi
cmpl %edi, %edx
jae 0x11e2f73
notl %ecx
addl %esi, %ecx
subl 0xc(%rbx), %ecx
movl %esi, %edx
shrl $0x3, %edx
cmpl %edx, %ecx
jbe 0x11e2f75
incl 0x8(%rbx)
cmpq $-0x1000, (%rax) # imm = 0xF000
je 0x11e2f69
decl 0xc(%rbx)
addq $0x10, %rsp
popq %rbx
popq %r14
popq %r15
retq
addl %esi, %esi
movq %rbx, %rdi
callq 0x11e2f96
leaq 0x8(%rsp), %r15
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x11e2e94
movq (%r15), %rax
jmp 0x11e2f5a
nop
|
/llvm/ADT/DenseMap.h
|
llvm::RISCVDAGToDAGISel::selectSHXADDOp(llvm::SDValue, unsigned int, llvm::SDValue&)
|
// Match shift+mask patterns that can feed a SHXADD instruction whose shift
// amount is \p ShAmt. On success, \p Val is set to a freshly created
// SRLI/SRLIW machine node producing the X operand, and true is returned.
bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
                                       SDValue &Val) {
  // First pattern family: (and (shl/srl X, C2), C1).
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    SDValue N0 = N.getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      uint64_t Mask = N.getConstantOperandVal(1);
      unsigned C2 = N0.getConstantOperandVal(1);

      unsigned XLen = Subtarget->getXLen();
      // Drop mask bits the inner shift already forces to zero, so the
      // shifted-mask test below only sees meaningful bits.
      if (LeftShift)
        Mask &= maskTrailingZeros<uint64_t>(C2);
      else
        Mask &= maskTrailingOnes<uint64_t>(XLen - C2);

      // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
      // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
      // followed by a SHXADD with c3 for the X amount.
      if (isShiftedMask_64(Mask)) {
        unsigned Leading = XLen - llvm::bit_width(Mask);
        unsigned Trailing = llvm::countr_zero(Mask);
        if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLI, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
                        0);
          return true;
        }
        // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
        // leading zeros and c3 trailing zeros. We can use an SRLI by C3
        // followed by a SHXADD using c3 for the X amount.
        if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(
              CurDAG->getMachineNode(
                  RISCV::SRLI, DL, VT, N0.getOperand(0),
                  CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
              0);
          return true;
        }
      }
    }
  }

  // Second pattern family: (shl/srl (and X, Mask), C1).
  bool LeftShift = N.getOpcode() == ISD::SHL;
  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N.getOperand(1))) {
    SDValue N0 = N.getOperand(0);
    // The AND must be single-use, or it would have to be materialized anyway.
    if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      uint64_t Mask = N0.getConstantOperandVal(1);
      if (isShiftedMask_64(Mask)) {
        unsigned C1 = N.getConstantOperandVal(1);
        unsigned XLen = Subtarget->getXLen();
        unsigned Leading = XLen - llvm::bit_width(Mask);
        unsigned Trailing = llvm::countr_zero(Mask);
        // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
        // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
        if (LeftShift && Leading == 32 && Trailing > 0 &&
            (Trailing + C1) == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing, DL, VT)),
                        0);
          return true;
        }
        // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
        // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
        if (!LeftShift && Leading == 32 && Trailing > C1 &&
            (Trailing - C1) == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing, DL, VT)),
                        0);
          return true;
        }
      }
    }
  }

  return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc8, %rsp
movl %ecx, %r12d
movl %edx, %r9d
movq %rsi, %r15
movq %rdi, %r14
cmpl $0xb9, 0x18(%rsi)
jne 0x11f379d
movq 0x28(%r15), %rax
movq 0x28(%rax), %rcx
movl 0x18(%rcx), %edx
cmpl $0x23, %edx
je 0x11f36c3
cmpl $0xb, %edx
jne 0x11f379d
movq (%rax), %rbx
movl 0x18(%rbx), %eax
movl %eax, %edx
orl $0x2, %edx
cmpl $0xbf, %edx
jne 0x11f379d
movq 0x28(%rbx), %rdx
movq 0x28(%rdx), %rdx
movl 0x18(%rdx), %esi
cmpl $0x23, %esi
je 0x11f36f3
cmpl $0xb, %esi
jne 0x11f379d
movq 0x58(%rcx), %rcx
cmpl $0x41, 0x20(%rcx)
jb 0x11f3703
movq 0x18(%rcx), %rcx
jmp 0x11f3707
addq $0x18, %rcx
movq 0x58(%rdx), %rsi
cmpl $0x41, 0x20(%rsi)
jb 0x11f3717
movq 0x18(%rsi), %rsi
jmp 0x11f371b
addq $0x18, %rsi
movq 0x108(%r14), %rdx
movzbl 0x1d7(%rdx), %edi
movl %edi, %r13d
shll $0x5, %r13d
addl $0x20, %r13d
movq (%rcx), %rdx
movq (%rsi), %rbp
cmpl $0xbd, %eax
jne 0x11f375f
movl %ebp, %ecx
negb %cl
movq $-0x1, %rsi
shrq %cl, %rsi
notq %rsi
xorl %ecx, %ecx
cmpl $0x1, %ebp
sbbq %rcx, %rcx
orq %rsi, %rcx
jmp 0x11f3784
movl $0xffffffc0, %esi # imm = 0xFFFFFFC0
testb %dil, %dil
movl $0xffffffe0, %ecx # imm = 0xFFFFFFE0
cmovnel %esi, %ecx
addl %ebp, %ecx
movq $-0x1, %rsi
shrq %cl, %rsi
xorl %ecx, %ecx
cmpl %ebp, %r13d
cmovneq %rsi, %rcx
andq %rdx, %rcx
je 0x11f379d
leaq -0x1(%rcx), %rdx
orq %rcx, %rdx
leaq 0x1(%rdx), %rsi
testq %rdx, %rsi
je 0x11f384b
movl 0x18(%r15), %ebx
movl %ebx, %eax
orl $0x2, %eax
cmpl $0xbf, %eax
jne 0x11f3837
movq 0x28(%r15), %rax
movq 0x28(%rax), %rcx
movl 0x18(%rcx), %ecx
cmpl $0x23, %ecx
je 0x11f37c6
cmpl $0xb, %ecx
jne 0x11f3837
movq (%rax), %r13
cmpl $0xb9, 0x18(%r13)
jne 0x11f3837
movl %r9d, %ebp
movq %r8, 0x38(%rsp)
movl 0x8(%rax), %edx
movq %r13, %rdi
movl $0x1, %esi
callq 0x179219e
testb %al, %al
je 0x11f3837
movq 0x28(%r13), %rax
movq 0x28(%rax), %rax
movl 0x18(%rax), %ecx
cmpl $0x23, %ecx
je 0x11f3804
cmpl $0xb, %ecx
jne 0x11f3837
movq 0x58(%rax), %rax
cmpl $0x41, 0x20(%rax)
jb 0x11f3814
movq 0x18(%rax), %rax
jmp 0x11f3818
addq $0x18, %rax
movl %ebp, %r8d
movq (%rax), %rax
testq %rax, %rax
je 0x11f3837
leaq -0x1(%rax), %rcx
orq %rax, %rcx
leaq 0x1(%rcx), %rdx
testq %rcx, %rdx
je 0x11f3a2f
xorl %eax, %eax
addq $0xc8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
bsrq %rcx, %rdx
xorl $-0x40, %edx
addl $0x41, %edx
bsfq %rcx, %r10
cmpl %r12d, %r10d
jne 0x11f3948
cmpl $0xbd, %eax
jne 0x11f3948
cmpl %ebp, %r10d
jbe 0x11f3948
cmpl %edx, %r13d
jne 0x11f3948
movq %r8, 0x38(%rsp)
movq 0x48(%r15), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0x11f38ad
leaq 0x28(%rsp), %rdi
movl $0x1, %edx
movl %r9d, %r12d
movq %r10, %r13
callq 0x2a757d8
movq %r13, %r10
movl %r12d, %r9d
movl 0x44(%r15), %eax
leaq 0x28(%rsp), %r12
movl %eax, 0x8(%r12)
movq 0x30(%r15), %rax
movl %r9d, %ecx
shlq $0x4, %rcx
movq 0x8(%rax,%rcx), %r15
movq 0x38(%r14), %r14
movq 0x28(%rbx), %rdx
movups (%rdx), %xmm0
movl (%rax,%rcx), %ebx
movaps %xmm0, 0xb0(%rsp)
subl %ebp, %r10d
xorl %ebp, %ebp
movl %ebp, (%rsp)
movq %r14, %rdi
movq %r10, %rsi
movq %r12, %rdx
movl %ebx, %ecx
movq %r15, %r8
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x70(%rsp)
movl %edx, 0x78(%rsp)
movups 0x70(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movaps 0xb0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x32b1, %esi # imm = 0x32B1
movq %r12, %rdx
movl %ebx, %ecx
movq %r15, %r8
callq 0x178f4ac
movq 0x38(%rsp), %r13
movq %rax, (%r13)
movl %ebp, 0x8(%r13)
jmp 0x11f3c18
cmpl %r12d, %r10d
jne 0x11f379d
subl %edx, %r13d
cmpl %ebp, %r13d
jne 0x11f379d
cmpl $0xbd, %eax
je 0x11f379d
movq %r8, 0x38(%rsp)
movq 0x48(%r15), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0x11f3996
leaq 0x28(%rsp), %rdi
movl $0x1, %edx
movl %r9d, %ebp
movq %r10, %r12
callq 0x2a757d8
movq %r12, %r10
movl %ebp, %r9d
movl 0x44(%r15), %eax
leaq 0x28(%rsp), %r12
movl %eax, 0x8(%r12)
movq 0x30(%r15), %rax
movl %r9d, %ecx
shlq $0x4, %rcx
movq 0x8(%rax,%rcx), %r15
movq 0x38(%r14), %r14
movq 0x28(%rbx), %rdx
movups (%rdx), %xmm0
movl (%rax,%rcx), %ebp
movaps %xmm0, 0xa0(%rsp)
addl %r10d, %r13d
xorl %ebx, %ebx
movl %ebx, (%rsp)
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rdx
movl %ebp, %ecx
movq %r15, %r8
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x60(%rsp)
movl %edx, 0x68(%rsp)
movups 0x60(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movaps 0xa0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x32b1, %esi # imm = 0x32B1
movq %r12, %rdx
movl %ebp, %ecx
movq %r15, %r8
callq 0x178f4ac
movq 0x38(%rsp), %rcx
movq %rax, (%rcx)
movl %ebx, 0x8(%rcx)
jmp 0x11f3c18
movq 0x28(%r15), %rcx
movq 0x28(%rcx), %rcx
movq 0x58(%rcx), %rcx
cmpl $0x41, 0x20(%rcx)
jb 0x11f3a47
movq 0x18(%rcx), %rcx
jmp 0x11f3a4b
addq $0x18, %rcx
movl (%rcx), %ecx
movq 0x108(%r14), %rdx
movzbl 0x1d7(%rdx), %esi
shll $0x5, %esi
bsrq %rax, %rdi
xorl $0x3f, %edi
bsfq %rax, %r9
movl %r9d, %edx
addl %edi, %esi
addl $-0x20, %esi
cmpl $0xbd, %ebx
jne 0x11f3b35
testq %r9, %r9
je 0x11f3b35
leal (%rcx,%rdx), %eax
cmpl %r12d, %eax
jne 0x11f3b35
cmpl $0x20, %esi
jne 0x11f3b35
movq 0x48(%r15), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0x11f3ac1
leaq 0x28(%rsp), %rdi
movl $0x1, %edx
movq %r9, %rbx
callq 0x2a757d8
movq %rbx, %r9
movl %ebp, %r8d
movl 0x44(%r15), %eax
leaq 0x28(%rsp), %r12
movl %eax, 0x8(%r12)
movq 0x30(%r15), %rax
movl %r8d, %ecx
shlq $0x4, %rcx
movq 0x8(%rax,%rcx), %r15
movq 0x38(%r14), %r14
movq 0x28(%r13), %rdx
movups (%rdx), %xmm0
movl (%rax,%rcx), %r13d
movaps %xmm0, 0x90(%rsp)
xorl %ebx, %ebx
movl %ebx, (%rsp)
movq %r14, %rdi
movq %r9, %rsi
movq %r12, %rdx
movl %r13d, %ecx
movq %r15, %r8
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x50(%rsp)
movl %edx, 0x58(%rsp)
movups 0x50(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movaps 0x90(%rsp), %xmm0
jmp 0x11f3bf2
xorl %eax, %eax
subl %ecx, %edx
jbe 0x11f3839
cmpl $0xbd, %ebx
je 0x11f3839
cmpl %r12d, %edx
jne 0x11f3839
cmpl $0x20, %esi
jne 0x11f3839
movq 0x48(%r15), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0x11f3b83
leaq 0x28(%rsp), %rdi
movl $0x1, %edx
movq %r9, %rbx
callq 0x2a757d8
movq %rbx, %r9
movl %ebp, %r8d
movl 0x44(%r15), %eax
leaq 0x28(%rsp), %r12
movl %eax, 0x8(%r12)
movq 0x30(%r15), %rax
movl %r8d, %ecx
shlq $0x4, %rcx
movq 0x8(%rax,%rcx), %r15
movq 0x38(%r14), %r14
movq 0x28(%r13), %rdx
movups (%rdx), %xmm0
movl (%rax,%rcx), %r13d
movaps %xmm0, 0x80(%rsp)
xorl %ebx, %ebx
movl %ebx, (%rsp)
movq %r14, %rdi
movq %r9, %rsi
movq %r12, %rdx
movl %r13d, %ecx
movq %r15, %r8
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x40(%rsp)
movl %edx, 0x48(%rsp)
movups 0x40(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movaps 0x80(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x32b2, %esi # imm = 0x32B2
movq %r12, %rdx
movl %r13d, %ecx
movq %r15, %r8
callq 0x178f4ac
movq 0x38(%rsp), %rbp
movq %rax, (%rbp)
movl %ebx, 0x8(%rbp)
movq (%r12), %rsi
testq %rsi, %rsi
je 0x11f3c2b
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movb $0x1, %al
jmp 0x11f3839
|
/Target/RISCV/RISCVISelDAGToDAG.cpp
|
llvm::RISCVDAGToDAGISel::selectSimm5Shl2(llvm::SDValue, llvm::SDValue&, llvm::SDValue&)
|
// Try to decompose a constant N into Simm5 << Shl2 with Simm5 a signed
// 5-bit immediate and Shl2 in [0, 3]. Returns false if N is not a constant
// or cannot be encoded that way.
bool RISCVDAGToDAGISel::selectSimm5Shl2(SDValue N, SDValue &Simm5,
                                        SDValue &Shl2) {
  auto *C = dyn_cast<ConstantSDNode>(N);
  if (!C)
    return false;

  int64_t Offset = C->getSExtValue();
  // Find the smallest shift (0..3) such that Offset >> Shift fits in a
  // signed 5-bit field and no set bits are lost by the shift.
  int64_t Shift = 0;
  while (Shift < 4) {
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
      break;
    ++Shift;
  }

  // Constant cannot be encoded.
  if (Shift == 4)
    return false;

  EVT Ty = N->getValueType(0);
  Simm5 = CurDAG->getTargetConstant(Offset >> Shift, SDLoc(N), Ty);
  Shl2 = CurDAG->getTargetConstant(Shift, SDLoc(N), Ty);
  return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rcx, %r9
movq %rsi, %r14
movl 0x18(%rsi), %eax
cmpl $0x23, %eax
je 0x11f43f3
cmpl $0xb, %eax
jne 0x11f43f8
movq %r14, %rbx
jmp 0x11f43fa
xorl %ebx, %ebx
testq %rbx, %rbx
je 0x11f4423
movq 0x58(%rbx), %rcx
movl 0x20(%rcx), %eax
cmpl $0x40, %eax
ja 0x11f4428
movq 0x18(%rcx), %rdx
movl %eax, %ecx
negb %cl
shlq %cl, %rdx
sarq %cl, %rdx
xorl %ebp, %ebp
testl %eax, %eax
cmovneq %rdx, %rbp
jmp 0x11f442f
jmp 0x11f458b
movq 0x18(%rcx), %rax
movq (%rax), %rbp
xorl %r13d, %r13d
movq %rbp, %rax
movl %r13d, %ecx
sarq %cl, %rax
addq $0x10, %rax
cmpq $0x1f, %rax
ja 0x11f445a
movq $-0x1, %rax
movl %r13d, %ecx
shlq %cl, %rax
notq %rax
testq %rax, %rbp
je 0x11f4469
incq %r13
cmpq $0x4, %r13
jne 0x11f4432
movl $0x4, %r13d
cmpq $0x4, %r13
setne %cl
je 0x11f458b
movq %r9, 0x30(%rsp)
movq %r8, 0x40(%rsp)
movq 0x30(%r14), %rax
movb (%rax), %dl
movq 0x8(%rax), %rax
movq %rax, 0x28(%rsp)
movl %r13d, %ecx
sarq %cl, %rbp
movq %rdi, 0x38(%rsp)
movq 0x38(%rdi), %r15
movq 0x48(%r14), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0x11f44c1
leaq 0x10(%rsp), %rdi
movl %edx, %r12d
movl $0x1, %edx
callq 0x2a757d8
movl %r12d, %edx
movl 0x44(%r14), %eax
leaq 0x10(%rsp), %r12
movl %eax, 0x8(%r12)
movl $0x0, (%rsp)
movzbl %dl, %ecx
movq %r15, %rdi
movq %rbp, %rsi
movq %r12, %rdx
movl %ecx, 0x24(%rsp)
movq 0x28(%rsp), %rbp
movq %rbp, %r8
movl $0x1, %r9d
callq 0x17645fe
movq 0x30(%rsp), %rcx
movq %rax, (%rcx)
movl %edx, 0x8(%rcx)
movq (%r12), %rsi
testq %rsi, %rsi
je 0x11f4517
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movq 0x38(%rsp), %rax
movq 0x38(%rax), %r15
movq 0x48(%r14), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0x11f453d
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
leaq 0x10(%rsp), %r14
movl %eax, 0x8(%r14)
movl $0x0, (%rsp)
movq %r15, %rdi
movq %r13, %rsi
movq %r14, %rdx
movl 0x24(%rsp), %ecx
movq %rbp, %r8
movl $0x1, %r9d
callq 0x17645fe
movq 0x40(%rsp), %rcx
movq %rax, (%rcx)
movl %edx, 0x8(%rcx)
movq (%r14), %rsi
testq %rsi, %rsi
je 0x11f4589
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movb $0x1, %cl
testq %rbx, %rbx
setne %al
andb %cl, %al
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/RISCV/RISCVISelDAGToDAG.cpp
|
llvm::cl::opt<int, false, llvm::cl::parser<int>>::opt<char [23], llvm::cl::OptionHidden, llvm::cl::desc, llvm::cl::initializer<int>>(char const (&) [23], llvm::cl::OptionHidden const&, llvm::cl::desc const&, llvm::cl::initializer<int> const&)
|
// Construct a command-line option from a pack of modifiers (name, hiding
// mode, description, initial value, ...). Starts as an Optional, NotHidden
// option; apply() folds each modifier into *this and done() finalizes
// (registers) the option.
explicit opt(const Mods &... Ms)
    : Option(llvm::cl::Optional, NotHidden), Parser(*this) {
  apply(this, Ms...);
  done();
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %r8, %rbx
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %r14
xorl %esi, %esi
xorl %edx, %edx
callq 0x7fca88
movl $0x0, 0x80(%r14)
xorps %xmm0, %xmm0
movups %xmm0, 0x88(%r14)
movb $0x0, 0x94(%r14)
leaq 0x455d6e8(%rip), %rax # 0x5757040
movq %rax, 0x88(%r14)
leaq 0x46fe36a(%rip), %rax # 0x58f7cd0
addq $0x10, %rax
movq %rax, (%r14)
leaq 0x46fe114(%rip), %rax # 0x58f7a88
addq $0x10, %rax
movq %rax, 0x98(%r14)
movups %xmm0, 0xa0(%r14)
leaq -0x8ddb6c(%rip), %rax # 0x91be22
movq %rax, 0xb8(%r14)
leaq -0x8ddb78(%rip), %rax # 0x91be24
movq %rax, 0xb0(%r14)
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movq %rbx, %r8
callq 0x12557d3
movq %r14, %rdi
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0x2b1e814
|
/llvm/Support/CommandLine.h
|
llvm::RISCVTargetLowering::isLegalICmpImmediate(long) const
|
// Checks whether the 64-bit value x fits in N bits when interpreted as a
// signed integer. N is the enclosing template parameter (declared outside
// this snippet); N >= 64 always fits.
constexpr bool isInt(int64_t x) {
  if constexpr (N == 0)
    return 0 == x;
  // For the byte-multiple widths a round-trip cast is the cheapest test.
  if constexpr (N == 8)
    return static_cast<int8_t>(x) == x;
  if constexpr (N == 16)
    return static_cast<int16_t>(x) == x;
  if constexpr (N == 32)
    return static_cast<int32_t>(x) == x;
  // General case: range check against [-2^(N-1), 2^(N-1)).
  if constexpr (N < 64)
    return -(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1));
  (void)x; // MSVC v19.25 warns that x is unused.
  return true;
}
|
addq $0x800, %rsi # imm = 0x800
cmpq $0x1000, %rsi # imm = 0x1000
setb %al
retq
|
/llvm/Support/MathExtras.h
|
lowerConstant(llvm::SDValue, llvm::SelectionDAG&, llvm::RISCVSubtarget const&)
|
// Decide whether an i64 constant should be materialized by isel (return Op
// unchanged) or lowered to a constant-pool load (return empty SDValue()),
// based on the cost of the instruction sequence needed to build it.
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
                             const RISCVSubtarget &Subtarget) {
  assert(Op.getValueType() == MVT::i64 && "Unexpected VT");
  int64_t Imm = cast<ConstantSDNode>(Op)->getSExtValue();

  // All simm32 constants should be handled by isel.
  // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
  // this check redundant, but small immediates are common so this check
  // should have better compile time.
  if (isInt<32>(Imm))
    return Op;

  // We only need to cost the immediate, if constant pool lowering is enabled.
  if (!Subtarget.useConstantPoolForLargeInts())
    return Op;

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, Subtarget);
  if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
    return Op;

  // Optimizations below are disabled for opt size. If we're optimizing for
  // size, use a constant pool.
  if (DAG.shouldOptForSize())
    return SDValue();

  // Special case. See if we can build the constant as (ADD (SLLI X, C), X) do
  // that if it will avoid a constant pool.
  // It will require an extra temporary register though.
  // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
  // low and high 32 bits are the same and bit 31 and 63 are set.
  unsigned ShiftAmt, AddOpc;
  RISCVMatInt::InstSeq SeqLo =
      RISCVMatInt::generateTwoRegInstSeq(Imm, Subtarget, ShiftAmt, AddOpc);
  // The two-register sequence costs its length plus the SLLI and ADD.
  if (!SeqLo.empty() && (SeqLo.size() + 2) <= Subtarget.getMaxBuildIntsCost())
    return Op;

  return SDValue();
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rcx, %r15
movq %rdx, %r12
movl %esi, %ebx
movq %rdi, %r14
movq 0x58(%rdi), %rcx
movl 0x20(%rcx), %eax
cmpl $0x40, %eax
ja 0x1206a21
movq 0x18(%rcx), %rdx
movl %eax, %ecx
negb %cl
shlq %cl, %rdx
sarq %cl, %rdx
xorl %r13d, %r13d
testl %eax, %eax
cmovneq %rdx, %r13
jmp 0x1206a28
movq 0x18(%rcx), %rax
movq (%rax), %r13
movslq %r13d, %rax
cmpq %r13, %rax
je 0x1206adc
movq %r15, %rdi
callq 0x119fe6a
testb %al, %al
je 0x1206adc
leaq 0x58(%rsp), %rbp
movq %rbp, %rdi
movq %r13, %rsi
movq %r15, %rdx
callq 0x19aacfc
movl 0x8(%rbp), %ebp
movq %r15, %rdi
callq 0x119fe74
cmpl %eax, %ebp
jbe 0x1206ac9
movq %r12, %rdi
callq 0x1763398
testb %al, %al
je 0x1206a79
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp 0x1206ac9
leaq 0x8(%rsp), %r12
leaq 0x4(%rsp), %rcx
movq %rsp, %r8
movq %r12, %rdi
movq %r13, %rsi
movq %r15, %rdx
callq 0x19abef8
movl 0x8(%r12), %r12d
testq %r12, %r12
je 0x1206ab1
addq $0x2, %r12
movq %r15, %rdi
callq 0x119fe74
movl %eax, %eax
cmpq %rax, %r12
jbe 0x1206ab6
xorl %ebx, %ebx
xorl %r14d, %r14d
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x1206ac9
callq 0x780910
leaq 0x68(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x1206adc
callq 0x780910
movq %r14, %rax
movl %ebx, %edx
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/RISCV/RISCVISelLowering.cpp
|
llvm::RISCVTargetLowering::lowerVectorMaskExt(llvm::SDValue, llvm::SelectionDAG&, long) const
|
// Lower an extension of an i1 mask vector: active lanes become ExtTrueVal
// (e.g. 1 for zext, -1 for sext), inactive lanes become 0.
SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                                                int64_t ExtTrueVal) const {
  SDLoc DL(Op);
  MVT VecVT = Op.getSimpleValueType();
  SDValue Src = Op.getOperand(0);
  // Only custom-lower extensions from mask types
  assert(Src.getValueType().isVector() &&
         Src.getValueType().getVectorElementType() == MVT::i1);

  if (VecVT.isScalableVector()) {
    // Scalable vectors: a VSELECT between two splatted constants suffices.
    SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
    SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
    return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
  }

  // Fixed-length vectors: compute in an equivalent scalable container type
  // and convert back at the end.
  MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
  MVT I1ContainerVT =
      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());

  SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);

  SDValue VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;

  MVT XLenVT = Subtarget.getXLenVT();
  SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
  SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);

  // Splat the scalar 0 and ExtTrueVal across the container type under VL.
  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                          DAG.getUNDEF(ContainerVT), SplatZero, VL);
  SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                             DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
  // Merge the two splats under the mask CC.
  SDValue Select =
      DAG.getNode(RISCVISD::VMERGE_VL, DL, ContainerVT, CC, SplatTrueVal,
                  SplatZero, DAG.getUNDEF(ContainerVT), VL);

  return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x178, %rsp # imm = 0x178
movq %r8, 0x60(%rsp)
movq %rcx, %rbx
movl %edx, %ebp
movq %rsi, %r14
movq %rdi, %r15
movq 0x48(%rsi), %rsi
movq %rsi, 0x68(%rsp)
testq %rsi, %rsi
je 0x120ca2a
leaq 0x68(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
movl %eax, 0x70(%rsp)
movq 0x28(%r14), %rcx
movq 0x30(%r14), %rax
movl %ebp, %edx
shlq $0x4, %rdx
movb (%rax,%rdx), %al
movq (%rcx), %rbp
movl 0x8(%rcx), %r13d
leal 0x77(%rax), %edx
cmpb $0x34, %dl
ja 0x120cb21
movl 0xc(%rcx), %ecx
movl %ecx, 0x5c(%rsp)
xorl %ecx, %ecx
movl %ecx, (%rsp)
movzbl %al, %r14d
leaq 0x68(%rsp), %rdx
movq %rbx, %rdi
xorl %esi, %esi
movl %r14d, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r12
movl %edx, %r15d
xorl %eax, %eax
movl %eax, (%rsp)
movq %rbx, %rdi
movq 0x60(%rsp), %rsi
leaq 0x68(%rsp), %rdx
movl %r14d, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rbp, 0xa8(%rsp)
movl %r13d, 0xb0(%rsp)
movl 0x5c(%rsp), %ecx
movl %ecx, 0xb4(%rsp)
movq %rax, 0x148(%rsp)
movl %edx, 0x150(%rsp)
movq %r12, 0x138(%rsp)
movl %r15d, 0x140(%rsp)
movups 0x138(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0x148(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xa8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0xcd, %esi
leaq 0x68(%rsp), %rdx
movl %r14d, %ecx
xorl %r8d, %r8d
callq 0x1764b02
jmp 0x120cdc7
movq 0x4e0f8(%r15), %rsi
movzbl %al, %r12d
movl %r12d, %edi
movl %r12d, 0x5c(%rsp)
callq 0x11fefaa
movq %r15, %r14
movzbl %al, %r15d
leaq 0x2190259(%rip), %rax # 0x339cda0
movzwl -0x2(%rax,%r15,2), %eax
leal 0x77(%r15), %ecx
xorl %esi, %esi
cmpb $0x35, %cl
setb %sil
shlq $0x20, %rsi
orq %rax, %rsi
movl $0x2, %edi
callq 0x920260
movzbl %al, %edi
xorl %esi, %esi
movq %rbp, %rdx
movl %r13d, %ecx
movq %rbx, %r8
callq 0x1200379
movq %rax, 0x98(%rsp)
movl %edx, 0x7c(%rsp)
movq 0x4e0f8(%r14), %r9
movq %r14, %r13
movq %r14, 0xa0(%rsp)
leaq 0x158(%rsp), %r14
leaq 0x68(%rsp), %rcx
movq %r14, %rdi
movl %r12d, %esi
movl %r15d, %edx
movq %rcx, %r12
movq %rbx, %r8
callq 0x1200493
movups 0x10(%r14), %xmm0
movaps %xmm0, 0x80(%rsp)
movq 0x4e0f8(%r13), %rax
movzbl 0x1d7(%rax), %ebp
xorl %eax, %eax
movl %eax, (%rsp)
addl $0x7, %ebp
movq %rbx, %rdi
xorl %esi, %esi
movq %r12, %rdx
movl %ebp, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r14
movl %edx, %r13d
xorl %eax, %eax
movl %eax, (%rsp)
movq %rbx, %rdi
movq 0x60(%rsp), %rsi
movq %r12, %rdx
movl %ebp, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, 0x60(%rsp)
movl %edx, %ebp
movq %rbx, %rdi
movl %r15d, %esi
xorl %edx, %edx
callq 0x9fbc02
movq %rax, 0x128(%rsp)
movl %edx, 0x130(%rsp)
movq %r14, 0x118(%rsp)
movl %r13d, 0x120(%rsp)
movaps 0x80(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0x118(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x128(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x222, %esi # imm = 0x222
movq %r12, %rdx
movl %r15d, %ecx
xorl %r8d, %r8d
callq 0x1764b02
movq %rax, %r13
movl %edx, %r14d
movq %rbx, %rdi
movl %r15d, %esi
xorl %edx, %edx
callq 0x9fbc02
movq %rax, 0x108(%rsp)
movl %edx, 0x110(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0xf8(%rsp)
movl %ebp, 0x100(%rsp)
movaps 0x80(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0xf8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x108(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x222, %esi # imm = 0x222
movq %r12, %rdx
movl %r15d, %ecx
xorl %r8d, %r8d
callq 0x1764b02
movq 0x98(%rsp), %rcx
movq %rcx, 0xe8(%rsp)
movl 0x7c(%rsp), %ecx
movl %ecx, 0xf0(%rsp)
movq %rax, 0xd8(%rsp)
movl %edx, 0xe0(%rsp)
movq %r13, 0xc8(%rsp)
movl %r14d, 0xd0(%rsp)
movq %rbx, %rdi
movl %r15d, %esi
xorl %edx, %edx
callq 0x9fbc02
movq %rax, 0xb8(%rsp)
movl %edx, 0xc0(%rsp)
movaps 0x80(%rsp), %xmm0
movups %xmm0, 0x40(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, 0x30(%rsp)
movups 0xc8(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0xd8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xe8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x297, %esi # imm = 0x297
movq %r12, %rdx
movl %r15d, %ecx
xorl %r8d, %r8d
callq 0x1780602
movl %edx, %ecx
movq 0xa0(%rsp), %rdx
movq 0x4e0f8(%rdx), %r9
movl 0x5c(%rsp), %edi
xorl %esi, %esi
movq %rax, %rdx
movq %rbx, %r8
callq 0x120053c
movq %rax, %rbx
movl %edx, %ebp
movq 0x68(%rsp), %rsi
testq %rsi, %rsi
je 0x120cde0
leaq 0x68(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rax
movl %ebp, %edx
addq $0x178, %rsp # imm = 0x178
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/RISCV/RISCVISelLowering.cpp
|
llvm::RISCVTargetLowering::lowerStrictFPExtendOrRoundLike(llvm::SDValue, llvm::SelectionDAG&) const
|
// Lower STRICT_FP_EXTEND / STRICT_FP_ROUND on vectors to the VL-predicated
// RVV nodes, threading the strict-FP chain through each step. RVV fp
// conversions can only change element width by a factor of two, so
// f16/bf16 <-> f64 goes through an intermediate f32 conversion (round-to-odd
// for the narrowing direction).
SDValue
RISCVTargetLowering::lowerStrictFPExtendOrRoundLike(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Src = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  MVT SrcVT = Src.getSimpleValueType();
  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    // Fixed-length case: compute the source's scalable container and derive
    // the result container from it by swapping in the result element type.
    MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
    ContainerVT =
        SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
    Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
  }
  auto [Mask, VL] = getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
  // RVV can only widen/truncate fp to types double/half the size as the source.
  if ((VT.getVectorElementType() == MVT::f64 &&
       (SrcVT.getVectorElementType() == MVT::f16 ||
        SrcVT.getVectorElementType() == MVT::bf16)) ||
      ((VT.getVectorElementType() == MVT::f16 ||
        VT.getVectorElementType() == MVT::bf16) &&
       SrcVT.getVectorElementType() == MVT::f64)) {
    // For double rounding, the intermediate rounding should be round-to-odd.
    unsigned InterConvOpc = Op.getOpcode() == ISD::STRICT_FP_EXTEND
                                ? RISCVISD::STRICT_FP_EXTEND_VL
                                : RISCVISD::STRICT_VFNCVT_ROD_VL;
    MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
    Src = DAG.getNode(InterConvOpc, DL, DAG.getVTList(InterVT, MVT::Other),
                      Chain, Src, Mask, VL);
    // Continue the strict-FP chain from the intermediate conversion.
    Chain = Src.getValue(1);
  }
  unsigned ConvOpc = Op.getOpcode() == ISD::STRICT_FP_EXTEND
                         ? RISCVISD::STRICT_FP_EXTEND_VL
                         : RISCVISD::STRICT_FP_ROUND_VL;
  SDValue Res = DAG.getNode(ConvOpc, DL, DAG.getVTList(ContainerVT, MVT::Other),
                            Chain, Src, Mask, VL);
  if (VT.isFixedLengthVector()) {
    // StrictFP operations have two result values. Their lowered result should
    // have same result count.
    SDValue SubVec = convertFromScalableVector(VT, Res, DAG, Subtarget);
    Res = DAG.getMergeValues({SubVec, Res.getValue(1)}, DL);
  }
  return Res;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x118, %rsp # imm = 0x118
movq %rcx, 0x58(%rsp)
movl %edx, %r14d
movq %rsi, %r12
movq 0x48(%rsi), %rsi
movq %rsi, 0x70(%rsp)
testq %rsi, %rsi
movq %rdi, 0x80(%rsp)
je 0x12103b9
leaq 0x70(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x80(%rsp), %rdi
movl 0x44(%r12), %eax
movl %eax, 0x78(%rsp)
movq 0x28(%r12), %rax
movq 0x30(%r12), %rcx
movq (%rax), %r8
movl 0x8(%rax), %r9d
movl 0xc(%rax), %edx
movl %edx, 0x64(%rsp)
movq 0x28(%rax), %r13
movl 0x30(%rax), %esi
movl 0x34(%rax), %edx
movl %r14d, %eax
shlq $0x4, %rax
movb (%rcx,%rax), %r14b
movzbl %r14b, %ebp
movq 0x30(%r13), %rax
movq %rsi, %rcx
shlq $0x4, %rcx
movzbl (%rax,%rcx), %r15d
leal -0x11(%rbp), %eax
movl %eax, 0x8c(%rsp)
cmpb $0x77, %al
movl %edx, 0x60(%rsp)
movq %rsi, 0x50(%rsp)
movq %r8, 0x68(%rsp)
movl %r9d, 0x4c(%rsp)
ja 0x121048f
movq 0x4e0f8(%rdi), %rsi
movl %r15d, %edi
callq 0x11fefaa
leaq 0x1fbf606(%rip), %rcx # 0x31cfa40
movzbl %al, %ebx
leaq 0x218c95c(%rip), %rax # 0x339cda0
movzwl -0x2(%rax,%rbx,2), %eax
leal 0x77(%rbx), %edx
xorl %esi, %esi
cmpb $0x35, %dl
setb %sil
shlq $0x20, %rsi
orq %rax, %rsi
movzbl -0x1(%rbp,%rcx), %edi
callq 0x920260
movl %eax, %r14d
movl %ebx, %edi
xorl %esi, %esi
movq %r13, %rdx
movq 0x50(%rsp), %rcx
movq 0x58(%rsp), %r8
callq 0x1200379
movq 0x80(%rsp), %rdi
movq %rax, %r13
movq %rdx, 0x50(%rsp)
movq 0x4e0f8(%rdi), %r9
movzbl %r14b, %ebx
leaq 0xd8(%rsp), %r14
leaq 0x70(%rsp), %rcx
movq %r14, %rdi
movl %r15d, %esi
movl %ebx, %edx
movq 0x58(%rsp), %r8
callq 0x1200493
leaq 0x1fbf580(%rip), %rax # 0x31cfa40
movb -0x1(%rbp,%rax), %cl
movzbl %cl, %edx
leal -0xa(%rdx), %esi
cmpl $0x2, %esi
movq %rbp, 0x90(%rsp)
jae 0x12104e4
cmpb $0xd, -0x1(%r15,%rax)
je 0x12104fe
jmp 0x1210608
cmpl $0xd, %edx
jne 0x1210608
movb -0x1(%r15,%rax), %dl
andb $-0x2, %dl
cmpb $0xa, %dl
jne 0x12105fc
xorl %r15d, %r15d
cmpl $0x91, 0x18(%r12)
setne %r15b
addl $0x381, %r15d # imm = 0x381
leaq 0x218c884(%rip), %rax # 0x339cda0
movzwl -0x2(%rax,%rbx,2), %eax
leal 0x77(%rbx), %ecx
xorl %esi, %esi
cmpb $0x35, %cl
setb %sil
shlq $0x20, %rsi
orq %rax, %rsi
movl $0xc, %edi
callq 0x920260
movzbl %al, %esi
movq %rbx, %rbp
movq 0x58(%rsp), %rbx
movq %rbx, %rdi
xorl %edx, %edx
movl $0x1, %ecx
xorl %r8d, %r8d
callq 0x1762ed2
movl %edx, %r8d
movq 0x68(%rsp), %rcx
movq %rcx, 0xc8(%rsp)
movl 0x4c(%rsp), %ecx
movl %ecx, 0xd0(%rsp)
movl 0x64(%rsp), %ecx
movl %ecx, 0xd4(%rsp)
movq %r13, 0xb8(%rsp)
movq 0x50(%rsp), %rcx
movl %ecx, 0xc0(%rsp)
movl 0x60(%rsp), %ecx
movl %ecx, 0xc4(%rsp)
movups 0x10(%r14), %xmm0
movups %xmm0, 0x30(%rsp)
movups 0xd8(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xc8(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x70(%rsp), %rdx
movq %rbx, %rdi
movl %r15d, %esi
movq %rax, %rcx
callq 0x178dd02
movq %rbp, %rsi
movq %rax, %r13
movq %rdx, 0x50(%rsp)
movq %rax, %rbp
movq %r14, %rbx
movl $0x1, %r14d
jmp 0x1210618
andb $-0x2, %cl
cmpb $0xa, %cl
je 0x12104d7
movq 0x68(%rsp), %rbp
movq %rbx, %rsi
movq %r14, %rbx
movl 0x4c(%rsp), %r14d
xorl %r15d, %r15d
cmpl $0x91, 0x18(%r12)
sete %r15b
orl $0x380, %r15d # imm = 0x380
movq 0x58(%rsp), %r12
movq %r12, %rdi
xorl %edx, %edx
movl $0x1, %ecx
xorl %r8d, %r8d
callq 0x1762ed2
movl %edx, %r8d
movq %rbp, 0xa8(%rsp)
movl %r14d, 0xb0(%rsp)
movl 0x64(%rsp), %ecx
movl %ecx, 0xb4(%rsp)
movq %r13, 0x98(%rsp)
movq 0x50(%rsp), %rcx
movl %ecx, 0xa0(%rsp)
movl 0x60(%rsp), %ecx
movl %ecx, 0xa4(%rsp)
movups 0x10(%rbx), %xmm0
movups %xmm0, 0x30(%rsp)
movups 0xd8(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0x98(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xa8(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x70(%rsp), %rdx
movq %r12, %rdi
movl %r15d, %esi
movq %rax, %rcx
callq 0x178dd02
movq %rax, %r14
movl %edx, %ebp
cmpb $0x77, 0x8c(%rsp)
ja 0x121072f
movq 0x80(%rsp), %rax
movq 0x4e0f8(%rax), %r9
movq 0x90(%rsp), %rdi
xorl %esi, %esi
movq %r14, %rdx
movl %ebp, %ecx
movq 0x58(%rsp), %rbx
movq %rbx, %r8
callq 0x120053c
leaq 0xf8(%rsp), %rsi
movq %rax, (%rsi)
movl %edx, 0x8(%rsi)
movq %r14, 0x10(%rsi)
movl $0x1, 0x18(%rsi)
leaq 0x70(%rsp), %rcx
movl $0x2, %edx
movq %rbx, %rdi
callq 0x178587a
movq %rax, %r14
movl %edx, %ebp
movq 0x70(%rsp), %rsi
testq %rsi, %rsi
je 0x1210743
leaq 0x70(%rsp), %rdi
callq 0x2a758fc
movq %r14, %rax
movl %ebp, %edx
addq $0x118, %rsp # imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/RISCV/RISCVISelLowering.cpp
|
(anonymous namespace)::NodeExtensionHelper::getMaskAndVL(llvm::SDNode const*, llvm::SelectionDAG&, llvm::RISCVSubtarget const&)
|
static std::pair<SDValue, SDValue>
getMaskAndVL(const SDNode *Root, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
assert(isSupportedRoot(Root, Subtarget) && "Unexpected root");
switch (Root->getOpcode()) {
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::OR:
case ISD::SHL: {
SDLoc DL(Root);
MVT VT = Root->getSimpleValueType(0);
return getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
}
default:
return std::make_pair(Root->getOperand(3), Root->getOperand(4));
}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %rbx
movl 0x18(%rsi), %eax
leal -0x38(%rax), %ecx
cmpl $0x3, %ecx
jb 0x1252d52
cmpl $0xbd, %eax
je 0x1252d52
cmpl $0xba, %eax
jne 0x1252da4
movq 0x48(%r12), %rsi
movq %rsi, (%rsp)
testq %rsi, %rsi
je 0x1252d6d
movq %rsp, %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r12), %eax
movq %rsp, %r13
movl %eax, 0x8(%r13)
movq 0x30(%r12), %rax
movl (%rax), %esi
movq %rbx, %rdi
movq %r13, %rdx
movq %r15, %rcx
movq %r14, %r8
callq 0x1201786
movq (%r13), %rsi
testq %rsi, %rsi
je 0x1252dbb
movq %rsp, %rdi
callq 0x2a758fc
jmp 0x1252dbb
movq 0x28(%r12), %rax
movups 0x78(%rax), %xmm0
movups %xmm0, (%rbx)
movups 0xa0(%rax), %xmm0
movups %xmm0, 0x10(%rbx)
movq %rbx, %rax
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/Target/RISCV/RISCVISelLowering.cpp
|
tryFoldSelectIntoOp(llvm::SDNode*, llvm::SelectionDAG&, llvm::SDValue, llvm::SDValue, bool)
|
static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
SDValue TrueVal, SDValue FalseVal,
bool Swapped) {
bool Commutative = true;
unsigned Opc = TrueVal.getOpcode();
switch (Opc) {
default:
return SDValue();
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::SUB:
Commutative = false;
break;
case ISD::ADD:
case ISD::OR:
case ISD::XOR:
break;
}
if (!TrueVal.hasOneUse() || isa<ConstantSDNode>(FalseVal))
return SDValue();
unsigned OpToFold;
if (FalseVal == TrueVal.getOperand(0))
OpToFold = 0;
else if (Commutative && FalseVal == TrueVal.getOperand(1))
OpToFold = 1;
else
return SDValue();
EVT VT = N->getValueType(0);
SDLoc DL(N);
SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);
EVT OtherOpVT = OtherOp.getValueType();
SDValue IdentityOperand =
DAG.getNeutralElement(Opc, DL, OtherOpVT, N->getFlags());
if (!Commutative)
IdentityOperand = DAG.getConstant(0, DL, OtherOpVT);
assert(IdentityOperand && "No identity operand!");
if (Swapped)
std::swap(OtherOp, IdentityOperand);
SDValue NewSel =
DAG.getSelect(DL, OtherOpVT, N->getOperand(0), OtherOp, IdentityOperand);
return DAG.getNode(TrueVal.getOpcode(), DL, VT, FalseVal, NewSel);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movl %r9d, 0x34(%rsp)
movq %rdx, %r15
movq %rsi, %r14
movq %rdi, 0x38(%rsp)
movl 0x18(%rdx), %r13d
xorl %ebp, %ebp
xorl %ebx, %ebx
cmpl $0xbc, %r13d
jg 0x1253f14
movb $0x1, %r12b
leal -0xba(%r13), %eax
cmpl $0x2, %eax
jb 0x1253f27
cmpl $0x38, %r13d
je 0x1253f27
cmpl $0x39, %r13d
je 0x1253f24
jmp 0x1254045
leal -0xbd(%r13), %eax
cmpl $0x2, %eax
ja 0x1254045
xorl %r12d, %r12d
movq %r8, 0x50(%rsp)
movq %r15, %rdi
movl $0x1, %esi
movl %ecx, %edx
callq 0x179219e
testb %al, %al
je 0x1254041
movq 0x50(%rsp), %rdx
movl 0x18(%rdx), %eax
xorl %ebp, %ebp
cmpl $0xb, %eax
je 0x1254043
movl $0x0, %ebx
cmpl $0x23, %eax
je 0x1254045
movq 0x28(%r15), %rax
cmpq %rdx, (%rax)
jne 0x1254020
movl $0x1, %ebp
movl 0x34(%rsp), %ecx
cmpl %ecx, 0x8(%rax)
jne 0x1254020
movq 0x38(%rsp), %rcx
movq 0x30(%rcx), %rax
movb (%rax), %dl
movb %dl, 0x33(%rsp)
movq 0x8(%rax), %rax
movq %rax, 0x48(%rsp)
movq 0x48(%rcx), %rsi
movq %rsi, 0x58(%rsp)
testq %rsi, %rsi
je 0x1253fb8
leaq 0x58(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x38(%rsp), %rsi
movl 0x44(%rsi), %eax
leaq 0x58(%rsp), %rdx
movl %eax, 0x8(%rdx)
movq 0x28(%r15), %rax
leaq (%rbp,%rbp,4), %rcx
movq (%rax,%rcx,8), %r8
movl 0x8(%rax,%rcx,8), %edi
movl 0xc(%rax,%rcx,8), %eax
movl %eax, 0x44(%rsp)
movq %r8, 0x68(%rsp)
movq 0x30(%r8), %rax
movq %rdi, 0x70(%rsp)
movq %rdi, %rcx
shlq $0x4, %rcx
movq 0x8(%rax,%rcx), %rbx
movzbl (%rax,%rcx), %ebp
movl 0x1c(%rsi), %r9d
movq %r14, %rdi
movl %r13d, %esi
movl %ebp, %ecx
movq %rbx, %r8
callq 0x17952ee
testb %r12b, %r12b
je 0x125405c
movl %ebp, %r13d
movq %rbx, %r12
jmp 0x1254080
testb %r12b, %r12b
je 0x1254041
xorl %ebp, %ebp
cmpq %rdx, 0x28(%rax)
jne 0x1254043
movl $0x0, %ebx
movl 0x34(%rsp), %ecx
cmpl %ecx, 0x30(%rax)
je 0x1253f83
jmp 0x1254045
xorl %ebp, %ebp
xorl %ebx, %ebx
movq %rbx, %rax
movl %ebp, %edx
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x0, (%rsp)
leaq 0x58(%rsp), %rdx
movq %r14, %rdi
xorl %esi, %esi
movl %ebp, %r13d
movl %ebp, %ecx
movq %rbx, %r12
movq %rbx, %r8
xorl %r9d, %r9d
callq 0x17645fe
movq 0x48(%rsp), %rbx
cmpb $0x0, 0xf0(%rsp)
movq 0x70(%rsp), %r10
movq 0x68(%rsp), %r11
je 0x12540a0
movl %edx, %edi
movq %rax, %rsi
jmp 0x12540ac
movl %r10d, %edi
movq %r11, %rsi
movl %edx, %r10d
movq %rax, %r11
movq %r12, %rcx
movl %r13d, %edx
movq 0x38(%rsp), %rax
movq 0x28(%rax), %rax
movq (%rax), %r8
movl 0x8(%rax), %r9d
movq %rsi, 0x78(%rsp)
movl %edi, 0x80(%rsp)
movl 0x44(%rsp), %eax
movl %eax, 0x84(%rsp)
movq %r11, 0xa8(%rsp)
movl %r10d, 0xb0(%rsp)
movups 0xa8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x78(%rsp), %xmm0
movups %xmm0, (%rsp)
movl $0x0, 0x20(%rsp)
leaq 0x58(%rsp), %r12
movq %r14, %rdi
movq %r12, %rsi
callq 0x9f1108
movl 0x18(%r15), %esi
movq 0x50(%rsp), %rcx
movq %rcx, 0x98(%rsp)
movl 0x34(%rsp), %ecx
movl %ecx, 0xa0(%rsp)
movq %rax, 0x88(%rsp)
movl %edx, 0x90(%rsp)
movups 0x88(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x98(%rsp), %xmm0
movups %xmm0, (%rsp)
movzbl 0x33(%rsp), %ecx
movq %r14, %rdi
movq %r12, %rdx
movq %rbx, %r8
callq 0x17638a8
movq %rax, %rbx
movl %edx, %ebp
movq (%r12), %rsi
testq %rsi, %rsi
je 0x1254045
leaq 0x58(%rsp), %rdi
callq 0x2a758fc
jmp 0x1254045
nop
|
/Target/RISCV/RISCVISelLowering.cpp
|
void llvm::RVVArgDispatcher::constructArgInfos<llvm::ISD::InputArg>(llvm::ArrayRef<llvm::ISD::InputArg>)
|
// Build RVVArgInfos entries describing how the RVV arguments in ArgList are
// passed. A run of identical scalable-vector arguments (none marked split)
// is recorded as a single tuple entry; otherwise each vector argument is
// recorded individually, with the first mask (i1-element) vector flagged so
// the calling convention can treat it specially.
//
// Fix: the original dereferenced `It->VT` before checking
// `It == ArgList.end()`, which is an end-iterator dereference (UB) when
// ArgList is empty. The emptiness/split check now happens first; behavior is
// unchanged for non-empty lists.
void RVVArgDispatcher::constructArgInfos(ArrayRef<Arg> ArgList) {
  // This lambda determines whether an array of types are constructed by
  // homogeneous vector types.
  auto isHomogeneousScalableVectorType = [](ArrayRef<Arg> ArgList) {
    auto It = ArgList.begin();
    // Return if there is no element or the first type needs split.
    if (It == ArgList.end() || It->Flags.isSplit())
      return false;
    // Extract the first element in the argument type.
    MVT FirstArgRegType = It->VT;
    ++It;
    // Return if this argument type contains only 1 element, or it's not a
    // vector type.
    if (It == ArgList.end() || !FirstArgRegType.isScalableVector())
      return false;
    // Second, check if the following elements in this argument type are all the
    // same.
    for (; It != ArgList.end(); ++It)
      if (It->Flags.isSplit() || It->VT != FirstArgRegType)
        return false;
    return true;
  };
  if (isHomogeneousScalableVectorType(ArgList)) {
    // Handle as tuple type
    RVVArgInfos.push_back({(unsigned)ArgList.size(), ArgList[0].VT, false});
  } else {
    // Handle as normal vector type
    bool FirstVMaskAssigned = false;
    for (const auto &OutArg : ArgList) {
      MVT RegisterVT = OutArg.VT;
      // Skip non-RVV register type
      if (!RegisterVT.isVector())
        continue;
      // Fixed-length vectors are assigned via their scalable container type.
      if (RegisterVT.isFixedLengthVector())
        RegisterVT = TLI->getContainerForFixedLengthVector(RegisterVT);
      // The first i1-element (mask) vector gets the dedicated mask slot.
      if (!FirstVMaskAssigned && RegisterVT.getVectorElementType() == MVT::i1) {
        RVVArgInfos.push_back({1, RegisterVT, true});
        FirstVMaskAssigned = true;
        continue;
      }
      RVVArgInfos.push_back({1, RegisterVT, false});
    }
  }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
imulq $0x38, %rdx, %r15
testq %rdx, %rdx
je 0x12558f7
testb $0x1, 0x1(%r14)
jne 0x12558f7
cmpq $0x1, %rdx
je 0x12558f7
movzbl 0x10(%r14), %eax
leal 0x77(%rax), %ecx
cmpb $0x34, %cl
ja 0x12558f7
leaq -0x38(%r15), %rcx
xorl %esi, %esi
cmpq %rsi, %rcx
je 0x125597d
testb $0x1, 0x39(%r14,%rsi)
jne 0x12558f7
leaq 0x38(%rsi), %rdi
cmpb %al, 0x48(%r14,%rsi)
movq %rdi, %rsi
je 0x12558d8
testq %rdx, %rdx
je 0x125596e
xorl %r12d, %r12d
xorl %r13d, %r13d
movb 0x10(%r14,%r12), %al
leal -0x11(%rax), %ecx
cmpb $-0x54, %cl
ja 0x1255965
cmpb $0x77, %cl
ja 0x1255927
movq 0x58(%rbx), %rcx
movq 0x4e0f8(%rcx), %rsi
movzbl %al, %edi
callq 0x11fefaa
movzbl %al, %esi
testb $0x1, %r13b
jne 0x125594b
movb $0x1, %bpl
leaq 0x1f7a106(%rip), %rax # 0x31cfa40
cmpb $0x2, -0x1(%rsi,%rax)
movabsq $0x10000000001, %rax # imm = 0x10000000001
je 0x1255953
movl $0x1, %eax
movl %r13d, %ebp
shlq $0x20, %rsi
orq %rax, %rsi
movq %rbx, %rdi
callq 0x12503d4
movl %ebp, %r13d
addq $0x38, %r12
cmpq %r12, %r15
jne 0x1255902
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
shlq $0x20, %rax
movl %edx, %esi
orq %rax, %rsi
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x12503d4
|
/Target/RISCV/RISCVISelLowering.cpp
|
llvm::RISCVMachineFunctionInfo::isSExt32Register(llvm::Register) const
|
// Query whether Reg was recorded in SExt32Registers (i.e. known to hold a
// sign-extended 32-bit value).
bool RISCVMachineFunctionInfo::isSExt32Register(Register Reg) const {
  for (const Register &Known : SExt32Registers)
    if (Known == Reg)
      return true;
  return false;
}
|
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
leaq 0xc(%rsp), %rdx
movl %esi, (%rdx)
movq 0x40(%rdi), %rdi
movl 0x48(%rbx), %eax
leaq (%rdi,%rax,4), %rsi
callq 0xbe0938
movl 0x48(%rbx), %ecx
shlq $0x2, %rcx
addq 0x40(%rbx), %rcx
cmpq %rcx, %rax
setne %al
addq $0x10, %rsp
popq %rbx
retq
|
/Target/RISCV/RISCVMachineFunctionInfo.cpp
|
llvm::DenseMap<llvm::MachineInstr const*, llvm::SmallVector<unsigned int, 12u>, llvm::DenseMapInfo<llvm::MachineInstr const*, void>, llvm::detail::DenseMapPair<llvm::MachineInstr const*, llvm::SmallVector<unsigned int, 12u>>>::grow(unsigned int)
|
// Grow the bucket array to hold at least AtLeast entries, rounded up to the
// next power of two with a floor of 64 buckets, then rehash the old entries
// into the new table and free the old storage.
void grow(unsigned AtLeast) {
  unsigned OldNumBuckets = NumBuckets;
  BucketT *OldBuckets = Buckets;
  allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
  assert(Buckets);
  // First allocation: there is nothing to rehash, just mark all buckets empty.
  if (!OldBuckets) {
    this->BaseT::initEmpty();
    return;
  }
  // Move every live entry from the old table into the new one.
  this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
  // Free the old table.
  deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                    alignof(BucketT));
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movl 0x10(%rdi), %r15d
decl %esi
movl %esi, %eax
shrl %eax
orl %esi, %eax
movl %eax, %ecx
shrl $0x2, %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x4, %eax
orl %ecx, %eax
movl %eax, %ecx
shrl $0x8, %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x10, %eax
orl %ecx, %eax
incl %eax
cmpl $0x41, %eax
movl $0x40, %ecx
cmovael %eax, %ecx
movq (%rdi), %r14
movl %ecx, 0x10(%rdi)
shlq $0x3, %rcx
leaq (%rcx,%rcx,8), %rdi
movl $0x8, %esi
callq 0x2b410ec
movq %rax, (%rbx)
testq %r14, %r14
je 0x1256fcf
shlq $0x3, %r15
leaq (%r15,%r15,8), %r15
leaq (%r14,%r15), %rdx
movq %rbx, %rdi
movq %r14, %rsi
callq 0x12570a4
movl $0x8, %edx
movq %r14, %rdi
movq %r15, %rsi
popq %rbx
popq %r14
popq %r15
jmp 0x2b410f1
movq %rax, %rcx
movq $0x0, 0x8(%rbx)
movl 0x10(%rbx), %eax
testq %rax, %rax
je 0x125709e
leaq (%rax,%rax,8), %rax
leaq -0x48(,%rax,8), %rax
movabsq $-0x1c71c71c71c71c71, %rdx # imm = 0xE38E38E38E38E38F
mulq %rdx
movq %rdx, %xmm0
shrq $0x6, %rdx
addq $0x2, %rdx
andq $-0x2, %rdx
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
psrlq $0x6, %xmm0
xorl %eax, %eax
movdqa 0x195dffc(%rip), %xmm1 # 0x2bb5020
movdqa 0x195e004(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rax, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0x1257077
movq $-0x1000, (%rcx) # imm = 0xF000
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0x125708e
movq $-0x1000, 0x48(%rcx) # imm = 0xF000
addq $0x2, %rax
addq $0x90, %rcx
cmpq %rax, %rdx
jne 0x1257034
popq %rbx
popq %r14
popq %r15
retq
|
/llvm/ADT/DenseMap.h
|
llvm::SparcRegisterInfo::isReservedReg(llvm::MachineFunction const&, llvm::MCRegister) const
|
// Report whether Reg is in MF's reserved-register set.
bool SparcRegisterInfo::isReservedReg(const MachineFunction &MF,
                                      MCRegister Reg) const {
  const auto Reserved = getReservedRegs(MF);
  return Reserved[Reg];
}
|
pushq %r14
pushq %rbx
subq $0x48, %rsp
movl %edx, %ebx
movq %rsi, %rdx
movq %rdi, %rsi
movq (%rdi), %rax
movq %rsp, %r14
movq %r14, %rdi
callq *0x60(%rax)
movq (%r14), %rdi
movl %ebx, %eax
shrl $0x6, %eax
movq (%rdi,%rax,8), %r14
leaq 0x10(%rsp), %rax
cmpq %rax, %rdi
je 0x125b4a8
callq 0x780910
btq %rbx, %r14
setb %al
addq $0x48, %rsp
popq %rbx
popq %r14
retq
nop
|
/Target/Sparc/SparcRegisterInfo.cpp
|
llvm::StringMap<std::unique_ptr<llvm::SparcSubtarget, std::default_delete<llvm::SparcSubtarget>>, llvm::MallocAllocator>::~StringMap()
|
// Destroy every live entry; buckets are not reset to their default state
// since the whole table is being torn down.
~StringMap() {
  // Delete all the elements in the map, but don't reset the elements
  // to default values. This is a copy of clear(), but avoids unnecessary
  // work not required in the destructor.
  if (!empty()) {
    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
      StringMapEntryBase *Bucket = TheTable[I];
      // Skip empty buckets (null) and tombstones left behind by erasure.
      if (Bucket && Bucket != getTombstoneVal()) {
        static_cast<MapEntryTy *>(Bucket)->Destroy(getAllocator());
      }
    }
  }
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
cmpl $0x0, 0xc(%rdi)
je 0x125c31e
movl 0x8(%rbx), %r14d
testq %r14, %r14
je 0x125c31e
xorl %r15d, %r15d
movq (%rbx), %rax
movq (%rax,%r15,8), %rdi
cmpq $-0x8, %rdi
je 0x125c316
testq %rdi, %rdi
je 0x125c316
movq %rbx, %rsi
callq 0x125cbe2
incq %r15
cmpq %r15, %r14
jne 0x125c2fc
movq (%rbx), %rdi
popq %rbx
popq %r14
popq %r15
jmp 0x780910
nop
|
/llvm/ADT/StringMap.h
|
llvm::SystemZAsmPrinter::PrintAsmMemoryOperand(llvm::MachineInstr const*, unsigned int, char const*, llvm::raw_ostream&)
|
// Print an inline-asm memory operand consisting of (base register,
// displacement, index register) starting at operand OpNo. Single-character
// modifiers: 'O' prints only the displacement, 'R' prints only the base
// register, and 'A' prints nothing (see comment below). Always returns
// false, which signals success to the AsmPrinter framework.
bool SystemZAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
                                              unsigned OpNo,
                                              const char *ExtraCode,
                                              raw_ostream &OS) {
  if (ExtraCode && ExtraCode[0] && !ExtraCode[1]) {
    switch (ExtraCode[0]) {
    case 'A':
      // Unlike EmitMachineNode(), EmitSpecialNode(INLINEASM) does not call
      // setMemRefs(), so MI->memoperands() is empty and the alignment
      // information is not available.
      return false;
    case 'O':
      // Displacement only: the immediate at operand OpNo + 1.
      OS << MI->getOperand(OpNo + 1).getImm();
      return false;
    case 'R':
      // Base register only: the register at operand OpNo.
      ::printReg(MI->getOperand(OpNo).getReg(), MAI, OS);
      return false;
    }
  }
  // Default: print the full base+displacement(+index) address.
  printAddress(MAI, MI->getOperand(OpNo).getReg(),
               MCOperand::createImm(MI->getOperand(OpNo + 1).getImm()),
               MI->getOperand(OpNo + 2).getReg(), OS);
  return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x18, %rsp
movq %r8, %rbx
testq %rcx, %rcx
je 0x1275c83
movzbl (%rcx), %eax
testl %eax, %eax
je 0x1275c83
cmpb $0x0, 0x1(%rcx)
je 0x1275d21
movq 0x40(%rdi), %r14
movq 0x20(%rsi), %rax
movl %edx, %ecx
shlq $0x5, %rcx
movl 0x4(%rax,%rcx), %ebp
leal 0x1(%rdx), %ecx
shlq $0x5, %rcx
movq 0x10(%rax,%rcx), %rcx
leaq 0x8(%rsp), %rdi
movb $0x2, (%rdi)
movq %rcx, 0x8(%rdi)
addl $0x2, %edx
shlq $0x5, %rdx
movl 0x4(%rax,%rdx), %r15d
movq %r14, %rsi
movq %rbx, %rdx
callq 0x1275c28
movl %r15d, %eax
orl %ebp, %eax
je 0x1275d8f
movq 0x20(%rbx), %rax
cmpq 0x18(%rbx), %rax
jae 0x1275ce6
leaq 0x1(%rax), %rcx
movq %rcx, 0x20(%rbx)
movb $0x28, (%rax)
jmp 0x1275cf3
movq %rbx, %rdi
movl $0x28, %esi
callq 0x2b7d68e
testl %r15d, %r15d
je 0x1275d5a
movq %r14, %rdi
movl %r15d, %esi
movq %rbx, %rdx
callq 0x1278a52
testl %ebp, %ebp
je 0x1275d6b
movq 0x20(%rbx), %rax
cmpq 0x18(%rbx), %rax
jae 0x1275d4d
leaq 0x1(%rax), %rcx
movq %rcx, 0x20(%rbx)
movb $0x2c, (%rax)
jmp 0x1275d5a
cmpl $0x41, %eax
je 0x1275d8f
cmpl $0x52, %eax
je 0x1275d9c
cmpl $0x4f, %eax
jne 0x1275c83
incl %edx
movq 0x20(%rsi), %rax
shlq $0x5, %rdx
movq 0x10(%rax,%rdx), %rsi
movq %rbx, %rdi
callq 0x2b7d122
jmp 0x1275d8f
movq %rbx, %rdi
movl $0x2c, %esi
callq 0x2b7d68e
testl %ebp, %ebp
je 0x1275d6b
movq %r14, %rdi
movl %ebp, %esi
movq %rbx, %rdx
callq 0x1278a52
movq 0x20(%rbx), %rax
cmpq 0x18(%rbx), %rax
jae 0x1275d82
leaq 0x1(%rax), %rcx
movq %rcx, 0x20(%rbx)
movb $0x29, (%rax)
jmp 0x1275d8f
movq %rbx, %rdi
movl $0x29, %esi
callq 0x2b7d68e
xorl %eax, %eax
addq $0x18, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
movq 0x20(%rsi), %rax
movl %edx, %ecx
shlq $0x5, %rcx
movl 0x4(%rax,%rcx), %eax
movq 0x40(%rdi), %rsi
movl %eax, %edi
movq %rbx, %rdx
callq 0x1275dba
jmp 0x1275d8f
|
/Target/SystemZ/SystemZAsmPrinter.cpp
|
llvm::SystemZAsmPrinter::emitPPA2(llvm::Module&)
|
// Emit the z/OS XPLink PPA2 (Program Prologue Area 2) block for the module:
// member id/sub-id, control level, offsets to CELQSTRT and the date/version
// data, a flags byte, and the EBCDIC-encoded compilation timestamp and
// product version strings — followed by the PPA2-list entry the binder uses
// to locate the PPA2. The streamer's current section is saved on entry and
// restored on exit via pushSection()/popSection().
void SystemZAsmPrinter::emitPPA2(Module &M) {
  OutStreamer->pushSection();
  OutStreamer->switchSection(getObjFileLowering().getPPA2Section());
  MCContext &OutContext = OutStreamer->getContext();
  // Make CELQSTRT symbol.
  const char *StartSymbolName = "CELQSTRT";
  MCSymbol *CELQSTRT = OutContext.getOrCreateSymbol(StartSymbolName);
  // Create symbol and assign to class field for use in PPA1.
  PPA2Sym = OutContext.createTempSymbol("PPA2", false);
  MCSymbol *DateVersionSym = OutContext.createTempSymbol("DVS", false);
  // Format the translation time as a 14-character UTC yyyymmddhhmmss string.
  std::time_t Time = getTranslationTime(M);
  SmallString<15> CompilationTime; // 14 + null
  raw_svector_ostream O(CompilationTime);
  O << formatv("{0:%Y%m%d%H%M%S}", llvm::sys::toUtcTime(Time));
  uint32_t ProductVersion = getProductVersion(M),
           ProductRelease = getProductRelease(M),
           ProductPatch = getProductPatch(M);
  SmallString<7> Version; // 6 + null
  raw_svector_ostream ostr(Version);
  ostr << formatv("{0,0-2:d}{1,0-2:d}{2,0-2:d}", ProductVersion, ProductRelease,
                  ProductPatch);
  // Drop 0 during conversion.
  SmallString<sizeof(CompilationTime) - 1> CompilationTimeStr;
  SmallString<sizeof(Version) - 1> VersionStr;
  // The PPA2 strings must be EBCDIC on z/OS.
  ConverterEBCDIC::convertToEBCDIC(CompilationTime, CompilationTimeStr);
  ConverterEBCDIC::convertToEBCDIC(Version, VersionStr);
  enum class PPA2MemberId : uint8_t {
    // See z/OS Language Environment Vendor Interfaces v2r5, p.23, for
    // complete list. Only the C runtime is supported by this backend.
    LE_C_Runtime = 3,
  };
  enum class PPA2MemberSubId : uint8_t {
    // List of languages using the LE C runtime implementation.
    C = 0x00,
    CXX = 0x01,
    Swift = 0x03,
    Go = 0x60,
    LLVMBasedLang = 0xe7,
  };
  // PPA2 Flags
  enum class PPA2Flags : uint8_t {
    CompileForBinaryFloatingPoint = 0x80,
    CompiledWithXPLink = 0x01,
    CompiledUnitASCII = 0x04,
    HasServiceInfo = 0x20,
  };
  // Default to the generic "LLVM-based language" sub-id unless the module
  // flag names one of the known languages.
  PPA2MemberSubId MemberSubId = PPA2MemberSubId::LLVMBasedLang;
  if (auto *MD = M.getModuleFlag("zos_cu_language")) {
    StringRef Language = cast<MDString>(MD)->getString();
    MemberSubId = StringSwitch<PPA2MemberSubId>(Language)
                      .Case("C", PPA2MemberSubId::C)
                      .Case("C++", PPA2MemberSubId::CXX)
                      .Case("Swift", PPA2MemberSubId::Swift)
                      .Case("Go", PPA2MemberSubId::Go)
                      .Default(PPA2MemberSubId::LLVMBasedLang);
  }
  // Emit PPA2 section.
  OutStreamer->emitLabel(PPA2Sym);
  OutStreamer->emitInt8(static_cast<uint8_t>(PPA2MemberId::LE_C_Runtime));
  OutStreamer->emitInt8(static_cast<uint8_t>(MemberSubId));
  OutStreamer->emitInt8(0x22); // Member defined, c370_plist+c370_env
  OutStreamer->emitInt8(0x04); // Control level 4 (XPLink)
  OutStreamer->emitAbsoluteSymbolDiff(CELQSTRT, PPA2Sym, 4);
  OutStreamer->emitInt32(0x00000000);
  OutStreamer->emitAbsoluteSymbolDiff(DateVersionSym, PPA2Sym, 4);
  OutStreamer->emitInt32(
      0x00000000); // Offset to main entry point, always 0 (so says TR).
  uint8_t Flgs = static_cast<uint8_t>(PPA2Flags::CompileForBinaryFloatingPoint);
  Flgs |= static_cast<uint8_t>(PPA2Flags::CompiledWithXPLink);
  if (auto *MD = M.getModuleFlag("zos_le_char_mode")) {
    const StringRef &CharMode = cast<MDString>(MD)->getString();
    if (CharMode == "ascii") {
      Flgs |= static_cast<uint8_t>(
          PPA2Flags::CompiledUnitASCII); // Setting bit for ASCII char. mode.
    } else if (CharMode != "ebcdic") {
      report_fatal_error(
          "Only ascii or ebcdic are valid values for zos_le_char_mode "
          "metadata");
    }
  }
  OutStreamer->emitInt8(Flgs);
  OutStreamer->emitInt8(0x00); // Reserved.
  // No MD5 signature before timestamp.
  // No FLOAT(AFP(VOLATILE)).
  // Remaining 5 flag bits reserved.
  OutStreamer->emitInt16(0x0000); // 16 Reserved flag bits.
  // Emit date and version section.
  OutStreamer->emitLabel(DateVersionSym);
  OutStreamer->emitBytes(CompilationTimeStr.str());
  OutStreamer->emitBytes(VersionStr.str());
  OutStreamer->emitInt16(0x0000); // Service level string length.
  // The binder requires that the offset to the PPA2 be emitted in a different,
  // specially-named section.
  OutStreamer->switchSection(getObjFileLowering().getPPA2ListSection());
  // Emit 8 byte alignment.
  // Emit pointer to PPA2 label.
  OutStreamer->AddComment("A(PPA2-CELQSTRT)");
  OutStreamer->emitAbsoluteSymbolDiff(PPA2Sym, CELQSTRT, 8);
  OutStreamer->popSection();
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1a8, %rsp # imm = 0x1A8
movq %rsi, %r12
movq %rdi, %rbx
movq 0x50(%rdi), %rdi
callq 0xac0e72
movq 0x50(%rbx), %r14
movq %rbx, %rdi
callq 0x1606e3a
movq 0x2f0(%rax), %rsi
movq (%r14), %rax
movq %r14, %rdi
xorl %edx, %edx
callq *0xa8(%rax)
movq 0x50(%rbx), %rax
movq 0x8(%rax), %r15
leaq 0x2bba1bb(%rip), %rax # 0x3e320c5
leaq 0x28(%rsp), %r13
movq %rax, (%r13)
movw $0x103, %bp # imm = 0x103
movw %bp, 0x20(%r13)
movq %r15, %rdi
movq %r13, %rsi
callq 0x28d1f00
movq %rax, 0x10(%rsp)
leaq 0x2bba0cb(%rip), %rax # 0x3e31ffe
movq %rax, (%r13)
movw %bp, 0x20(%r13)
leaq 0x28(%rsp), %r13
movq %r15, %rdi
movq %r13, %rsi
xorl %edx, %edx
callq 0x28d2b64
movq %rax, 0x328(%rbx)
leaq 0x2bba172(%rip), %rax # 0x3e320ce
movq %rax, (%r13)
movw %bp, 0x20(%r13)
leaq 0x28(%rsp), %rsi
movq %r15, %rdi
xorl %edx, %edx
callq 0x28d2b64
movq %rax, 0x8(%rsp)
movq %r12, %rdi
callq 0x1276b51
movq %rax, %r13
leaq 0xa8(%rsp), %rax
movq %rax, -0x18(%rax)
xorl %ecx, %ecx
movq %rcx, -0x10(%rax)
movq $0xf, -0x8(%rax)
leaq 0x148(%rsp), %rbp
movl $0x2, 0x8(%rbp)
movb $0x0, 0x28(%rbp)
movl $0x1, 0x2c(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbp)
movq %rcx, 0x20(%rbp)
leaq 0x468202e(%rip), %r15 # 0x58f9ff8
addq $0x10, %r15
movq %r15, (%rbp)
leaq 0x90(%rsp), %rax
movq %rax, 0x30(%rbp)
movq %rbp, %rdi
callq 0x7fa22e
leaq 0x58(%rsp), %r14
leaq 0x2bba0e0(%rip), %rax # 0x3e320d2
movq %rax, -0x30(%r14)
movq $0x10, -0x28(%r14)
movq %r14, -0x20(%r14)
movq $0x1, -0x18(%r14)
leaq 0x48(%rsp), %rcx
leaq 0x454e65a(%rip), %rax # 0x57c6670
movq %rax, (%rcx)
movq %r13, 0x8(%rcx)
movq %rcx, 0x10(%rcx)
leaq 0x28(%rsp), %rsi
movq %rbp, %rdi
callq 0x2b7d838
movq %r12, %rdi
callq 0x1276ba5
movl %eax, 0x4(%rsp)
movq %r12, %rdi
callq 0x1276be7
movl %eax, (%rsp)
leaq 0x2bba417(%rip), %rsi # 0x3e32463
movl $0x16, %edx
movq %r12, 0x18(%rsp)
movq %r12, %rdi
callq 0x2a84044
movl $0x0, %ecx
testq %rax, %rax
je 0x127806f
movq 0x80(%rax), %rcx
testq %rcx, %rcx
movl $0x0, %eax
je 0x127808b
cmpl $0x41, 0x20(%rcx)
jb 0x1278085
movq 0x18(%rcx), %rcx
jmp 0x1278089
addq $0x18, %rcx
movl (%rcx), %eax
leaq 0x24(%rsp), %rbp
movl %eax, (%rbp)
leaq 0xd0(%rsp), %rax
movq %rax, -0x18(%rax)
xorl %r12d, %r12d
movq %r12, -0x10(%rax)
movq $0x7, -0x8(%rax)
leaq 0x110(%rsp), %r13
movl $0x2, 0x8(%r13)
movb $0x0, 0x28(%r13)
movl $0x1, 0x2c(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %r12, 0x20(%r13)
movq %r15, (%r13)
leaq 0xb8(%rsp), %r15
movq %r15, 0x30(%r13)
movq %r13, %rdi
callq 0x7fa22e
leaq 0x78(%rsp), %rax
leaq 0x2bb9fe8(%rip), %rcx # 0x3e320e3
movq %rcx, -0x50(%rax)
movq $0x1b, -0x48(%rax)
movq %rax, -0x40(%rax)
movq $0x3, -0x38(%rax)
leaq 0x44e48a6(%rip), %rcx # 0x575c9c0
movq %rcx, -0x30(%rax)
movq %rbp, -0x28(%rax)
movq %rcx, -0x20(%rax)
movq %rsp, %rdx
movq %rdx, -0x18(%rax)
leaq 0x68(%rsp), %rax
movq %rcx, (%rax)
leaq 0x4(%rsp), %rcx
movq %rcx, 0x8(%rax)
movq %rax, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0x48(%rsp), %rcx
movq %rcx, 0x20(%rax)
leaq 0x28(%rsp), %rsi
movq %r13, %rdi
callq 0x2b7d838
leaq 0x40(%rsp), %rax
movq %rax, -0x18(%rax)
movq %r12, -0x10(%rax)
movq $0x27, -0x8(%rax)
leaq 0xf0(%rsp), %r14
movq %r14, -0x18(%r14)
movq %r12, -0x10(%r14)
movq $0x1f, -0x8(%r14)
movq 0x90(%rsp), %rdi
movq 0x98(%rsp), %rsi
leaq 0x28(%rsp), %rdx
callq 0x2b2b3dc
movq (%r15), %rdi
movq 0x8(%r15), %rsi
leaq 0xd8(%rsp), %rdx
callq 0x2b2b3dc
leaq 0x2bb9f41(%rip), %rsi # 0x3e320ff
movl $0xf, %edx
movq 0x18(%rsp), %r12
movq %r12, %rdi
callq 0x2a84044
testq %rax, %rax
je 0x127820a
movq %rax, %rdi
callq 0x2a76bfe
movq %rax, %rbp
movq %rdx, %r13
cmpq $0x1, %rdx
jne 0x1278215
leaq 0x3663ad3(%rip), %rsi # 0x48dbcc3
movq %rbp, %rdi
movq %r13, %rdx
callq 0x780c70
xorl %r15d, %r15d
testl %eax, %eax
sete %r15b
shll $0x8, %r15d
jmp 0x1278218
movl $0xe7, %r13d
jmp 0x12782b8
xorl %r15d, %r15d
cmpq $0x3, %r13
jne 0x1278244
testw %r15w, %r15w
jne 0x1278244
leaq 0x3e250de(%rip), %rsi # 0x509d309
movq %rbp, %rdi
movq %r13, %rdx
callq 0x780c70
testl %eax, %eax
movl $0x101, %eax # imm = 0x101
cmovnel %r15d, %eax
movl %eax, %r15d
cmpq $0x5, %r13
jne 0x1278275
movzwl %r15w, %eax
andl $0x100, %eax # imm = 0x100
jne 0x1278275
leaq 0x3e29c00(%rip), %rsi # 0x50a1e5c
movq %rbp, %rdi
movq %r13, %rdx
callq 0x780c70
testl %eax, %eax
movl $0x103, %eax # imm = 0x103
cmovnel %r15d, %eax
movl %eax, %r15d
cmpq $0x2, %r13
jne 0x12782a6
movzwl %r15w, %eax
andl $0x100, %eax # imm = 0x100
jne 0x12782a6
leaq 0x3e29b4f(%rip), %rsi # 0x50a1ddc
movq %rbp, %rdi
movq %r13, %rdx
callq 0x780c70
testl %eax, %eax
movl $0x160, %eax # imm = 0x160
cmovnel %r15d, %eax
movl %eax, %r15d
btl $0x8, %r15d
movl $0xe7, %eax
cmovbl %r15d, %eax
movzbl %al, %r13d
movq 0x8(%rsp), %r15
movq 0x50(%rbx), %rdi
movq 0x328(%rbx), %rsi
movq (%rdi), %rax
xorl %edx, %edx
callq *0xc8(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
movl $0x3, %esi
movl $0x1, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
movq %r13, %rsi
movl $0x1, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
movl $0x22, %esi
movl $0x1, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
movl $0x4, %esi
movl $0x1, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq 0x328(%rbx), %rdx
movq (%rdi), %rax
movq 0x10(%rsp), %rsi
movl $0x4, %ecx
callq *0x350(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
xorl %esi, %esi
movl $0x4, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq 0x328(%rbx), %rdx
movq (%rdi), %rax
movq %r15, %rsi
movl $0x4, %ecx
callq *0x350(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
xorl %esi, %esi
movl $0x4, %edx
callq *0x208(%rax)
leaq 0x2bb9d79(%rip), %rsi # 0x3e3210f
movl $0x10, %edx
movq %r12, %rdi
callq 0x2a84044
movl $0x81, %r12d
testq %rax, %rax
je 0x127840a
movq %rax, %rdi
callq 0x2a76bfe
movq %rax, %rbp
movq %rdx, %r13
cmpq $0x6, %rdx
je 0x12783f0
cmpq $0x5, %r13
jne 0x12785a1
leaq 0x3d3fbb9(%rip), %rsi # 0x4fb7f8c
movq %rbp, %rdi
movq %r13, %rdx
callq 0x780c70
testl %eax, %eax
je 0x1278596
cmpq $0x6, %r13
jne 0x12785a1
leaq 0x2bb9d29(%rip), %rsi # 0x3e32120
movq %rbp, %rdi
movq %r13, %rdx
callq 0x780c70
testl %eax, %eax
jne 0x12785a1
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
movq %r12, %rsi
movl $0x1, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
xorl %esi, %esi
movl $0x1, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
xorl %esi, %esi
movl $0x2, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
movq %r15, %rsi
xorl %edx, %edx
callq *0xc8(%rax)
movq 0x50(%rbx), %rdi
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq (%rdi), %rax
callq *0x1f0(%rax)
movq 0x50(%rbx), %rdi
movq 0xd8(%rsp), %rsi
movq 0xe0(%rsp), %rdx
movq (%rdi), %rax
callq *0x1f0(%rax)
movq 0x50(%rbx), %rdi
movq (%rdi), %rax
xorl %esi, %esi
movl $0x2, %edx
callq *0x208(%rax)
movq 0x50(%rbx), %r15
movq %rbx, %rdi
callq 0x1606e3a
movq 0x2f8(%rax), %rsi
movq (%r15), %rax
movq %r15, %rdi
xorl %edx, %edx
callq *0xa8(%rax)
movq 0x50(%rbx), %rdi
leaq 0x2bb9c9e(%rip), %rax # 0x3e3216b
leaq 0x180(%rsp), %rsi
movq %rax, (%rsi)
movw $0x103, 0x20(%rsi) # imm = 0x103
movq (%rdi), %rax
movl $0x1, %edx
callq *0x78(%rax)
movq 0x50(%rbx), %rdi
movq 0x328(%rbx), %rsi
movq (%rdi), %rax
movq 0x10(%rsp), %rdx
movl $0x8, %ecx
callq *0x350(%rax)
movq 0x50(%rbx), %rdi
callq 0x28f8a44
movq 0xd8(%rsp), %rdi
cmpq %r14, %rdi
je 0x1278522
callq 0x780910
movq 0x28(%rsp), %rdi
leaq 0x40(%rsp), %rax
cmpq %rax, %rdi
je 0x1278536
callq 0x780910
leaq 0x110(%rsp), %rdi
callq 0x2b7e98e
movq 0xb8(%rsp), %rdi
leaq 0xd0(%rsp), %rax
cmpq %rax, %rdi
je 0x127855d
callq 0x780910
leaq 0x148(%rsp), %rdi
callq 0x2b7e98e
movq 0x90(%rsp), %rdi
leaq 0xa8(%rsp), %rax
cmpq %rax, %rdi
je 0x1278584
callq 0x780910
addq $0x1a8, %rsp # imm = 0x1A8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x85, %r12d
jmp 0x127840a
leaq 0x2bb9b7f(%rip), %rdi # 0x3e32127
movl $0x1, %esi
callq 0x2b31b17
|
/Target/SystemZ/SystemZAsmPrinter.cpp
|
llvm::SystemZAsmPrinter::~SystemZAsmPrinter()
|
class LLVM_LIBRARY_VISIBILITY SystemZAsmPrinter : public AsmPrinter {
private:
MCSymbol *CurrentFnPPA1Sym; // PPA1 Symbol.
MCSymbol *CurrentFnEPMarkerSym; // Entry Point Marker.
MCSymbol *PPA2Sym;
SystemZTargetStreamer *getTargetStreamer() {
MCTargetStreamer *TS = OutStreamer->getTargetStreamer();
assert(TS && "do not have a target streamer");
return static_cast<SystemZTargetStreamer *>(TS);
}
/// Call type information for XPLINK.
enum class CallType {
BASR76 = 0, // b'x000' == BASR r7,r6
BRAS7 = 1, // b'x001' == BRAS r7,ep
RESVD_2 = 2, // b'x010'
BRASL7 = 3, // b'x011' == BRASL r7,ep
RESVD_4 = 4, // b'x100'
RESVD_5 = 5, // b'x101'
BALR1415 = 6, // b'x110' == BALR r14,r15
BASR33 = 7, // b'x111' == BASR r3,r3
};
// The Associated Data Area (ADA) contains descriptors which help locating
// external symbols. For each symbol and type, the displacement into the ADA
// is stored.
class AssociatedDataAreaTable {
public:
using DisplacementTable =
MapVector<std::pair<const MCSymbol *, unsigned>, uint32_t>;
private:
const uint64_t PointerSize;
/// The mapping of name/slot type pairs to displacements.
DisplacementTable Displacements;
/// The next available displacement value. Incremented when new entries into
/// the ADA are created.
uint32_t NextDisplacement = 0;
public:
AssociatedDataAreaTable(uint64_t PointerSize) : PointerSize(PointerSize) {}
/// @brief Add a function descriptor to the ADA.
/// @param MI Pointer to an ADA_ENTRY instruction.
/// @return The displacement of the descriptor into the ADA.
uint32_t insert(const MachineOperand MO);
/// @brief Get the displacement into associated data area (ADA) for a name.
/// If no displacement is already associated with the name, assign one and
/// return it.
/// @param Sym The symbol for which the displacement should be returned.
/// @param SlotKind The ADA type.
/// @return The displacement of the descriptor into the ADA.
uint32_t insert(const MCSymbol *Sym, unsigned SlotKind);
/// Get the table of GOFF displacements. This is 'const' since it should
/// never be modified by anything except the APIs on this class.
const DisplacementTable &getTable() const { return Displacements; }
uint32_t getNextDisplacement() const { return NextDisplacement; }
};
AssociatedDataAreaTable ADATable;
void emitPPA1(MCSymbol *FnEndSym);
void emitPPA2(Module &M);
void emitADASection();
void emitIDRLSection(Module &M);
public:
SystemZAsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
: AsmPrinter(TM, std::move(Streamer)), CurrentFnPPA1Sym(nullptr),
CurrentFnEPMarkerSym(nullptr), PPA2Sym(nullptr),
ADATable(TM.getPointerSize(0)) {}
// Override AsmPrinter.
StringRef getPassName() const override { return "SystemZ Assembly Printer"; }
void emitInstruction(const MachineInstr *MI) override;
void emitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) override;
void emitEndOfAsmFile(Module &M) override;
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
const char *ExtraCode, raw_ostream &OS) override;
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
const char *ExtraCode, raw_ostream &OS) override;
bool doInitialization(Module &M) override {
SM.reset();
return AsmPrinter::doInitialization(M);
}
void emitFunctionEntryLabel() override;
void emitFunctionBodyEnd() override;
void emitStartOfAsmFile(Module &M) override;
private:
void emitCallInformation(CallType CT);
void LowerFENTRY_CALL(const MachineInstr &MI, SystemZMCInstLower &MCIL);
void LowerSTACKMAP(const MachineInstr &MI);
void LowerPATCHPOINT(const MachineInstr &MI, SystemZMCInstLower &Lower);
void emitAttributes(Module &M);
}
|
pushq %rbx
movq %rdi, %rbx
leaq 0x454da5d(%rip), %rax # 0x57c6428
movq %rax, (%rdi)
movq 0x350(%rdi), %rdi
leaq 0x360(%rbx), %rax
cmpq %rax, %rdi
je 0x12789e6
callq 0x780910
movq 0x338(%rbx), %rdi
movl 0x348(%rbx), %eax
shlq $0x3, %rax
leaq (%rax,%rax,2), %rsi
movl $0x8, %edx
callq 0x2b410f1
movq %rbx, %rdi
callq 0x1606b06
movl $0x368, %esi # imm = 0x368
movq %rbx, %rdi
popq %rbx
jmp 0x7800d0
nop
|
/Target/SystemZ/SystemZAsmPrinter.h
|
llvm::DenseMapBase<llvm::DenseMap<llvm::MCSymbol const*, unsigned int, llvm::DenseMapInfo<llvm::MCSymbol const*, void>, llvm::detail::DenseMapPair<llvm::MCSymbol const*, unsigned int>>, llvm::MCSymbol const*, unsigned int, llvm::DenseMapInfo<llvm::MCSymbol const*, void>, llvm::detail::DenseMapPair<llvm::MCSymbol const*, unsigned int>>::clear()
|
unsigned getNumEntries() const {
return NumEntries;
}
|
movl 0x8(%rdi), %eax
testl %eax, %eax
jne 0x1278eb3
cmpl $0x0, 0xc(%rdi)
je 0x1278f8c
shll $0x2, %eax
movl 0x10(%rdi), %ecx
cmpl %ecx, %eax
setae %al
cmpl $0x41, %ecx
setb %dl
orb %al, %dl
je 0x1278f8e
testq %rcx, %rcx
je 0x1278f84
movq (%rdi), %rax
movabsq $0xfffffffffffffff, %rdx # imm = 0xFFFFFFFFFFFFFFF
addq %rdx, %rcx
andq %rcx, %rdx
andl $0x1, %ecx
negq %rcx
addq %rdx, %rcx
addq $0x2, %rcx
movq %rdx, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
addq $0x10, %rax
xorl %edx, %edx
movdqa 0x193c113(%rip), %xmm1 # 0x2bb5020
movdqa 0x193c11b(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rdx, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0x1278f61
movq $-0x1000, -0x10(%rax) # imm = 0xF000
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0x1278f77
movq $-0x1000, (%rax) # imm = 0xF000
addq $0x2, %rdx
addq $0x20, %rax
cmpq %rdx, %rcx
jne 0x1278f1d
movq $0x0, 0x8(%rdi)
retq
nop
|
/llvm/ADT/DenseMap.h
|
llvm::SystemZInstrInfo::expandRXYPseudo(llvm::MachineInstr&, unsigned int, unsigned int) const
|
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const {
Register Reg = MI.getOperand(0).getReg();
unsigned Opcode = getOpcodeForOffset(
SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
MI.getOperand(2).getImm());
MI.setDesc(get(Opcode));
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
movq 0x20(%rsi), %rax
movl 0x4(%rax), %esi
leal -0x1(%rsi), %edi
cmpl $0x3ffffffe, %edi # imm = 0x3FFFFFFE
ja 0x127a70f
leaq 0x454cc67(%rip), %rdi # 0x57c7350
movq (%rdi), %rdi
movl %esi, %r8d
shrl $0x3, %r8d
movzwl 0x16(%rdi), %r9d
cmpl %r9d, %r8d
jae 0x127a70f
movq 0x8(%rdi), %rdi
movzbl (%rdi,%r8), %edi
andl $0x7, %esi
btl %esi, %edi
cmovbl %ecx, %edx
movq 0x50(%rax), %rax
movq %r14, %rdi
movl %edx, %esi
movq %rax, %rdx
xorl %ecx, %ecx
callq 0x127a00c
movq 0x8(%r14), %rsi
movl %eax, %eax
shlq $0x5, %rax
subq %rax, %rsi
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0x1d3c794
|
/Target/SystemZ/SystemZInstrInfo.cpp
|
llvm::SystemZInstrInfo::getLoadAndTest(unsigned int) const
|
unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
switch (Opcode) {
case SystemZ::L: return SystemZ::LT;
case SystemZ::LY: return SystemZ::LT;
case SystemZ::LG: return SystemZ::LTG;
case SystemZ::LGF: return SystemZ::LTGF;
case SystemZ::LR: return SystemZ::LTR;
case SystemZ::LGFR: return SystemZ::LTGFR;
case SystemZ::LGR: return SystemZ::LTGR;
case SystemZ::LCDFR: return SystemZ::LCDBR;
case SystemZ::LPDFR: return SystemZ::LPDBR;
case SystemZ::LNDFR: return SystemZ::LNDBR;
case SystemZ::LCDFR_32: return SystemZ::LCEBR;
case SystemZ::LPDFR_32: return SystemZ::LPEBR;
case SystemZ::LNDFR_32: return SystemZ::LNEBR;
// On zEC12 we prefer to use RISBGN. But if there is a chance to
// actually use the condition code, we may turn it back into RISGB.
// Note that RISBG is not really a "load-and-test" instruction,
// but sets the same condition code values, so is OK to use here.
case SystemZ::RISBGN: return SystemZ::RISBG;
default: return 0;
}
}
|
movl $0x702, %eax # imm = 0x702
cmpl $0x610, %esi # imm = 0x610
jg 0x1280add
cmpl $0x5d8, %esi # imm = 0x5D8
jle 0x1280b0b
cmpl $0x5df, %esi # imm = 0x5DF
jg 0x1280b4b
cmpl $0x5d9, %esi # imm = 0x5D9
je 0x1280b77
cmpl $0x5de, %esi # imm = 0x5DE
jne 0x1280b9b
movl $0x709, %eax # imm = 0x709
retq
cmpl $0x6e4, %esi # imm = 0x6E4
jle 0x1280b2d
cmpl $0x71b, %esi # imm = 0x71B
jg 0x1280b61
cmpl $0x6e5, %esi # imm = 0x6E5
je 0x1280b7d
cmpl $0x6f5, %esi # imm = 0x6F5
jne 0x1280b9b
movl $0x70c, %eax # imm = 0x70C
retq
cmpl $0x595, %esi # imm = 0x595
je 0x1280b9d
cmpl $0x5b0, %esi # imm = 0x5B0
je 0x1280b8f
cmpl $0x5b1, %esi # imm = 0x5B1
jne 0x1280b9b
movl $0x5b3, %eax # imm = 0x5B3
retq
cmpl $0x611, %esi # imm = 0x611
je 0x1280b89
cmpl $0x612, %esi # imm = 0x612
je 0x1280b95
cmpl $0x6e4, %esi # imm = 0x6E4
jne 0x1280b9b
movl $0x6e3, %eax # imm = 0x6E3
retq
cmpl $0x5e0, %esi # imm = 0x5E0
je 0x1280b83
cmpl $0x5e7, %esi # imm = 0x5E7
jne 0x1280b9b
movl $0x70b, %eax # imm = 0x70B
retq
cmpl $0x71c, %esi # imm = 0x71C
je 0x1280b9d
cmpl $0x7e0, %esi # imm = 0x7E0
jne 0x1280b9b
movl $0x7de, %eax # imm = 0x7DE
retq
movl $0x708, %eax # imm = 0x708
retq
movl $0x6e8, %eax # imm = 0x6E8
retq
movl $0x70a, %eax # imm = 0x70A
retq
movl $0x610, %eax # imm = 0x610
retq
movl $0x5af, %eax # imm = 0x5AF
retq
movl $0x614, %eax # imm = 0x614
retq
xorl %eax, %eax
retq
|
/Target/SystemZ/SystemZInstrInfo.cpp
|
llvm::SystemZ::splitBlockAfter(llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>, llvm::MachineBasicBlock*)
|
MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB) {
MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
NewMBB->splice(NewMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
return NewMBB;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %rbx
movq %rdi, %r15
movq %rsi, %rdi
callq 0x1280f13
movq %rax, %r14
testq %r15, %r15
je 0x1280f87
testb $0x4, (%r15)
je 0x1280f87
jmp 0x1280f8e
movq 0x8(%r15), %r15
testb $0x8, 0x2c(%r15)
jne 0x1280f83
movq 0x8(%r15), %rcx
leaq 0x30(%rbx), %r8
cmpq %r8, %rcx
je 0x1280faf
movq 0x38(%r14), %rsi
leaq 0x28(%rbx), %rdx
movq %r14, %rdi
addq $0x28, %rdi
callq 0x9a5456
movq %r14, %rdi
movq %rbx, %rsi
callq 0x1cfd150
movq %r14, %rax
popq %rbx
popq %r14
popq %r15
retq
|
/Target/SystemZ/SystemZInstrInfo.cpp
|
llvm::VR32BitGetRawAllocationOrder(llvm::MachineFunction const&)
|
iterator begin() const { return RegsBegin; }
|
leaq 0x45e9681(%rip), %rax # 0x586ac70
movq 0x20(%rax), %rcx
movzwl 0x34(%rax), %eax
movq %rcx, -0x28(%rsp)
movq %rax, -0x20(%rsp)
leaq 0x2bb1e58(%rip), %rax # 0x3e33460
movq %rax, -0x18(%rsp)
movq $0x20, -0x10(%rsp)
movq 0x10(%rdi), %rax
movl 0x17c(%rax), %ecx
movl 0x184(%rax), %eax
xorl $0x4, %eax
xorl $0xf, %ecx
xorl %edx, %edx
orl %eax, %ecx
sete %dl
shll $0x4, %edx
movq -0x28(%rsp,%rdx), %rax
movq -0x20(%rsp,%rdx), %rdx
retq
|
/llvm/MC/MCRegisterInfo.h
|
llvm::SystemZELFRegisters::getCallPreservedMask(llvm::MachineFunction const&, unsigned int) const
|
const uint32_t *
SystemZELFRegisters::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
if (CC == CallingConv::GHC)
return CSR_SystemZ_NoRegs_RegMask;
if (CC == CallingConv::AnyReg)
return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
: CSR_SystemZ_AllRegs_RegMask;
if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
MF.getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_SystemZ_SwiftError_RegMask;
return CSR_SystemZ_ELF_RegMask;
}
|
cmpl $0xa, %edx
je 0x1282a3b
pushq %rbx
subq $0x10, %rsp
movq %rsi, %rbx
movq 0x10(%rsi), %rdi
cmpl $0xd, %edx
jne 0x1282a43
leaq 0x2bb07c9(%rip), %rcx # 0x3e331f0
leaq 0x2bb07a2(%rip), %rax # 0x3e331d0
cmpb $0x0, 0x146(%rdi)
cmovneq %rcx, %rax
jmp 0x1282a8b
leaq 0x2bb07ee(%rip), %rax # 0x3e33230
retq
movq (%rdi), %rax
callq *0x90(%rax)
movq (%rax), %rcx
movq %rax, %rdi
callq *0x850(%rcx)
testb %al, %al
je 0x1282a84
movq (%rbx), %rax
movq 0x70(%rax), %rax
leaq 0x8(%rsp), %rdi
movq %rax, (%rdi)
movl $0x45, %esi
xorl %edx, %edx
callq 0x29acba8
testb %al, %al
je 0x1282a84
leaq 0x2bb07ce(%rip), %rax # 0x3e33250
jmp 0x1282a8b
leaq 0x2bb0785(%rip), %rax # 0x3e33210
addq $0x10, %rsp
popq %rbx
retq
nop
|
/Target/SystemZ/SystemZRegisterInfo.cpp
|
llvm::BasicTTIImplBase<llvm::SystemZTTIImpl>::isTypeLegal(llvm::Type*)
|
bool isTypeLegal(Type *Ty) {
EVT VT = getTLI()->getValueType(DL, Ty, /*AllowUnknown=*/true);
return getTLI()->isTypeLegal(VT);
}
|
pushq %rbx
movq %rsi, %rdx
movq %rdi, %rbx
movq 0x8(%rdi), %rsi
movq 0x18(%rdi), %rdi
movl $0x1, %ecx
callq 0x920138
testb %al, %al
je 0x128679b
movq 0x18(%rbx), %rcx
movzbl %al, %eax
cmpq $0x0, 0x68(%rcx,%rax,8)
setne %al
jmp 0x128679d
xorl %eax, %eax
popq %rbx
retq
nop
|
/llvm/CodeGen/BasicTTIImpl.h
|
llvm::SystemZTTIImpl::getMinPrefetchStride(unsigned int, unsigned int, unsigned int, bool) const
|
unsigned SystemZTTIImpl::getMinPrefetchStride(unsigned NumMemAccesses,
unsigned NumStridedMemAccesses,
unsigned NumPrefetches,
bool HasCall) const {
// Don't prefetch a loop with many far apart accesses.
if (NumPrefetches > 16)
return UINT_MAX;
// Emit prefetch instructions for smaller strides in cases where we think
// the hardware prefetcher might not be able to keep up.
if (NumStridedMemAccesses > 32 && !HasCall &&
(NumMemAccesses - NumStridedMemAccesses) * 32 <= NumStridedMemAccesses)
return 1;
return ST->hasMiscellaneousExtensions3() ? 8192 : 2048;
}
|
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
cmpl $0x10, %ecx
ja 0x128bbd3
cmpl $0x21, %edx
setb %al
orb %r8b, %al
jne 0x128bbbb
subl %edx, %esi
shll $0x5, %esi
movl $0x1, %eax
cmpl %edx, %esi
jbe 0x128bbd3
movq 0x10(%rdi), %rax
cmpb $0x0, 0x13b(%rax)
movl $0x2000, %ecx # imm = 0x2000
movl $0x800, %eax # imm = 0x800
cmovnel %ecx, %eax
retq
|
/Target/SystemZ/SystemZTargetTransformInfo.cpp
|
(anonymous namespace)::SystemZDAGToDAGISel::storeLoadIsAligned(llvm::SDNode*) const
|
bool SystemZDAGToDAGISel::storeLoadIsAligned(SDNode *N) const {
auto *MemAccess = cast<MemSDNode>(N);
auto *LdSt = dyn_cast<LSBaseSDNode>(MemAccess);
TypeSize StoreSize = MemAccess->getMemoryVT().getStoreSize();
SDValue BasePtr = MemAccess->getBasePtr();
MachineMemOperand *MMO = MemAccess->getMemOperand();
assert(MMO && "Expected a memory operand.");
// The memory access must have a proper alignment and no index register.
// Only load and store nodes have the offset operand (atomic loads do not).
if (MemAccess->getAlign().value() < StoreSize ||
(LdSt && !LdSt->getOffset().isUndef()))
return false;
// The MMO must not have an unaligned offset.
if (MMO->getOffset() % StoreSize != 0)
return false;
// An access to GOT or the Constant Pool is aligned.
if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
if ((PSV->isGOT() || PSV->isConstantPool()))
return true;
// Check the alignment of a Global Address.
if (BasePtr.getNumOperands())
if (GlobalAddressSDNode *GA =
dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0))) {
// The immediate offset must be aligned.
if (GA->getOffset() % StoreSize != 0)
return false;
// The alignment of the symbol itself must be at least the store size.
const GlobalValue *GV = GA->getGlobal();
const DataLayout &DL = GV->getDataLayout();
if (GV->getPointerAlignment(DL).value() < StoreSize)
return false;
}
return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rdi, %r14
movl $0xfffffedf, %eax # imm = 0xFFFFFEDF
addl 0x18(%rdi), %eax
xorl %ebx, %ebx
cmpl $0x2, %eax
movl $0x0, %r13d
cmovbq %rdi, %r13
movb 0x58(%rdi), %al
movq 0x60(%rdi), %rcx
leaq 0x18(%rsp), %rdi
movb %al, (%rdi)
movq %rcx, 0x8(%rdi)
callq 0x9ecd2a
leaq 0x8(%rsp), %r15
movq %rax, (%r15)
movb %dl, 0x8(%r15)
movq %r14, %rdi
callq 0x9fdbd6
movq (%rax), %r12
movq 0x68(%r14), %r14
movq %r14, %rdi
callq 0x1d56102
movl $0x1, %ebp
movl %eax, %ecx
shlq %cl, %rbp
movq %r15, %rdi
callq 0x2b60e74
cmpq %rax, %rbp
jb 0x1299141
testq %r13, %r13
je 0x1299062
cmpl $0x121, 0x18(%r13) # imm = 0x121
movl $0x50, %eax
movl $0x78, %ecx
cmoveq %rax, %rcx
movq 0x28(%r13), %rax
movq (%rax,%rcx), %rax
cmpl $0x33, 0x18(%rax)
jne 0x1299135
movq 0x8(%r14), %r15
leaq 0x8(%rsp), %rdi
callq 0x2b60e74
movq %rax, %rcx
xorl %ebx, %ebx
movq %r15, %rax
xorl %edx, %edx
divq %rcx
testq %rdx, %rdx
jne 0x1299141
movq (%r14), %rax
testb $0x4, %al
sete %cl
andq $-0x8, %rax
sete %dl
orb %cl, %dl
jne 0x12990aa
movl 0x8(%rax), %eax
movb $0x1, %bl
orl $0x2, %eax
cmpl $0x3, %eax
je 0x1299141
cmpw $0x0, 0x40(%r12)
je 0x129913f
movq 0x28(%r12), %rax
movq (%rax), %r15
movl 0x18(%r15), %eax
cmpq $0x26, %rax
ja 0x1299152
movabsq $0x6000006000, %rcx # imm = 0x6000006000
btq %rax, %rcx
jae 0x1299152
testq %r15, %r15
je 0x1299139
movq 0x60(%r15), %r14
leaq 0x8(%rsp), %rdi
callq 0x2b60e74
movq %rax, %rcx
xorl %ebx, %ebx
movq %r14, %rax
xorl %edx, %edx
divq %rcx
testq %rdx, %rdx
jne 0x129913b
movq 0x58(%r15), %rbx
movq %rbx, %rdi
callq 0x2a4771e
movq %rbx, %rdi
movq %rax, %rsi
callq 0x2aa134e
movl $0x1, %ebx
movl %eax, %ecx
shlq %cl, %rbx
leaq 0x8(%rsp), %rdi
callq 0x2b60e74
cmpq %rax, %rbx
setae %bl
jmp 0x129913b
xorl %ebx, %ebx
jmp 0x1299141
movb $0x1, %bl
testb %bl, %bl
je 0x1299141
movb $0x1, %bl
movl %ebx, %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
xorl %r15d, %r15d
jmp 0x12990dd
nop
|
/Target/SystemZ/SystemZISelDAGToDAG.cpp
|
llvm::getNodeAVL(llvm::SDValue)
|
SDValue getNodeAVL(SDValue Op) {
auto PosOpt = getAVLPos(Op->getOpcode());
return PosOpt ? Op->getOperand(*PosOpt) : SDValue();
}
|
pushq %rbx
movq %rdi, %rbx
movl 0x18(%rdi), %edi
callq 0x12f4039
btq $0x20, %rax
jb 0x12f466e
xorl %edx, %edx
xorl %eax, %eax
jmp 0x12f4680
movq 0x28(%rbx), %rcx
movl %eax, %eax
leaq (%rax,%rax,4), %rdx
movq (%rcx,%rdx,8), %rax
movl 0x8(%rcx,%rdx,8), %edx
popq %rbx
retq
|
/Target/VE/VECustomDAG.cpp
|
llvm::SmallVectorTemplateBase<std::pair<llvm::StringRef, llvm::SmallVector<llvm::MCSymbol*, 4u>>, false>::grow(unsigned long)
|
void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
size_t NewCapacity;
T *NewElts = mallocForGrow(MinSize, NewCapacity);
moveElementsForGrow(NewElts);
takeAllocationForGrow(NewElts, NewCapacity);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %rdx
movq %rdi, %rbx
leaq 0x10(%rdi), %r15
movq %rsp, %r12
movl $0x40, %ecx
movq %r15, %rsi
movq %r12, %r8
callq 0x2b4ec3c
movq %rax, %r14
movq %rbx, %rdi
movq %rax, %rsi
callq 0x12f83b6
movq (%r12), %r12
movq (%rbx), %rdi
cmpq %r15, %rdi
je 0x12f83a2
callq 0x780910
movq %r14, (%rbx)
movl %r12d, 0xc(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/SmallVector.h
|
llvm::computeLegalValueVTs(llvm::WebAssemblyTargetLowering const&, llvm::LLVMContext&, llvm::DataLayout const&, llvm::Type*, llvm::SmallVectorImpl<llvm::MVT>&)
|
void llvm::computeLegalValueVTs(const WebAssemblyTargetLowering &TLI,
LLVMContext &Ctx, const DataLayout &DL,
Type *Ty, SmallVectorImpl<MVT> &ValueVTs) {
SmallVector<EVT, 4> VTs;
ComputeValueVTs(TLI, DL, Ty, VTs);
for (EVT VT : VTs) {
unsigned NumRegs = TLI.getNumRegisters(Ctx, VT);
MVT RegisterVT = TLI.getRegisterType(Ctx, VT);
for (unsigned I = 0; I != NumRegs; ++I)
ValueVTs.push_back(RegisterVT);
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %r8, %rbx
movq %rsi, 0x28(%rsp)
leaq 0x58(%rsp), %rsi
movq %rsi, -0x10(%rsi)
movabsq $0x400000000, %rax # imm = 0x400000000
movq %rax, -0x8(%rsi)
movq $0x0, 0x38(%rsp)
movb $0x0, 0x40(%rsp)
movups 0x38(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x48(%rsp), %r12
movq %rdi, 0x20(%rsp)
movq %rdx, %rsi
movq %rcx, %rdx
movq %r12, %rcx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x1c4746f
movl 0x8(%r12), %ecx
testq %rcx, %rcx
je 0x12f87e9
movq %rcx, %rax
movq 0x48(%rsp), %r14
shlq $0x4, %rax
addq %r14, %rax
movq %rax, 0x18(%rsp)
movq 0x28(%rsp), %r15
movq 0x8(%r14), %rcx
movq %rcx, 0x30(%rsp)
movl (%r14), %r13d
movq 0x20(%rsp), %r12
movq %r12, %rdi
movq %r15, %rsi
movl %r13d, %edx
xorl %r8d, %r8d
callq 0xa4dd7e
movl %eax, %ebp
movq %r12, %rdi
movq %r15, %rsi
movl %r13d, %edx
movq 0x30(%rsp), %rcx
callq 0xa4d858
movl %eax, %r12d
testl %ebp, %ebp
je 0x12f87de
movzbl %r12b, %esi
movq %rbx, %rdi
callq 0x9444fc
decl %ebp
jne 0x12f87ce
addq $0x10, %r14
cmpq 0x18(%rsp), %r14
jne 0x12f8790
movq 0x48(%rsp), %rdi
leaq 0x58(%rsp), %rax
cmpq %rax, %rdi
je 0x12f87fd
callq 0x780910
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
|
void llvm::cl::apply<llvm::cl::opt<bool, false, llvm::cl::parser<bool>>, char [47], llvm::cl::OptionHidden, llvm::cl::desc, llvm::cl::initializer<bool>>(llvm::cl::opt<bool, false, llvm::cl::parser<bool>>*, char const (&) [47], llvm::cl::OptionHidden const&, llvm::cl::desc const&, llvm::cl::initializer<bool> const&)
|
void apply(Opt *O, const Mod &M, const Mods &... Ms) {
applicator<Mod>::opt(M, *O);
apply(O, Ms...);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %r8, %r14
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %rbx
movq %rsi, %rdi
callq 0x7802c0
movq %rbx, %rdi
movq %r13, %rsi
movq %rax, %rdx
callq 0x2b1f336
movl (%r12), %eax
movzwl 0xa(%rbx), %ecx
shll $0x5, %eax
andl $0x60, %eax
andl $-0x61, %ecx
orl %eax, %ecx
movw %cx, 0xa(%rbx)
movups (%r15), %xmm0
movups %xmm0, 0x20(%rbx)
movq (%r14), %rax
movb (%rax), %cl
movb %cl, 0x80(%rbx)
movb $0x1, 0x91(%rbx)
movb (%rax), %al
movb %al, 0x90(%rbx)
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
nop
|
/llvm/Support/CommandLine.h
|
llvm::SmallVectorTemplateBase<std::unique_ptr<llvm::WebAssemblyException, std::default_delete<llvm::WebAssemblyException>>, false>::grow(unsigned long)
|
void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
size_t NewCapacity;
T *NewElts = mallocForGrow(MinSize, NewCapacity);
moveElementsForGrow(NewElts);
takeAllocationForGrow(NewElts, NewCapacity);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %rdx
movq %rdi, %rbx
leaq 0x10(%rdi), %r15
movq %rsp, %r12
movl $0x8, %ecx
movq %r15, %rsi
movq %r12, %r8
callq 0x2b4ec3c
movq %rax, %r14
movq %rbx, %rdi
movq %rax, %rsi
callq 0x1313036
movq (%r12), %r12
movq (%rbx), %rdi
cmpq %r15, %rdi
je 0x1313022
callq 0x780910
movq %r14, (%rbx)
movl %r12d, 0xc(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/SmallVector.h
|
(anonymous namespace)::WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(llvm::MachineFunction&)
|
// Pass entry point. Collects every basic block of MF into one BlockSet and
// hands the whole function body to processRegion. Returns true only when
// processRegion actually rewrote the CFG, in which case blocks are
// renumbered and IMPLICIT_DEFs are added (see comment below for why).
bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(
MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n"
"********** Function: "
<< MF.getName() << '\n');
// Start the recursive process on the entire function body.
BlockSet AllBlocks;
for (auto &MBB : MF) {
AllBlocks.insert(&MBB);
}
if (LLVM_UNLIKELY(processRegion(&*MF.begin(), AllBlocks, MF))) {
// We rewrote part of the function; recompute relevant things.
MF.RenumberBlocks();
// Now we've inserted dispatch blocks, some register uses can have incoming
// paths without a def. For example, before this pass register %a was
// defined in BB1 and used in BB2, and there was only one path from BB1 and
// BB2. But if this pass inserts a dispatch block having multiple
// predecessors between the two BBs, now there are paths to BB2 without
// visiting BB1, and %a's use in BB2 is not dominated by its def. Adding
// IMPLICIT_DEFs to all regs is one simple way to fix it.
addImplicitDefs(MF);
return true;
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rsi, %rbx
leaq 0x20(%rsp), %rax
movq %rax, -0x20(%rax)
movq %rax, -0x18(%rax)
movq $0x4, -0x10(%rax)
movl $0x0, -0x8(%rax)
leaq 0x140(%rsi), %r13
movq 0x148(%rsi), %r14
cmpq %r13, %r14
je 0x13150f7
leaq 0x40(%rsp), %r15
movq %rsp, %r12
movq %r15, %rdi
movq %r12, %rsi
movq %r14, %rdx
callq 0x9a11bc
movq 0x8(%r14), %r14
cmpq %r13, %r14
jne 0x13150e0
movq 0x148(%rbx), %rdi
movq %rsp, %rsi
movq %rbx, %rdx
callq 0x131513a
movl %eax, %ebp
testb %al, %al
jne 0x1315130
movq 0x8(%rsp), %rdi
cmpq (%rsp), %rdi
je 0x131511f
callq 0x780910
movl %ebp, %eax
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rbx, %rdi
callq 0x7841fb
jmp 0x131510f
|
/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
|
bool llvm::PatternMatch::Shuffle_match<llvm::PatternMatch::ThreeOps_match<llvm::PatternMatch::class_match<llvm::Value>, llvm::PatternMatch::class_match<llvm::Value>, llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_zero_int, llvm::ConstantInt, true>, 62u>, llvm::PatternMatch::class_match<llvm::Value>, llvm::PatternMatch::m_ZeroMask>::match<llvm::Value>(llvm::Value*)
|
// Predicate used by signed-max-value matchers: true iff C is the largest
// signed value for its bit width (0b0111...1).
// NOTE(review): the demangled symbol in this listing names a
// Shuffle_match<...>::match instantiation; the debug info resolves it to
// this one-line helper in PatternMatch.h — the pairing looks off, verify.
bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
|
pushq %rbp
pushq %rbx
pushq %rax
movb (%rsi), %bpl
xorl %ebx, %ebx
cmpb $0x5c, %bpl
cmoveq %rsi, %rbx
jne 0x133811a
movq -0x40(%rbx), %rsi
callq 0x133812a
testb %al, %al
je 0x1338118
movq 0x48(%rbx), %rdi
movl 0x50(%rbx), %eax
leaq (%rdi,%rax,4), %rbx
movq %rbx, %rsi
callq 0xe1fff0
cmpq %rbx, %rax
sete %cl
jmp 0x133811a
xorl %ecx, %ecx
cmpb $0x5c, %bpl
sete %al
andb %cl, %al
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
|
/llvm/IR/PatternMatch.h
|
(anonymous namespace)::WebAssemblyFastISel::getLegalType(llvm::MVT::SimpleValueType)
|
// Map a simple value type to the type WebAssembly fast-isel will operate
// on, or INVALID_SIMPLE_VALUE_TYPE when the type can't be handled here.
// Small integers (i1/i8/i16) widen to i32 and f16 to f32; reference types
// and 128-bit SIMD vectors are only legal when the corresponding
// subtarget feature is enabled.
MVT::SimpleValueType getLegalType(MVT::SimpleValueType VT) {
switch (VT) {
case MVT::i1:
case MVT::i8:
case MVT::i16:
return MVT::i32;
case MVT::i32:
case MVT::i64:
case MVT::f32:
case MVT::f64:
return VT;
case MVT::funcref:
case MVT::externref:
if (Subtarget->hasReferenceTypes())
return VT;
break;
case MVT::exnref:
// exnref additionally requires exception handling on top of
// reference types.
if (Subtarget->hasReferenceTypes() && Subtarget->hasExceptionHandling())
return VT;
break;
case MVT::f16:
return MVT::f32;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v4f32:
case MVT::v2i64:
case MVT::v2f64:
if (Subtarget->hasSIMD128())
return VT;
break;
default:
break;
}
return MVT::INVALID_SIMPLE_VALUE_TYPE;
}
|
movl %esi, %eax
cmpl $0x4d, %esi
jg 0x133cd0e
cmpl $0x3c, %esi
ja 0x133cd33
movl %esi, %ecx
movl $0x3180, %edx # imm = 0x3180
btq %rcx, %rdx
jb 0x133cd0d
movabsq $0x1004008000000000, %rdx # imm = 0x1004008000000000
btq %rcx, %rdx
jb 0x133cd54
cmpq $0xb, %rcx
jne 0x133cd33
movb $0xc, %al
retq
leal -0x4e(%rsi), %ecx
cmpl $0x32, %ecx
jbe 0x133cd44
leal -0xc2(%rsi), %ecx
cmpl $0x2, %ecx
jae 0x133cd67
movq 0xb0(%rdi), %rcx
cmpb $0x0, 0x12d(%rcx)
je 0x133cd64
jmp 0x133cd0d
movb $0x7, %al
leal -0x5(%rsi), %ecx
cmpl $0x2, %ecx
jb 0x133cd0d
cmpl $0x2, %esi
je 0x133cd0d
jmp 0x133cd64
movabsq $0x4000100000001, %rdx # imm = 0x4000100000001
btq %rcx, %rdx
jae 0x133cd16
movq 0xb0(%rdi), %rcx
cmpl $0x0, 0x120(%rcx)
jg 0x133cd0d
xorl %eax, %eax
retq
cmpl $0xc4, %esi
jne 0x133cd64
movq 0xb0(%rdi), %rcx
cmpb $0x1, 0x12d(%rcx)
jne 0x133cd64
cmpb $0x0, 0x126(%rcx)
jne 0x133cd0d
jmp 0x133cd64
|
/Target/WebAssembly/WebAssemblyFastISel.cpp
|
llvm::X86Subtarget::swiftAsyncContextIsDynamicallySet() const
|
// Returns true when the target OS version predates reliable support for
// the Swift extended frame (thresholds: iOS/tvOS 15, watchOS 8,
// macOS/Darwin 12), meaning the flag must be decided at run time.
bool swiftAsyncContextIsDynamicallySet() const {
// Older OS versions (particularly system unwinders) are confused by the
// Swift extended frame, so when building code that might be run on them we
// must dynamically query the concurrency library to determine whether
// extended frames should be flagged as present.
const Triple &TT = getTargetTriple();
unsigned Major = TT.getOSVersion().getMajor();
switch(TT.getOS()) {
default:
return false;
case Triple::IOS:
case Triple::TvOS:
return Major < 15;
case Triple::WatchOS:
return Major < 8;
case Triple::MacOSX:
case Triple::Darwin:
return Major < 12;
}
}
|
pushq %rbx
movq %rdi, %rbx
addq $0x1f0, %rdi # imm = 0x1F0
callq 0x2b04486
movq %rax, %rcx
movl 0x21c(%rbx), %edx
xorl %eax, %eax
cmpl $0x8, %edx
jle 0x13aca55
cmpl $0x9, %edx
je 0x13aca64
cmpl $0x1b, %edx
je 0x13aca69
cmpl $0x1a, %edx
je 0x13aca5f
jmp 0x13aca6f
cmpl $0x1, %edx
je 0x13aca64
cmpl $0x5, %edx
jne 0x13aca6f
cmpl $0xf, %ecx
jmp 0x13aca6c
cmpl $0xc, %ecx
jmp 0x13aca6c
cmpl $0x8, %ecx
setb %al
popq %rbx
retq
nop
|
/Target/X86/X86Subtarget.h
|
LowerAndToBT(llvm::SDValue, llvm::ISD::CondCode, llvm::SDLoc const&, llvm::SelectionDAG&, llvm::X86::CondCode&)
|
// Try to turn an (and X, Y) that feeds an eq/ne comparison into an
// X86ISD::BT (bit test) node. Recognized shapes: (and (shl 1, N), Y),
// (and (srl X, N), 1), and (and X, Pow2Const) where the constant can't be
// used efficiently by TEST. On success returns the BT node and writes the
// condition to branch on into X86CC (COND_AE for SETEQ, COND_B for
// SETNE); otherwise returns an empty SDValue.
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
SelectionDAG &DAG, X86::CondCode &X86CC) {
assert(And.getOpcode() == ISD::AND && "Expected AND node!");
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
// Look through truncates on either operand.
if (Op0.getOpcode() == ISD::TRUNCATE)
Op0 = Op0.getOperand(0);
if (Op1.getOpcode() == ISD::TRUNCATE)
Op1 = Op1.getOperand(0);
SDValue Src, BitNo;
// Canonicalize a SHL operand into Op0.
if (Op1.getOpcode() == ISD::SHL)
std::swap(Op0, Op1);
if (Op0.getOpcode() == ISD::SHL) {
if (isOneConstant(Op0.getOperand(0))) {
// If we looked past a truncate, check that it's only truncating away
// known zeros.
unsigned BitWidth = Op0.getValueSizeInBits();
unsigned AndBitWidth = And.getValueSizeInBits();
if (BitWidth > AndBitWidth) {
KnownBits Known = DAG.computeKnownBits(Op0);
if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
return SDValue();
}
Src = Op1;
BitNo = Op0.getOperand(1);
}
} else if (Op1.getOpcode() == ISD::Constant) {
ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
uint64_t AndRHSVal = AndRHS->getZExtValue();
SDValue AndLHS = Op0;
// (and (srl X, N), 1) tests bit N of X directly.
if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
Src = AndLHS.getOperand(0);
BitNo = AndLHS.getOperand(1);
} else {
// Use BT if the immediate can't be encoded in a TEST instruction or we
// are optimizing for size and the immediate won't fit in a byte.
bool OptForSize = DAG.shouldOptForSize();
if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
isPowerOf2_64(AndRHSVal)) {
Src = AndLHS;
BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
Src.getValueType());
}
}
}
// No patterns found, give up.
if (!Src.getNode())
return SDValue();
// Remove any bit flip.
if (isBitwiseNot(Src)) {
Src = Src.getOperand(0);
CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
}
// Attempt to create the X86ISD::BT node.
if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
return BT;
}
return SDValue();
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movq %r9, %r15
movq %r8, %rbx
movq %rcx, 0x38(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x70(%rsp)
movq 0x28(%rdi), %rax
movups (%rax), %xmm0
movaps %xmm0, 0x10(%rsp)
movq 0x28(%rax), %r13
movq 0x10(%rsp), %rcx
cmpl $0xd7, 0x18(%rcx)
jne 0x13f0f6b
movq 0x28(%rcx), %rcx
movl 0x8(%rcx), %esi
movl %esi, 0x18(%rsp)
movq (%rcx), %rcx
movq %rcx, 0x10(%rsp)
cmpl $0xd7, 0x18(%r13)
jne 0x13f0f82
movq 0x28(%r13), %rax
movq (%rax), %r13
movl 0x8(%rax), %r12d
jmp 0x13f0f86
movl 0x30(%rax), %r12d
cmpl $0xbd, 0x18(%r13)
jne 0x13f0fa9
movq 0x10(%rsp), %rax
movl 0x18(%rsp), %ecx
movq %r13, 0x10(%rsp)
movl %r12d, 0x18(%rsp)
movq %rax, %r13
movl %ecx, %r12d
movq 0x10(%rsp), %r14
movl 0x18(%r14), %eax
cmpl $0xbd, %eax
movq %r15, 0x60(%rsp)
movl %edx, 0x2c(%rsp)
movq %rbx, 0x30(%rsp)
jne 0x13f102d
movq 0x28(%r14), %rax
movq (%rax), %rdi
movl 0x8(%rax), %esi
callq 0x17767fa
testb %al, %al
je 0x13f117a
leaq 0x10(%rsp), %rdi
callq 0x9e6d1a
leaq 0x40(%rsp), %r14
movq %rax, (%r14)
movb %dl, 0x8(%r14)
movq %r14, %rdi
callq 0x2b60e74
movq %rax, %rbx
leaq 0x68(%rsp), %rdi
callq 0x9e6d1a
movq %rax, (%r14)
movb %dl, 0x8(%r14)
leaq 0x40(%rsp), %rdi
callq 0x2b60e74
subl %eax, %ebx
ja 0x13f11f9
movq 0x10(%rsp), %rax
movq 0x28(%rax), %rax
jmp 0x13f106c
cmpl $0xb, 0x18(%r13)
jne 0x13f117a
movq 0x58(%r13), %rcx
cmpl $0x41, 0x20(%rcx)
jb 0x13f1048
movq 0x18(%rcx), %rcx
jmp 0x13f104c
addq $0x18, %rcx
movq (%rcx), %rbp
movl 0x18(%rsp), %r13d
cmpl $0xbf, %eax
jne 0x13f1078
cmpq $0x1, %rbp
jne 0x13f1078
movq 0x28(%r14), %rax
movq (%rax), %r13
movl 0x8(%rax), %r12d
movq 0x28(%rax), %r15
movl 0x30(%rax), %ebx
jmp 0x13f1185
movq %rbx, %rdi
callq 0x1763398
movq %rbp, %rcx
shrq $0x20, %rcx
jne 0x13f111b
cmpq $0x100, %rbp # imm = 0x100
setb %cl
xorb $0x1, %al
xorl %r12d, %r12d
orb %cl, %al
jne 0x13f117d
movq %r13, %rdi
movq %rbp, %rax
shrq %rax
movabsq $0x5555555555555555, %rcx # imm = 0x5555555555555555
andq %rax, %rcx
movq %rbp, %rax
subq %rcx, %rax
movabsq $0x3333333333333333, %rcx # imm = 0x3333333333333333
movq %rax, %rdx
andq %rcx, %rdx
shrq $0x2, %rax
andq %rcx, %rax
addq %rdx, %rax
movq %rax, %rcx
shrq $0x4, %rcx
addq %rax, %rcx
movabsq $0xf0f0f0f0f0f0f0f, %rax # imm = 0xF0F0F0F0F0F0F0F
andq %rcx, %rax
movabsq $0x101010101010101, %rcx # imm = 0x101010101010101
imulq %rax, %rcx
shrq $0x38, %rcx
movl $0x0, %r13d
movl $0x0, %ebx
movl $0x0, %r15d
cmpl $0x2, %ecx
jb 0x13f1127
jmp 0x13f1185
movq %r13, %rdi
leaq -0x1(%rbp), %rax
testq %rax, %rbp
jne 0x13f117a
movl $0x40, %esi
movl $0x40, %eax
decq %rbp
je 0x13f113e
bsrq %rbp, %rax
xorq $0x3f, %rax
subq %rax, %rsi
movq 0x30(%r14), %rax
movq %rdi, %r12
movq %rdi, %rcx
shlq $0x4, %rcx
movq 0x8(%rax,%rcx), %r8
movl (%rax,%rcx), %ecx
movl $0x0, (%rsp)
movq 0x30(%rsp), %rdi
movq 0x38(%rsp), %rdx
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r15
movl %edx, %ebx
movq %r14, %r13
jmp 0x13f1185
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %ebx, %ebx
xorl %r15d, %r15d
testq %r13, %r13
je 0x13f1251
xorl %r14d, %r14d
movq %r13, %rdi
movl %r12d, %esi
xorl %edx, %edx
callq 0x17788d1
testb %al, %al
je 0x13f11bf
movq 0x28(%r13), %rax
movq (%rax), %r13
movl 0x8(%rax), %r12d
xorl %eax, %eax
cmpl $0x11, 0x2c(%rsp)
sete %al
leal (%rax,%rax,4), %ebp
addl $0x11, %ebp
jmp 0x13f11c3
movl 0x2c(%rsp), %ebp
movq %r13, %rdi
movl %r12d, %esi
movq %r15, %rdx
movl %ebx, %ecx
movq 0x38(%rsp), %r8
movq 0x30(%rsp), %r9
callq 0x147933b
testq %rax, %rax
je 0x13f1254
xorl %ecx, %ecx
cmpl $0x11, %ebp
sete %cl
orl $0x2, %ecx
movq 0x60(%rsp), %rsi
movl %ecx, (%rsi)
movl %edx, %r14d
jmp 0x13f1256
movq 0x10(%rsp), %rdx
movl 0x18(%rsp), %ecx
leaq 0x40(%rsp), %r14
movq %r14, %rdi
movq 0x30(%rsp), %rsi
xorl %r8d, %r8d
callq 0x176a228
movq %r14, %rdi
callq 0x781256
movl 0x18(%r14), %ecx
cmpl %ebx, %eax
jae 0x13f1268
cmpl $0x41, %ecx
jb 0x13f123b
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x13f123b
callq 0x7802b0
cmpl $0x41, 0x48(%rsp)
jb 0x13f1251
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x13f1251
callq 0x7802b0
xorl %r14d, %r14d
xorl %eax, %eax
movl %r14d, %edx
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
cmpl $0x41, %ecx
jb 0x13f127c
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x13f127c
callq 0x7802b0
cmpl $0x41, 0x48(%rsp)
jb 0x13f1022
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x13f1022
callq 0x7802b0
jmp 0x13f1022
|
/Target/X86/X86ISelLowering.cpp
|
llvm::X86TargetLowering::shouldExpandAtomicLoadInIR(llvm::LoadInst*) const
|
// Decide how AtomicExpandPass should expand an atomic load. Returns None
// when the load can be lowered natively (64-bit load on a 32-bit target
// with SSE or X87, or a 128-bit load on a 64-bit target with AVX);
// otherwise defers to needsCmpXchgNb() to choose CmpXChg vs None. The
// FP-register paths are only usable when neither the NoImplicitFloat
// attribute nor soft-float mode is in effect.
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
Type *MemType = LI->getType();
if (!LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
!Subtarget.useSoftFloat()) {
// If this a 64 bit atomic load on a 32-bit target and SSE2 is enabled, we
// can use movq to do the load. If we have X87 we can load into an 80-bit
// X87 register and store it to a stack temporary.
if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
(Subtarget.hasSSE1() || Subtarget.hasX87()))
return AtomicExpansionKind::None;
// If this is a 128-bit load with AVX, 128-bit SSE loads/stores are atomic.
if (MemType->getPrimitiveSizeInBits() == 128 && Subtarget.is64Bit() &&
Subtarget.hasAVX())
return AtomicExpansionKind::None;
}
return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
}
|
pushq %r14
pushq %rbx
subq $0x18, %rsp
movq %rdi, %r14
movq 0x8(%rsi), %rbx
movq %rsi, %rdi
callq 0x2a51280
movq %rax, %rdi
movl $0x1c, %esi
callq 0x2a40a66
testb %al, %al
jne 0x1401ea4
movq 0x4e0f8(%r14), %rax
cmpb $0x0, 0x1e6(%rax)
jne 0x1401ea4
movq %rbx, %rdi
callq 0x2a9a7de
leaq 0x8(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
cmpq $0x40, %rax
jne 0x1401e6b
movq 0x4e0f8(%r14), %rax
cmpb $0x0, 0x1c5(%rax)
je 0x1401ebd
movq %rbx, %rdi
callq 0x2a9a7de
leaq 0x8(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
cmpq $0x80, %rax
jne 0x1401ea4
movq 0x4e0f8(%r14), %rax
cmpb $0x1, 0x1c5(%rax)
jne 0x1401ea4
cmpl $0x6, 0x130(%rax)
jg 0x1401ecf
movq %r14, %rdi
movq %rbx, %rsi
callq 0x1401cd2
movzbl %al, %eax
shll $0x2, %eax
addq $0x18, %rsp
popq %rbx
popq %r14
retq
cmpl $0x0, 0x130(%rax)
jg 0x1401ecf
cmpb $0x0, 0x1bb(%rax)
je 0x1401e6b
xorl %eax, %eax
jmp 0x1401eb5
nop
|
/Target/X86/X86ISelLowering.cpp
|
LowerFCOPYSIGN(llvm::SDValue, llvm::SelectionDAG&)
|
// Lower ISD::FCOPYSIGN with SSE/AVX bitwise FP logic: isolate the sign
// bit of the sign operand (FAND with a sign mask), clear the sign bit of
// the magnitude (FAND with the complement mask, or constant-folded when
// the magnitude is a constant), then FOR the two together. Scalar types
// other than f128 are widened to fake 128-bit vectors because SSE has no
// scalar FP logic instructions.
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue Mag = Op.getOperand(0);
SDValue Sign = Op.getOperand(1);
SDLoc dl(Op);
// If the sign operand is smaller, extend it first.
MVT VT = Op.getSimpleValueType();
if (Sign.getSimpleValueType().bitsLT(VT))
Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
// And if it is bigger, shrink it first.
if (Sign.getSimpleValueType().bitsGT(VT))
Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
// At this point the operands and the result should have the same
// type, and that won't be f80 since that is not custom lowered.
bool IsF128 = (VT == MVT::f128);
assert(VT.isFloatingPoint() && VT != MVT::f80 &&
DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
"Unexpected type in LowerFCOPYSIGN");
const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
// Perform all scalar logic operations as 16-byte vectors because there are no
// scalar FP logic instructions in SSE.
// TODO: This isn't necessary. If we used scalar types, we might avoid some
// unnecessary splats, but we might miss load folding opportunities. Should
// this decision be based on OptimizeForSize?
bool IsFakeVector = !VT.isVector() && !IsF128;
MVT LogicVT = VT;
if (IsFakeVector)
LogicVT = (VT == MVT::f64) ? MVT::v2f64
: (VT == MVT::f32) ? MVT::v4f32
: MVT::v8f16;
// The mask constants are automatically splatted for vector types.
unsigned EltSizeInBits = VT.getScalarSizeInBits();
SDValue SignMask = DAG.getConstantFP(
APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
SDValue MagMask = DAG.getConstantFP(
APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
// First, clear all bits but the sign bit from the second operand (sign).
if (IsFakeVector)
Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
// Next, clear the sign bit from the first operand (magnitude).
// TODO: If we had general constant folding for FP logic ops, this check
// wouldn't be necessary.
SDValue MagBits;
if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
APFloat APF = Op0CN->getValueAPF();
APF.clearSign();
MagBits = DAG.getConstantFP(APF, dl, LogicVT);
} else {
// If the magnitude operand wasn't a constant, we need to AND out the sign.
if (IsFakeVector)
Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
}
// OR the magnitude value with the sign bit.
SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
// For fake vectors, extract the scalar result back out of lane 0.
return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
DAG.getIntPtrConstant(0, dl));
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x188, %rsp # imm = 0x188
movq %rdx, %r12
movl %esi, %ebx
movq %rdi, %r14
movq 0x28(%rdi), %rax
movq (%rax), %rcx
movq %rcx, 0x78(%rsp)
movl 0x8(%rax), %ecx
movl %ecx, 0x64(%rsp)
movl 0xc(%rax), %ecx
movl %ecx, 0x58(%rsp)
movq 0x28(%rax), %r13
movl 0x30(%rax), %edx
movl 0x34(%rax), %eax
movl %eax, 0x50(%rsp)
movq 0x48(%rdi), %rsi
movq %rsi, 0x30(%rsp)
testq %rsi, %rsi
je 0x140fc19
movq %rdx, %r15
leaq 0x30(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq %r15, %rdx
movl 0x44(%r14), %eax
movl %eax, 0x38(%rsp)
movq 0x30(%r14), %rax
movl %ebx, %ecx
shlq $0x4, %rcx
movzbl (%rax,%rcx), %ebx
movq 0x30(%r13), %rax
movq %rdx, %rcx
shlq $0x4, %rcx
movzbl (%rax,%rcx), %ecx
shll $0x4, %ecx
leaq 0x1dbf0f8(%rip), %rdi # 0x31ced40
movq -0x10(%rcx,%rdi), %rax
movq %rbx, %r14
decq %r14
movq %r14, %rsi
shlq $0x4, %rsi
leaq (%rdi,%rsi), %rbp
movq (%rsi,%rdi), %r15
testb $0x1, -0x8(%rcx,%rdi)
movzbl %bl, %ecx
movq %rcx, 0x40(%rsp)
je 0x140fc77
testb $0x1, 0x8(%rbp)
je 0x140fcc6
cmpq %r15, %rax
jae 0x140fcc6
movq %r13, 0xe8(%rsp)
movl %edx, 0xf0(%rsp)
movl 0x50(%rsp), %eax
movl %eax, 0xf4(%rsp)
movups 0xe8(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movl $0xe5, %esi
movq 0x40(%rsp), %rcx
xorl %r8d, %r8d
callq 0x176388a
leaq 0x1dbf07d(%rip), %rdi # 0x31ced40
movq %rax, %r13
movq 0x30(%r13), %rax
movl %edx, %ecx
shlq $0x4, %rcx
movzbl (%rax,%rcx), %ecx
shll $0x4, %ecx
movq -0x10(%rcx,%rdi), %rax
testb $0x1, -0x8(%rcx,%rdi)
movq %r12, 0x80(%rsp)
jne 0x140fcf5
testb $0x1, 0x8(%rbp)
jne 0x140fd78
cmpq %r15, %rax
jbe 0x140fd78
movq %r13, 0xd8(%rsp)
movl %edx, 0xe0(%rsp)
movl 0x50(%rsp), %eax
movl %eax, 0xe4(%rsp)
leaq 0x30(%rsp), %r15
movq %r12, %rdi
xorl %esi, %esi
movq %r15, %rdx
movl $0x1, %ecx
callq 0x17638c6
movq %rax, 0x178(%rsp)
movl %edx, 0x180(%rsp)
movups 0x178(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xd8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r12, %rdi
movl $0xe2, %esi
movq %r15, %rdx
movq 0x40(%rsp), %r12
movl %r12d, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movq %rax, %r13
movq %rdx, 0x48(%rsp)
jmp 0x140fd82
movq %rdx, 0x48(%rsp)
movq 0x40(%rsp), %r12
cmpb $0xf, %r12b
sete %bpl
movl %r12d, %edi
xorl %esi, %esi
callq 0xa4ff9e
movq %rax, %r15
leal -0x11(%r12), %eax
cmpb $-0x53, %al
setb %sil
orb %bpl, %sil
cmpb $0xc, %r12b
movl $0x6e, %ecx
movl $0x5c, %edx
cmovel %ecx, %edx
cmpb $0xd, %r12b
movq %r12, %rcx
movl $0x80, %r12d
cmovnel %edx, %r12d
movb %sil, 0x2b(%rsp)
testb %sil, %sil
cmovnel %ecx, %r12d
cmpb $-0x54, %al
ja 0x140fde2
leaq 0x1dbfc62(%rip), %rax # 0x31cfa40
movb (%r14,%rax), %bl
movzbl %bl, %eax
shll $0x4, %eax
leaq 0x1dbef51(%rip), %rcx # 0x31ced40
movl -0x10(%rax,%rcx), %ebx
leaq 0xc8(%rsp), %r14
movq %r14, %rdi
movl %ebx, %esi
callq 0x9f2916
leaq 0x160(%rsp), %rbp
movq %rbp, %rdi
movq %r15, %rsi
movq %r14, %rdx
callq 0x816006
movzbl %r12b, %ecx
leaq 0x158(%rsp), %rsi
leaq 0x30(%rsp), %rdx
movq 0x80(%rsp), %r12
movq %r12, %rdi
movl %ecx, 0x2c(%rsp)
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17656c0
movq %rax, 0x70(%rsp)
movl %edx, 0x60(%rsp)
movq %rbp, %rdi
callq 0x81603c
cmpl $0x41, 0x8(%r14)
jb 0x140fe70
movq 0xc8(%rsp), %rdi
testq %rdi, %rdi
je 0x140fe70
callq 0x7802b0
leaq 0xc8(%rsp), %r14
movq %r14, %rdi
movl %ebx, %esi
callq 0x9f28ce
leaq 0x160(%rsp), %rbx
movq %rbx, %rdi
movq %r15, %rsi
movq %r14, %rdx
callq 0x816006
leaq 0x158(%rsp), %rsi
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movl 0x2c(%rsp), %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17656c0
movq %rax, 0x68(%rsp)
movl %edx, 0x5c(%rsp)
movq %rbx, %rdi
callq 0x81603c
cmpl $0x41, 0x8(%r14)
movl 0x50(%rsp), %ebp
jb 0x140fee5
movq 0xc8(%rsp), %rdi
testq %rdi, %rdi
je 0x140fee5
callq 0x7802b0
movb 0x2b(%rsp), %r15b
testb %r15b, %r15b
movq 0x78(%rsp), %r14
movl 0x64(%rsp), %ebx
je 0x140ff03
movq 0x48(%rsp), %rdx
movl 0x2c(%rsp), %ecx
jmp 0x140ff4d
movq %r13, 0xb8(%rsp)
movq 0x48(%rsp), %rax
movl %eax, 0xc0(%rsp)
movl %ebp, 0xc4(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movl $0xa6, %esi
movl 0x2c(%rsp), %r13d
movl %r13d, %ecx
xorl %r8d, %r8d
callq 0x176388a
movl %r13d, %ecx
movq %rax, %r13
movq %r13, 0xa8(%rsp)
movl %edx, 0xb0(%rsp)
movl %ebp, 0xb4(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x148(%rsp)
movl 0x60(%rsp), %eax
movl %eax, 0x150(%rsp)
movups 0x148(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xa8(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movl $0x1ea, %esi # imm = 0x1EA
movl %ecx, %ebp
xorl %r8d, %r8d
callq 0x17638a8
movq %rax, 0x50(%rsp)
movl %edx, %r13d
movq %r14, %rdi
movl %ebx, %esi
xorl %edx, %edx
callq 0x17778da
testq %rax, %rax
je 0x141001c
movq 0x58(%rax), %rsi
leaq 0x160(%rsp), %r15
addq $0x20, %rsi
movq %r15, %rdi
callq 0x81e6da
leaq 0x158(%rsp), %rbx
movq %rbx, %rdi
callq 0x149e236
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movq %rbx, %rsi
movl %ebp, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17656c0
movq %rax, %rbx
movl %edx, %r14d
movq %r15, %rdi
movb 0x2b(%rsp), %r15b
callq 0x81603c
jmp 0x14100d7
testb %r15b, %r15b
je 0x141002b
movl 0x58(%rsp), %eax
movl 0x2c(%rsp), %ebp
jmp 0x1410073
movq %r14, 0x98(%rsp)
movl %ebx, 0xa0(%rsp)
movl 0x58(%rsp), %ebx
movl %ebx, 0xa4(%rsp)
movups 0x98(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movl $0xa6, %esi
movl 0x2c(%rsp), %ebp
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x176388a
movq %rax, %r14
movl %ebx, %eax
movl %edx, %ebx
movq %r14, 0x88(%rsp)
movl %ebx, 0x90(%rsp)
movl %eax, 0x94(%rsp)
movq 0x68(%rsp), %rax
movq %rax, 0x138(%rsp)
movl 0x5c(%rsp), %eax
movl %eax, 0x140(%rsp)
movups 0x138(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x88(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movl $0x1ea, %esi # imm = 0x1EA
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movq %rax, %rbx
movl %edx, %r14d
movq %rbx, 0x128(%rsp)
movl %r14d, 0x130(%rsp)
movq 0x50(%rsp), %rax
movq %rax, 0x118(%rsp)
movl %r13d, 0x120(%rsp)
movups 0x118(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x128(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x30(%rsp), %rdx
movq %r12, %rdi
movl $0x1eb, %esi # imm = 0x1EB
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movq %rax, %rbx
movl %edx, %ebp
testb %r15b, %r15b
jne 0x141019e
movq %rbx, 0x108(%rsp)
movl %ebp, 0x110(%rsp)
leaq 0x30(%rsp), %rbx
movq %r12, %rdi
xorl %esi, %esi
movq %rbx, %rdx
xorl %ecx, %ecx
callq 0x17638c6
movq %rax, 0xf8(%rsp)
movl %edx, 0x100(%rsp)
movups 0xf8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x108(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r12, %rdi
movl $0x9d, %esi
movq %rbx, %rdx
movq 0x40(%rsp), %rcx
xorl %r8d, %r8d
callq 0x17638a8
movq %rax, %rbx
movl %edx, %ebp
movq 0x30(%rsp), %rsi
testq %rsi, %rsi
je 0x14101b2
leaq 0x30(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rax
movl %ebp, %edx
addq $0x188, %rsp # imm = 0x188
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/X86/X86ISelLowering.cpp
|
isRepeatedShuffleMask(unsigned int, llvm::MVT, llvm::ArrayRef<int>, llvm::SmallVectorImpl<int>&)
|
// Check whether Mask is the same shuffle repeated in every
// LaneSizeInBits-wide lane of VT. On success, RepeatedMask holds the
// per-lane mask, with second-vector indices rebased to start at LaneSize
// instead of Size. Returns false if any element crosses lanes or if two
// lanes disagree on a non-undef slot.
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
RepeatedMask.assign(LaneSize, -1);
int Size = Mask.size();
for (int i = 0; i < Size; ++i) {
assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
if (Mask[i] < 0)
continue;
if ((Mask[i] % Size) / LaneSize != i / LaneSize)
// This entry crosses lanes, so there is no way to model this shuffle.
return false;
// Ok, handle the in-lane shuffles by detecting if and when they repeat.
// Adjust second vector indices to start at LaneSize instead of Size.
int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
: Mask[i] % LaneSize + LaneSize;
if (RepeatedMask[i % LaneSize] < 0)
// This is the first non-undef entry in this slot of a 128-bit lane.
RepeatedMask[i % LaneSize] = LocalM;
else if (RepeatedMask[i % LaneSize] != LocalM)
// Found a mismatch with the repeated mask.
return false;
}
return true;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %r8, %r12
movq %rcx, %rbx
movq %rdx, %r14
movl %edi, %eax
leal -0x11(%rsi), %ecx
cmpb $-0x54, %cl
ja 0x1488563
movzbl %sil, %ecx
leaq 0x1d474e2(%rip), %rdx # 0x31cfa40
movb -0x1(%rcx,%rdx), %sil
movzbl %sil, %ecx
shll $0x4, %ecx
leaq 0x1d467cf(%rip), %rsi # 0x31ced40
xorl %r13d, %r13d
xorl %edx, %edx
divq -0x10(%rcx,%rsi)
movq %rax, %r15
movq %r12, %rdi
movq %rax, %rsi
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
callq 0xa54ada
testl %ebx, %ebx
setle %cl
jle 0x14885fe
movq (%r12), %r10
movl %ebx, %r11d
andl $0x7fffffff, %r11d # imm = 0x7FFFFFFF
xorl %esi, %esi
movslq (%r14,%rsi,4), %rdi
testq %rdi, %rdi
js 0x14885f3
movl %edi, %eax
xorl %edx, %edx
divl %ebx
movl %edx, %eax
xorl %edx, %edx
divq %r15
movq %rax, %r9
movq %rsi, %rax
xorl %edx, %edx
divq %r15
cmpq %rax, %r9
jne 0x14885fe
movq %rdx, %r8
movq %rdi, %rax
xorl %edx, %edx
divq %r15
cmpl %ebx, %edi
movl %r15d, %eax
cmovll %r13d, %eax
addl %edx, %eax
movl (%r10,%r8,4), %edx
testl %edx, %edx
js 0x14885ef
cmpl %eax, %edx
je 0x14885f3
jmp 0x14885fe
movl %eax, (%r10,%r8,4)
incq %rsi
cmpq %r11, %rsi
setae %cl
jne 0x14885a5
andb $0x1, %cl
movl %ecx, %eax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/Target/X86/X86ISelLowering.cpp
|
lowerV4I64Shuffle(llvm::SDLoc const&, llvm::ArrayRef<int>, llvm::APInt const&, llvm::SDValue, llvm::SDValue, llvm::X86Subtarget const&, llvm::SelectionDAG&)
|
// Lower a 4-lane 64-bit integer shuffle (v4i64; requires AVX2). Tries a
// sequence of progressively more general strategies — 2x128-bit lane
// shuffle, blend, broadcast, shifts, VPERMQ/PSHUFD for single-input
// masks, VALIGN/VEXPAND (VLX), PALIGNR, unpacks, permute+blend — and
// finally falls back to a decomposed shuffle/blend merge.
static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable, SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
Subtarget, DAG))
return V;
if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
Subtarget, DAG))
return Broadcast;
// Try to use shift instructions if fast.
if (Subtarget.preferLowerShuffleAsShift())
if (SDValue Shift =
lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
Subtarget, DAG, /*BitwiseOnly*/ true))
return Shift;
if (V2.isUndef()) {
// When the shuffle is mirrored between the 128-bit lanes of the unit, we
// can use lower latency instructions that will operate on both lanes.
SmallVector<int, 2> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
SmallVector<int, 4> PSHUFDMask;
narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
return DAG.getBitcast(
MVT::v4i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
DAG.getBitcast(MVT::v8i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
// AVX2 provides a direct instruction for permuting a single input across
// lanes.
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Try to use shift instructions.
if (SDValue Shift =
lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable, Subtarget,
DAG, /*BitwiseOnly*/ false))
return Shift;
// If we have VLX support, we can use VALIGN or VEXPAND.
if (Subtarget.hasVLX()) {
if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Rotate;
if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
DAG, Subtarget))
return V;
}
// Try to use PALIGNR.
if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
Subtarget, DAG))
return Rotate;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
return V;
bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
// If we have one input in place, then we can permute the other input and
// blend the result.
if (V1IsInPlace || V2IsInPlace)
return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
Subtarget, DAG);
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
return V;
// Try to lower to PERMQ(BLENDD(V1,V2)).
if (SDValue V =
lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
return V;
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either inputs are already in place,
// we will be able to shuffle even across lanes the other input in a single
// instruction so skip this pattern.
if (!V1IsInPlace && !V2IsInPlace)
if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
return Result;
// Otherwise fall back on generic blend lowering.
return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
Subtarget, DAG);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movl %r9d, %ebp
movq %r8, %r15
movq %rcx, %r13
movq %rsi, %r12
movq 0x208(%rsp), %rcx
movq 0x200(%rsp), %rax
movq %r8, 0x80(%rsp)
movl %r9d, 0x88(%rsp)
movq 0x1f0(%rsp), %r14
movl 0x1f8(%rsp), %ebx
movq %rsi, 0x170(%rsp)
movq %rdx, 0x40(%rsp)
movq %rdx, 0x178(%rsp)
movups 0x170(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rcx, 0x20(%rsp)
movq %rax, 0x18(%rsp)
movq %r13, 0x10(%rsp)
movq %rdi, 0x50(%rsp)
movl $0x50, %esi
movq %r8, %rdx
movl %r9d, %ecx
movq %r14, %r8
movl %ebx, %r9d
callq 0x1495094
testq %rax, %rax
jne 0x1492138
movq %r12, 0x160(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x168(%rsp)
movups 0x160(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x208(%rsp), %rax
movq %rax, 0x20(%rsp)
movq 0x200(%rsp), %rax
movq %rax, 0x18(%rsp)
movq %r13, 0x10(%rsp)
movq 0x50(%rsp), %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq %r14, %r8
movl %ebx, %r9d
callq 0x1484830
testq %rax, %rax
jne 0x1492138
movq %r13, 0x60(%rsp)
movq %r14, 0x48(%rsp)
movl %ebx, 0x3c(%rsp)
movq %r12, 0x150(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x158(%rsp)
movups 0x150(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x50(%rsp), %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq 0x200(%rsp), %rbx
movq %rbx, %r8
movq 0x208(%rsp), %r9
callq 0x147c1cb
testq %rax, %rax
je 0x149214a
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
cmpb $0x1, 0x1d5(%rbx)
movq 0x50(%rsp), %r13
movq %rbx, %rcx
movq 0x40(%rsp), %r14
movq 0x48(%rsp), %rbx
jne 0x14921c8
movq %r12, 0x140(%rsp)
movq %r14, 0x148(%rsp)
movups 0x140(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x208(%rsp), %rax
movq %rax, 0x20(%rsp)
movq %rcx, 0x18(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0x10(%rsp)
movl $0x1, 0x28(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq %rbx, %r8
movl 0x3c(%rsp), %r9d
callq 0x1483bd7
testq %rax, %rax
jne 0x1492138
cmpl $0x33, 0x18(%rbx)
jne 0x1492315
leaq 0x78(%rsp), %rcx
movq %rcx, -0x10(%rcx)
movabsq $0x200000000, %rax # imm = 0x200000000
movq %rax, -0x8(%rcx)
leaq 0x68(%rsp), %r8
movl $0x80, %edi
movl $0x50, %esi
movq %r12, %rdx
movq %r14, %rcx
callq 0x1488537
testb %al, %al
je 0x149256c
movq %r13, %r12
leaq 0x1a8(%rsp), %r13
movq %r13, -0x10(%r13)
movabsq $0x400000000, %rax # imm = 0x400000000
movq %rax, -0x8(%r13)
movq 0x68(%rsp), %rsi
movl 0x70(%rsp), %edx
leaq 0x198(%rsp), %r14
movl $0x2, %edi
movq %r14, %rcx
callq 0x271188c
movq 0x208(%rsp), %rbx
movq %rbx, %rdi
movl $0x40, %esi
xorl %edx, %edx
movq %r15, %rcx
movl %ebp, %r8d
callq 0x1763e4c
movq %rax, 0x130(%rsp)
movl %edx, 0x138(%rsp)
movq (%r14), %rdi
movl 0x8(%r14), %esi
callq 0x14836cb
movl %eax, %esi
movl $0x0, (%rsp)
movq %rbx, %rdi
movq %r12, %rdx
movl $0x5, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x120(%rsp)
movl %edx, 0x128(%rsp)
movups 0x120(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x130(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x28f, %esi # imm = 0x28F
movq %r12, %rdx
movl $0x40, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movl %edx, %r8d
movq %rbx, %rdi
movl $0x50, %esi
xorl %edx, %edx
movq %rax, %rcx
callq 0x1763e4c
movq (%r14), %rdi
cmpq %r13, %rdi
je 0x14925f1
movq %rax, %rbx
movl %edx, %ebp
callq 0x780910
movl %ebp, %edx
movq %rbx, %rax
jmp 0x14925f1
movq %r12, 0x100(%rsp)
movq %r14, 0x108(%rsp)
movups 0x100(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x208(%rsp), %rax
movq %rax, 0x20(%rsp)
movq 0x200(%rsp), %rax
movq %rax, 0x18(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0x10(%rsp)
movl $0x0, 0x28(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq %rbx, %r8
movl 0x3c(%rsp), %r9d
callq 0x1483bd7
testq %rax, %rax
jne 0x1492138
movq 0x200(%rsp), %rcx
cmpb $0x1, 0x1b2(%rcx)
movq %r12, 0x58(%rsp)
jne 0x1492466
movq %r12, 0xf0(%rsp)
movq %r14, 0xf8(%rsp)
movups 0xf0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r15, %r14
movl %ebp, %r15d
movq %r13, %rbp
movq 0x208(%rsp), %r13
movq %r13, 0x20(%rsp)
movq %rcx, 0x18(%rsp)
movq 0x60(%rsp), %r13
movq %r13, 0x10(%rsp)
movq %rbp, %rdi
movl $0x50, %esi
movq %r14, %rdx
movl %r15d, %ecx
movq %rbx, %r8
movl 0x3c(%rsp), %r9d
callq 0x14856e8
testq %rax, %rax
jne 0x1492138
movq 0x200(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x208(%rsp), %rax
movq %rax, 0x8(%rsp)
leaq 0x1f0(%rsp), %rax
movq %rax, (%rsp)
leaq 0x80(%rsp), %r9
movq %rbp, %rdi
movl $0x50, %esi
movq %r13, %rdx
movq 0x58(%rsp), %rcx
movq 0x40(%rsp), %r8
callq 0x1497aa3
movq 0x200(%rsp), %rcx
testq %rax, %rax
movq %rbp, %r13
movl %r15d, %ebp
movq %r14, %r15
movq 0x40(%rsp), %r14
jne 0x1492138
movq %r12, 0xe0(%rsp)
movq %r14, 0xe8(%rsp)
movups 0xe0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x208(%rsp), %r14
movq %r14, 0x18(%rsp)
movq %rcx, 0x10(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq %rbx, %r8
movl 0x3c(%rsp), %r9d
callq 0x1485a5d
testq %rax, %rax
jne 0x1492138
leaq 0x1f0(%rsp), %rax
movups (%rax), %xmm0
movups %xmm0, (%rsp)
movq %r14, 0x10(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq 0x58(%rsp), %rbx
movq %rbx, %rdx
movq 0x40(%rsp), %rcx
movq %r15, %r8
movl %ebp, %r9d
callq 0x14853ea
testq %rax, %rax
jne 0x1492138
movq %r14, %r9
movq 0x40(%rsp), %rdi
testl %edi, %edi
setle %cl
movl %ecx, %esi
movq %rdi, %r14
jle 0x149261a
movl (%rbx), %eax
cmpl %edi, %eax
setb %dl
testl %eax, %eax
setg %al
testb %dl, %al
jne 0x1492618
movl %edi, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
movl $0x1, %esi
movq %rsi, %rdx
cmpq %rsi, %rax
je 0x149255d
movl (%rbx,%rdx,4), %esi
testl %esi, %esi
sets %dil
cmpl %r14d, %esi
setae %r8b
orb %dil, %r8b
movl %esi, %esi
cmpq %rsi, %rdx
sete %dil
orb %r8b, %dil
leaq 0x1(%rdx), %rsi
cmpb $0x1, %dil
je 0x149252c
cmpq %rax, %rdx
setae %sil
movq %r14, %rdi
jmp 0x149261a
movups 0x80(%rsp), %xmm0
movaps %xmm0, 0x180(%rsp)
movq %r12, %rdi
movq %r14, %rsi
callq 0x14836cb
movl %eax, %esi
movl $0x0, (%rsp)
movq 0x208(%rsp), %rbx
movq %rbx, %rdi
movq %r13, %rdx
movl $0x5, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x110(%rsp)
movl %edx, 0x118(%rsp)
movups 0x110(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movaps 0x180(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x2a4, %esi # imm = 0x2A4
movq %r13, %rdx
movl $0x50, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movq 0x68(%rsp), %rdi
leaq 0x78(%rsp), %rcx
cmpq %rcx, %rdi
je 0x1492138
movq %rax, %rbx
movl %edx, %ebp
callq 0x780910
movl %ebp, %edx
movq %rbx, %rax
jmp 0x1492138
xorl %esi, %esi
testl %edi, %edi
jle 0x149264e
movl %r14d, %edi
andl $0x7fffffff, %edi # imm = 0x7FFFFFFF
xorl %r8d, %r8d
xorl %ecx, %ecx
movl (%rbx,%r8,4), %eax
testl %eax, %eax
js 0x1492643
xorl %edx, %edx
divl %r14d
cmpl $0x1, %eax
jne 0x1492643
cmpq %rdx, %r8
jne 0x149264e
incq %r8
cmpq %rdi, %r8
setae %cl
jne 0x149262c
orb %cl, %sil
testb $0x1, %sil
je 0x14926a6
movq %rbx, 0xd0(%rsp)
movq %r14, 0xd8(%rsp)
movups 0xd0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r9, 0x18(%rsp)
movq 0x200(%rsp), %rax
movq %rax, 0x10(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq 0x48(%rsp), %r8
movl 0x3c(%rsp), %r9d
callq 0x1485d36
jmp 0x1492138
movq %rbx, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
movups 0xc0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r9, 0x18(%rsp)
movq 0x200(%rsp), %rax
movq %rax, 0x10(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq 0x48(%rsp), %r8
movl 0x3c(%rsp), %r9d
callq 0x1495aa7
testq %rax, %rax
jne 0x1492138
movq %rbx, 0xb0(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0xb8(%rsp)
movups 0xb0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x208(%rsp), %rax
movq %rax, 0x10(%rsp)
movl $0x0, 0x18(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq %r15, %rdx
movl %ebp, %ecx
movq 0x48(%rsp), %rbx
movq %rbx, %r8
movl 0x3c(%rsp), %r9d
callq 0x1488a0e
testq %rax, %rax
jne 0x1492138
movq 0x58(%rsp), %rax
movq %rax, 0xa0(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0xa8(%rsp)
movups 0xa0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x208(%rsp), %rax
movq %rax, 0x10(%rsp)
movq %r13, %rdi
movl $0x50, %esi
movq %r15, %r13
movq %r15, %rdx
movl %ebp, %r15d
movl %ebp, %ecx
movq %rbx, %r8
movl 0x3c(%rsp), %r9d
callq 0x14970c8
testq %rax, %rax
je 0x14927b4
jmp 0x1492138
movq %r12, 0x90(%rsp)
movq %r14, 0x98(%rsp)
movups 0x90(%rsp), %xmm0
movups %xmm0, (%rsp)
movq 0x208(%rsp), %rax
movq %rax, 0x18(%rsp)
movq 0x200(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x50(%rsp), %rdi
movl $0x50, %esi
movq %r13, %rdx
movl %r15d, %ecx
jmp 0x1492692
|
/Target/X86/X86ISelLowering.cpp
|
llvm::createX86OptimizeLEAs()
|
FunctionPass *llvm::createX86OptimizeLEAs() { return new X86OptimizeLEAPass(); }
|
pushq %rax
movl $0x68, %edi
callq 0x7808d0
xorl %ecx, %ecx
movq %rcx, 0x8(%rax)
leaq 0x4428f0c(%rip), %rdx # 0x5932c5c
movq %rdx, 0x10(%rax)
movl $0x2, 0x18(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%rax)
movq %rcx, 0x30(%rax)
leaq 0x42d540b(%rip), %rdx # 0x57df178
movq %rdx, (%rax)
movups %xmm0, 0x38(%rax)
movl $0x0, 0x48(%rax)
movups %xmm0, 0x50(%rax)
movq %rcx, 0x60(%rax)
popq %rcx
retq
|
/Target/X86/X86OptimizeLEAs.cpp
|
(anonymous namespace)::X86SpeculativeLoadHardeningPass::saveEFLAGS(llvm::MachineBasicBlock&, llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>, llvm::DebugLoc const&)
|
unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc) {
// FIXME: Hard coding this to a 32-bit register class seems weird, but matches
// what instruction selection does.
Register Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
// We directly copy the FLAGS register and rely on later lowering to clean
// this up into the appropriate setCC instructions.
BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
++NumInstsInserted;
return Reg;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x40, %rsp
movq %rcx, %r13
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movq 0x40(%rdi), %rdi
leaq 0x42bfee2(%rip), %rsi # 0x57d5ad0
leaq 0x2a458c3(%rip), %rdx # 0x3f5b4b8
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, %ebx
movq (%r13), %rsi
movq %rsi, (%rsp)
testq %rsi, %rsi
je 0x1515c18
movq %rsp, %rdi
movl $0x1, %edx
callq 0x2a757d8
movq (%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x1515c3e
movq %rsp, %r13
leaq 0x8(%rsp), %rdx
movq %r13, %rdi
callq 0x2a759cc
movq $0x0, (%r13)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %r13
movups %xmm0, 0x8(%r13)
movq 0x48(%r12), %rax
movq $-0x260, %rcx # imm = 0xFDA0
addq 0x8(%rax), %rcx
movq %r15, %rdi
movq %r14, %rsi
movq %r13, %rdx
movl %ebx, %r8d
callq 0x90f593
movq %rdx, %rdi
leaq 0x20(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movabsq $0x1c00000000, %rcx # imm = 0x1C00000000
movq %rcx, (%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq (%r13), %rsi
testq %rsi, %rsi
je 0x1515cab
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq (%rsp), %rsi
testq %rsi, %rsi
je 0x1515cbc
movq %rsp, %rdi
callq 0x2a758fc
movl %ebx, %eax
addq $0x40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/Target/X86/X86SpeculativeLoadHardening.cpp
|
(anonymous namespace)::X86OutgoingValueAssigner::assignArg(unsigned int, llvm::EVT, llvm::MVT, llvm::MVT, llvm::CCValAssign::LocInfo, llvm::CallLowering::ArgInfo const&, llvm::ISD::ArgFlagsTy, llvm::CCState&)
|
bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
CCState &State) override {
bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
StackSize = State.getStackSize();
static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
X86::XMM3, X86::XMM4, X86::XMM5,
X86::XMM6, X86::XMM7};
if (!Info.IsFixed)
NumXMMRegs = State.getFirstUnallocated(XMMArgRegs);
return Res;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x10, %rsp
movl %r9d, %edx
movl %r8d, %eax
movq %rdi, %rbx
movq 0x50(%rsp), %r14
movq 0x38(%rsp), %r15
movl 0x30(%rsp), %ecx
movq 0x40(%rsp), %r8
movq 0x48(%rsp), %r9
movq %r14, (%rsp)
movl %esi, %edi
movl %eax, %esi
callq *0x8(%rbx)
movq 0x30(%r14), %rcx
movq %rcx, 0x28(%rbx)
cmpb $0x0, 0x58(%r15)
jne 0x151c53e
movq 0x40(%r14), %rsi
xorl %ecx, %ecx
testb $-0x80, 0x10(%rsi)
je 0x151c53b
xorl %edx, %edx
leaq 0x2a20985(%rip), %rdi # 0x3f3ce80
movl $0x8, %ecx
cmpq $0x7, %rdx
je 0x151c52e
movzwl 0x2(%rdi,%rdx,2), %r8d
incq %rdx
movl %r8d, %r9d
shrl $0x5, %r9d
movl (%rsi,%r9,4), %r9d
btl %r8d, %r9d
jb 0x151c500
leaq -0x1(%rdx), %rsi
cmpq $0x7, %rsi
setb %sil
jmp 0x151c535
xorl %esi, %esi
movl $0x8, %edx
testb %sil, %sil
cmovnel %edx, %ecx
movl %ecx, 0x30(%rbx)
addq $0x10, %rsp
popq %rbx
popq %r14
popq %r15
retq
|
/Target/X86/GISel/X86CallLowering.cpp
|
llvm::GISelCSEInfo::insertInstr(llvm::MachineInstr*, void*)
|
void GISelCSEInfo::insertInstr(MachineInstr *MI, void *InsertPos) {
assert(MI);
// If it exists in temporary insts, remove it.
TemporaryInsts.remove(MI);
auto *Node = getUniqueInstrForMI(MI);
insertNode(Node, InsertPos);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
addq $0xe8, %rdi
callq 0x155e44a
leaq 0x48(%r15), %rdi
movl $0x10, %esi
movl $0x4, %edx
callq 0x8f284c
movq $0x0, (%rax)
movq %r14, 0x8(%rax)
movq %r15, %rdi
movq %rax, %rsi
movq %rbx, %rdx
popq %rbx
popq %r14
popq %r15
jmp 0x155e31a
nop
|
/CodeGen/GlobalISel/CSEInfo.cpp
|
llvm::CallBase::countOperandBundlesOfType(unsigned int) const
|
unsigned countOperandBundlesOfType(uint32_t ID) const {
unsigned Count = 0;
for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
if (getOperandBundleAt(i).getTagID() == ID)
Count++;
return Count;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl %esi, %ebx
movq %rdi, %r14
callq 0x91f95a
testl %eax, %eax
je 0x1565727
movl %eax, %r15d
shlq $0x4, %r15
xorl %r12d, %r12d
xorl %ebp, %ebp
movq %r14, %rdi
callq 0x2a9ec18
movq (%rax,%r12), %rax
xorl %ecx, %ecx
cmpl %ebx, 0x8(%rax)
sete %cl
addl %ecx, %ebp
addq $0x10, %r12
cmpq %r12, %r15
jne 0x1565706
jmp 0x1565729
xorl %ebp, %ebp
movl %ebp, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/llvm/IR/InstrTypes.h
|
llvm::SmallVectorTemplateBase<std::function<void ()>, false>::moveElementsForGrow(std::function<void ()>*)
|
size_t size() const { return Size; }
|
pushq %r14
pushq %rbx
pushq %rax
movl 0x8(%rdi), %eax
testq %rax, %rax
je 0x156a58f
movq (%rdi), %rcx
shlq $0x5, %rax
addq $0x18, %rcx
xorl %edx, %edx
xorps %xmm0, %xmm0
movups %xmm0, (%rsi,%rdx)
movq $0x0, 0x10(%rsi,%rdx)
movq (%rcx,%rdx), %r8
movq %r8, 0x18(%rsi,%rdx)
cmpq $0x0, -0x8(%rcx,%rdx)
je 0x156a586
leaq (%rsi,%rdx), %r8
leaq (%rcx,%rdx), %r9
addq $-0x18, %r9
movups (%r9), %xmm1
movups %xmm1, (%r8)
movq 0x10(%r9), %r8
movq %r8, 0x10(%rsi,%rdx)
movups %xmm0, 0x10(%r9)
addq $0x20, %rdx
cmpq %rdx, %rax
jne 0x156a546
movl 0x8(%rdi), %r14d
testq %r14, %r14
je 0x156a5ca
movq (%rdi), %rax
shlq $0x5, %r14
leaq (%r14,%rax), %rbx
addq $-0x20, %rbx
negq %r14
movq 0x10(%rbx), %rax
testq %rax, %rax
je 0x156a5c0
movq %rbx, %rdi
movq %rbx, %rsi
movl $0x3, %edx
callq *%rax
addq $-0x20, %rbx
addq $0x20, %r14
jne 0x156a5aa
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/llvm/ADT/SmallVector.h
|
llvm::CombinerHelper::applyFunnelShiftToRotate(llvm::MachineInstr&)
|
void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
unsigned Opc = MI.getOpcode();
assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
bool IsFSHL = Opc == TargetOpcode::G_FSHL;
Observer.changingInstr(MI);
MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
: TargetOpcode::G_ROTR));
MI.removeOperand(2);
Observer.changedInstr(MI);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %rbx
xorl %r14d, %r14d
cmpw $0x89, 0x44(%rsi)
movq %rdi, %r15
setne %r14b
movq 0x10(%rdi), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
movq (%r15), %rax
movq 0x10(%rax), %rax
shll $0x5, %r14d
orq $-0x1180, %r14 # imm = 0xEE80
addq 0x8(%rax), %r14
movq %rbx, %rdi
movq %r14, %rsi
callq 0x1d3c794
movq %rbx, %rdi
movl $0x2, %esi
callq 0x1d3c904
movq 0x10(%r15), %rdi
movq (%rdi), %rax
movq 0x28(%rax), %rax
movq %rbx, %rsi
popq %rbx
popq %r14
popq %r15
jmpq *%rax
nop
|
/CodeGen/GlobalISel/CombinerHelper.cpp
|
llvm::CombinerHelper::applyUDivByConst(llvm::MachineInstr&)
|
void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
auto *NewMI = buildUDivUsingMul(MI);
replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
callq 0x1579fac
movq 0x20(%rax), %rax
movl 0x4(%rax), %ebp
movq 0x20(%rbx), %rax
movl 0x4(%rax), %r15d
movq %rbx, %rdi
callq 0x1d3deba
movq 0x8(%r14), %rsi
movq %r14, %rdi
movl %r15d, %edx
movl %ebp, %ecx
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x156b45a
|
/CodeGen/GlobalISel/CombinerHelper.cpp
|
llvm::CombinerHelper::matchTruncLshrBuildVectorFold(llvm::MachineInstr&, llvm::Register&)
|
bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI,
Register &MatchInfo) {
// Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
// y if K == size of vector element type
std::optional<ValueAndVReg> ShiftAmt;
if (!mi_match(MI.getOperand(1).getReg(), MRI,
m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))),
m_GCst(ShiftAmt))))
return false;
LLT MatchTy = MRI.getType(MatchInfo);
return ShiftAmt->Value.getZExtValue() == MatchTy.getSizeInBits() &&
MatchTy == MRI.getType(MI.getOperand(0).getReg());
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rdx, %r15
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x10(%rsp), %r12
movb $0x0, 0x18(%r12)
movq 0x20(%rsi), %rax
movl 0x24(%rax), %eax
movq 0x8(%rdi), %rsi
leaq 0x30(%rsp), %rdi
movq %rdx, 0x8(%rdi)
movq %r12, 0x10(%rdi)
leaq 0x8(%rsp), %rdx
movl %eax, (%rdx)
callq 0x158ac9e
testb %al, %al
je 0x157e55d
movl (%r15), %eax
testl %eax, %eax
jns 0x157e4fe
movq 0x8(%rbx), %rcx
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpl %eax, 0x1d0(%rcx)
jbe 0x157e4fe
movq 0x1c8(%rcx), %rcx
movq (%rcx,%rax,8), %rax
jmp 0x157e500
xorl %eax, %eax
movq %rax, 0x8(%rsp)
cmpl $0x41, 0x18(%rsp)
jb 0x157e511
movq 0x10(%rsp), %r12
movq (%r12), %r15
leaq 0x8(%rsp), %rdi
callq 0x94022c
leaq 0x30(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
cmpq %rax, %r15
jne 0x157e55d
movq 0x20(%r14), %rax
movl 0x4(%rax), %eax
testl %eax, %eax
jns 0x157e561
movq 0x8(%rbx), %rcx
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpl %eax, 0x1d0(%rcx)
jbe 0x157e561
movq 0x1c8(%rcx), %rcx
movq (%rcx,%rax,8), %rax
jmp 0x157e563
xorl %ebx, %ebx
jmp 0x157e56b
xorl %eax, %eax
cmpq %rax, 0x8(%rsp)
sete %bl
cmpb $0x1, 0x28(%rsp)
jne 0x157e58d
movb $0x0, 0x28(%rsp)
cmpl $0x41, 0x18(%rsp)
jb 0x157e58d
movq 0x10(%rsp), %rdi
testq %rdi, %rdi
je 0x157e58d
callq 0x7802b0
movl %ebx, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/CodeGen/GlobalISel/CombinerHelper.cpp
|
llvm::DenseMapBase<llvm::SmallDenseMap<long, long, 8u, llvm::DenseMapInfo<long, void>, llvm::detail::DenseMapPair<long, long>>, long, long, llvm::DenseMapInfo<long, void>, llvm::detail::DenseMapPair<long, long>>::moveFromOldBuckets(llvm::detail::DenseMapPair<long, long>*, llvm::detail::DenseMapPair<long, long>*)
|
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
initEmpty();
// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
DestBucket->getFirst() = std::move(B->getFirst());
::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
incrementNumEntries();
// Free the value.
B->getSecond().~ValueT();
}
B->getFirst().~KeyT();
}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movabsq $0x7fffffffffffffff, %r13 # imm = 0x7FFFFFFFFFFFFFFF
leaq 0x8(%rdi), %rax
andl $0x1, (%rdi)
movl $0x0, 0x4(%rdi)
cmoveq 0x8(%rdi), %rax
movl $0x8, %ecx
cmovel 0x10(%rdi), %ecx
testl %ecx, %ecx
je 0x1585a92
movl %ecx, %ecx
movabsq $0xfffffffffffffff, %rdx # imm = 0xFFFFFFFFFFFFFFF
addq %rdx, %rcx
andq %rcx, %rdx
andl $0x1, %ecx
negq %rcx
addq %rdx, %rcx
addq $0x2, %rcx
movq %rdx, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
addq $0x10, %rax
xorl %edx, %edx
movdqa 0x162f5fd(%rip), %xmm1 # 0x2bb5020
movdqa 0x162f605(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rdx, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0x1585a73
movq %r13, -0x10(%rax)
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0x1585a85
movq %r13, (%rax)
addq $0x2, %rdx
addq $0x20, %rax
cmpq %rdx, %rcx
jne 0x1585a33
cmpq %rbx, %r14
je 0x1585ad3
addq $-0x2, %r13
leaq 0x8(%rsp), %r12
cmpq %r13, (%r14)
jg 0x1585aca
movq %r15, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0x1583d5a
movq 0x8(%rsp), %rax
movq (%r14), %rcx
movq %rcx, (%rax)
movq 0x8(%r14), %rcx
movq %rcx, 0x8(%rax)
addl $0x2, (%r15)
addq $0x10, %r14
cmpq %rbx, %r14
jne 0x1585aa0
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/DenseMap.h
|
llvm::CombinerHelper::matchSubOfVScale(llvm::MachineOperand const&, std::function<void (llvm::MachineIRBuilder&)>&)
|
bool CombinerHelper::matchSubOfVScale(const MachineOperand &MO,
BuildFnTy &MatchInfo) {
GSub *Sub = cast<GSub>(MRI.getVRegDef(MO.getReg()));
GVScale *RHSVScale = cast<GVScale>(MRI.getVRegDef(Sub->getRHSReg()));
Register Dst = MO.getReg();
LLT DstTy = MRI.getType(Dst);
if (!MRI.hasOneNonDBGUse(RHSVScale->getReg(0)) ||
!isLegalOrBeforeLegalizer({TargetOpcode::G_ADD, DstTy}))
return false;
MatchInfo = [=](MachineIRBuilder &B) {
auto VScale = B.buildVScale(DstTy, -RHSVScale->getSrc());
B.buildAdd(Dst, Sub->getLHSReg(), VScale, Sub->getFlags());
};
return true;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq %rdx, %rbx
movq %rsi, %r13
movq %rdi, %r12
movq 0x8(%rdi), %rdi
movl 0x4(%rsi), %esi
callq 0x1d835a8
movq %rax, %r14
movq 0x8(%r12), %rdi
movq 0x20(%rax), %rax
movl 0x44(%rax), %esi
callq 0x1d835a8
movq %rax, %r15
movl 0x4(%r13), %r13d
movq 0x8(%r12), %rdi
testl %r13d, %r13d
jns 0x158eb6e
movl %r13d, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpl %eax, 0x1d0(%rdi)
jbe 0x158eb6e
movq 0x1c8(%rdi), %rcx
movq (%rcx,%rax,8), %rax
jmp 0x158eb70
xorl %eax, %eax
movq %rax, 0x8(%rsp)
movq 0x20(%r15), %rax
movl 0x4(%rax), %esi
callq 0x1d83616
testb %al, %al
je 0x158ec4b
leaq 0x10(%rsp), %rsi
movl $0x33, (%rsi)
leaq 0x8(%rsp), %rax
movq %rax, 0x8(%rsi)
movq $0x1, 0x10(%rsi)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rsi)
movq %r12, %rdi
callq 0x156b348
testb %al, %al
je 0x158ec4b
movq 0x8(%rsp), %r12
xorps %xmm0, %xmm0
movaps %xmm0, 0x20(%rsp)
movaps %xmm0, 0x10(%rsp)
movl $0x20, %edi
callq 0x7808d0
movq %r12, (%rax)
movq %r15, 0x8(%rax)
movl %r13d, 0x10(%rax)
movq %r14, 0x18(%rax)
movq %rax, 0x10(%rsp)
leaq 0x96d(%rip), %rcx # 0x158f560
movq %rcx, 0x28(%rsp)
leaq 0xa9d(%rip), %rdx # 0x158f69c
movq %rdx, 0x20(%rsp)
movaps 0x10(%rsp), %xmm0
movaps %xmm0, 0x40(%rsp)
movups (%rbx), %xmm1
movaps %xmm1, 0x10(%rsp)
movups %xmm0, (%rbx)
movq 0x10(%rbx), %rax
movq %rax, 0x20(%rsp)
movq %rdx, 0x10(%rbx)
movq 0x18(%rbx), %rdx
movq %rdx, 0x28(%rsp)
movq %rcx, 0x18(%rbx)
testq %rax, %rax
je 0x158ec47
leaq 0x10(%rsp), %rdi
movq %rdi, %rsi
movl $0x3, %edx
callq *%rax
movb $0x1, %al
jmp 0x158ec4d
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
nop
|
/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
|
llvm::IRTranslator::findUnwindDestinations(llvm::BasicBlock const*, llvm::BranchProbability, llvm::SmallVectorImpl<std::pair<llvm::MachineBasicBlock*, llvm::BranchProbability>>&)
|
bool IRTranslator::findUnwindDestinations(
const BasicBlock *EHPadBB,
BranchProbability Prob,
SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
&UnwindDests) {
EHPersonality Personality = classifyEHPersonality(
EHPadBB->getParent()->getFunction().getPersonalityFn());
bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
bool IsSEH = isAsynchronousEHPersonality(Personality);
if (IsWasmCXX) {
// Ignore this for now.
return false;
}
while (EHPadBB) {
const Instruction *Pad = EHPadBB->getFirstNonPHI();
BasicBlock *NewEHPadBB = nullptr;
if (isa<LandingPadInst>(Pad)) {
// Stop on landingpads. They are not funclets.
UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
break;
}
if (isa<CleanupPadInst>(Pad)) {
// Stop on cleanup pads. Cleanups are always funclet entries for all known
// personalities.
UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
UnwindDests.back().first->setIsEHScopeEntry();
UnwindDests.back().first->setIsEHFuncletEntry();
break;
}
if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
// Add the catchpad handlers to the possible destinations.
for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
// For MSVC++ and the CLR, catchblocks are funclets and need prologues.
if (IsMSVCCXX || IsCoreCLR)
UnwindDests.back().first->setIsEHFuncletEntry();
if (!IsSEH)
UnwindDests.back().first->setIsEHScopeEntry();
}
NewEHPadBB = CatchSwitch->getUnwindDest();
} else {
continue;
}
BranchProbabilityInfo *BPI = FuncInfo.BPI;
if (BPI && NewEHPadBB)
Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
EHPadBB = NewEHPadBB;
}
return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rcx, %rbx
movq %rsi, %r12
movq %rdi, %r14
movl %edx, 0xc(%rsp)
movq 0x48(%rsi), %rdi
callq 0x2a40ed6
movq %rax, %rdi
callq 0x2a39a54
movq %rax, 0x28(%rsp)
cmpl $0xc, %eax
je 0x159a978
movq 0x28(%rsp), %rax
leal -0x7(%rax), %ecx
movl %ecx, 0x24(%rsp)
leaq 0x130(%r14), %rcx
movq %rcx, 0x18(%rsp)
addl $-0x9, %eax
movl %eax, 0x20(%rsp)
movq %r14, 0x40(%rsp)
testq %r12, %r12
je 0x159a978
movq %r12, %rdi
callq 0x29ca0f4
movzbl (%rax), %ebp
cmpl $0x50, %ebp
je 0x159a7f7
cmpl $0x5f, %ebp
jne 0x159a849
movq %r12, 0x10(%rsp)
movq 0x18(%rsp), %rdi
leaq 0x10(%rsp), %r15
movq %r15, %rsi
callq 0xaa6974
movq 0x8(%rax), %rax
movq %rax, 0x10(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
leaq 0xc(%rsp), %rdx
callq 0x159a990
movb $0x1, %al
jmp 0x159a970
movq %r12, 0x10(%rsp)
movq 0x18(%rsp), %rdi
leaq 0x10(%rsp), %r15
movq %r15, %rsi
callq 0xaa6974
movq 0x8(%rax), %rax
movq %rax, 0x10(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
leaq 0xc(%rsp), %rdx
callq 0x159a990
movq (%rbx), %rax
movl 0x8(%rbx), %ecx
shlq $0x4, %rcx
movq -0x10(%rax,%rcx), %rcx
movb $0x1, %al
movb %al, 0xc9(%rcx)
movb %al, 0xcb(%rcx)
jmp 0x159a970
cmpb $0x27, %bpl
jne 0x159a915
movq %r12, 0x30(%rsp)
movzbl 0x2(%rax), %r14d
andl $0x1, %r14d
shll $0x5, %r14d
addq $0x20, %r14
movq %rax, 0x38(%rsp)
movl 0x4(%rax), %r12d
shll $0x5, %r12d
cmpq %r12, %r14
je 0x159a900
movq 0x38(%rsp), %rax
movq -0x8(%rax), %r13
movq (%r13,%r14), %rax
movq %rax, 0x10(%rsp)
movq 0x18(%rsp), %rdi
leaq 0x10(%rsp), %r15
movq %r15, %rsi
callq 0xaa6974
movq 0x8(%rax), %rax
movq %rax, 0x10(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
leaq 0xc(%rsp), %rdx
callq 0x159a990
cmpl $0x1, 0x20(%rsp)
ja 0x159a8da
movq (%rbx), %rax
movl 0x8(%rbx), %ecx
shlq $0x4, %rcx
movq -0x10(%rax,%rcx), %rax
movb $0x1, 0xcb(%rax)
cmpl $0x2, 0x24(%rsp)
jb 0x159a8f7
movq (%rbx), %rax
movl 0x8(%rbx), %ecx
shlq $0x4, %rcx
movq -0x10(%rax,%rcx), %rax
movb $0x1, 0xc9(%rax)
addq $0x20, %r14
cmpq %r14, %r12
jne 0x159a888
movq 0x38(%rsp), %rax
testb $0x1, 0x2(%rax)
movq 0x40(%rsp), %r14
jne 0x159a91a
xorl %r15d, %r15d
jmp 0x159a922
xorl %r15d, %r15d
jmp 0x159a927
movq -0x8(%rax), %rax
movq 0x20(%rax), %r15
movq 0x30(%rsp), %r12
cmpb $0x27, %bpl
jne 0x159a96e
movq 0x310(%r14), %rdi
testq %rdi, %rdi
sete %al
testq %r15, %r15
sete %cl
orb %al, %cl
jne 0x159a967
movq %r12, %rsi
movq %r15, %rdx
callq 0x258c7e4
movl 0xc(%rsp), %ecx
movl %eax, %eax
imulq %rcx, %rax
addq $0x40000000, %rax # imm = 0x40000000
shrq $0x1f, %rax
movl %eax, 0xc(%rsp)
xorl %eax, %eax
movq %r15, %r12
jmp 0x159a970
xorl %eax, %eax
testb %al, %al
je 0x159a79e
cmpl $0xc, 0x28(%rsp)
setne %al
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/CodeGen/GlobalISel/IRTranslator.cpp
|
llvm::MachineFunction::VariableDbgInfo& llvm::SmallVectorImpl<llvm::MachineFunction::VariableDbgInfo>::emplace_back<llvm::DILocalVariable const*&, llvm::DIExpression const*&, int&, llvm::DILocation const*&>(llvm::DILocalVariable const*&, llvm::DIExpression const*&, int&, llvm::DILocation const*&)
|
size_t size() const { return Size; }
|
movl 0x8(%rdi), %eax
cmpl 0xc(%rdi), %eax
jae 0x15a0390
movq (%rdi), %r9
movq %rax, %r10
shlq $0x5, %r10
movq (%rsi), %rsi
movq (%rdx), %rdx
movl (%rcx), %ecx
movq (%r8), %r8
movl %ecx, (%r9,%r10)
movb $0x0, 0x4(%r9,%r10)
movq %rsi, 0x8(%r9,%r10)
movq %rdx, 0x10(%r9,%r10)
movq %r8, 0x18(%r9,%r10)
incl %eax
movl %eax, 0x8(%rdi)
shlq $0x5, %rax
addq %r9, %rax
addq $-0x20, %rax
retq
nop
|
/llvm/ADT/SmallVector.h
|
llvm::SrcOp const* llvm::SmallVectorTemplateCommon<llvm::SrcOp, void>::reserveForParamAndGetAddressImpl<llvm::SmallVectorTemplateBase<llvm::SrcOp, true>>(llvm::SmallVectorTemplateBase<llvm::SrcOp, true>*, llvm::SrcOp const&, unsigned long)
|
static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
size_t N) {
size_t NewSize = This->size() + N;
if (LLVM_LIKELY(NewSize <= This->capacity()))
return &Elt;
bool ReferencesStorage = false;
int64_t Index = -1;
if (!U::TakesParamByValue) {
if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
ReferencesStorage = true;
Index = &Elt - This->begin();
}
}
This->grow(NewSize);
return ReferencesStorage ? This->begin() + Index : &Elt;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movl 0x8(%rdi), %ecx
movl 0xc(%rdi), %eax
addq %rcx, %rdx
cmpq %rax, %rdx
ja 0x15a2488
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
movq %rdi, %r14
movq (%rdi), %rax
leaq (%rcx,%rcx,2), %rcx
cmpq %rbx, %rax
setbe %sil
leaq (%rax,%rcx,8), %rcx
cmpq %rbx, %rcx
seta %bpl
andb %sil, %bpl
movq $-0x1, %r15
cmpb $0x1, %bpl
je 0x15a24d7
leaq 0x10(%r14), %rsi
movl $0x18, %ecx
movq %r14, %rdi
callq 0x2b4ed86
testb %bpl, %bpl
je 0x15a247a
leaq (%r15,%r15,2), %rbx
shlq $0x3, %rbx
addq (%r14), %rbx
jmp 0x15a247a
movq %rbx, %rcx
subq %rax, %rcx
sarq $0x3, %rcx
movabsq $-0x5555555555555555, %r15 # imm = 0xAAAAAAAAAAAAAAAB
imulq %rcx, %r15
jmp 0x15a24b4
nop
|
/llvm/ADT/SmallVector.h
|
llvm::SmallVectorImpl<llvm::LegalityPredicates::TypePairAndMemDesc>::operator=(llvm::SmallVectorImpl<llvm::LegalityPredicates::TypePairAndMemDesc> const&)
|
SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd;
if (RHSSize)
NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
else
NewEnd = this->begin();
// Destroy excess elements.
this->destroy_range(NewEnd, this->end());
// Trim.
this->set_size(RHSSize);
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->clear();
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Copy construct the new elements in place.
this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->set_size(RHSSize);
return *this;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdi, %rbx
cmpq %rsi, %rdi
je 0x15a6d12
movq %rsi, %r15
movl 0x8(%rsi), %r14d
movl 0x8(%rbx), %r12d
cmpl %r14d, %r12d
jae 0x15a6cb7
cmpl %r14d, 0xc(%rbx)
jae 0x15a6cd0
movl $0x0, 0x8(%rbx)
leaq 0x10(%rbx), %rsi
movl $0x20, %ecx
movq %rbx, %rdi
movq %r14, %rdx
callq 0x2b4ed86
jmp 0x15a6ce9
testl %r14d, %r14d
je 0x15a6d0e
movq (%r15), %rsi
movq %r14, %rdx
shlq $0x5, %rdx
movq (%rbx), %rdi
callq 0x780120
jmp 0x15a6d0e
testq %r12, %r12
je 0x15a6ce9
movq (%r15), %rsi
movq %r12, %rdx
shlq $0x5, %rdx
movq (%rbx), %rdi
callq 0x780120
jmp 0x15a6cec
xorl %r12d, %r12d
movl 0x8(%r15), %edx
subq %r12, %rdx
je 0x15a6d0e
shlq $0x5, %r12
movq (%r15), %rsi
addq %r12, %rsi
addq (%rbx), %r12
shlq $0x5, %rdx
movq %r12, %rdi
callq 0x780890
movl %r14d, 0x8(%rbx)
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/SmallVector.h
|
llvm::LegalizerHelper::lowerSADDO_SSUBO(llvm::MachineInstr&)
|
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) {
auto [Dst0, Dst0Ty, Dst1, Dst1Ty, LHS, LHSTy, RHS, RHSTy] =
MI.getFirst4RegLLTs();
const bool IsAdd = MI.getOpcode() == TargetOpcode::G_SADDO;
LLT Ty = Dst0Ty;
LLT BoolTy = Dst1Ty;
Register NewDst0 = MRI.cloneVirtualRegister(Dst0);
if (IsAdd)
MIRBuilder.buildAdd(NewDst0, LHS, RHS);
else
MIRBuilder.buildSub(NewDst0, LHS, RHS);
// TODO: If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
auto Zero = MIRBuilder.buildConstant(Ty, 0);
// For an addition, the result should be less than one of the operands (LHS)
// if and only if the other operand (RHS) is negative, otherwise there will
// be overflow.
// For a subtraction, the result should be less than one of the operands
// (LHS) if and only if the other operand (RHS) is (non-zero) positive,
// otherwise there will be overflow.
auto ResultLowerThanLHS =
MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, NewDst0, LHS);
auto ConditionRHS = MIRBuilder.buildICmp(
IsAdd ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGT, BoolTy, RHS, Zero);
MIRBuilder.buildXor(Dst1, ConditionRHS, ResultLowerThanLHS);
MIRBuilder.buildCopy(Dst0, NewDst0);
MI.eraseFromParent();
return Legalized;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xd8, %rsp
movq %rsi, %rbx
movq %rdi, %r14
leaq 0x98(%rsp), %r15
movq %r15, %rdi
callq 0x1d436ac
movq %rbx, 0x38(%rsp)
movzwl 0x44(%rbx), %ebp
movq 0x20(%r15), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%r15), %rbx
movq 0x10(%r14), %rdi
movl 0x38(%r15), %esi
leaq 0x299d604(%rip), %rdx # 0x3f5b4b8
xorl %r12d, %r12d
xorl %ecx, %ecx
callq 0x1d830ee
movl %eax, %r13d
movq (%r14), %rdi
movl 0x8(%r15), %eax
movl 0x18(%r15), %ecx
leaq 0x40(%rsp), %rdx
cmpw $0x96, %bp
jne 0x15bdf1e
movl %r13d, (%rdx)
movl $0x1, 0x8(%rdx)
leaq 0x58(%rsp), %r8
movl %ecx, (%r8)
movl %r12d, 0x10(%r8)
movl %eax, 0x18(%r8)
movl %r12d, 0x28(%r8)
movq (%rdi), %rax
movq $0x0, (%rsp)
movl $0x1, %ecx
movl $0x2, %r9d
movl $0x33, %esi
callq *0x20(%rax)
movl $0x28, 0x10(%rsp)
jmp 0x15bdf62
movl %r13d, (%rdx)
movl $0x1, 0x8(%rdx)
leaq 0x58(%rsp), %r8
movl %ecx, (%r8)
movl %r12d, 0x10(%r8)
movl %eax, 0x18(%r8)
movl %r12d, 0x28(%r8)
movq (%rdi), %rax
movq $0x0, (%rsp)
movl $0x1, %ecx
movl $0x2, %r9d
movl $0x34, %esi
callq *0x20(%rax)
movl $0x26, 0x10(%rsp)
movq (%r14), %rdi
leaq 0x58(%rsp), %r12
movq %rbx, (%r12)
xorl %eax, %eax
movl %eax, 0x8(%r12)
movq %r12, %rsi
xorl %edx, %edx
callq 0x15ddf6e
movq %rax, 0x18(%rsp)
movq %rdx, %rbp
movq (%r14), %rdi
leaq 0x88(%rsp), %r15
movq 0x30(%rsp), %rbx
movq %rbx, (%r15)
xorl %ecx, %ecx
movl %ecx, 0x8(%r15)
movl %r13d, (%r12)
movl %ecx, 0x10(%r12)
movl 0xb0(%rsp), %eax
leaq 0x40(%rsp), %r12
movl %eax, (%r12)
movl %ecx, 0x10(%r12)
movl %r13d, 0x14(%rsp)
leaq 0x58(%rsp), %r13
movl $0x28, %esi
movq %r15, %rdx
movq %r13, %rcx
movq %r12, %r8
callq 0x15e0ac4
movq %rax, 0x28(%rsp)
movq %rdx, 0x20(%rsp)
movq (%r14), %rdi
movq %rbx, (%r15)
xorl %ecx, %ecx
movl %ecx, 0x8(%r15)
movl 0xa0(%rsp), %eax
movl %eax, (%r13)
movl %ecx, 0x10(%r13)
xorl %r13d, %r13d
movq 0x18(%rsp), %rax
movq %rax, (%r12)
movq %rbp, 0x8(%r12)
movl $0x1, %ebp
movl %ebp, 0x10(%r12)
leaq 0x88(%rsp), %rdx
leaq 0x58(%rsp), %rbx
leaq 0x40(%rsp), %r15
movl 0x10(%rsp), %esi
movq %rbx, %rcx
movq %r15, %r8
callq 0x15e0ac4
movq (%r14), %rdi
movl 0xc0(%rsp), %ecx
movl %ecx, (%r15)
movl %ebp, 0x8(%r15)
movq %rax, (%rbx)
movq %rdx, 0x8(%rbx)
movl %ebp, 0x10(%rbx)
movq 0x28(%rsp), %rax
movq %rax, 0x18(%rbx)
movq 0x20(%rsp), %rax
movq %rax, 0x20(%rbx)
movl %ebp, 0x28(%rbx)
movq (%rdi), %rax
movq $0x0, (%rsp)
leaq 0x40(%rsp), %rbx
leaq 0x58(%rsp), %r15
movl $0x1, %ecx
movl $0x2, %r9d
movl $0x3e, %esi
movq %rbx, %rdx
movq %r15, %r8
callq *0x20(%rax)
movq (%r14), %rdi
movl 0xd0(%rsp), %eax
movl %eax, (%rbx)
movl %ebp, 0x8(%rbx)
movl 0x14(%rsp), %eax
movl %eax, (%r15)
movl %r13d, 0x10(%r15)
leaq 0x40(%rsp), %rsi
leaq 0x58(%rsp), %rdx
callq 0x15de680
movq 0x38(%rsp), %rdi
callq 0x1d3deba
movl $0x1, %eax
addq $0xd8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/CodeGen/GlobalISel/LegalizerHelper.cpp
|
llvm::LegalizerHelper::lowerSMULH_UMULH(llvm::MachineInstr&)
|
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerSMULH_UMULH(MachineInstr &MI) {
bool IsSigned = MI.getOpcode() == TargetOpcode::G_SMULH;
unsigned ExtOp = IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
Register Result = MI.getOperand(0).getReg();
LLT OrigTy = MRI.getType(Result);
auto SizeInBits = OrigTy.getScalarSizeInBits();
LLT WideTy = OrigTy.changeElementSize(SizeInBits * 2);
auto LHS = MIRBuilder.buildInstr(ExtOp, {WideTy}, {MI.getOperand(1)});
auto RHS = MIRBuilder.buildInstr(ExtOp, {WideTy}, {MI.getOperand(2)});
auto Mul = MIRBuilder.buildMul(WideTy, LHS, RHS);
unsigned ShiftOp = IsSigned ? TargetOpcode::G_ASHR : TargetOpcode::G_LSHR;
auto ShiftAmt = MIRBuilder.buildConstant(WideTy, SizeInBits);
auto Shifted = MIRBuilder.buildInstr(ShiftOp, {WideTy}, {Mul, ShiftAmt});
MIRBuilder.buildTrunc(Result, Shifted);
MI.eraseFromParent();
return Legalized;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq %rdi, %r14
movq %rsi, %rbx
movq 0x20(%rsi), %rax
movl 0x4(%rax), %edx
testl %edx, %edx
jns 0x15be127
movq 0x10(%r14), %rax
movl %edx, %ecx
andl $0x7fffffff, %ecx # imm = 0x7FFFFFFF
cmpl %ecx, 0x1d0(%rax)
jbe 0x15be127
movq 0x1c8(%rax), %rax
movq (%rax,%rcx,8), %rbp
jmp 0x15be129
xorl %ebp, %ebp
movzwl 0x44(%rbx), %eax
movq %rbp, 0x50(%rsp)
testb $0x1, %bpl
movq %rdx, 0x48(%rsp)
jne 0x15be14b
testb $0x4, %bpl
jne 0x15be151
shrl $0x3, %ebp
movzwl %bp, %ebp
jmp 0x15be163
shrq $0x3, %rbp
jmp 0x15be163
movq %rbp, %rcx
shrq $0x13, %rcx
testb $0x2, %bpl
movzwl %cx, %ebp
cmoveq %rcx, %rbp
movzwl %ax, %eax
xorl %edx, %edx
xorl %ecx, %ecx
cmpl $0x9d, %eax
sete %dl
movl %edx, 0x24(%rsp)
setne %cl
leal 0x83(,%rcx,2), %r13d
leal (,%rbp,2), %esi
leaq 0x50(%rsp), %rdi
callq 0x97305a
movq %rax, %r15
movq (%r14), %rdi
movq %r14, 0x28(%rsp)
leaq 0x30(%rsp), %r14
movq %rax, (%r14)
xorl %ecx, %ecx
movl %ecx, 0x8(%r14)
movq 0x20(%rbx), %rax
movl 0x24(%rax), %eax
leaq 0x58(%rsp), %r12
movl %eax, (%r12)
movl %ecx, 0x10(%r12)
movq (%rdi), %rax
movq %rcx, (%rsp)
movl $0x1, %ecx
movl $0x1, %r9d
movl %r13d, %esi
movq %r14, %rdx
movq %r12, %r8
callq *0x20(%rax)
movq %rax, 0x18(%rsp)
movq %rbx, %rax
movq %rbx, 0x40(%rsp)
movq %rdx, 0x10(%rsp)
movq 0x28(%rsp), %rbx
movq (%rbx), %rdi
movq %r15, (%r14)
xorl %ecx, %ecx
movl %ecx, 0x8(%r14)
movq 0x20(%rax), %rax
movl 0x44(%rax), %eax
movl %eax, (%r12)
movl %ecx, 0x10(%r12)
movq (%rdi), %rax
movq %rcx, (%rsp)
leaq 0x30(%rsp), %r14
leaq 0x58(%rsp), %r12
movl $0x1, %ecx
movl $0x1, %r9d
movl %r13d, %esi
movq %r14, %rdx
movq %r12, %r8
callq *0x20(%rax)
movq (%rbx), %rdi
movq %r15, (%r14)
xorl %esi, %esi
movl %esi, 0x8(%r14)
movq 0x18(%rsp), %rcx
movq %rcx, (%r12)
movq 0x10(%rsp), %rcx
movq %rcx, 0x8(%r12)
movl $0x1, %ecx
movl %ecx, 0x10(%r12)
movq %rax, 0x18(%r12)
movq %rdx, 0x20(%r12)
movl %ecx, 0x28(%r12)
movq (%rdi), %rax
movq %rsi, (%rsp)
xorl %r13d, %r13d
leaq 0x30(%rsp), %rdx
leaq 0x58(%rsp), %rbx
movl $0x1, %ecx
movl $0x2, %r9d
movl $0x35, %esi
movq %rbx, %r8
callq *0x20(%rax)
movq %rax, 0x18(%rsp)
movq %rdx, 0x10(%rsp)
movl 0x24(%rsp), %r12d
addl $0x87, %r12d
movq 0x28(%rsp), %r14
movq (%r14), %rdi
movq %r15, (%rbx)
movl %r13d, 0x8(%rbx)
movl %ebp, %edx
leaq 0x58(%rsp), %rbx
movq %rbx, %rsi
callq 0x15ddf6e
movq (%r14), %rdi
movq %r14, %rbp
leaq 0x30(%rsp), %rcx
movq %r15, (%rcx)
movl %r13d, 0x8(%rcx)
movq 0x18(%rsp), %rcx
movq %rcx, (%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x8(%rbx)
movl $0x1, %ecx
movl %ecx, 0x10(%rbx)
movq %rax, 0x18(%rbx)
movq %rdx, 0x20(%rbx)
movl %ecx, 0x28(%rbx)
movl $0x1, %r15d
movq (%rdi), %rax
movq %r13, (%rsp)
leaq 0x30(%rsp), %rbx
leaq 0x58(%rsp), %r14
movl $0x1, %ecx
movl $0x2, %r9d
movl %r12d, %esi
movq %rbx, %rdx
movq %r14, %r8
callq *0x20(%rax)
movq (%rbp), %rdi
movq 0x48(%rsp), %rcx
movl %ecx, (%rbx)
movl %r15d, 0x8(%rbx)
movq %rax, (%r14)
movq %rdx, 0x8(%r14)
movl %r15d, 0x10(%r14)
leaq 0x30(%rsp), %rsi
leaq 0x58(%rsp), %rdx
xorl %ecx, %ecx
callq 0x15e0a7c
movq 0x40(%rsp), %rdi
callq 0x1d3deba
movl $0x1, %eax
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/CodeGen/GlobalISel/LegalizerHelper.cpp
|
getTruncStoreByteOffset(llvm::GStore&, llvm::Register&, llvm::MachineRegisterInfo&)
|
static std::optional<int64_t>
getTruncStoreByteOffset(GStore &Store, Register &SrcVal,
MachineRegisterInfo &MRI) {
Register TruncVal;
if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
return std::nullopt;
// The shift amount must be a constant multiple of the narrow type.
// It is translated to the offset address in the wide source value "y".
//
// x = G_LSHR y, ShiftAmtC
// s8 z = G_TRUNC x
// store z, ...
Register FoundSrcVal;
int64_t ShiftAmt;
if (!mi_match(TruncVal, MRI,
m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
if (!SrcVal.isValid() || TruncVal == SrcVal) {
if (!SrcVal.isValid())
SrcVal = TruncVal;
return 0; // If it's the lowest index store.
}
return std::nullopt;
}
unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
if (ShiftAmt % NarrowBits != 0)
return std::nullopt;
const unsigned Offset = ShiftAmt / NarrowBits;
if (SrcVal.isValid() && FoundSrcVal != SrcVal)
return std::nullopt;
if (!SrcVal.isValid())
SrcVal = FoundSrcVal;
else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
return std::nullopt;
return Offset;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
leaq 0x4(%rsp), %rax
movl $0x0, (%rax)
movq 0x20(%rdi), %rcx
movl 0x4(%rcx), %ecx
leaq 0x18(%rsp), %rdi
movq %rax, (%rdi)
leaq 0x8(%rsp), %r12
movl %ecx, (%r12)
movq %rdx, %rsi
movq %r12, %rdx
callq 0x97e174
testb %al, %al
je 0x15dabf0
movq %rsp, %rax
movl $0x0, (%rax)
movl 0x4(%rsp), %ecx
leaq 0x18(%rsp), %rdi
movq %rax, (%rdi)
movq %r12, 0x8(%rdi)
movq %rax, 0x10(%rdi)
movq %r12, 0x18(%rdi)
leaq 0x14(%rsp), %rdx
movl %ecx, (%rdx)
movq %rbx, %rsi
callq 0x15db172
testb %al, %al
je 0x15dab25
movq 0x30(%r15), %rax
testb $0x7, %al
je 0x15dab3c
andq $-0x8, %rax
addq $0x10, %rax
movq %rax, %r15
jmp 0x15dab43
movl (%r14), %ecx
testl %ecx, %ecx
sete %dl
cmpl %ecx, 0x4(%rsp)
sete %al
testl %ecx, %ecx
je 0x15dab62
orb %al, %dl
jmp 0x15dab6b
addq $0x30, %r15
movq %rax, (%r15)
movq (%r15), %rax
movq 0x18(%rax), %rcx
testb $0x1, %cl
jne 0x15dab5c
testb $0x4, %cl
jne 0x15dab72
shrl $0x3, %ecx
movzwl %cx, %ecx
jmp 0x15dab83
shrq $0x3, %rcx
jmp 0x15dab83
movl 0x4(%rsp), %eax
movl %eax, (%r14)
movb $0x1, %dl
xorl %eax, %eax
jmp 0x15dabf2
movq %rcx, %rax
shrq $0x13, %rax
testb $0x2, %cl
movzwl %ax, %ecx
cmoveq %rax, %rcx
movq 0x8(%rsp), %rax
movl %ecx, %ecx
cqto
idivq %rcx
testq %rdx, %rdx
jne 0x15dabf0
movl (%r14), %ecx
testq %rcx, %rcx
je 0x15daba1
cmpl %ecx, (%rsp)
jne 0x15dabf0
testq %rcx, %rcx
je 0x15dabfe
testl %ecx, %ecx
jns 0x15dabc5
andl $0x7fffffff, %ecx # imm = 0x7FFFFFFF
cmpl %ecx, 0x1d0(%rbx)
jbe 0x15dabc5
movq 0x1c8(%rbx), %rdx
movq (%rdx,%rcx,8), %rcx
jmp 0x15dabc7
xorl %ecx, %ecx
movl (%rsp), %edx
testl %edx, %edx
jns 0x15dabe9
andl $0x7fffffff, %edx # imm = 0x7FFFFFFF
cmpl %edx, 0x1d0(%rbx)
jbe 0x15dabe9
movq 0x1c8(%rbx), %rsi
movq (%rsi,%rdx,8), %rdx
jmp 0x15dabeb
xorl %edx, %edx
cmpq %rdx, %rcx
je 0x15dac04
xorl %edx, %edx
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movl (%rsp), %ecx
movl %ecx, (%r14)
movl %eax, %eax
movb $0x1, %dl
jmp 0x15dabf2
|
/CodeGen/GlobalISel/LoadStoreOpt.cpp
|
llvm::MachineIRBuilder::buildUnmerge(llvm::LLT, llvm::SrcOp const&)
|
MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
const SrcOp &Op) {
unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
SmallVector<DstOp, 8> TmpVec(NumReg, Res);
return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movq %rdx, %rbx
movq %rdi, %r14
leaq 0x10(%rsp), %r15
movq %rsi, (%r15)
movq 0x18(%rdi), %rsi
movq %rdx, %rdi
callq 0x1563f02
leaq 0x8(%rsp), %rdi
movq %rax, (%rdi)
callq 0x94022c
leaq 0x28(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
movq %rax, %r12
movq %r15, %rdi
callq 0x94022c
leaq 0x18(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
movq %rax, %rcx
movq %r12, %rax
xorl %edx, %edx
divq %rcx
movl %eax, %esi
movq (%r15), %rdx
leaq 0x38(%rsp), %r12
movq %r12, -0x10(%r12)
movabsq $0x800000000, %rax # imm = 0x800000000
movq %rax, -0x8(%r12)
leaq 0x28(%rsp), %r15
movq %r15, %rdi
xorl %ecx, %ecx
callq 0x15e17d8
movq (%r15), %rdx
movl 0x8(%r15), %ecx
movq (%r14), %rax
movq $0x0, (%rsp)
movl $0x1, %r9d
movq %r14, %rdi
movl $0x46, %esi
movq %rbx, %r8
callq *0x20(%rax)
movq %rax, %rbx
movq %rdx, %r14
movq (%r15), %rdi
cmpq %r12, %rdi
je 0x15de40b
callq 0x780910
movq %rbx, %rax
movq %r14, %rdx
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/CodeGen/GlobalISel/MachineIRBuilder.cpp
|
llvm::MachineIRBuilder::buildConstant(llvm::DstOp const&, llvm::ConstantInt const&)
|
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
const ConstantInt &Val) {
LLT Ty = Res.getLLTTy(*getMRI());
LLT EltTy = Ty.getScalarType();
assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
"creating constant with the wrong size");
assert(!Ty.isScalableVector() &&
"unexpected scalable vector in buildConstant");
if (Ty.isFixedVector()) {
auto Const = buildInstr(TargetOpcode::G_CONSTANT)
.addDef(getMRI()->createGenericVirtualRegister(EltTy))
.addCImm(&Val);
return buildSplatBuildVector(Res, Const);
}
auto Const = buildInstr(TargetOpcode::G_CONSTANT);
Const->setDebugLoc(DebugLoc());
Res.addDefToMIB(*getMRI(), Const);
Const.addCImm(&Val);
return Const;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movl 0x8(%rsi), %eax
cmpl $0x2, %eax
je 0x15de87b
cmpl $0x1, %eax
jne 0x15de880
movl (%r14), %eax
testl %eax, %eax
jns 0x15de87b
movq 0x18(%r15), %rcx
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpl %eax, 0x1d0(%rcx)
jbe 0x15de87b
movq 0x1c8(%rcx), %rcx
movq (%rcx,%rax,8), %r12
jmp 0x15de883
xorl %r12d, %r12d
jmp 0x15de883
movq (%r14), %r12
movq %r12, 0x18(%rsp)
testq $-0x7, %r12
sete %al
testb $0x4, %r12b
sete %cl
orb %al, %cl
jne 0x15de8aa
leaq 0x18(%rsp), %rdi
callq 0x9616ca
movq %rax, %r12
movq 0x18(%rsp), %rax
testq $-0x7, %rax
sete %cl
testb $0x4, %al
sete %dl
orb %cl, %dl
jne 0x15de8e4
testb $0x2, %al
movabsq $0x8000000000000, %rcx # imm = 0x8000000000000
movabsq $0x800000000000000, %rdx # imm = 0x800000000000000
cmoveq %rcx, %rdx
testq %rax, %rdx
je 0x15de988
movq %r15, %rdi
movl $0x7f, %esi
callq 0x15dd5c4
movq %r15, %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rax, 0x8(%rsp)
movq %rdx, 0x10(%rsp)
leaq 0x20(%rsp), %r12
movq $0x0, (%r12)
leaq 0x38(%rdx), %rdi
movq %r12, %rsi
callq 0xd1b8f6
movq (%r12), %rsi
testq %rsi, %rsi
je 0x15de932
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
movq 0x18(%r15), %rsi
leaq 0x8(%rsp), %r15
movq %r14, %rdi
movq %r15, %rdx
callq 0x943612
movq (%r15), %rsi
movq 0x8(%r15), %rdi
movl $0xfff00000, %eax # imm = 0xFFF00000
leaq 0x28(%rsp), %rdx
andl (%rdx), %eax
orl $0x2, %eax
movl %eax, (%rdx)
movq $0x0, 0x8(%rdx)
movq %rbx, 0x10(%rdx)
callq 0x1d3c22c
movq 0x8(%rsp), %rax
movq 0x10(%rsp), %rdx
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r15, %rdi
movl $0x7f, %esi
callq 0x15dd5c4
movq %r15, %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rax, %r13
movq %rdx, %rbp
movq 0x18(%r15), %rdi
leaq 0x297cb07(%rip), %rdx # 0x3f5b4b8
movq %r12, %rsi
xorl %ecx, %ecx
callq 0x1d831fc
leaq 0x28(%rsp), %r12
xorl %ecx, %ecx
movq %rcx, 0x8(%r12)
movl $0x1000000, (%r12) # imm = 0x1000000
movl %eax, 0x4(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r12)
movq %rbp, %rdi
movq %r13, %rsi
movq %r12, %rdx
callq 0x1d3c22c
movl $0xfff00000, %eax # imm = 0xFFF00000
andl (%r12), %eax
orl $0x2, %eax
movl %eax, (%r12)
xorl %eax, %eax
movq %rax, 0x8(%r12)
movq %rbx, 0x10(%r12)
leaq 0x28(%rsp), %rbx
movq %rbp, %rdi
movq %r13, %rsi
movq %rbx, %rdx
callq 0x1d3c22c
movq %r13, (%rbx)
movq %rbp, 0x8(%rbx)
movl $0x1, 0x10(%rbx)
leaq 0x28(%rsp), %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x15dea48
movq %rax, 0x8(%rsp)
movq %rdx, 0x10(%rsp)
jmp 0x15de96f
nop
|
/CodeGen/GlobalISel/MachineIRBuilder.cpp
|
llvm::MachineIRBuilder::buildPrefetch(llvm::SrcOp const&, unsigned int, unsigned int, unsigned int, llvm::MachineMemOperand&)
|
MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
unsigned RW,
unsigned Locality,
unsigned CacheType,
MachineMemOperand &MMO) {
auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
Addr.addSrcToMIB(MIB);
MIB.addImm(RW).addImm(Locality).addImm(CacheType);
MIB.addMemOperand(&MMO);
return MIB;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %r9, (%rsp)
movl %r8d, %ebp
movl %ecx, %r15d
movl %edx, %r12d
movq %rsi, %r13
movq %rdi, %r14
movl $0x75, %esi
callq 0x15dd5c4
movq %r14, %rdi
movq %rax, %rsi
callq 0x15dd6ec
leaq 0x28(%rsp), %r14
movq %rax, (%r14)
movq %rdx, 0x8(%r14)
movq %r13, %rdi
movq %r14, %rsi
callq 0x15ddb54
movl %r12d, %eax
movq (%r14), %rsi
movq 0x8(%r14), %rdi
movl $0xfff00000, %r13d # imm = 0xFFF00000
leaq 0x8(%rsp), %r12
movl (%r12), %ecx
andl %r13d, %ecx
incl %ecx
movl %ecx, (%r12)
xorl %ebx, %ebx
movq %rbx, 0x8(%r12)
movq %rax, 0x10(%r12)
movq %r12, %rdx
callq 0x1d3c22c
movl %r15d, %eax
movq (%r14), %rsi
movq 0x8(%r14), %rdi
movl (%r12), %ecx
andl %r13d, %ecx
incl %ecx
movl %ecx, (%r12)
movq %rbx, 0x8(%r12)
movq %rax, 0x10(%r12)
leaq 0x8(%rsp), %r15
movq %r15, %rdx
callq 0x1d3c22c
movl %ebp, %eax
movq (%r14), %rsi
movq 0x8(%r14), %rdi
andl (%r15), %r13d
incl %r13d
movl %r13d, (%r15)
movq %rbx, 0x8(%r15)
movq %rax, 0x10(%r15)
leaq 0x8(%rsp), %rdx
callq 0x1d3c22c
movq (%r14), %rsi
movq 0x8(%r14), %rdi
movq (%rsp), %rdx
callq 0x1d3cbe2
movq (%r14), %rax
movq 0x8(%r14), %rdx
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/CodeGen/GlobalISel/MachineIRBuilder.cpp
|
llvm::RegBankSelect::init(llvm::MachineFunction&)
|
void RegBankSelect::init(MachineFunction &MF) {
RBI = MF.getSubtarget().getRegBankInfo();
assert(RBI && "Cannot work without RegisterBankInfo");
MRI = &MF.getRegInfo();
TRI = MF.getSubtarget().getRegisterInfo();
TPC = &getAnalysis<TargetPassConfig>();
if (OptMode != Mode::Fast) {
MBFI = &getAnalysis<MachineBlockFrequencyInfoWrapperPass>().getMBFI();
MBPI = &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
} else {
MBFI = nullptr;
MBPI = nullptr;
}
MIRBuilder.setMF(MF);
MORE = std::make_unique<MachineOptimizationRemarkEmitter>(MF, MBFI);
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq 0x10(%rsi), %rdi
movq (%rdi), %rax
callq *0xd0(%rax)
movq %rax, 0x38(%rbx)
movq 0x28(%r14), %rax
movq %rax, 0x40(%rbx)
movq 0x10(%r14), %rdi
movq (%rdi), %rax
callq *0xc8(%rax)
movq %rax, 0x48(%rbx)
movq 0x8(%rbx), %rax
movq (%rax), %rdx
movq 0x8(%rax), %rax
xorl %edi, %edi
leaq 0x4379dc8(%rip), %rcx # 0x595ba7c
leaq 0x10(%rdx), %rsi
cmpq %rcx, (%rdx)
jne 0x15e1cc1
movq 0x8(%rdx), %rdi
je 0x15e1ccb
movq %rsi, %rdx
cmpq %rax, %rsi
jne 0x15e1cb4
movq (%rdi), %rax
leaq 0x4379da7(%rip), %rsi # 0x595ba7c
callq *0x60(%rax)
movq %rax, 0xc8(%rbx)
cmpl $0x0, 0xc0(%rbx)
je 0x15e1d6e
movq 0x8(%rbx), %rax
movq (%rax), %rdx
movq 0x8(%rax), %rax
xorl %edi, %edi
leaq 0x436ec04(%rip), %rcx # 0x5950904
leaq 0x10(%rdx), %rsi
cmpq %rcx, (%rdx)
jne 0x15e1d0d
movq 0x8(%rdx), %rdi
je 0x15e1d17
movq %rsi, %rdx
cmpq %rax, %rsi
jne 0x15e1d00
movq (%rdi), %rax
leaq 0x436ebe3(%rip), %rsi # 0x5950904
callq *0x60(%rax)
addq $0x38, %rax
movq %rax, 0x50(%rbx)
movq 0x8(%rbx), %rax
movq (%rax), %rdx
movq 0x8(%rax), %rax
xorl %edi, %edi
leaq 0x436fae0(%rip), %rcx # 0x5951820
leaq 0x10(%rdx), %rsi
cmpq %rcx, (%rdx)
jne 0x15e1d4d
movq 0x8(%rdx), %rdi
je 0x15e1d57
movq %rsi, %rdx
cmpq %rax, %rsi
jne 0x15e1d40
movq (%rdi), %rax
leaq 0x436fabf(%rip), %rsi # 0x5951820
callq *0x60(%rax)
addq $0x1c, %rax
movq %rax, 0x58(%rbx)
jmp 0x15e1d75
xorps %xmm0, %xmm0
movups %xmm0, 0x50(%rbx)
leaq 0x68(%rbx), %rdi
movq %r14, %rsi
callq 0x15dd558
movl $0x10, %edi
callq 0x7808d0
movq 0x50(%rbx), %rcx
movq %r14, (%rax)
movq %rcx, 0x8(%rax)
movq 0x60(%rbx), %rdi
movq %rax, 0x60(%rbx)
testq %rdi, %rdi
je 0x15e1db4
movl $0x10, %esi
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0x7800d0
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/CodeGen/GlobalISel/RegBankSelect.cpp
|
(anonymous namespace)::XCoreAsmPrinter::printOperand(llvm::MachineInstr const*, int, llvm::raw_ostream&)
|
void XCoreAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O) {
const DataLayout &DL = getDataLayout();
const MachineOperand &MO = MI->getOperand(opNum);
switch (MO.getType()) {
case MachineOperand::MO_Register:
O << XCoreInstPrinter::getRegisterName(MO.getReg());
break;
case MachineOperand::MO_Immediate:
O << MO.getImm();
break;
case MachineOperand::MO_MachineBasicBlock:
MO.getMBB()->getSymbol()->print(O, MAI);
break;
case MachineOperand::MO_GlobalAddress:
PrintSymbolOperand(MO, O);
break;
case MachineOperand::MO_ConstantPoolIndex:
O << DL.getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << '_'
<< MO.getIndex();
break;
case MachineOperand::MO_BlockAddress:
GetBlockAddressSymbol(MO.getBlockAddress())->print(O, MAI);
break;
default:
llvm_unreachable("not implemented");
}
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %r14
movl %edx, %ebx
movq %rsi, %r12
movq %rdi, %r15
callq 0x1606e44
movq 0x20(%r12), %rcx
movl %ebx, %edx
shlq $0x5, %rdx
leaq (%rcx,%rdx), %rbx
movzbl (%rcx,%rdx), %ecx
leaq 0x296ecee(%rip), %rdx # 0x3f59dd0
movslq (%rdx,%rcx,4), %rcx
addq %rdx, %rcx
jmpq *%rcx
movl 0x4(%rbx), %edi
callq 0x19f3462
movq %r14, %rdi
movq %rax, %rsi
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
jmp 0x7f9b78
movslq 0x1c(%rax), %rax
leaq 0x296ed8c(%rip), %rcx # 0x3f59ea0
movq (%rcx,%rax,8), %rdx
leaq 0x296edc1(%rip), %rcx # 0x3f59ee0
movslq (%rcx,%rax,4), %rsi
addq %rcx, %rsi
movq %r14, %rdi
callq 0x7f9ba8
leaq 0x1d6c58f(%rip), %rsi # 0x33576c4
movl $0x3, %edx
movq %rax, %rdi
callq 0x7f9ba8
movq %rax, %r14
movq %r15, %rdi
callq 0x1606e2e
movl %eax, %esi
movq %r14, %rdi
callq 0x2b7d110
movq 0x20(%rax), %rcx
cmpq 0x18(%rax), %rcx
jae 0x15eb1c8
leaq 0x1(%rcx), %rdx
movq %rdx, 0x20(%rax)
movb $0x5f, (%rcx)
jmp 0x15eb1d5
movq 0x10(%rbx), %rsi
movq %r14, %rdi
jmp 0x15eb1dc
movq 0x10(%rbx), %rdi
callq 0x1cfa618
jmp 0x15eb18e
movq 0x10(%rbx), %rsi
movq %r15, %rdi
callq 0x1611218
movq 0x40(%r15), %rdx
movq %rax, %rdi
movq %r14, %rsi
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
jmp 0x28faec8
movq (%r15), %rax
movq 0x1b0(%rax), %rax
movq %r15, %rdi
movq %rbx, %rsi
movq %r14, %rdx
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
jmpq *%rax
movq %rax, %rdi
movl $0x5f, %esi
callq 0x2b7d68e
movslq 0x10(%rbx), %rsi
movq %rax, %rdi
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
jmp 0x2b7d122
|
/Target/XCore/XCoreAsmPrinter.cpp
|
(anonymous namespace)::XCoreLowerThreadLocal::XCoreLowerThreadLocal()
|
XCoreLowerThreadLocal() : ModulePass(ID) {
initializeXCoreLowerThreadLocalPass(*PassRegistry::getPassRegistry());
}
|
pushq %rax
movq $0x0, 0x8(%rdi)
leaq 0x433c694(%rip), %rax # 0x5933e94
movq %rax, 0x10(%rdi)
movl $0x4, 0x18(%rdi)
leaq 0x41eaff6(%rip), %rax # 0x57e2808
movq %rax, (%rdi)
callq 0x2a9031c
movq %rsp, %rdx
movq %rax, (%rdx)
leaq 0x433c669(%rip), %rdi # 0x5933e90
leaq -0xdf(%rip), %rsi # 0x15f774f
callq 0x939005
popq %rax
retq
|
/Target/XCore/XCoreLowerThreadLocal.cpp
|
llvm::XCoreRegisterInfo::getReservedRegs(llvm::MachineFunction const&) const
|
BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
const XCoreFrameLowering *TFI = getFrameLowering(MF);
Reserved.set(XCore::CP);
Reserved.set(XCore::DP);
Reserved.set(XCore::SP);
Reserved.set(XCore::LR);
if (TFI->hasFP(MF)) {
Reserved.set(XCore::R10);
}
return Reserved;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdx, %r14
movq %rdi, %rbx
movl 0x10(%rsi), %r15d
leal 0x3f(%r15), %esi
shrl $0x6, %esi
leaq 0x10(%rdi), %rax
movq %rax, (%rdi)
movabsq $0x600000000, %rax # imm = 0x600000000
movq %rax, 0x8(%rdi)
xorl %edx, %edx
callq 0x7fa254
movl %r15d, 0x40(%rbx)
movq 0x10(%r14), %rdi
movq (%rdi), %rax
callq *0x88(%rax)
movq (%rbx), %rcx
orq $0x1e, (%rcx)
movq (%rax), %rcx
movq %rax, %rdi
movq %r14, %rsi
callq *0xc8(%rcx)
testb %al, %al
je 0x15f9221
movq (%rbx), %rax
orq $0x8000, (%rax) # imm = 0x8000
movq %rbx, %rax
popq %rbx
popq %r14
popq %r15
retq
|
/Target/XCore/XCoreRegisterInfo.cpp
|
llvm::XCoreFrameLowering::emitPrologue(llvm::MachineFunction&, llvm::MachineBasicBlock&) const
|
void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo &MFI = MF.getFrameInfo();
const MCRegisterInfo *MRI = MF.getContext().getRegisterInfo();
const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
DebugLoc dl;
if (MFI.getMaxAlign() > getStackAlign())
report_fatal_error("emitPrologue unsupported alignment: " +
Twine(MFI.getMaxAlign().value()));
const AttributeList &PAL = MF.getFunction().getAttributes();
if (PAL.hasAttrSomewhere(Attribute::Nest))
BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
// FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().
// Work out frame sizes.
// We will adjust the SP in stages towards the final FrameSize.
assert(MFI.getStackSize()%4 == 0 && "Misaligned frame size");
const int FrameSize = MFI.getStackSize() / 4;
int Adjusted = 0;
bool saveLR = XFI->hasLRSpillSlot();
bool UseENTSP = saveLR && FrameSize
&& (MFI.getObjectOffset(XFI->getLRSpillSlot()) == 0);
if (UseENTSP)
saveLR = false;
bool FP = hasFP(MF);
bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);
if (UseENTSP) {
// Allocate space on the stack at the same time as saving LR.
Adjusted = (FrameSize > MaxImmU16) ? MaxImmU16 : FrameSize;
int Opcode = isImmU6(Adjusted) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
MBB.addLiveIn(XCore::LR);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode));
MIB.addImm(Adjusted);
MIB->addRegisterKilled(XCore::LR, MF.getSubtarget().getRegisterInfo(),
true);
if (emitFrameMoves) {
EmitDefCfaOffset(MBB, MBBI, dl, TII, Adjusted*4);
unsigned DRegNum = MRI->getDwarfRegNum(XCore::LR, true);
EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, 0);
}
}
// If necessary, save LR and FP to the stack, as we EXTSP.
SmallVector<StackSlotInfo,2> SpillList;
GetSpillList(SpillList, MFI, XFI, saveLR, FP);
// We want the nearest (negative) offsets first, so reverse list.
std::reverse(SpillList.begin(), SpillList.end());
for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
int OffsetFromTop = - SpillList[i].Offset/4;
IfNeededExtSP(MBB, MBBI, dl, TII, OffsetFromTop, Adjusted, FrameSize,
emitFrameMoves);
int Offset = Adjusted - OffsetFromTop;
int Opcode = isImmU6(Offset) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
MBB.addLiveIn(SpillList[i].Reg);
BuildMI(MBB, MBBI, dl, TII.get(Opcode))
.addReg(SpillList[i].Reg, RegState::Kill)
.addImm(Offset)
.addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
MachineMemOperand::MOStore));
if (emitFrameMoves) {
unsigned DRegNum = MRI->getDwarfRegNum(SpillList[i].Reg, true);
EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, SpillList[i].Offset);
}
}
// Complete any remaining Stack adjustment.
IfNeededExtSP(MBB, MBBI, dl, TII, FrameSize, Adjusted, FrameSize,
emitFrameMoves);
assert(Adjusted==FrameSize && "IfNeededExtSP has not completed adjustment");
if (FP) {
// Set the FP from the SP.
BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr).addImm(0);
if (emitFrameMoves)
EmitDefCfaRegister(MBB, MBBI, dl, TII, MF,
MRI->getDwarfRegNum(FramePtr, true));
}
if (emitFrameMoves) {
// Frame moves for callee saved.
for (const auto &SpillLabel : XFI->getSpillLabels()) {
MachineBasicBlock::iterator Pos = SpillLabel.first;
++Pos;
const CalleeSavedInfo &CSI = SpillLabel.second;
int Offset = MFI.getObjectOffset(CSI.getFrameIdx());
unsigned DRegNum = MRI->getDwarfRegNum(CSI.getReg(), true);
EmitCfiOffset(MBB, Pos, dl, TII, DRegNum, Offset);
}
if (XFI->hasEHSpillSlot()) {
// The unwinder requires stack slot & CFI offsets for the exception info.
// We do not save/spill these registers.
const Function *Fn = &MF.getFunction();
const Constant *PersonalityFn =
Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
SmallVector<StackSlotInfo, 2> SpillList;
GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
MF.getSubtarget().getTargetLowering());
assert(SpillList.size()==2 && "Unexpected SpillList size");
EmitCfiOffset(MBB, MBBI, dl, TII,
MRI->getDwarfRegNum(SpillList[0].Reg, true),
SpillList[0].Offset);
EmitCfiOffset(MBB, MBBI, dl, TII,
MRI->getDwarfRegNum(SpillList[1].Reg, true),
SpillList[1].Offset);
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x148, %rsp # imm = 0x148
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r14
movq 0x38(%rdx), %r13
movq 0x38(%rsi), %rbp
movq 0x10(%rsi), %rdi
movq 0x18(%rsi), %rax
movq 0xa0(%rax), %rax
movq %rax, 0x38(%rsp)
movq (%rdi), %rax
callq *0x80(%rax)
movq %rax, %r12
movq 0x30(%r15), %rax
movq %rax, 0x58(%rsp)
movq $0x0, 0x8(%rsp)
movzbl 0x40(%rbp), %ecx
cmpb 0xc(%r14), %cl
ja 0x15fb4fe
movq %r15, 0x30(%rsp)
movq (%r15), %rax
movq 0x70(%rax), %rax
leaq 0x140(%rsp), %rdi
movq %rax, (%rdi)
movl $0x14, %esi
xorl %edx, %edx
callq 0x29acba8
testb %al, %al
je 0x15fabb2
movq 0x8(%rsp), %rsi
movq %rsi, 0x50(%rsp)
testq %rsi, %rsi
je 0x15fab03
leaq 0x50(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x50(%rsp), %rsi
movq %rsi, 0xa0(%rsp)
testq %rsi, %rsi
je 0x15fab31
leaq 0x50(%rsp), %r15
leaq 0xa0(%rsp), %rdx
movq %r15, %rdi
callq 0x2a759cc
movq $0x0, (%r15)
xorps %xmm0, %xmm0
leaq 0xa0(%rsp), %r15
movups %xmm0, 0x8(%r15)
movq $-0x3680, %rcx # imm = 0xC980
addq 0x8(%r12), %rcx
movq %rbx, %rdi
movq %r13, %rsi
movq %r15, %rdx
movl $0x10, %r8d
callq 0x90f593
movl $0xfff00000, %esi # imm = 0xFFF00000
leaq 0xd0(%rsp), %rcx
andl (%rcx), %esi
movq %rdx, %rdi
incl %esi
movl %esi, (%rcx)
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%rcx)
movq %rax, %rsi
movq %rcx, %rdx
callq 0x1d3c22c
movq (%r15), %rsi
testq %rsi, %rsi
je 0x15fab9e
leaq 0xa0(%rsp), %rdi
callq 0x2a758fc
movq 0x50(%rsp), %rsi
testq %rsi, %rsi
je 0x15fabb2
leaq 0x50(%rsp), %rdi
callq 0x2a758fc
movq %r12, 0x18(%rsp)
movq 0x30(%rbp), %rcx
shrq $0x2, %rcx
movl $0x0, 0x14(%rsp)
movq %rbp, %rdx
movq 0x58(%rsp), %r12
movb 0x8(%r12), %bpl
movl %ebp, %eax
xorb $0x1, %al
movq %rcx, 0x70(%rsp)
testl %ecx, %ecx
sete %cl
orb %al, %cl
movq %rdx, 0x68(%rsp)
jne 0x15fac06
movl 0x20(%rdx), %eax
addl 0xc(%r12), %eax
movq 0x8(%rdx), %rcx
leaq (%rax,%rax,4), %rax
cmpq $0x0, (%rcx,%rax,8)
sete %r15b
jmp 0x15fac09
xorl %r15d, %r15d
xorb $0x1, %r15b
andb %r15b, %bpl
movq (%r14), %rax
movq %r14, %rdi
movq 0x30(%rsp), %r14
movq %r14, %rsi
callq *0xc8(%rax)
movb %al, 0x7(%rsp)
movq %r14, %rdi
callq 0x15f917a
movb %al, 0x6(%rsp)
testb %r15b, %r15b
movq %r13, 0x60(%rsp)
jne 0x15faddd
movl $0xffff, %r13d # imm = 0xFFFF
movq 0x70(%rsp), %rax
cmpl %r13d, %eax
cmovll %eax, %r13d
movl %r13d, 0x14(%rsp)
cmpl $0x40, %r13d
movq $-0x169, %r14 # imm = 0xFE97
sbbq $0x0, %r14
movq %rbx, %rdi
movl $0x3, %esi
movq $-0x1, %rdx
callq 0x944102
movq 0x8(%rsp), %rsi
movq %rsi, 0x48(%rsp)
testq %rsi, %rsi
je 0x15fac9a
leaq 0x48(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x48(%rsp), %rsi
movq %rsi, 0xd0(%rsp)
testq %rsi, %rsi
je 0x15facc8
leaq 0x48(%rsp), %r15
leaq 0xd0(%rsp), %rdx
movq %r15, %rdi
callq 0x2a759cc
movq $0x0, (%r15)
xorps %xmm0, %xmm0
leaq 0xd0(%rsp), %r12
movups %xmm0, 0x8(%r12)
shlq $0x5, %r14
movq 0x18(%rsp), %rax
addq 0x8(%rax), %r14
movq %rbx, %rdi
movq 0x60(%rsp), %rsi
movq %r12, %rdx
movq %r14, %rcx
callq 0x93f73a
movq %rax, %r15
movq %rdx, %r14
movq (%r12), %rsi
testq %rsi, %rsi
je 0x15fad15
leaq 0xd0(%rsp), %rdi
callq 0x2a758fc
movq 0x48(%rsp), %rsi
testq %rsi, %rsi
movq 0x58(%rsp), %r12
je 0x15fad2e
leaq 0x48(%rsp), %rdi
callq 0x2a758fc
movslq %r13d, %rax
movl $0xfff00000, %ecx # imm = 0xFFF00000
leaq 0xd0(%rsp), %rdx
andl (%rdx), %ecx
incl %ecx
movl %ecx, (%rdx)
movq $0x0, 0x8(%rdx)
movq %rax, 0x10(%rdx)
movq %r14, %rdi
movq %r15, %rsi
callq 0x1d3c22c
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rdi
movq (%rdi), %rax
callq *0xc8(%rax)
movq %r14, %rdi
movl $0x3, %esi
movq %rax, %rdx
movl $0x1, %ecx
callq 0x1d40faa
cmpb $0x0, 0x6(%rsp)
movq 0x60(%rsp), %r13
je 0x15faddd
movl 0x14(%rsp), %r8d
shll $0x2, %r8d
leaq 0x8(%rsp), %r14
movq %rbx, %rdi
movq %r13, %rsi
movq %r14, %rdx
movq 0x18(%rsp), %r15
movq %r15, %rcx
callq 0x15fb551
movq 0x38(%rsp), %rdi
movl $0x3, %esi
movl $0x1, %edx
callq 0x28f28d0
movq %rbx, %rdi
movq %r13, %rsi
movq %r14, %rdx
movq %r15, %rcx
movl %eax, %r8d
xorl %r9d, %r9d
callq 0x15fb695
leaq 0xb0(%rsp), %rax
movq %rax, -0x10(%rax)
movabsq $0x200000000, %rcx # imm = 0x200000000
movq %rcx, -0x8(%rax)
movzbl %bpl, %ecx
movzbl 0x7(%rsp), %r8d
leaq 0xa0(%rsp), %r14
movq %r14, %rdi
movq 0x68(%rsp), %rsi
movq %r12, %rdx
callq 0x15fb7f6
movq (%r14), %rax
movl 0x8(%r14), %ecx
movl %ecx, %edx
leaq (%rdx,%rdx,2), %rdx
testl %ecx, %ecx
setne %sil
leaq (%rax,%rdx,4), %rcx
addq $-0xc, %rcx
cmpq %rax, %rcx
seta %dl
andb %sil, %dl
cmpb $0x1, %dl
jne 0x15fae8e
addq $0xc, %rax
movl -0x4(%rax), %edx
movl %edx, 0xd8(%rsp)
movq -0xc(%rax), %rdx
movq %rdx, 0xd0(%rsp)
movl 0x8(%rcx), %edx
movl %edx, -0x4(%rax)
movq (%rcx), %rdx
movq %rdx, -0xc(%rax)
movl 0xd8(%rsp), %edx
movl %edx, 0x8(%rcx)
movq 0xd0(%rsp), %rdx
movq %rdx, (%rcx)
addq $-0xc, %rcx
leaq 0xc(%rax), %rdx
cmpq %rcx, %rax
movq %rdx, %rax
jb 0x15fae46
movl 0xa8(%rsp), %eax
movzbl 0x6(%rsp), %ecx
movq %rcx, 0xc8(%rsp)
testq %rax, %rax
movq 0x18(%rsp), %rbp
je 0x15fb100
shlq $0x2, %rax
leaq (%rax,%rax,2), %rax
movq %rax, 0x138(%rsp)
xorl %r14d, %r14d
movq 0xa0(%rsp), %rax
movl 0x4(%rax,%r14), %eax
leal 0x3(%rax), %r15d
testl %eax, %eax
cmovnsl %eax, %r15d
sarl $0x2, %r15d
negl %r15d
movq %rbx, %rdi
movq %r13, %rsi
leaq 0x8(%rsp), %rdx
movq %rbp, %rcx
movl %r15d, %r8d
leaq 0x14(%rsp), %r9
pushq 0xc8(%rsp)
pushq 0x78(%rsp)
callq 0x15fb878
addq $0x10, %rsp
movslq 0x14(%rsp), %r12
movslq %r15d, %rax
subq %rax, %r12
cmpl $0x40, %r12d
movq $-0x1f4, %r15 # imm = 0xFE0C
sbbq $0x0, %r15
movq 0xa0(%rsp), %rax
movl 0x8(%rax,%r14), %esi
movq %rbx, %rdi
movq $-0x1, %rdx
callq 0x944102
movq 0x8(%rsp), %rsi
movq %rsi, 0x20(%rsp)
testq %rsi, %rsi
je 0x15faf5f
movl $0x1, %edx
leaq 0x20(%rsp), %rdi
callq 0x2a757d8
movq 0x20(%rsp), %rsi
movq %rsi, 0x78(%rsp)
testq %rsi, %rsi
leaq 0x78(%rsp), %rcx
je 0x15faf90
leaq 0x20(%rsp), %rdi
leaq 0x78(%rsp), %rdx
callq 0x2a759cc
leaq 0x78(%rsp), %rcx
movq $0x0, 0x20(%rsp)
leaq 0x80(%rsp), %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
shlq $0x5, %r15
addq 0x8(%rbp), %r15
movq %rbx, %rdi
movq %r13, %rsi
movq %rcx, %rdx
movq %r15, %rcx
callq 0x93f73a
movq %rax, %r15
movq %rdx, %r13
movq 0xa0(%rsp), %rax
movl 0x8(%rax,%r14), %eax
xorl %ecx, %ecx
movq %rcx, 0xd8(%rsp)
movl $0x4000000, %ecx # imm = 0x4000000
movl %ecx, 0xd0(%rsp)
movl %eax, 0xd4(%rsp)
leaq 0xe0(%rsp), %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movl %ecx, 0xd0(%rsp)
movq %rdx, %rdi
movq %r15, %rsi
leaq 0xd0(%rsp), %rbp
movq %rbp, %rdx
callq 0x1d3c22c
movl 0xd0(%rsp), %eax
movl $0xfff00000, %ecx # imm = 0xFFF00000
andl %ecx, %eax
incl %eax
movl %eax, 0xd0(%rsp)
xorl %eax, %eax
movq %rax, 0xd8(%rsp)
movq %r12, 0xe0(%rsp)
movq %r13, %rdi
movq %r15, %rsi
movq %rbp, %rdx
callq 0x1d3c22c
movq 0xa0(%rsp), %rax
movl (%rax,%r14), %esi
movq %rbx, %r12
movq %rbx, %rdi
movl $0x2, %edx
callq 0x15fb9d1
movq %r13, %rdi
movq %r15, %rsi
movq %rax, %rdx
callq 0x1d3cbe2
movq 0x78(%rsp), %rsi
testq %rsi, %rsi
je 0x15fb087
leaq 0x78(%rsp), %rdi
callq 0x2a758fc
movq 0x20(%rsp), %rsi
testq %rsi, %rsi
je 0x15fb09b
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
cmpb $0x0, 0x6(%rsp)
movq 0x60(%rsp), %r13
movq %r12, %rbx
movq 0x18(%rsp), %rbp
je 0x15fb0ee
movq 0xa0(%rsp), %rax
movl 0x8(%rax,%r14), %esi
movq 0x38(%rsp), %rdi
movl $0x1, %edx
callq 0x28f28d0
movq 0xa0(%rsp), %rcx
movl 0x4(%rcx,%r14), %r9d
movq %rbx, %rdi
movq %r13, %rsi
leaq 0x8(%rsp), %rdx
movq %rbp, %rcx
movl %eax, %r8d
callq 0x15fb695
addq $0xc, %r14
cmpq %r14, 0x138(%rsp)
jne 0x15faec3
leaq 0x8(%rsp), %rdx
leaq 0x14(%rsp), %r9
movq %rbx, %rdi
movq %r13, %rsi
movq %rbp, %rcx
movq 0x70(%rsp), %r8
pushq 0xc8(%rsp)
pushq %r8
callq 0x15fb878
addq $0x10, %rsp
cmpb $0x0, 0x7(%rsp)
movq 0x58(%rsp), %r15
je 0x15fb33a
movq 0x8(%rsp), %rsi
movq %rsi, 0x40(%rsp)
testq %rsi, %rsi
je 0x15fb158
leaq 0x40(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x40(%rsp), %rsi
movq %rsi, 0x78(%rsp)
testq %rsi, %rsi
je 0x15fb180
leaq 0x40(%rsp), %r14
leaq 0x78(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x78(%rsp), %r14
movups %xmm0, 0x8(%r14)
movq $-0x34a0, %rcx # imm = 0xCB60
addq 0x8(%rbp), %rcx
movq %rbx, %rdi
movq %r13, %rsi
movq %r14, %rdx
movl $0xf, %r8d
callq 0x90f593
movl $0xfff00000, %esi # imm = 0xFFF00000
leaq 0xd0(%rsp), %rcx
andl (%rcx), %esi
movq %rdx, %rdi
incl %esi
movl %esi, (%rcx)
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%rcx)
movq %rax, %rsi
movq %rcx, %rdx
callq 0x1d3c22c
movq (%r14), %rsi
testq %rsi, %rsi
je 0x15fb1e6
leaq 0x78(%rsp), %rdi
callq 0x2a758fc
movq 0x40(%rsp), %rsi
testq %rsi, %rsi
je 0x15fb1fa
leaq 0x40(%rsp), %rdi
callq 0x2a758fc
cmpb $0x0, 0x6(%rsp)
je 0x15fb4be
movq 0x38(%rsp), %rdi
movl $0xf, %esi
movl $0x1, %edx
callq 0x28f28d0
leaq 0xd0(%rsp), %r14
movq %r14, %rdi
xorl %esi, %esi
movl %eax, %edx
xorl %ecx, %ecx
callq 0x9b66b4
movq 0x30(%rsp), %rdi
movq %r14, %rsi
callq 0x1d34244
movl %eax, %ebp
movq 0x48(%r14), %rdi
leaq 0x128(%rsp), %rax
cmpq %rax, %rdi
je 0x15fb25f
movq 0x128(%rsp), %rsi
incq %rsi
callq 0x7800d0
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x15fb27c
movq 0x110(%rsp), %rsi
subq %rdi, %rsi
callq 0x7800d0
movq 0x8(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0x15fb29a
leaq 0x28(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x28(%rsp), %rsi
movq %rsi, 0x78(%rsp)
testq %rsi, %rsi
je 0x15fb2c2
leaq 0x28(%rsp), %r14
leaq 0x78(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x78(%rsp), %r14
movups %xmm0, 0x8(%r14)
movq 0x18(%rsp), %rax
movq 0x8(%rax), %rcx
addq $-0x60, %rcx
movq %rbx, %rdi
movq %r13, %rsi
movq %r14, %rdx
callq 0x93f73a
movq %rdx, %rdi
movl $0xfff00000, %ecx # imm = 0xFFF00000
leaq 0xd0(%rsp), %rdx
andl (%rdx), %ecx
orl $0x10, %ecx
movl %ecx, (%rdx)
movq $0x0, 0x8(%rdx)
movl %ebp, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq (%r14), %rsi
testq %rsi, %rsi
je 0x15fb326
leaq 0x78(%rsp), %rdi
callq 0x2a758fc
movq 0x28(%rsp), %rsi
testq %rsi, %rsi
je 0x15fb33a
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
cmpb $0x0, 0x6(%rsp)
je 0x15fb4be
movq 0x38(%r15), %r12
movq 0x40(%r15), %r13
cmpq %r13, %r12
je 0x15fb3c3
leaq 0x8(%rsp), %r14
movq (%r12), %rax
testq %rax, %rax
movq 0x68(%rsp), %rcx
je 0x15fb370
testb $0x4, (%rax)
je 0x15fb370
jmp 0x15fb376
movq 0x8(%rax), %rax
testb $0x8, 0x2c(%rax)
jne 0x15fb36c
movq 0x8(%rax), %r15
movl 0x20(%rcx), %eax
addl 0xc(%r12), %eax
movq 0x8(%rcx), %rcx
leaq (%rax,%rax,4), %rax
movl (%rcx,%rax,8), %ebp
movl 0x8(%r12), %esi
movq 0x38(%rsp), %rdi
movl $0x1, %edx
callq 0x28f28d0
movq %rbx, %rdi
movq %r15, %rsi
movq %r14, %rdx
movq 0x18(%rsp), %rcx
movl %eax, %r8d
movl %ebp, %r9d
callq 0x15fb695
addq $0x18, %r12
cmpq %r13, %r12
jne 0x15fb357
movq 0x58(%rsp), %r12
cmpb $0x0, 0x18(%r12)
movq 0x60(%rsp), %r13
movq 0x18(%rsp), %rbp
je 0x15fb4be
movq 0x30(%rsp), %rax
movq (%rax), %rdi
testb $0x8, 0x2(%rdi)
jne 0x15fb3f1
xorl %r15d, %r15d
jmp 0x15fb3fe
callq 0x2a40ed6
movq %rax, %r15
movq 0x30(%rsp), %rax
movabsq $0x200000000, %rcx # imm = 0x200000000
leaq 0xe0(%rsp), %rdx
movq %rdx, -0x10(%rdx)
movq %rcx, -0x8(%rdx)
movq 0x10(%rax), %rdi
movq (%rdi), %rax
callq *0x90(%rax)
leaq 0xd0(%rsp), %r14
movq %r14, %rdi
movq 0x68(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
movq %rax, %r8
callq 0x15fba86
movq (%r14), %rax
movl 0x8(%rax), %esi
movq 0x38(%rsp), %r12
movq %r12, %rdi
movl $0x1, %edx
callq 0x28f28d0
movq (%r14), %rcx
movl 0x4(%rcx), %r9d
leaq 0x8(%rsp), %r15
movq %rbx, %rdi
movq %r13, %rsi
movq %r15, %rdx
movq %rbp, %rcx
movl %eax, %r8d
callq 0x15fb695
movq (%r14), %rax
movl 0x14(%rax), %esi
movq %r12, %rdi
movl $0x1, %edx
callq 0x28f28d0
movq (%r14), %rcx
movl 0x10(%rcx), %r9d
movq %rbx, %rdi
movq %r13, %rsi
movq %r15, %rdx
movq %rbp, %rcx
movl %eax, %r8d
callq 0x15fb695
movq (%r14), %rdi
leaq 0xe0(%rsp), %rax
cmpq %rax, %rdi
je 0x15fb4be
callq 0x780910
movq 0xa0(%rsp), %rdi
leaq 0xb0(%rsp), %rax
cmpq %rax, %rdi
je 0x15fb4d8
callq 0x780910
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x15fb4ec
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
addq $0x148, %rsp # imm = 0x148
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x2960057(%rip), %rax # 0x3f5b55c
leaq 0xa0(%rsp), %rsi
movq %rax, (%rsi)
movw $0x103, 0x20(%rsi) # imm = 0x103
movl $0x1, %eax
shlq %cl, %rax
leaq 0x28(%rsp), %rcx
movq %rax, (%rcx)
leaq 0x78(%rsp), %rdx
movw $0x10b, 0x20(%rdx) # imm = 0x10B
movq %rcx, (%rdx)
leaq 0xd0(%rsp), %rbx
movq %rbx, %rdi
callq 0x80ec08
movq %rbx, %rdi
movl $0x1, %esi
callq 0x2b31b3e
|
/Target/XCore/XCoreFrameLowering.cpp
|
EmitDefCfaOffset(llvm::MachineBasicBlock&, llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>, llvm::DebugLoc const&, llvm::TargetInstrInfo const&, int)
|
/// Insert a CFI_INSTRUCTION defining the CFA offset at \p MBBI.
/// The MCCFIInstruction is recorded on the MachineFunction and referenced
/// from the inserted pseudo by index.
static void EmitDefCfaOffset(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int Offset) {
  MachineFunction &Fn = *MBB.getParent();
  const unsigned Index =
      Fn.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(Index);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, 0x10(%rsp)
movq %rdi, %r14
movq 0x20(%rdi), %r13
movslq %r8d, %rdx
leaq 0x30(%rsp), %rbx
movq %rbx, %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq 0x9ba8be
movq %r13, %rdi
movq %rbx, %rsi
callq 0x1d34244
movl %eax, %ebp
movq 0x48(%rbx), %rdi
leaq 0x88(%rsp), %rax
cmpq %rax, %rdi
je 0x15fb5b6
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x7800d0
movq 0x60(%rsp), %rdi
testq %rdi, %rdi
je 0x15fb5cd
movq 0x70(%rsp), %rsi
subq %rdi, %rsi
callq 0x7800d0
movq (%r12), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x15fb5ea
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x8(%rsp), %rsi
movq %rsi, 0x18(%rsp)
testq %rsi, %rsi
je 0x15fb612
leaq 0x8(%rsp), %rbx
leaq 0x18(%rsp), %rdx
movq %rbx, %rdi
callq 0x2a759cc
movq $0x0, (%rbx)
xorps %xmm0, %xmm0
leaq 0x18(%rsp), %rbx
movups %xmm0, 0x8(%rbx)
movq 0x8(%r15), %rcx
addq $-0x60, %rcx
movq %r14, %rdi
movq 0x10(%rsp), %rsi
movq %rbx, %rdx
callq 0x93f73a
movq %rdx, %rdi
movl $0xfff00000, %ecx # imm = 0xFFF00000
leaq 0x30(%rsp), %rdx
andl (%rdx), %ecx
orl $0x10, %ecx
movl %ecx, (%rdx)
movq $0x0, 0x8(%rdx)
movl %ebp, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq (%rbx), %rsi
testq %rsi, %rsi
je 0x15fb66f
leaq 0x18(%rsp), %rdi
callq 0x2a758fc
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x15fb683
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/XCore/XCoreFrameLowering.cpp
|
llvm::XCoreTargetLowering::LowerConstantPool(llvm::SDValue, llvm::SelectionDAG&) const
|
SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  // Materialize the entry as a target constant-pool node — machine-specific
  // entries and plain Constants use different getTargetConstantPool
  // overloads — then wrap it in CPRelativeWrapper so the final address is
  // formed relative to the constant pool base.
  SDValue Res =
      CP->isMachineConstantPoolEntry()
          ? DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                      CP->getAlign(), CP->getOffset())
          : DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                      CP->getOffset());
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}
|
pushq %rbp
pushq %r14
pushq %rbx
subq $0x20, %rsp
movq %rcx, %rbx
movl %edx, %ebp
movq %rsi, %r14
movq 0x48(%rsi), %rsi
movq %rsi, (%rsp)
testq %rsi, %rsi
je 0x15fddf4
movq %rsp, %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
movl %eax, 0x8(%rsp)
movl %ebp, %ecx
shlq $0x4, %rcx
movq 0x30(%r14), %rdx
movq 0x58(%r14), %rsi
movb (%rdx,%rcx), %al
movq 0x8(%rdx,%rcx), %rcx
movl 0x60(%r14), %r9d
movzbl 0x64(%r14), %edx
testl %r9d, %r9d
js 0x15fde3c
movzwl %dx, %r8d
orl $0x100, %r8d # imm = 0x100
movzbl %al, %edx
movq %rbx, %rdi
pushq $0x0
pushq $0x1
callq 0x1766180
jmp 0x15fde5d
andl $0x7fffffff, %r9d # imm = 0x7FFFFFFF
movzwl %dx, %r8d
orl $0x100, %r8d # imm = 0x100
movzbl %al, %edx
movq %rbx, %rdi
pushq $0x0
pushq $0x1
callq 0x176644c
addq $0x10, %rsp
movq %rax, 0x10(%rsp)
movl %edx, 0x18(%rsp)
subq $0x10, %rsp
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x10(%rsp), %r14
movq %rbx, %rdi
movl $0x1e9, %esi # imm = 0x1E9
movq %r14, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
callq 0x176388a
addq $0x10, %rsp
movq %rax, %rbx
movl %edx, %ebp
movq (%r14), %rsi
testq %rsi, %rsi
je 0x15fdead
movq %rsp, %rdi
callq 0x2a758fc
movq %rbx, %rax
movl %ebp, %edx
addq $0x20, %rsp
popq %rbx
popq %r14
popq %rbp
retq
nop
|
/Target/XCore/XCoreISelLowering.cpp
|
llvm::DIELabel::sizeOf(llvm::dwarf::FormParams const&, llvm::dwarf::Form) const
|
unsigned DIELabel::sizeOf(const dwarf::FormParams &FormParams,
                          dwarf::Form Form) const {
  // The size is dictated by the DWARF form: fixed for dataN, dependent on
  // the target address size or the DWARF32/DWARF64 dialect otherwise.
  switch (Form) {
  case dwarf::DW_FORM_addr:
    return FormParams.AddrSize;
  case dwarf::DW_FORM_strp:
  case dwarf::DW_FORM_sec_offset:
    return FormParams.getDwarfOffsetByteSize();
  case dwarf::DW_FORM_data4:
    return 4;
  case dwarf::DW_FORM_data8:
    return 8;
  default:
    llvm_unreachable("DIE Value form not supported yet");
  }
}
|
cmpl $0x6, %edx
jle 0x161e281
cmpl $0x17, %edx
je 0x161e273
cmpl $0xe, %edx
jne 0x161e28d
cmpb $0x0, 0x3(%rsi)
setne %al
shlb $0x2, %al
addb $0x4, %al
jmp 0x161e28f
movb $0x4, %al
cmpl $0x1, %edx
jne 0x161e28f
movb 0x2(%rsi), %al
jmp 0x161e28f
movb $0x8, %al
movzbl %al, %eax
retq
nop
|
/CodeGen/AsmPrinter/DIE.cpp
|
llvm::DIEBlock::sizeOf(llvm::dwarf::FormParams const&, llvm::dwarf::Form) const
|
unsigned DIEBlock::sizeOf(const dwarf::FormParams &, dwarf::Form Form) const {
  // Emitted size is the block payload plus the form's length prefix,
  // except data16 which is a fixed 16 bytes with no prefix.
  switch (Form) {
  case dwarf::DW_FORM_data16:
    return 16;
  case dwarf::DW_FORM_block:
  case dwarf::DW_FORM_exprloc:
    return Size + getULEB128Size(Size);
  case dwarf::DW_FORM_block4:
    return Size + sizeof(int32_t);
  case dwarf::DW_FORM_block2:
    return Size + sizeof(int16_t);
  case dwarf::DW_FORM_block1:
    return Size + sizeof(int8_t);
  default:
    llvm_unreachable("Improper form for block");
  }
}
|
leal -0x3(%rdx), %eax
cmpl $0x7, %eax
ja 0x161e335
leaq 0x293e50b(%rip), %rcx # 0x3f5c830
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movl 0x8(%rdi), %eax
addl $0x2, %eax
retq
movl $0x10, %eax
cmpl $0x18, %edx
jne 0x161e35b
pushq %rbx
movl 0x8(%rdi), %ebx
movq %rbx, %rdi
callq 0x2b40dd4
addl %ebx, %eax
popq %rbx
retq
movl 0x8(%rdi), %eax
addl $0x4, %eax
retq
movl 0x8(%rdi), %eax
incl %eax
retq
|
/CodeGen/AsmPrinter/DIE.cpp
|
llvm::SmallVectorTemplateBase<llvm::DbgValueLoc, false>::push_back(llvm::DbgValueLoc const&)
|
/// Append a copy of \p Elt, growing the storage if needed.
void push_back(const T &Elt) {
  // Reserve first and re-derive the element's address: if Elt aliases an
  // element of this vector, growth may have relocated it.
  const T *EltPtr = reserveForParamAndGetAddress(Elt);
  // Copy-construct into the uninitialized slot one past the last element.
  ::new ((void *)this->end()) T(*EltPtr);
  this->set_size(this->size() + 1);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movl $0x1, %edx
callq 0x16349f6
movq %rax, %r14
movq (%rbx), %rax
movl 0x8(%rbx), %ecx
leaq (%rcx,%rcx,4), %rcx
shlq $0x4, %rcx
leaq (%rax,%rcx), %r15
movq (%r14), %rdx
leaq (%rax,%rcx), %rsi
addq $0x18, %rsi
movq %rdx, -0x18(%rsi)
movq %rsi, -0x10(%rsi)
movabsq $0x200000000, %rdx # imm = 0x200000000
movq %rdx, -0x8(%rsi)
cmpl $0x0, 0x10(%r14)
je 0x16258c3
leaq (%rax,%rcx), %rdi
addq $0x8, %rdi
movq %r14, %rsi
addq $0x8, %rsi
callq 0x162d9de
movb 0x48(%r14), %al
movb %al, 0x48(%r15)
incl 0x8(%rbx)
popq %rbx
popq %r14
popq %r15
retq
|
/llvm/ADT/SmallVector.h
|
llvm::DwarfDebug::recordSourceLine(unsigned int, unsigned int, llvm::MDNode const*, unsigned int)
|
/// Forward to the file-static recordSourceLine helper, supplying the
/// streamer's current compile-unit ID, the DWARF version, and the unit list.
void DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode *S,
                                  unsigned Flags) {
  unsigned CUID = Asm->OutStreamer->getContext().getDwarfCompileUnitID();
  ::recordSourceLine(*Asm, Line, Col, S, Flags, CUID, getDwarfVersion(),
                     getUnits());
}
|
subq $0x28, %rsp
movq 0x8(%rdi), %rax
movq 0x50(%rax), %r9
movq 0x8(%r9), %r10
movl 0x760(%r10), %r9d
movzwl 0x758(%r10), %r10d
movq 0xbf8(%rdi), %r11
movq %r11, 0x18(%rsp)
movl 0xc00(%rdi), %edi
movq %rdi, 0x20(%rsp)
movups 0x18(%rsp), %xmm0
movups %xmm0, 0x8(%rsp)
movl %r10d, (%rsp)
movq %rax, %rdi
callq 0x1626f9e
addq $0x28, %rsp
retq
nop
|
/CodeGen/AsmPrinter/DwarfDebug.cpp
|
llvm::DwarfDebug::initSkeletonUnit(llvm::DwarfUnit const&, llvm::DIE&, std::unique_ptr<llvm::DwarfCompileUnit, std::default_delete<llvm::DwarfCompileUnit>>)
|
void DwarfDebug::initSkeletonUnit(const DwarfUnit &U, DIE &Die,
std::unique_ptr<DwarfCompileUnit> NewU) {
if (!CompilationDir.empty())
NewU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
addGnuPubAttributes(*NewU, Die);
SkeletonHolder.addUnit(std::move(NewU));
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %rbx
movq %rdx, %r15
movq %rdi, %r14
cmpq $0x0, 0xb58(%rdi)
je 0x162a5ed
movq (%rbx), %rdi
movq 0xb50(%r14), %rcx
movq 0xb58(%r14), %r8
movq %r15, %rsi
movl $0x1b, %edx
callq 0x163cf34
movq (%rbx), %r12
movq %r12, %rdi
callq 0x16702a2
testb %al, %al
je 0x162a60c
movq %r12, %rdi
movq %r15, %rsi
movl $0x2134, %edx # imm = 0x2134
callq 0x163cdb2
addq $0xd98, %r14 # imm = 0xD98
movq (%rbx), %rax
movq %rsp, %r15
movq %rax, (%r15)
movq $0x0, (%rbx)
movq %r14, %rdi
movq %r15, %rsi
callq 0x163ab20
movq %r15, %rdi
callq 0x16226e2
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/CodeGen/AsmPrinter/DwarfDebug.cpp
|
void llvm::cl::apply<llvm::cl::opt<llvm::AccelTableKind, false, llvm::cl::parser<llvm::AccelTableKind>>, char [13], llvm::cl::OptionHidden, llvm::cl::desc, llvm::cl::ValuesClass, llvm::cl::initializer<llvm::AccelTableKind>>(llvm::cl::opt<llvm::AccelTableKind, false, llvm::cl::parser<llvm::AccelTableKind>>*, char const (&) [13], llvm::cl::OptionHidden const&, llvm::cl::desc const&, llvm::cl::ValuesClass const&, llvm::cl::initializer<llvm::AccelTableKind> const&)
|
// Apply modifier M to option *O via its applicator specialization, then
// peel it off and recurse on the remaining modifiers.
// NOTE(review): recursion terminates in a zero-modifier apply overload that
// is not visible in this chunk — confirm it exists alongside this template.
void apply(Opt *O, const Mod &M, const Mods &... Ms) {
  applicator<Mod>::opt(M, *O);
  apply(O, Ms...);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %r9, %r14
movq %r8, %r15
movq %rcx, %r12
movq %rdx, %r13
movq %rsi, %rbp
movq %rdi, %rbx
movq %rsi, %rdi
callq 0x7802c0
movq %rbx, %rdi
movq %rbp, %rsi
movq %rax, %rdx
callq 0x2b1f336
movl (%r13), %eax
movzwl 0xa(%rbx), %ecx
shll $0x5, %eax
andl $0x60, %eax
andl $-0x61, %ecx
orl %eax, %ecx
movw %cx, 0xa(%rbx)
movups (%r12), %xmm0
movups %xmm0, 0x20(%rbx)
movq %r15, %rdi
movq %rbx, %rsi
callq 0x1631b14
movq (%r14), %rax
movl (%rax), %ecx
movl %ecx, 0x80(%rbx)
movb $0x1, 0x94(%rbx)
movl (%rax), %eax
movl %eax, 0x90(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/llvm/Support/CommandLine.h
|
llvm::SmallVectorTemplateBase<std::pair<llvm::MDNode const*, llvm::DwarfCompileUnit*>, true>::push_back(std::pair<llvm::MDNode const*, llvm::DwarfCompileUnit*>)
|
/// Append \p Elt (trivially copyable specialization: memcpy instead of
/// placement-new), growing the storage if needed.
void push_back(ValueParamT Elt) {
  // Reserve first and re-derive the address in case Elt aliases an element
  // of this vector and growth relocated the buffer.
  const T *EltPtr = reserveForParamAndGetAddress(Elt);
  memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
  this->set_size(this->size() + 1);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r14
movl 0x8(%rdi), %edx
cmpl 0xc(%rdi), %edx
jae 0x1632e50
movq (%r14), %rax
movl 0x8(%r14), %ecx
shlq $0x4, %rcx
movq %r15, (%rax,%rcx)
movq %rbx, 0x8(%rax,%rcx)
incl 0x8(%r14)
popq %rbx
popq %r14
popq %r15
retq
incq %rdx
leaq 0x10(%r14), %rsi
movl $0x10, %ecx
movq %r14, %rdi
callq 0x2b4ed86
jmp 0x1632e32
|
/llvm/ADT/SmallVector.h
|
llvm::DenseMap<llvm::DIE const*, llvm::DwarfCompileUnit*, llvm::DenseMapInfo<llvm::DIE const*, void>, llvm::detail::DenseMapPair<llvm::DIE const*, llvm::DwarfCompileUnit*>>::grow(unsigned int)
|
/// Grow the table to hold at least \p AtLeast buckets, rehashing any
/// existing entries into the new storage.
void grow(unsigned AtLeast) {
  unsigned OldNumBuckets = NumBuckets;
  BucketT *OldBuckets = Buckets;
  // Round the request up to a power of two, with a floor of 64 buckets.
  allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
  assert(Buckets);
  if (!OldBuckets) {
    // First allocation: just mark every bucket as empty.
    this->BaseT::initEmpty();
    return;
  }
  // Rehash all live entries from the old table into the new buckets.
  this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
  // Free the old table.
  deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                    alignof(BucketT));
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %r15
movl 0x10(%rdi), %ebx
movq (%rdi), %r14
leal -0x1(%rsi), %eax
movl %eax, %ecx
shrl %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x2, %eax
orl %ecx, %eax
movl %eax, %ecx
shrl $0x4, %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x8, %eax
orl %ecx, %eax
movl %eax, %ecx
shrl $0x10, %ecx
orl %eax, %ecx
incl %ecx
cmpl $0x41, %ecx
movl $0x40, %edi
cmovael %ecx, %edi
movl %edi, 0x10(%r15)
shlq $0x4, %rdi
movl $0x8, %esi
callq 0x2b410ec
movq %rax, (%r15)
testq %r14, %r14
je 0x1632fdc
shlq $0x4, %rbx
leaq (%r14,%rbx), %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x16330a4
movl $0x8, %edx
movq %r14, %rdi
movq %rbx, %rsi
popq %rbx
popq %r14
popq %r15
jmp 0x2b410f1
movq $0x0, 0x8(%r15)
movl 0x10(%r15), %ecx
testq %rcx, %rcx
je 0x163309d
movabsq $0xfffffffffffffff, %rdx # imm = 0xFFFFFFFFFFFFFFF
addq %rdx, %rcx
andq %rcx, %rdx
andl $0x1, %ecx
negq %rcx
addq %rdx, %rcx
addq $0x2, %rcx
movq %rdx, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
addq $0x10, %rax
xorl %edx, %edx
movdqa 0x1581ffa(%rip), %xmm1 # 0x2bb5020
movdqa 0x1582002(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rdx, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0x163307a
movq $-0x1000, -0x10(%rax) # imm = 0xF000
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0x1633090
movq $-0x1000, (%rax) # imm = 0xF000
addq $0x2, %rdx
addq $0x20, %rax
cmpq %rdx, %rcx
jne 0x1633036
popq %rbx
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/DenseMap.h
|
llvm::SmallVectorImpl<std::pair<unsigned long, llvm::DbgValueLoc>>::erase(std::pair<unsigned long, llvm::DbgValueLoc> const*, std::pair<unsigned long, llvm::DbgValueLoc> const*)
|
/// Erase the range [CS, CE), shifting later elements down, and return an
/// iterator to the first erased position.
iterator erase(const_iterator CS, const_iterator CE) {
  // Just cast away constness because this is a non-const member function.
  iterator S = const_cast<iterator>(CS);
  iterator E = const_cast<iterator>(CE);
  assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
  // Move everything after the erased range down over it, then destroy the
  // now-dead tail and shrink the recorded size.
  iterator NewEnd = std::move(E, this->end(), S);
  this->destroy_range(NewEnd, this->end());
  this->set_size(NewEnd - this->begin());
  return S;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
movl 0x8(%rdi), %eax
imulq $0x58, %rax, %rsi
addq (%rdi), %rsi
movq %rdx, %rdi
movq %rbx, %rdx
callq 0x16347f4
movq %rax, %r15
movl 0x8(%r14), %eax
imulq $0x58, %rax, %r12
addq (%r14), %r12
cmpq %r15, %r12
je 0x16347d0
movq -0x48(%r12), %rdi
leaq -0x38(%r12), %rax
cmpq %rdi, %rax
je 0x16347ca
callq 0x780910
addq $-0x58, %r12
jmp 0x16347b1
movq (%r14), %rax
subq %rax, %r15
shrq $0x3, %r15
imull $0xba2e8ba3, %r15d, %eax # imm = 0xBA2E8BA3
movl %eax, 0x8(%r14)
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/llvm/ADT/SmallVector.h
|
void llvm::SmallVectorImpl<llvm::DbgValueLoc>::append<llvm::DbgValueLoc const*, void>(llvm::DbgValueLoc const*, llvm::DbgValueLoc const*)
|
/// Append the elements of [in_start, in_end) to the end of the vector.
void append(ItTy in_start, ItTy in_end) {
  this->assertSafeToAddRange(in_start, in_end);
  size_type NumInputs = std::distance(in_start, in_end);
  // Grow once up front so the copy below never reallocates mid-stream.
  this->reserve(this->size() + NumInputs);
  // Copy-construct directly into the uninitialized tail, then publish the
  // new size.
  this->uninitialized_copy(in_start, in_end, this->end());
  this->set_size(this->size() + NumInputs);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
movq %rdx, %rax
subq %rsi, %rax
sarq $0x4, %rax
movabsq $-0x3333333333333333, %r12 # imm = 0xCCCCCCCCCCCCCCCD
imulq %rax, %r12
movl 0x8(%rdi), %esi
movl 0xc(%rdi), %eax
addq %r12, %rsi
cmpq %rax, %rsi
jbe 0x1635007
movq %rbx, %rdi
callq 0x1634a82
movl 0x8(%rbx), %eax
leaq (%rax,%rax,4), %rdx
shlq $0x4, %rdx
addq (%rbx), %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x16350c8
addl %r12d, 0x8(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/llvm/ADT/SmallVector.h
|
bool llvm::DenseMapBase<llvm::DenseMap<llvm::LexicalScope*, llvm::SmallVector<llvm::DbgLabel*, 4u>, llvm::DenseMapInfo<llvm::LexicalScope*, void>, llvm::detail::DenseMapPair<llvm::LexicalScope*, llvm::SmallVector<llvm::DbgLabel*, 4u>>>, llvm::LexicalScope*, llvm::SmallVector<llvm::DbgLabel*, 4u>, llvm::DenseMapInfo<llvm::LexicalScope*, void>, llvm::detail::DenseMapPair<llvm::LexicalScope*, llvm::SmallVector<llvm::DbgLabel*, 4u>>>::LookupBucketFor<llvm::LexicalScope*>(llvm::LexicalScope* const&, llvm::detail::DenseMapPair<llvm::LexicalScope*, llvm::SmallVector<llvm::DbgLabel*, 4u>> const*&) const
|
/// Return the number of hash buckets currently allocated for the table.
unsigned getNumBuckets() const {
  return NumBuckets;
}
|
movl 0x10(%rdi), %ecx
testl %ecx, %ecx
je 0x163b8a3
pushq %rbx
movq (%rdi), %rdi
movl (%rsi), %r8d
movl %r8d, %eax
shrl $0x4, %eax
shrl $0x9, %r8d
xorl %eax, %r8d
decl %ecx
andl %ecx, %r8d
movl $0x1, %r10d
xorl %r9d, %r9d
movl %r8d, %r11d
imulq $0x38, %r11, %rbx
leaq (%rdi,%rbx), %r11
movq (%rdi,%rbx), %rbx
cmpq %rbx, (%rsi)
jne 0x163b86a
movq %r11, (%rdx)
movb $0x1, %al
xorl %r11d, %r11d
testb %r11b, %r11b
jne 0x163b847
jmp 0x163b8a0
cmpq $-0x1000, %rbx # imm = 0xF000
jne 0x163b884
testq %r9, %r9
cmovneq %r9, %r11
movq %r11, (%rdx)
xorl %r11d, %r11d
xorl %eax, %eax
jmp 0x163b863
xorq $-0x2000, %rbx # imm = 0xE000
orq %r9, %rbx
cmoveq %r11, %r9
addl %r10d, %r8d
incl %r10d
andl %ecx, %r8d
movb $0x1, %r11b
jmp 0x163b863
popq %rbx
jmp 0x163b8ac
movq $0x0, (%rdx)
xorl %eax, %eax
andb $0x1, %al
retq
nop
|
/llvm/ADT/DenseMap.h
|
llvm::DwarfUnit::addConstantValue(llvm::DIE&, llvm::APInt const&, bool)
|
/// Add \p Val as a DW_AT_const_value attribute of \p Die. Values of 64 bits
/// or fewer go through the integer overload; wider values are emitted as a
/// DIEBlock of raw bytes in the target's byte order.
void DwarfUnit::addConstantValue(DIE &Die, const APInt &Val, bool Unsigned) {
  unsigned CIBitWidth = Val.getBitWidth();
  if (CIBitWidth <= 64) {
    addConstantValue(Die, Unsigned,
                     Unsigned ? Val.getZExtValue() : Val.getSExtValue());
    return;
  }

  DIEBlock *Block = new (DIEValueAllocator) DIEBlock;

  // Walk the raw 64-bit words of the APInt and emit one byte at a time.
  const uint64_t *Words = Val.getRawData();
  const int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte.
  const bool LittleEndian = Asm->getDataLayout().isLittleEndian();
  for (int i = 0; i < NumBytes; i++) {
    // Little-endian targets emit byte i directly; big-endian targets walk
    // from the most significant byte down.
    const int Idx = LittleEndian ? i : NumBytes - 1 - i;
    const uint8_t c = Words[Idx / 8] >> (8 * (Idx & 7));
    addUInt(*Block, dwarf::DW_FORM_data1, c);
  }

  addBlock(Die, dwarf::DW_AT_const_value, Block);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rdx, %r12
movq %rdi, %rbx
movl 0x8(%rdx), %edx
cmpl $0x40, %edx
ja 0x163de79
movl %ecx, %eax
movq (%r12), %r8
movl $0x1000f, %ecx # imm = 0x1000F
testb %al, %al
jne 0x163de5a
movl %edx, %ecx
negb %cl
shlq %cl, %r8
sarq %cl, %r8
xorl %eax, %eax
testl %edx, %edx
cmoveq %rax, %r8
movl $0x1000d, %ecx # imm = 0x1000D
addq $0x8, %rsi
movq %rbx, %rdi
movl $0x1c, %edx
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x163cea4
movq %rsi, 0x10(%rsp)
leaq 0x58(%rbx), %rdi
movl $0x10, %esi
movq %rdi, 0x20(%rsp)
movl $0x4, %edx
callq 0x8f284c
movq %rax, %r15
movq $0x0, (%rax)
movl $0x0, 0x8(%rax)
movl 0x8(%r12), %eax
movq %rax, %r14
cmpl $0x41, %eax
jb 0x163deb8
movq (%r12), %r12
movq %rbx, 0x8(%rsp)
movq 0xb8(%rbx), %rdi
callq 0x1606e44
cmpl $0x8, %r14d
jb 0x163df4c
movq %r14, %rcx
shrl $0x3, %ecx
movb (%rax), %r14b
movl %ecx, %ebp
negl %ebp
movq %rcx, 0x18(%rsp)
leal -0x1(%rcx), %ebx
xorl %r13d, %r13d
testb %r14b, %r14b
je 0x163df04
movl %r13d, %ecx
notl %ecx
addl 0x18(%rsp), %ecx
leal 0x7(%rbx), %eax
testl %ebx, %ebx
cmovnsl %ebx, %eax
sarl $0x3, %eax
cltq
jmp 0x163df0d
movl %r13d, %eax
shrl $0x3, %eax
movl %r13d, %ecx
movq (%r12,%rax,8), %rax
shll $0x3, %ecx
shrq %cl, %rax
movzbl %al, %eax
movabsq $0xb000000000001, %rcx # imm = 0xB000000000001
movq %rcx, 0x28(%rsp)
movq %rax, 0x30(%rsp)
movq %r15, %rdi
movq 0x20(%rsp), %rsi
leaq 0x28(%rsp), %rdx
callq 0x1642ffc
incl %r13d
decl %ebx
movl %ebp, %eax
addl %r13d, %eax
jne 0x163dee7
movl 0x8(%r15), %eax
xorl %ecx, %ecx
cmpl $0x10000, %eax # imm = 0x10000
setb %cl
movl $0x4, %edx
subl %ecx, %edx
cmpl $0x100, %eax # imm = 0x100
movl $0xa, %eax
cmovael %edx, %eax
movzbl %al, %ecx
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movl $0x1c, %edx
movq %r15, %r8
callq 0x163db1e
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/CodeGen/AsmPrinter/DwarfUnit.cpp
|
llvm::CodeViewDebug::emitInlinedCallSite(llvm::CodeViewDebug::FunctionInfo const&, llvm::DILocation const*, llvm::CodeViewDebug::InlineSite const&)
|
// Emit an S_INLINESITE record (plus its local variables and nested inline
// sites) for one inlined call site, terminated by S_INLINESITE_END.
//
// \param FI        Per-function info supplying the code range (Begin/End
//                  labels) and the map of child inline sites.
// \param InlinedAt Location of the call that was inlined (unused directly
//                  here beyond identifying the site; the payload comes from
//                  Site).
// \param Site      The inline site: inlinee subprogram, site function id,
//                  inlined locals, and child call sites.
void CodeViewDebug::emitInlinedCallSite(const FunctionInfo &FI,
const DILocation *InlinedAt,
const InlineSite &Site) {
// The inlinee's type index must already have been interned by an earlier
// pass over the inline sites.
assert(TypeIndices.count({Site.Inlinee, nullptr}));
TypeIndex InlineeIdx = TypeIndices[{Site.Inlinee, nullptr}];
// SymbolRecord
MCSymbol *InlineEnd = beginSymbolRecord(SymbolKind::S_INLINESITE);
// PtrParent and PtrEnd are left as zero; the CodeView consumer (or linker)
// fills in the symbol-record tree links.
OS.AddComment("PtrParent");
OS.emitInt32(0);
OS.AddComment("PtrEnd");
OS.emitInt32(0);
OS.AddComment("Inlinee type index");
OS.emitInt32(InlineeIdx.getIndex());
// The inline line table is relative to the inlinee's declared file/line.
unsigned FileId = maybeRecordFile(Site.Inlinee->getFile());
unsigned StartLineNum = Site.Inlinee->getLine();
OS.emitCVInlineLinetableDirective(Site.SiteFuncId, FileId, StartLineNum,
FI.Begin, FI.End);
endSymbolRecord(InlineEnd);
// Locals attributed to this inline site are emitted inside its scope.
emitLocalVariableList(FI, Site.InlinedLocals);
// Recurse on child inlined call sites before closing the scope.
for (const DILocation *ChildSite : Site.ChildSites) {
auto I = FI.InlineSites.find(ChildSite);
assert(I != FI.InlineSites.end() &&
"child site not in function inline site map");
emitInlinedCallSite(FI, ChildSite, I->second);
}
// Close the scope.
emitEndSymbolRecord(SymbolKind::S_INLINESITE_END);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rcx, %r15
movq %rsi, %r14
movq %rdi, %rbx
addq $0x450, %rdi # imm = 0x450
movq 0x78(%rcx), %rax
leaq 0x10(%rsp), %rbp
movq %rax, (%rbp)
movq $0x0, 0x8(%rbp)
movq %rbp, %rsi
callq 0x165a1e4
movl 0x10(%rax), %r13d
movq %rbx, %rdi
movl $0x114d, %esi # imm = 0x114D
callq 0x164f01a
movq %rax, 0x8(%rsp)
movq 0x1e0(%rbx), %rdi
leaq 0x290eb4c(%rip), %rax # 0x3f5dda1
movq %rax, (%rbp)
movw $0x103, %r12w # imm = 0x103
movw %r12w, 0x20(%rbp)
movq (%rdi), %rax
leaq 0x10(%rsp), %rbp
movq %rbp, %rsi
movl $0x1, %edx
callq *0x78(%rax)
movq 0x1e0(%rbx), %rdi
movq (%rdi), %rax
xorl %esi, %esi
movl $0x4, %edx
callq *0x208(%rax)
movq 0x1e0(%rbx), %rdi
leaq 0x290eb10(%rip), %rax # 0x3f5ddab
movq %rax, (%rbp)
movw %r12w, 0x20(%rbp)
movq (%rdi), %rax
leaq 0x10(%rsp), %rbp
movq %rbp, %rsi
movl $0x1, %edx
callq *0x78(%rax)
movq 0x1e0(%rbx), %rdi
movq (%rdi), %rax
xorl %esi, %esi
movl $0x4, %edx
callq *0x208(%rax)
movq 0x1e0(%rbx), %rdi
leaq 0x290ead6(%rip), %rax # 0x3f5ddb2
movq %rax, (%rbp)
movw %r12w, 0x20(%rbp)
movq (%rdi), %rax
leaq 0x10(%rsp), %rsi
movl $0x1, %edx
callq *0x78(%rax)
movq 0x1e0(%rbx), %rdi
movq (%rdi), %rax
movq %r13, %rsi
movl $0x4, %edx
callq *0x208(%rax)
movq 0x78(%r15), %rsi
cmpb $0x10, (%rsi)
je 0x164f333
movq -0x10(%rsi), %rax
testb $0x2, %al
jne 0x164f32c
addq $-0x10, %rsi
addl %eax, %eax
andl $0x78, %eax
subq %rax, %rsi
jmp 0x164f330
movq -0x20(%rsi), %rsi
movq (%rsi), %rsi
movq %rbx, %rdi
callq 0x164a75c
movq 0x78(%r15), %rcx
movl 0x10(%rcx), %ecx
movq 0x1e0(%rbx), %rdi
movl 0x80(%r15), %esi
movq 0x1b0(%r14), %r8
movq 0x1b8(%r14), %r9
movq (%rdi), %r10
movl %eax, %edx
callq *0x300(%r10)
movq 0x1e0(%rbx), %rdi
movq (%rdi), %rax
movl $0x2, %esi
xorl %edx, %edx
movl $0x1, %ecx
xorl %r8d, %r8d
callq *0x280(%rax)
movq 0x1e0(%rbx), %rdi
movq (%rdi), %rax
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq *0xc8(%rax)
movq (%r15), %rdx
movl 0x8(%r15), %ecx
movq %rbx, %rdi
movq %r14, %rsi
callq 0x164f41e
movl 0x68(%r15), %eax
testq %rax, %rax
je 0x164f402
movq 0x60(%r15), %r15
leaq (%r15,%rax,8), %r12
movq (%r15), %rcx
movq %rcx, %rax
xorl %edx, %edx
divq 0x8(%r14)
movq (%r14), %rax
movq (%rax,%rdx,8), %rax
movq (%rax), %rdx
cmpq 0x8(%rdx), %rcx
je 0x164f3e7
movq %rdx, %rax
movq (%rdx), %rdx
jmp 0x164f3d9
movq (%rax), %rcx
addq $0x10, %rcx
movq %rbx, %rdi
movq %r14, %rsi
callq 0x164f1f6
addq $0x8, %r15
cmpq %r12, %r15
jne 0x164f3c3
movq %rbx, %rdi
movl $0x114e, %esi # imm = 0x114E
callq 0x164f674
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/CodeGen/AsmPrinter/CodeViewDebug.cpp
|
llvm::CodeViewDebug::getCompleteTypeIndex(llvm::DIType const*)
|
// Return the CodeView type index for the *complete* (non-forward-declared)
// form of \p Ty, lowering the record definition on first request.
// Non-record types simply delegate to getTypeIndex().
TypeIndex CodeViewDebug::getCompleteTypeIndex(const DIType *Ty) {
// The null DIType is the void type. Don't try to hash it.
if (!Ty)
return TypeIndex::Void();
// Look through typedefs when getting the complete type index. Call
// getTypeIndex on the typdef to ensure that any UDTs are accumulated and are
// emitted only once.
if (Ty->getTag() == dwarf::DW_TAG_typedef)
(void)getTypeIndex(Ty);
while (Ty->getTag() == dwarf::DW_TAG_typedef)
Ty = cast<DIDerivedType>(Ty)->getBaseType();
// If this is a non-record type, the complete type index is the same as the
// normal type index. Just call getTypeIndex.
switch (Ty->getTag()) {
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
break;
default:
return getTypeIndex(Ty);
}
const auto *CTy = cast<DICompositeType>(Ty);
// RAII scope that defers emission of deferred records until the outermost
// lowering completes (see TypeLoweringScope).
TypeLoweringScope S(*this);
// Make sure the forward declaration is emitted first. It's unclear if this
// is necessary, but MSVC does it, and we should follow suit until we can show
// otherwise.
// We only emit a forward declaration for named types.
if (!CTy->getName().empty() || !CTy->getIdentifier().empty()) {
TypeIndex FwdDeclTI = getTypeIndex(CTy);
// Just use the forward decl if we don't have complete type info. This
// might happen if the frontend is using modules and expects the complete
// definition to be emitted elsewhere.
if (CTy->isForwardDecl())
return FwdDeclTI;
}
// Check if we've already translated the complete record type.
// Insert the type with a null TypeIndex to signify that the type is currently
// being lowered.
auto InsertResult = CompleteTypeIndices.insert({CTy, TypeIndex()});
if (!InsertResult.second)
return InsertResult.first->second;
TypeIndex TI;
switch (CTy->getTag()) {
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
TI = lowerCompleteTypeClass(CTy);
break;
case dwarf::DW_TAG_union_type:
TI = lowerCompleteTypeUnion(CTy);
break;
default:
llvm_unreachable("not a record");
}
// Update the type index associated with this CompositeType. This cannot
// use the 'InsertResult' iterator above because it is potentially
// invalidated by map insertions which can occur while lowering the class
// type above.
CompleteTypeIndices[CTy] = TI;
return TI;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x38, %rsp
testq %rsi, %rsi
je 0x164fe04
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
callq 0x2a08d12
cmpw $0x16, %ax
jne 0x164fd2c
movq %rbx, %rdi
movq %r14, %rsi
xorl %edx, %edx
callq 0x164b402
jmp 0x164fd2c
movq -0x10(%r14), %rax
testb $0x2, %al
jne 0x164fd24
addq $-0x10, %r14
addl %eax, %eax
andl $0x78, %eax
subq %rax, %r14
jmp 0x164fd28
movq -0x20(%r14), %r14
movq 0x18(%r14), %r14
movq %r14, %rdi
callq 0x2a08d12
cmpw $0x16, %ax
je 0x164fd0e
movq %r14, %rdi
callq 0x2a08d12
cmpw $0x17, %ax
ja 0x164fe0b
movzwl %ax, %eax
movl $0x880004, %ecx # imm = 0x880004
btl %eax, %ecx
jae 0x164fe0b
movq %r14, 0x8(%rsp)
incl 0x4b0(%rbx)
movq %r14, %rdi
movl $0x2, %esi
callq 0x90bac4
testq %rdx, %rdx
jne 0x164fd8e
movq 0x8(%rsp), %rdi
movl $0x7, %esi
callq 0x90bac4
testq %rdx, %rdx
je 0x164fdae
movq 0x8(%rsp), %rsi
movq %rbx, %rdi
xorl %edx, %edx
callq 0x164b402
movl %eax, %ebp
movq 0x8(%rsp), %rax
testb $0x4, 0x14(%rax)
jne 0x164fe43
leaq 0x468(%rbx), %r14
movq 0x8(%rsp), %rax
leaq 0x18(%rsp), %rcx
movq %rax, -0x8(%rcx)
movl $0x0, (%rcx)
leaq 0x20(%rsp), %r15
leaq 0x10(%rsp), %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x165cad2
cmpb $0x0, 0x10(%r15)
je 0x164fe22
movq 0x8(%rsp), %rdi
callq 0x2a08d12
movq 0x8(%rsp), %rsi
movq %rbx, %rdi
cmpw $0x17, %ax
jne 0x164fe2c
callq 0x1653f3c
jmp 0x164fe31
movl $0x3, %ebp
jmp 0x164fe5a
movq %rbx, %rdi
movq %r14, %rsi
xorl %edx, %edx
addq $0x38, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x164b402
movq 0x20(%rsp), %rax
movl 0x8(%rax), %ebp
jmp 0x164fe43
callq 0x16534cc
movl %eax, %ebp
leaq 0x8(%rsp), %rsi
movq %r14, %rdi
callq 0x165ce40
movl %ebp, 0x8(%rax)
cmpl $0x1, 0x4b0(%rbx)
jne 0x164fe54
movq %rbx, %rdi
callq 0x16541e0
decl 0x4b0(%rbx)
movl %ebp, %eax
addq $0x38, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
nop
|
/CodeGen/AsmPrinter/CodeViewDebug.cpp
|
llvm::MachineInstr::getHeapAllocMarker() const
|
// Reinterpret the object representation of \p from as a value of type To
// without invoking UB, mirroring C++20 std::bit_cast. Uses the compiler
// builtin when available (usable in constexpr contexts); otherwise falls
// back to a memcpy of sizeof(To) bytes.
[[nodiscard]] inline To bit_cast(const From &from) noexcept {
#if __has_builtin(__builtin_bit_cast)
return __builtin_bit_cast(To, from);
#else
To to;
std::memcpy(&to, &from, sizeof(To));
return to;
#endif
}
|
movq 0x30(%rdi), %rax
cmpq $0x8, %rax
jb 0x16510f1
movl %eax, %ecx
andl $0x7, %ecx
cmpl $0x3, %ecx
setne %cl
andq $-0x8, %rax
sete %dl
orb %cl, %dl
jne 0x16510f1
cmpb $0x1, 0x6(%rax)
jne 0x16510f1
movslq (%rax), %rcx
leaq (%rax,%rcx,8), %rcx
movzbl 0x4(%rax), %edx
movzbl 0x5(%rax), %eax
addl %edx, %eax
movq 0x10(%rcx,%rax,8), %rax
retq
xorl %eax, %eax
retq
|
/llvm/ADT/bit.h
|
llvm::SmallVectorTemplateBase<llvm::CodeViewDebug::LocalVariable, false>::grow(unsigned long)
|
// Grow the vector's storage to hold at least MinSize elements: allocate a
// new buffer, move the existing elements into it, then adopt the allocation
// (freeing the old heap buffer if the vector had outgrown its inline space).
void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
size_t NewCapacity;
T *NewElts = mallocForGrow(MinSize, NewCapacity);
moveElementsForGrow(NewElts);
takeAllocationForGrow(NewElts, NewCapacity);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %rdx
movq %rdi, %rbx
leaq 0x10(%rdi), %r15
movq %rsp, %r12
movl $0x50, %ecx
movq %r15, %rsi
movq %r12, %r8
callq 0x2b4ec3c
movq %rax, %r14
movq %rbx, %rdi
movq %rax, %rsi
callq 0x16566dc
movq (%r12), %r12
movq (%rbx), %rdi
cmpq %r15, %rdi
je 0x1656334
callq 0x780910
movq %r14, (%rbx)
movl %r12d, 0xc(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/SmallVector.h
|
llvm::DenseMap<std::pair<llvm::DINode const*, llvm::DIType const*>, llvm::codeview::TypeIndex, llvm::DenseMapInfo<std::pair<llvm::DINode const*, llvm::DIType const*>, void>, llvm::detail::DenseMapPair<std::pair<llvm::DINode const*, llvm::DIType const*>, llvm::codeview::TypeIndex>>::init(unsigned int)
|
// Initialize the map with enough buckets to hold InitNumEntries entries
// without rehashing. If buckets were actually allocated, mark them all
// empty; otherwise (zero-bucket case) just zero the counters.
void init(unsigned InitNumEntries) {
auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
if (allocateBuckets(InitBuckets)) {
this->BaseT::initEmpty();
} else {
NumEntries = 0;
NumTombstones = 0;
}
}
|
pushq %rbp
pushq %rbx
pushq %rax
movq %rdi, %rbx
testl %esi, %esi
je 0x16590c4
shll $0x2, %esi
movl $0xaaaaaaab, %eax # imm = 0xAAAAAAAB
imulq %rsi, %rax
shrq $0x21, %rax
incl %eax
movl %eax, %ecx
shrl %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x2, %eax
orl %ecx, %eax
movl %eax, %ecx
shrl $0x4, %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x8, %eax
orl %ecx, %eax
movl %eax, %ebp
shrl $0x10, %ebp
orl %eax, %ebp
incl %ebp
jmp 0x16590c6
xorl %ebp, %ebp
movl %ebp, 0x10(%rbx)
testl %ebp, %ebp
je 0x16590e3
movl %ebp, %eax
shlq $0x3, %rax
leaq (%rax,%rax,2), %rdi
movl $0x8, %esi
callq 0x2b410ec
jmp 0x16590e5
xorl %eax, %eax
movq %rax, (%rbx)
movq $0x0, 0x8(%rbx)
testl %ebp, %ebp
je 0x165911b
movl 0x10(%rbx), %ecx
testq %rcx, %rcx
je 0x165911b
leaq (%rcx,%rcx,2), %rcx
leaq (%rax,%rcx,8), %rcx
movq $-0x1000, %rdx # imm = 0xF000
movq %rdx, (%rax)
movq %rdx, 0x8(%rax)
addq $0x18, %rax
cmpq %rcx, %rax
jne 0x165910b
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
|
/llvm/ADT/DenseMap.h
|
llvm::SmallVectorImpl<llvm::codeview::VFTableSlotKind>::assign(unsigned long, llvm::codeview::VFTableSlotKind)
|
// Replace the vector's contents with NumElts copies of Elt.
// Overwrites existing elements in place, constructs additional copies past
// the old end, or destroys surplus elements, as required by the new size.
void assign(size_type NumElts, ValueParamT Elt) {
// Note that Elt could be an internal reference.
if (NumElts > this->capacity()) {
// Needs a reallocation; growAndAssign copies Elt first so a
// self-referencing Elt stays valid across the grow.
this->growAndAssign(NumElts, Elt);
return;
}
// Assign over existing elements.
std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
if (NumElts > this->size())
std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
else if (NumElts < this->size())
this->destroy_range(this->begin() + NumElts, this->end());
this->set_size(NumElts);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl %edx, %ebp
movq %rsi, %rbx
movq %rdi, %r14
cmpq %rsi, 0x10(%rdi)
jae 0x165b950
leaq 0x8(%r14), %r15
movq $0x0, 0x8(%r14)
leaq 0x18(%r14), %rsi
movl $0x1, %ecx
movq %r14, %rdi
movq %rbx, %rdx
callq 0x2b4efce
movq (%r14), %rdi
movzbl %bpl, %esi
movq %rbx, %rdx
callq 0x780240
movq %r15, %r14
jmp 0x165b98d
movq (%r14), %r15
movq 0x8(%r14), %r12
cmpq %rbx, %r12
movq %rbx, %rdx
cmovbq %r12, %rdx
movzbl %bpl, %ebp
testq %rdx, %rdx
je 0x165b974
movq %r15, %rdi
movl %ebp, %esi
callq 0x780240
addq $0x8, %r14
movq %rbx, %rdx
subq %r12, %rdx
jbe 0x165b98d
addq %r12, %r15
movq %r15, %rdi
movl %ebp, %esi
callq 0x780240
movq %rbx, (%r14)
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nop
|
/llvm/ADT/SmallVector.h
|
llvm::WasmException::computeCallSiteTable(llvm::SmallVectorImpl<llvm::EHStreamer::CallSiteEntry>&, llvm::SmallVectorImpl<llvm::EHStreamer::CallSiteRange>&, llvm::SmallVectorImpl<llvm::LandingPadInfo const*> const&, llvm::SmallVectorImpl<unsigned int> const&)
|
// Build the call-site table for Wasm EH. Unlike other EH schemes, entries
// are indexed by the landing pad index assigned by WasmEHPrepare rather
// than by code ranges, so CallSites is grown/filled positionally.
//
// \param CallSites      [out] Table indexed by Wasm landing-pad index.
// \param CallSiteRanges Unused for Wasm (no range-based call sites).
// \param LandingPads    All landing pads of the function.
// \param FirstActions   First-action index per landing pad, parallel to
//                       LandingPads.
void WasmException::computeCallSiteTable(
SmallVectorImpl<CallSiteEntry> &CallSites,
SmallVectorImpl<CallSiteRange> &CallSiteRanges,
const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
const SmallVectorImpl<unsigned> &FirstActions) {
MachineFunction &MF = *Asm->MF;
for (unsigned I = 0, N = LandingPads.size(); I < N; ++I) {
const LandingPadInfo *Info = LandingPads[I];
MachineBasicBlock *LPad = Info->LandingPadBlock;
// We don't emit LSDA for single catch (...).
if (!MF.hasWasmLandingPadIndex(LPad))
continue;
// Wasm EH must maintain the EH pads in the order assigned to them by the
// WasmEHPrepare pass.
unsigned LPadIndex = MF.getWasmLandingPadIndex(LPad);
// Begin/End labels are null: Wasm call sites are index-based, not
// range-based.
CallSiteEntry Site = {nullptr, nullptr, Info, FirstActions[I]};
if (CallSites.size() < LPadIndex + 1)
CallSites.resize(LPadIndex + 1);
CallSites[LPadIndex] = Site;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %r8, 0x20(%rsp)
movq %rsi, (%rsp)
movl 0x8(%rcx), %r15d
testq %r15, %r15
je 0x165f441
movq %rcx, %r14
movq 0x8(%rdi), %rax
movl $0x1e0, %r12d # imm = 0x1E0
addq 0x58(%rax), %r12
xorl %r13d, %r13d
movq %r15, 0x18(%rsp)
movq (%r14), %rax
movq (%rax,%r13,8), %rbp
movq (%rbp), %rbx
movq %rbx, 0x8(%rsp)
movq %r12, %rdi
leaq 0x8(%rsp), %rsi
leaq 0x10(%rsp), %rdx
callq 0xd29e64
testb %al, %al
je 0x165f435
movq %r14, %r15
movq %rbx, 0x8(%rsp)
movq %r12, %rdi
leaq 0x8(%rsp), %rsi
leaq 0x10(%rsp), %rdx
callq 0xd29e64
xorl %r14d, %r14d
testb %al, %al
je 0x165f3eb
movq 0x10(%rsp), %rax
movl 0x8(%rax), %r14d
movq 0x20(%rsp), %rax
movq (%rax), %rax
movl (%rax,%r13,4), %ebx
leal 0x1(%r14), %eax
movq (%rsp), %rcx
cmpl %eax, 0x8(%rcx)
jae 0x165f413
movl %eax, %esi
movq (%rsp), %rdi
callq 0x1645b5c
movq (%rsp), %rcx
movl %r14d, %eax
movq (%rcx), %rcx
shlq $0x5, %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rcx,%rax)
movq %rbp, 0x10(%rcx,%rax)
movl %ebx, 0x18(%rcx,%rax)
movq %r15, %r14
movq 0x18(%rsp), %r15
incq %r13
cmpq %r13, %r15
jne 0x165f39b
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/CodeGen/AsmPrinter/WasmException.cpp
|
char* llvm::hashing::detail::hash_combine_recursive_helper::combine_data<bool>(unsigned long&, char*, char*, bool)
|
// Append one datum to the 64-byte hashing buffer, mixing the buffer into
// the hash state whenever it fills. Returns the updated write cursor.
//
// \param length     [in/out] Number of bytes already *hashed* (not merely
//                   buffered); bumped by 64 each time the buffer is mixed.
// \param buffer_ptr Current write position inside the buffer.
// \param buffer_end One past the end of the buffer.
// \param data       Trivially-hashable value to append.
char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
if (!store_and_advance(buffer_ptr, buffer_end, data)) {
// Check for skew which prevents the buffer from being packed, and do
// a partial store into the buffer to fill it. This is only a concern
// with the variadic combine because that formation can have varying
// argument types.
size_t partial_store_size = buffer_end - buffer_ptr;
memcpy(buffer_ptr, &data, partial_store_size);
// If the store fails, our buffer is full and ready to hash. We have to
// either initialize the hash state (on the first full buffer) or mix
// this buffer into the existing hash state. Length tracks the *hashed*
// length, not the buffered length.
if (length == 0) {
state = state.create(buffer, seed);
length = 64;
} else {
// Mix this chunk into the current state and bump length up by 64.
state.mix(buffer);
length += 64;
}
// Reset the buffer_ptr to the head of the buffer for the next chunk of
// data.
buffer_ptr = buffer;
// Try again to store into the buffer -- this cannot fail as we only
// store types smaller than the buffer.
if (!store_and_advance(buffer_ptr, buffer_end, data,
partial_store_size))
llvm_unreachable("buffer smaller than stored type");
}
return buffer_ptr;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x40, %rsp
movq %rcx, %r14
movq %rdx, %r13
movq %rsi, %r15
movq %rdi, %rbx
movb %r8b, 0x7(%rsp)
leaq 0x1(%rdx), %rax
cmpq %rcx, %rax
ja 0x1661c54
movb 0x7(%rsp), %cl
movb %cl, (%r13)
movq %rax, %r13
cmpq %r14, %rax
jbe 0x1661cf3
movq %r14, %r12
subq %r13, %r12
leaq 0x7(%rsp), %rsi
movq %r13, %rdi
movq %r12, %rdx
callq 0x780890
cmpq $0x0, (%r15)
je 0x1661c8e
leaq 0x40(%rbx), %rdi
movq %rbx, %rsi
callq 0xa8cd7e
movq (%r15), %rax
addq $0x40, %rax
jmp 0x1661cca
movq 0x78(%rbx), %rdx
leaq 0x8(%rsp), %r13
movq %r13, %rdi
movq %rbx, %rsi
callq 0xa8ccd6
movq 0x30(%r13), %rax
movq %rax, 0x70(%rbx)
movups (%r13), %xmm0
movups 0x10(%r13), %xmm1
movups 0x20(%r13), %xmm2
movups %xmm2, 0x60(%rbx)
movups %xmm1, 0x50(%rbx)
movups %xmm0, 0x40(%rbx)
movl $0x40, %eax
movq %rax, (%r15)
movl $0x1, %edx
subq %r12, %rdx
leaq (%rbx,%rdx), %r13
cmpq %r14, %r13
ja 0x1661cf0
leaq (%rsp,%r12), %rsi
addq $0x7, %rsi
movq %rbx, %rdi
callq 0x780890
jmp 0x1661cf3
movq %rbx, %r13
movq %r13, %rax
addq $0x40, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/llvm/ADT/Hashing.h
|
llvm::DenseMapBase<llvm::DenseMap<llvm::OffsetAndUnitID, llvm::MCSymbol*, llvm::DenseMapInfo<llvm::OffsetAndUnitID, void>, llvm::detail::DenseMapPair<llvm::OffsetAndUnitID, llvm::MCSymbol*>>, llvm::OffsetAndUnitID, llvm::MCSymbol*, llvm::DenseMapInfo<llvm::OffsetAndUnitID, void>, llvm::detail::DenseMapPair<llvm::OffsetAndUnitID, llvm::MCSymbol*>>::moveFromOldBuckets(llvm::detail::DenseMapPair<llvm::OffsetAndUnitID, llvm::MCSymbol*>*, llvm::detail::DenseMapPair<llvm::OffsetAndUnitID, llvm::MCSymbol*>*)
|
// Rehash: reset this map's (already reallocated) bucket array to empty,
// then move every live entry from the old bucket range into it. Empty and
// tombstone slots are skipped; all old keys/values are destroyed as we go.
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
initEmpty();
// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
// Move-assign the key and placement-new the value into the fresh
// bucket, then bump the live-entry count.
DestBucket->getFirst() = std::move(B->getFirst());
::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
incrementNumEntries();
// Free the value.
B->getSecond().~ValueT();
}
// Keys are destroyed unconditionally (empty/tombstone keys too).
B->getFirst().~KeyT();
}
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movq $0x0, 0x8(%rdi)
movl 0x10(%rdi), %ecx
testq %rcx, %rcx
je 0x16624f1
movq (%r15), %rax
leaq (%rcx,%rcx,2), %rcx
leaq (%rax,%rcx,8), %rcx
movq $-0x1, (%rax)
movb $0x0, 0xc(%rax)
movl $0xffffffff, 0x8(%rax) # imm = 0xFFFFFFFF
addq $0x18, %rax
cmpq %rcx, %rax
jne 0x16624d6
cmpq %rbx, %r14
je 0x166255f
movq %rsp, %r12
movq (%r14), %rax
cmpq $-0x2, %rax
je 0x166251c
cmpq $-0x1, %rax
jne 0x166252a
cmpl $-0x1, 0x8(%r14)
jne 0x1662516
cmpb $0x0, 0xc(%r14)
je 0x1662556
cmpq $-0x2, %rax
jne 0x166252a
cmpl $-0x2, 0x8(%r14)
jne 0x166252a
cmpb $0x0, 0xc(%r14)
je 0x1662556
movq %r15, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0x1662226
movq (%rsp), %rax
movq (%r14), %rcx
movq 0x5(%r14), %rdx
movq %rdx, 0x5(%rax)
movq %rcx, (%rax)
movq 0x10(%r14), %rcx
movq %rcx, 0x10(%rax)
incl 0x8(%r15)
addq $0x18, %r14
cmpq %rbx, %r14
jne 0x16624f9
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/DenseMap.h
|
bool llvm::DenseMapBase<llvm::DenseMap<std::pair<llvm::DINode const*, llvm::DILocation const*>, unsigned int, llvm::DenseMapInfo<std::pair<llvm::DINode const*, llvm::DILocation const*>, void>, llvm::detail::DenseMapPair<std::pair<llvm::DINode const*, llvm::DILocation const*>, unsigned int>>, std::pair<llvm::DINode const*, llvm::DILocation const*>, unsigned int, llvm::DenseMapInfo<std::pair<llvm::DINode const*, llvm::DILocation const*>, void>, llvm::detail::DenseMapPair<std::pair<llvm::DINode const*, llvm::DILocation const*>, unsigned int>>::LookupBucketFor<std::pair<llvm::DINode const*, llvm::DILocation const*>>(std::pair<llvm::DINode const*, llvm::DILocation const*> const&, llvm::detail::DenseMapPair<std::pair<llvm::DINode const*, llvm::DILocation const*>, unsigned int> const*&) const
|
// Accessor: total bucket count of the hash table (capacity, not the number
// of live entries).
unsigned getNumBuckets() const {
return NumBuckets;
}
|
movl 0x10(%rdi), %ecx
testl %ecx, %ecx
je 0x16680b2
pushq %r15
pushq %r14
pushq %rbx
movq (%rdi), %rdi
movl (%rsi), %eax
movl 0x8(%rsi), %r8d
movl %eax, %r9d
shrl $0x4, %r9d
shrl $0x9, %eax
xorl %r9d, %eax
movl %r8d, %r9d
shrl $0x4, %r9d
shrl $0x9, %r8d
xorl %r9d, %r8d
shlq $0x20, %rax
orq %rax, %r8
movabsq $-0x40a7b892e31b1a47, %rax # imm = 0xBF58476D1CE4E5B9
imulq %r8, %rax
movq %rax, %r8
shrq $0x1f, %r8
xorl %eax, %r8d
decl %ecx
andl %ecx, %r8d
movl $0x1, %r11d
xorl %r9d, %r9d
movq $-0x2000, %r10 # imm = 0xE000
movl %r8d, %ebx
leaq (%rbx,%rbx,2), %rbx
leaq (%rdi,%rbx,8), %rbx
movq (%rbx), %r15
movq 0x8(%rbx), %r14
cmpq %r15, (%rsi)
jne 0x166806c
cmpq %r14, 0x8(%rsi)
jne 0x166806c
movq %rbx, (%rdx)
movb $0x1, %al
xorl %ebx, %ebx
testb %bl, %bl
jne 0x1668042
jmp 0x16680ab
cmpq $-0x1000, %r15 # imm = 0xF000
jne 0x166808e
cmpq $-0x1000, %r14 # imm = 0xF000
jne 0x166808e
testq %r9, %r9
cmovneq %r9, %rbx
movq %rbx, (%rdx)
xorl %ebx, %ebx
xorl %eax, %eax
jmp 0x1668066
xorq %r10, %r15
xorq %r10, %r14
orq %r15, %r14
orq %r9, %r14
cmoveq %rbx, %r9
addl %r11d, %r8d
incl %r11d
andl %ecx, %r8d
movb $0x1, %bl
jmp 0x1668066
popq %rbx
popq %r14
popq %r15
jmp 0x16680bb
movq $0x0, (%rdx)
xorl %eax, %eax
andb $0x1, %al
retq
|
/llvm/ADT/DenseMap.h
|
llvm::DIEHash::hashShallowTypeReference(llvm::dwarf::Attribute, llvm::DIE const&, llvm::StringRef)
|
// Hash a shallow (by-name) reference to a type DIE, per the DWARF type-unit
// signature algorithm: 'N', the attribute code, the parent context chain,
// 'E', then the referenced type's name.
//
// \param Attribute DW_AT_type or DW_AT_friend.
// \param Entry     The referenced type DIE (its parent chain is hashed).
// \param Name      The type's name string to hash.
void DIEHash::hashShallowTypeReference(dwarf::Attribute Attribute,
const DIE &Entry, StringRef Name) {
// append the letter 'N'
addULEB128('N');
// the DWARF attribute code (DW_AT_type or DW_AT_friend),
addULEB128(Attribute);
// the context of the tag,
if (const DIE *Parent = Entry.getParent())
addParentContext(*Parent);
// the letter 'E',
addULEB128('E');
// and the name of the type.
addString(Name);
// Currently DW_TAG_friends are not used by Clang, but if they do become so,
// here's the relevant spec text to implement:
//
// For DW_TAG_friend, if the referenced entry is the DW_TAG_subprogram,
// the context is omitted and the name to be used is the ABI-specific name
// of the subprogram (e.g., the mangled linker name).
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %r8, 0x10(%rsp)
movq %rcx, %r15
movq %rdx, %r12
movl %esi, %ebp
movq %rdi, %rbx
leaq 0xd(%rsp), %rsi
movb $0x4e, (%rsi)
movl $0x1, %edx
callq 0x2b42f66
movl %ebp, %r14d
leaq 0xc(%rsp), %r13
movq %r14, %rbp
movl %r14d, %eax
andb $0x7f, %al
shrq $0x7, %rbp
cmpq $0x7f, %r14
seta %cl
shlb $0x7, %cl
orb %al, %cl
movb %cl, 0xc(%rsp)
movl $0x1, %edx
movq %rbx, %rdi
movq %r13, %rsi
callq 0x2b42f66
cmpq $0x7f, %r14
movq %rbp, %r14
ja 0x16699d5
movq %r12, %rdi
callq 0x161d452
testq %rax, %rax
je 0x1669a1f
movq %rbx, %rdi
movq %rax, %rsi
callq 0x16688de
leaq 0xe(%rsp), %rsi
movb $0x45, (%rsi)
movl $0x1, %edx
movq %rbx, %rdi
callq 0x2b42f66
movq %rbx, %rdi
movq %r15, %rsi
movq 0x10(%rsp), %rdx
callq 0x2b4301c
leaq 0xf(%rsp), %rsi
movb $0x0, (%rsi)
movl $0x1, %edx
movq %rbx, %rdi
callq 0x2b42f66
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/CodeGen/AsmPrinter/DIEHash.cpp
|
llvm::DenseMapBase<llvm::DenseMap<llvm::DIE const*, unsigned int, llvm::DenseMapInfo<llvm::DIE const*, void>, llvm::detail::DenseMapPair<llvm::DIE const*, unsigned int>>, llvm::DIE const*, unsigned int, llvm::DenseMapInfo<llvm::DIE const*, void>, llvm::detail::DenseMapPair<llvm::DIE const*, unsigned int>>::clear()
|
// Accessor: number of live (non-empty, non-tombstone) entries in the map.
unsigned getNumEntries() const {
return NumEntries;
}
|
movl 0x8(%rdi), %eax
testl %eax, %eax
jne 0x166a9eb
cmpl $0x0, 0xc(%rdi)
je 0x166aac4
shll $0x2, %eax
movl 0x10(%rdi), %ecx
cmpl %ecx, %eax
setae %al
cmpl $0x41, %ecx
setb %dl
orb %al, %dl
je 0x166af8e
testq %rcx, %rcx
je 0x166aabc
movq (%rdi), %rax
movabsq $0xfffffffffffffff, %rdx # imm = 0xFFFFFFFFFFFFFFF
addq %rdx, %rcx
andq %rcx, %rdx
andl $0x1, %ecx
negq %rcx
addq %rdx, %rcx
addq $0x2, %rcx
movq %rdx, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
addq $0x10, %rax
xorl %edx, %edx
movdqa 0x154a5db(%rip), %xmm1 # 0x2bb5020
movdqa 0x154a5e3(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rdx, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0x166aa99
movq $-0x1000, -0x10(%rax) # imm = 0xF000
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0x166aaaf
movq $-0x1000, (%rax) # imm = 0xF000
addq $0x2, %rdx
addq $0x20, %rax
cmpq %rdx, %rcx
jne 0x166aa55
movq $0x0, 0x8(%rdi)
retq
nop
|
/llvm/ADT/DenseMap.h
|
llvm::DwarfCompileUnit::getOrCreateSourceID(llvm::DIFile const*)
|
// Return the DWARF file-table ID for \p File, emitting a .file directive
// the first time each file is seen. A null File maps to the reserved
// entry 0. Caches the last (File, ID) pair since consecutive queries tend
// to hit the same file.
unsigned DwarfCompileUnit::getOrCreateSourceID(const DIFile *File) {
// If we print assembly, we can't separate .file entries according to
// compile units. Thus all files will belong to the default compile unit.
// FIXME: add a better feature test than hasRawTextSupport. Even better,
// extend .file to support this.
unsigned CUID = Asm->OutStreamer->hasRawTextSupport() ? 0 : getUniqueID();
if (!File)
return Asm->OutStreamer->emitDwarfFileDirective(0, "", "", std::nullopt,
std::nullopt, CUID);
// Only re-emit the directive when the file changes from the cached one.
if (LastFile != File) {
LastFile = File;
LastFileID = Asm->OutStreamer->emitDwarfFileDirective(
0, File->getDirectory(), File->getFilename(), DD->getMD5AsBytes(File),
File->getSource(), CUID);
}
return LastFileID;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc8, %rsp
movq %rsi, %r13
movq %rdi, %rbx
movq 0xb8(%rdi), %rax
movq 0x50(%rax), %rdi
movq (%rdi), %rax
callq *0x68(%rax)
xorl %ebp, %ebp
testb %al, %al
jne 0x166b729
movl 0x48(%rbx), %ebp
testq %r13, %r13
je 0x166b7ba
cmpq %r13, 0x290(%rbx)
je 0x166b912
movq %r13, 0x290(%rbx)
movq 0xb8(%rbx), %rax
movq 0x50(%rax), %rax
movq %rax, 0x98(%rsp)
movq %r13, %rdi
movl $0x1, %esi
callq 0x90bac4
movq %rax, 0x90(%rsp)
movq %rdx, 0x88(%rsp)
xorl %r14d, %r14d
movq %r13, %rdi
xorl %esi, %esi
callq 0x90bac4
movq %rax, %r15
movq %rdx, %r12
movq 0xd0(%rbx), %rsi
leaq 0xb0(%rsp), %rdi
movq %r13, %rdx
callq 0x1622716
movq 0x28(%r13), %rdi
testq %rdi, %rdi
je 0x166b867
callq 0x2a76bfe
movb $0x1, %r14b
jmp 0x166b867
movq 0xb8(%rbx), %rax
movq 0x50(%rax), %rsi
xorl %eax, %eax
movb %al, 0x80(%rsp)
movb %al, 0x68(%rsp)
leaq 0x28efcdf(%rip), %rcx # 0x3f5b4b8
movq %rcx, 0xa0(%rsp)
movq $0x0, 0xa8(%rsp)
movq (%rsi), %rax
movq 0x80(%rsp), %rdx
movq %rdx, 0x38(%rsp)
movups 0x70(%rsp), %xmm0
movups %xmm0, 0x28(%rsp)
movb 0x68(%rsp), %dl
movb %dl, 0x20(%rsp)
movups 0x58(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xa0(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x48(%rsp), %r14
movq %r14, %rdi
xorl %edx, %edx
xorl %r8d, %r8d
movl %ebp, %r9d
callq *0x2b0(%rax)
movl (%r14), %ebx
testb $0x1, 0x8(%r14)
je 0x166b918
movq 0x48(%rsp), %rdi
testq %rdi, %rdi
je 0x166b859
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x48(%rsp)
jmp 0x166b918
movq %rax, 0x70(%rsp)
movq %rdx, 0x78(%rsp)
movb %r14b, 0x80(%rsp)
movq %r15, 0x48(%rsp)
movq %r12, 0x50(%rsp)
movq 0x98(%rsp), %rsi
movq (%rsi), %rax
movq 0x80(%rsp), %rcx
movq %rcx, 0x38(%rsp)
movups 0x70(%rsp), %xmm0
movups %xmm0, 0x28(%rsp)
movb 0xc0(%rsp), %cl
movb %cl, 0x20(%rsp)
movups 0xb0(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x48(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x58(%rsp), %r15
movq %r15, %rdi
xorl %edx, %edx
movq 0x90(%rsp), %rcx
movq 0x88(%rsp), %r8
movl %ebp, %r9d
callq *0x2b0(%rax)
movl (%r15), %ebp
testb $0x1, 0x8(%r15)
je 0x166b90c
movq 0x58(%rsp), %rdi
testq %rdi, %rdi
je 0x166b903
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x58(%rsp)
movl %ebp, 0x298(%rbx)
movl 0x298(%rbx), %ebx
movl %ebx, %eax
addq $0xc8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.