name (string, length 1–473k) | code (string, length 7–647k) | asm (string, length 4–3.39M) | file (string, length 8–196)
---|---|---|---|
llvm::Pass* llvm::callDefaultCtor<(anonymous namespace)::SILowerWWMCopies, true>()
|
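// Pass-registry factory: default-constructs the pass named by the template parameter (here SILowerWWMCopies).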
Pass *callDefaultCtor() {
return new PassName();
}
|
pushq %rbx
subq $0x10, %rsp
movl $0x68, %edi
callq 0x7808d0
movq %rax, %rbx
xorl %eax, %eax
movq %rax, 0x8(%rbx)
leaq 0x4d55a95(%rip), %rcx # 0x5917d54
movq %rcx, 0x10(%rbx)
movl $0x2, 0x18(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%rbx)
movq %rax, 0x30(%rbx)
leaq 0x4ba450c(%rip), %rax # 0x57667e8
movq %rax, (%rbx)
callq 0x2a9031c
leaq 0x8(%rsp), %rdx
movq %rax, (%rdx)
leaq 0x4d55a5d(%rip), %rdi # 0x5917d50
leaq -0xea(%rip), %rsi # 0xbc2210
callq 0x939005
movq %rbx, %rax
addq $0x10, %rsp
popq %rbx
retq
|
/llvm/PassSupport.h
|
copyRegOperand(llvm::MachineOperand&, llvm::MachineOperand const&)
|
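// Copies the register-operand state (register, subregister, undef flag, and kill or dead flag depending on whether the destination is a use or a def) from From to To.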
static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
assert(To.isReg() && From.isReg());
To.setReg(From.getReg());
To.setSubReg(From.getSubReg());
To.setIsUndef(From.isUndef());
if (To.isUse()) {
To.setIsKill(From.isKill());
} else {
To.setIsDead(From.isDead());
}
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movl 0x4(%rsi), %esi
callq 0x1d531ce
movl $0xfff00, %ecx # imm = 0xFFF00
andl (%r14), %ecx
movl (%rbx), %edx
movl %edx, %eax
andl $0xfff000ff, %eax # imm = 0xFFF000FF
orl %ecx, %eax
movl %eax, (%rbx)
movl $0x10000000, %ecx # imm = 0x10000000
andl (%r14), %ecx
andl $0xefffffff, %eax # imm = 0xEFFFFFFF
orl %ecx, %eax
movl %eax, (%rbx)
movl %edx, %ecx
andl $0x1000000, %ecx # imm = 0x1000000
movl (%r14), %esi
xorl %esi, %edx
shrl $0x1a, %esi
xorl %edi, %edi
btl $0x18, %edx
setae %dil
andl %esi, %edi
orl $0xfaffffff, %ecx # imm = 0xFAFFFFFF
shll $0x1a, %edi
andl %eax, %ecx
orl %edi, %ecx
movl %ecx, (%rbx)
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/Target/AMDGPU/SIPeepholeSDWA.cpp
|
getFlatScratchSpillOpcode(llvm::SIInstrInfo const*, unsigned int, unsigned int)
|
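// Maps the spill element size to the SADDR scratch load/store opcode, then rewrites it to the SV or ST addressing variant when the original opcode's address operands call for it.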
static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
unsigned LoadStoreOp,
unsigned EltSize) {
bool IsStore = TII->get(LoadStoreOp).mayStore();
bool HasVAddr = AMDGPU::hasNamedOperand(LoadStoreOp, AMDGPU::OpName::vaddr);
bool UseST =
!HasVAddr && !AMDGPU::hasNamedOperand(LoadStoreOp, AMDGPU::OpName::saddr);
switch (EltSize) {
case 4:
LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
: AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
break;
case 8:
LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
: AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
break;
case 12:
LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
: AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
break;
case 16:
LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
: AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
break;
default:
llvm_unreachable("Unexpected spill load/store size!");
}
if (HasVAddr)
LoadStoreOp = AMDGPU::getFlatScratchInstSVfromSS(LoadStoreOp);
else if (UseST)
LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
return LoadStoreOp;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movl %edx, %r14d
movq 0x8(%rdi), %rax
movl %esi, %ecx
shlq $0x5, %rcx
negq %rcx
movq 0x10(%rax,%rcx), %r15
movzwl %si, %ebp
movl %ebp, %edi
movl $0x1, %esi
callq 0x19130e0
movl %eax, %ebx
cmpw $-0x1, %ax
je 0xbe98ae
xorl %eax, %eax
jmp 0xbe98c1
movl %ebp, %edi
movl $0x1d, %esi
callq 0x19130e0
cmpw $-0x1, %ax
sete %al
addl $-0x4, %r14d
roll $0x1e, %r14d
leaq 0x28b3010(%rip), %rcx # 0x349c8e0
movslq (%rcx,%r14,4), %rdx
addq %rcx, %rdx
jmpq *%rdx
btl $0x14, %r15d
movl $0xe5f, %ecx # imm = 0xE5F
movl $0xeb7, %edi # imm = 0xEB7
jmp 0xbe991b
btl $0x14, %r15d
movl $0xe58, %ecx # imm = 0xE58
movl $0xeb0, %edi # imm = 0xEB0
jmp 0xbe991b
btl $0x14, %r15d
movl $0xe5c, %ecx # imm = 0xE5C
movl $0xeb4, %edi # imm = 0xEB4
jmp 0xbe991b
btl $0x14, %r15d
movl $0xe54, %ecx # imm = 0xE54
movl $0xeac, %edi # imm = 0xEAC
cmovael %ecx, %edi
cmpw $-0x1, %bx
je 0xbe9933
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x19145c7
testb %al, %al
je 0xbe9946
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x191457c
movl %edi, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/SIRegisterInfo.cpp
|
void llvm::SmallVectorImpl<llvm::User*>::append<llvm::Value::user_iterator_impl<llvm::User>, void>(llvm::Value::user_iterator_impl<llvm::User>, llvm::Value::user_iterator_impl<llvm::User>)
|
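// Appends the half-open iterator range [in_start, in_end): reserve once up front, bulk-copy into the tail, then bump the size.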
void append(ItTy in_start, ItTy in_end) {
this->assertSafeToAddRange(in_start, in_end);
size_type NumInputs = std::distance(in_start, in_end);
this->reserve(this->size() + NumInputs);
this->uninitialized_copy(in_start, in_end, this->end());
this->set_size(this->size() + NumInputs);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
xorl %r12d, %r12d
cmpq %rdx, %rsi
je 0xbf991a
movq %r15, %rax
movq 0x8(%rax), %rax
incq %r12
cmpq %r14, %rax
jne 0xbf990e
movl 0x8(%rbx), %edx
movl 0xc(%rbx), %eax
addq %r12, %rdx
cmpq %rax, %rdx
jbe 0xbf9939
leaq 0x10(%rbx), %rsi
movl $0x8, %ecx
movq %rbx, %rdi
callq 0x2b4ed86
movl 0x8(%rbx), %eax
cmpq %r14, %r15
je 0xbf9960
leaq (,%rax,8), %rcx
addq (%rbx), %rcx
movq 0x18(%r15), %rdx
movq %rdx, (%rcx)
addq $0x8, %rcx
movq 0x8(%r15), %r15
cmpq %r14, %r15
jne 0xbf994c
addl %eax, %r12d
movl %r12d, 0x8(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/SmallVector.h
|
buildMul(llvm::IRBuilder<llvm::ConstantFolder, llvm::IRBuilderDefaultInserter>&, llvm::Value*, llvm::Value*)
|
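// Folds the multiply away when LHS is the constant 1; otherwise emits a regular mul.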
static Value *buildMul(IRBuilder<> &B, Value *LHS, Value *RHS) {
const ConstantInt *CI = dyn_cast<ConstantInt>(LHS);
return (CI && CI->isOne()) ? RHS : B.CreateMul(LHS, RHS);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x28, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
cmpb $0x11, (%rsi)
jne 0xbfdb59
leaq 0x18(%r14), %rdi
movl 0x20(%r14), %ebp
cmpl $0x40, %ebp
ja 0xbfdb49
cmpq $0x1, (%rdi)
jmp 0xbfdb52
callq 0x2b1284c
decl %ebp
cmpl %ebp, %eax
sete %al
testb %al, %al
jne 0xbfdb79
movq %rsp, %rcx
movw $0x101, 0x20(%rcx) # imm = 0x101
movq %r15, %rdi
movq %r14, %rsi
movq %rbx, %rdx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0xbfdeb4
movq %rax, %rbx
movq %rbx, %rax
addq $0x28, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
|
llvm::AACallEdges& llvm::Attributor::registerAA<llvm::AACallEdges>(llvm::AACallEdges&)
|
AAType &registerAA(AAType &AA) {
static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
"Cannot register an attribute with a type not derived from "
"'AbstractAttribute'!");
// Put the attribute in the lookup map structure and the container we use to
// keep track of all attributes.
const IRPosition &IRP = AA.getIRPosition();
AbstractAttribute *&AAPtr = AAMap[{&AAType::ID, IRP}];
assert(!AAPtr && "Attribute already in map!");
AAPtr = &AA;
// Register AA with the synthetic root only before the manifest stage.
if (Phase == AttributorPhase::SEEDING || Phase == AttributorPhase::UPDATE)
DG.SyntheticRoot.Deps.insert(
AADepGraphNode::DepTy(&AA, unsigned(DepClassTy::REQUIRED)));
return AA;
}
|
pushq %r14
pushq %rbx
subq $0x18, %rsp
movq %rsi, %rbx
movq %rdi, %r14
addq $0x68, %rdi
leaq 0x4351bae(%rip), %rax # 0x4f52c04
movq %rsp, %rsi
movq %rax, (%rsi)
movups 0x40(%rbx), %xmm0
movups %xmm0, 0x8(%rsi)
callq 0xc011ea
movq %rbx, 0x18(%rax)
cmpl $0x1, 0xd78(%r14)
ja 0xc0108c
addq $0xb0, %r14
movq %rsp, %rsi
movq %rbx, (%rsi)
movq %r14, %rdi
callq 0xc0114c
movq %rbx, %rax
addq $0x18, %rsp
popq %rbx
popq %r14
retq
nop
|
/llvm/Transforms/IPO/Attributor.h
|
bool llvm::function_ref<bool (llvm::Instruction&)>::callback_fn<(anonymous namespace)::AAAMDGPUNoAGPR::updateImpl(llvm::Attributor&)::'lambda'(llvm::Instruction&)>(long, llvm::Instruction&)
|
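// function_ref's call operator: forwards the arguments through the stored trampoline to the type-erased callable.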
Ret operator()(Params ...params) const {
return callback(callable, std::forward<Params>(params)...);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x18, %rsp
movq -0x20(%rsi), %rax
movb (%rax), %bpl
testb %bpl, %bpl
je 0xc038eb
cmpb $0x19, %bpl
jne 0xc0393a
movq 0x38(%rax), %rsi
movq 0x40(%rax), %rdx
movq %rsp, %rbx
movq %rbx, %rdi
callq 0x2a4fb72
movq (%rbx), %rax
movq 0x8(%rbx), %rcx
cmpq %rcx, %rax
je 0xc0392f
movq 0x10(%rax), %rdx
movq 0x18(%rax), %rsi
cmpq %rsi, %rdx
sete %r14b
je 0xc038da
movq (%rdx), %rdi
movq 0x8(%rdx), %r8
testq %r8, %r8
je 0xc038c2
cmpb $0x7b, (%rdi)
jne 0xc038c7
incq %rdi
cmpq $0x1, %r8
sete %r8b
jmp 0xc038ca
movb $0x1, %r8b
jmp 0xc038ca
xorl %r8d, %r8d
testb %r8b, %r8b
jne 0xc038d4
cmpb $0x61, (%rdi)
je 0xc038da
addq $0x20, %rdx
jmp 0xc0389b
testb $0x1, %r14b
je 0xc03932
addq $0x50, %rax
cmpq %rcx, %rax
jne 0xc03893
jmp 0xc03932
movb $0x1, %bl
testb $0x20, 0x21(%rax)
jne 0xc03944
movq 0x8(%rdi), %r14
movq (%rdi), %r15
xorps %xmm0, %xmm0
movq %rsp, %rbx
movaps %xmm0, (%rbx)
movq %rax, (%rbx)
movq %rbx, %rdi
callq 0x1f30614
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
xorl %ebx, %ebx
movq %r15, %rdi
movq %r14, %rcx
xorl %r8d, %r8d
callq 0xc031c8
testq %rax, %rax
je 0xc03944
movb 0x59(%rax), %bl
jmp 0xc03944
movb $0x1, %r14b
movq %rsp, %rdi
callq 0xc03952
cmpb $0x19, %bpl
sete %bl
andb %r14b, %bl
movl %ebx, %eax
addq $0x18, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
nop
|
/llvm/ADT/STLFunctionalExtras.h
|
(anonymous namespace)::AMDGPUOutgoingArgHandler::getStackAddress(unsigned long, long, llvm::MachinePointerInfo&, llvm::ISD::ArgFlagsTy)
|
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO,
ISD::ArgFlagsTy Flags) override {
MachineFunction &MF = MIRBuilder.getMF();
const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
const LLT S32 = LLT::scalar(32);
if (IsTailCall) {
Offset += FPDiff;
int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
MPO = MachinePointerInfo::getFixedStack(MF, FI);
return FIReg.getReg(0);
}
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
if (!SPReg) {
const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
if (ST.enableFlatScratch()) {
// The stack is accessed unswizzled, so we can use a regular copy.
SPReg = MIRBuilder.buildCopy(PtrTy,
MFI->getStackPtrOffsetReg()).getReg(0);
} else {
// The address we produce here, without knowing the use context, is going
// to be interpreted as a vector address, so we need to convert to a
// swizzled address.
SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
{MFI->getStackPtrOffsetReg()}).getReg(0);
}
}
auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);
auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MF, Offset);
return AddrReg.getReg(0);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rcx, %rbx
movq %rdx, %r15
movq %rdi, %r12
movq 0x8(%rdi), %rdi
movq 0x8(%rdi), %r14
cmpb $0x1, 0x38(%r12)
jne 0xc0b0d5
movslq 0x30(%r12), %rdx
addq %r15, %rdx
movq 0x38(%r14), %rdi
movl $0x1, %ecx
xorl %r8d, %r8d
callq 0x1d32a60
movl %eax, %ebp
movq 0x8(%r12), %rdi
leaq 0x8(%rsp), %rsi
movq $0x280102, (%rsi) # imm = 0x280102
movl $0x0, 0x8(%rsi)
movl %eax, %edx
callq 0x15ddc04
movq %rdx, %r12
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movq %r14, %rsi
movl %ebp, %edx
xorl %ecx, %ecx
callq 0x1d55ea2
movq 0xd(%r15), %rax
movq %rax, 0xd(%rbx)
movups (%r15), %xmm0
jmp 0xc0b202
cmpl $0x0, 0x34(%r12)
jne 0xc0b179
movq 0x30(%r14), %rax
movq 0x10(%r14), %rcx
cmpb $0x0, 0x2eb(%rcx)
je 0xc0b11a
leaq 0x20(%rsp), %rsi
movq $0x280102, (%rsi) # imm = 0x280102
xorl %ecx, %ecx
movl %ecx, 0x8(%rsi)
movl 0x90(%rax), %eax
leaq 0x8(%rsp), %rdx
movl %eax, (%rdx)
movl %ecx, 0x10(%rdx)
callq 0x15de680
jmp 0xc0b16d
cmpb $0x1, 0x2ec(%rcx)
jne 0xc0b12c
cmpb $0x1, 0x2e9(%rcx)
je 0xc0b0f2
leaq 0x20(%rsp), %rdx
movq $0x280102, (%rdx) # imm = 0x280102
xorl %ecx, %ecx
movl %ecx, 0x8(%rdx)
movl 0x90(%rax), %eax
leaq 0x8(%rsp), %r8
movl %eax, (%r8)
movl %ecx, 0x10(%r8)
movq (%rdi), %rax
movq $0x0, (%rsp)
movl $0x1, %ecx
movl $0x1, %r9d
movl $0xe45, %esi # imm = 0xE45
callq *0x20(%rax)
movq 0x20(%rdx), %rax
movl 0x4(%rax), %eax
movl %eax, 0x34(%r12)
movq 0x8(%r12), %rdi
leaq 0x8(%rsp), %r13
movq $0x101, (%r13) # imm = 0x101
xorl %ebp, %ebp
movl %ebp, 0x8(%r13)
movq %r13, %rsi
movq %r15, %rdx
callq 0x15ddf6e
movq 0x8(%r12), %rdi
leaq 0x38(%rsp), %rsi
movq $0x280102, (%rsi) # imm = 0x280102
movl %ebp, 0x8(%rsi)
movl 0x34(%r12), %ecx
movl %ecx, (%r13)
movl %ebp, 0x10(%r13)
leaq 0x20(%rsp), %rcx
movq %rax, (%rcx)
movq %rdx, 0x8(%rcx)
movl $0x1, 0x10(%rcx)
leaq 0x8(%rsp), %rdx
xorl %r8d, %r8d
callq 0x15dde14
movq %rdx, %r12
leaq 0x8(%rsp), %r13
movq %r13, %rdi
movq %r14, %rsi
movq %r15, %rdx
xorl %ecx, %ecx
callq 0x1d55f72
movq 0xd(%r13), %rax
movq %rax, 0xd(%rbx)
movups (%r13), %xmm0
movups %xmm0, (%rbx)
movq 0x20(%r12), %rax
movl 0x4(%rax), %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPUCallLowering.cpp
|
llvm::IRBuilderBase::CreateSExtOrTrunc(llvm::Value*, llvm::Type*, llvm::Twine const&)
|
Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
const Twine &Name = "") {
assert(V->getType()->isIntOrIntVectorTy() &&
DestTy->isIntOrIntVectorTy() &&
"Can only sign extend/truncate integers!");
Type *VTy = V->getType();
if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
return CreateSExt(V, DestTy, Name);
if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
return CreateTrunc(V, DestTy, Name);
return V;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movq 0x8(%rsi), %rdi
callq 0x2a9a8f4
movl %eax, %ebp
movq %r14, %rdi
callq 0x2a9a8f4
cmpl %eax, %ebp
jae 0xc1034d
movq %r12, %rdi
movl $0x28, %esi
movq %r15, %rdx
movq %r14, %rcx
movq %rbx, %r8
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp 0x932298
jbe 0xc1036e
movq %r12, %rdi
movq %r15, %rsi
movq %r14, %rdx
movq %rbx, %rcx
xorl %r8d, %r8d
xorl %r9d, %r9d
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp 0xa2bb82
movq %r15, %rax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/llvm/IR/IRBuilder.h
|
(anonymous namespace)::AMDGPUCodeGenPrepareImpl::emitRcpIEEE1ULP(llvm::IRBuilder<llvm::ConstantFolder, llvm::IRBuilderDefaultInserter>&, llvm::Value*, bool) const
|
Value *AMDGPUCodeGenPrepareImpl::emitRcpIEEE1ULP(IRBuilder<> &Builder,
Value *Src,
bool IsNegative) const {
// Same as for 1.0, but expand the sign out of the constant.
// -1.0 / x -> rcp (fneg x)
if (IsNegative)
Src = Builder.CreateFNeg(Src);
// The rcp instruction doesn't support denormals, so scale the input
// out of the denormal range and convert at the end.
//
// Expand as 2^-n * (1.0 / (x * 2^n))
// TODO: Skip scaling if input is known never denormal and the input
// range won't underflow to denormal. The hard part is knowing the
// result. We need a range check, the result could be denormal for
// 0x1p+126 < den <= 0x1p+127.
auto [FrexpMant, FrexpExp] = getFrexpResults(Builder, Src);
Value *ScaleFactor = Builder.CreateNeg(FrexpExp);
Value *Rcp = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMant);
return Builder.CreateCall(getLdexpF32(), {Rcp, ScaleFactor});
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rsi, %rbx
movq %rdi, %r14
testl %ecx, %ecx
je 0xc10ff8
leaq 0x10(%rsp), %rax
movw $0x101, 0x20(%rax) # imm = 0x101
movq %rbx, %rdi
movq %rdx, %rsi
movq %rax, %rdx
xorl %ecx, %ecx
callq 0xc10d50
movq %rax, %rdx
movq %r14, %rdi
movq %rbx, %rsi
callq 0xc110b2
movq %rax, %r13
movq %rdx, %r12
movw $0x101, %ax # imm = 0x101
leaq 0x10(%rsp), %rbp
movw %ax, 0x20(%rbp)
movq 0x8(%rdx), %rdi
callq 0x29e17e6
xorl %r15d, %r15d
movq %rbx, %rdi
movq %rax, %rsi
movq %r12, %rdx
movq %rbp, %rcx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0xc105bc
movq %rax, %r12
movw $0x101, %ax # imm = 0x101
movw %ax, 0x20(%rbp)
leaq 0x10(%rsp), %r8
movq %rbx, %rdi
movl $0xb80, %esi # imm = 0xB80
movq %r13, %rdx
xorl %ecx, %ecx
callq 0x2a4c16c
movq %rax, %r13
movq %r14, %rdi
callq 0xc111e8
testq %rax, %rax
je 0xc1106f
movq 0x18(%rax), %r15
leaq 0x38(%rsp), %rcx
movq %r13, (%rcx)
movq %r12, 0x8(%rcx)
leaq 0x10(%rsp), %r9
movw $0x101, 0x20(%r9) # imm = 0x101
movq $0x0, (%rsp)
movl $0x2, %r8d
movq %rbx, %rdi
movq %r15, %rsi
movq %rax, %rdx
callq 0x9386f2
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
|
llvm::IRBuilderBase::SetInsertPoint(llvm::ilist_iterator_w_bits<llvm::ilist_detail::node_options<llvm::Instruction, false, false, void, true, llvm::BasicBlock>, false, false>)
|
void SetInsertPoint(BasicBlock::iterator IP) {
BB = IP->getParent();
InsertPt = IP;
SetCurrentDebugLocation(IP->getStableDebugLoc());
}
|
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
leaq -0x18(%rsi), %rdi
testq %rsi, %rsi
cmoveq %rsi, %rdi
movq 0x28(%rdi), %rax
movq %rax, 0x30(%rbx)
movq %rsi, 0x38(%rbx)
movw %dx, 0x40(%rbx)
callq 0x2a5311a
movq (%rax), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0xc114b8
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x8(%rsp), %rdx
movq %rbx, %rdi
xorl %esi, %esi
callq 0x931d0a
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0xc114db
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
addq $0x10, %rsp
popq %rbx
retq
nop
|
/llvm/IR/IRBuilder.h
|
(anonymous namespace)::AMDGPUGlobalISelDivergenceLowering::AMDGPUGlobalISelDivergenceLowering()
|
AMDGPUGlobalISelDivergenceLowering() : MachineFunctionPass(ID) {
initializeAMDGPUGlobalISelDivergenceLoweringPass(
*PassRegistry::getPassRegistry());
}
|
pushq %rax
xorl %eax, %eax
movq %rax, 0x8(%rdi)
leaq 0x4d043c4(%rip), %rcx # 0x59191e8
movq %rcx, 0x10(%rdi)
movl $0x2, 0x18(%rdi)
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%rdi)
movq %rax, 0x30(%rdi)
leaq 0x4b6ac3f(%rip), %rax # 0x577fa80
movq %rax, (%rdi)
callq 0x2a9031c
movq %rsp, %rdx
movq %rax, (%rdx)
leaq 0x4d0438e(%rip), %rdi # 0x59191e4
leaq -0xfd(%rip), %rsi # 0xc14d60
callq 0x939005
popq %rax
retq
|
/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
|
llvm::Incoming& llvm::SmallVectorTemplateBase<llvm::Incoming, true>::growAndEmplaceBack<llvm::Register, llvm::MachineBasicBlock*, llvm::Register>(llvm::Register&&, llvm::MachineBasicBlock*&&, llvm::Register&&)
|
T &growAndEmplaceBack(ArgTypes &&... Args) {
// Use push_back with a copy in case Args has an internal reference,
// side-stepping reference invalidation problems without losing the realloc
// optimization.
push_back(T(std::forward<ArgTypes>(Args)...));
return this->back();
}
|
pushq %rbx
subq $0x20, %rsp
movq %rdi, %rbx
movl (%rsi), %eax
movq (%rdx), %rdx
movl (%rcx), %ecx
leaq 0x8(%rsp), %rsi
movl %eax, (%rsi)
movq %rdx, 0x8(%rsi)
movl %ecx, 0x10(%rsi)
movl $0x1, %edx
callq 0xbc0d7c
movq (%rbx), %rcx
movl 0x8(%rbx), %edx
leaq (%rdx,%rdx,2), %rdx
movq 0x10(%rax), %rsi
movq %rsi, 0x10(%rcx,%rdx,8)
movups (%rax), %xmm0
movups %xmm0, (%rcx,%rdx,8)
movl 0x8(%rbx), %eax
incl %eax
movl %eax, 0x8(%rbx)
movq (%rbx), %rcx
leaq (%rax,%rax,2), %rax
leaq (%rcx,%rax,8), %rax
addq $-0x18, %rax
addq $0x20, %rsp
popq %rbx
retq
|
/llvm/ADT/SmallVector.h
|
void llvm::SmallVectorImpl<llvm::Value*>::append<llvm::Use*, void>(llvm::Use*, llvm::Use*)
|
void append(ItTy in_start, ItTy in_end) {
this->assertSafeToAddRange(in_start, in_end);
size_type NumInputs = std::distance(in_start, in_end);
this->reserve(this->size() + NumInputs);
this->uninitialized_copy(in_start, in_end, this->end());
this->set_size(this->size() + NumInputs);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
movq %rdx, %r12
subq %rsi, %r12
sarq $0x5, %r12
movl 0x8(%rdi), %edx
movl 0xc(%rdi), %eax
addq %r12, %rdx
cmpq %rax, %rdx
jbe 0xc1b404
leaq 0x10(%rbx), %rsi
movl $0x8, %ecx
movq %rbx, %rdi
callq 0x2b4ed86
movl 0x8(%rbx), %eax
cmpq %r14, %r15
je 0xc1b42a
leaq (,%rax,8), %rcx
addq (%rbx), %rcx
movq (%r15), %rdx
movq %rdx, (%rcx)
addq $0x20, %r15
addq $0x8, %rcx
cmpq %r14, %r15
jne 0xc1b417
addl %eax, %r12d
movl %r12d, 0x8(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/SmallVector.h
|
llvm::AMDGPUInstructionSelector::selectSWMMACIndex16(llvm::MachineOperand&) const
|
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSWMMACIndex16(MachineOperand &Root) const {
Register Src =
getDefIgnoringCopies(Root.getReg(), *MRI)->getOperand(0).getReg();
unsigned Key = 0;
Register ShiftSrc;
std::optional<ValueAndVReg> ShiftAmt;
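  // Recognize a logical shift right of a 32-bit value by exactly 16 and fold it into index_key = 1, selecting the shift source directly.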
if (mi_match(Src, *MRI, m_GLShr(m_Reg(ShiftSrc), m_GCst(ShiftAmt))) &&
MRI->getType(ShiftSrc).getSizeInBits() == 32 &&
ShiftAmt->Value.getZExtValue() == 16) {
Src = ShiftSrc;
Key = 1;
}
return {{
[=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(Key); } // index_key
}};
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x108, %rsp # imm = 0x108
movq %rsi, %r14
movq %rdi, %rbx
movl 0x4(%rdx), %edi
movq 0x48(%rsi), %rsi
callq 0x15e5c06
movq 0x20(%rax), %rax
movl 0x4(%rax), %ebp
leaq 0x4(%rsp), %rax
movl $0x0, (%rax)
leaq 0x8(%rsp), %r15
movb $0x0, 0x18(%r15)
movq 0x48(%r14), %rsi
leaq 0x78(%rsp), %rdi
movq %rax, (%rdi)
movq %r15, 0x8(%rdi)
leaq 0x30(%rsp), %rdx
movl %ebp, (%rdx)
callq 0xc3ae42
testb %al, %al
je 0xc1f4f1
movl 0x4(%rsp), %eax
testl %eax, %eax
jns 0xc1f4ad
movq 0x48(%r14), %rcx
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpl %eax, 0x1d0(%rcx)
jbe 0xc1f4ad
movq 0x1c8(%rcx), %rcx
movq (%rcx,%rax,8), %rax
jmp 0xc1f4af
xorl %eax, %eax
leaq 0x28(%rsp), %rdi
movq %rax, (%rdi)
callq 0x94022c
leaq 0x30(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
cmpq $0x20, %rax
jne 0xc1f4f1
cmpl $0x41, 0x10(%rsp)
jb 0xc1f4de
movq 0x8(%rsp), %r15
xorl %eax, %eax
cmpq $0x10, (%r15)
sete %cl
jne 0xc1f4ed
movl 0x4(%rsp), %ebp
movb %cl, %al
jmp 0xc1f4f3
xorl %eax, %eax
xorps %xmm0, %xmm0
leaq 0x70(%rsp), %rdx
movaps %xmm0, -0x40(%rdx)
movaps %xmm0, -0x30(%rdx)
movl %ebp, -0x40(%rdx)
leaq 0x1baad(%rip), %rcx # 0xc3afba
movq %rcx, -0x28(%rdx)
leaq 0x1bada(%rip), %rcx # 0xc3aff2
movq %rcx, -0x30(%rdx)
movaps %xmm0, -0x20(%rdx)
movaps %xmm0, -0x10(%rdx)
movl %eax, -0x20(%rdx)
leaq 0x1bae8(%rip), %rax # 0xc3b016
movq %rax, -0x8(%rdx)
leaq 0x1bb13(%rip), %rax # 0xc3b04c
movq %rax, -0x10(%rdx)
leaq 0x88(%rsp), %rax
movq %rax, -0x10(%rax)
movabsq $0x400000000, %r15 # imm = 0x400000000
movq %r15, -0x8(%rax)
leaq 0x78(%rsp), %r14
leaq 0x30(%rsp), %rsi
movq %r14, %rdi
callq 0x94c024
leaq 0x10(%rbx), %rax
movq %rax, (%rbx)
movq %r15, 0x8(%rbx)
cmpl $0x0, 0x8(%r14)
je 0xc1f588
leaq 0x78(%rsp), %rsi
movq %rbx, %rdi
callq 0x94c1b0
movb $0x1, 0x90(%rbx)
leaq 0x78(%rsp), %rdi
callq 0x94bed2
movq $-0x40, %r15
leaq 0x50(%rsp), %r14
movq 0x10(%r14), %rax
testq %rax, %rax
je 0xc1f5bb
movq %r14, %rdi
movq %r14, %rsi
movl $0x3, %edx
callq *%rax
addq $-0x20, %r14
addq $0x20, %r15
jne 0xc1f5a5
cmpb $0x1, 0x20(%rsp)
jne 0xc1f5e7
movb $0x0, 0x20(%rsp)
cmpl $0x41, 0x10(%rsp)
jb 0xc1f5e7
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0xc1f5e7
callq 0x7802b0
movq %rbx, %rax
addq $0x108, %rsp # imm = 0x108
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
llvm::AMDGPUInstructionSelector::selectGlobalLoadLds(llvm::MachineInstr&) const
|
bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
unsigned Opc;
unsigned Size = MI.getOperand(3).getImm();
switch (Size) {
default:
return false;
case 1:
Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
break;
case 2:
Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
break;
case 4:
Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
break;
}
MachineBasicBlock *MBB = MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
.add(MI.getOperand(2));
Register Addr = MI.getOperand(1).getReg();
Register VOffset;
// Try to split SAddr and VOffset. Global and LDS pointers share the same
// immediate offset, so we cannot use a regular SelectGlobalSAddr().
if (!isSGPR(Addr)) {
auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
if (isSGPR(AddrDef->Reg)) {
Addr = AddrDef->Reg;
} else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
Register SAddr =
getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
if (isSGPR(SAddr)) {
Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
Addr = SAddr;
VOffset = Off;
}
}
}
}
if (isSGPR(Addr)) {
Opc = AMDGPU::getGlobalSaddrOp(Opc);
if (!VOffset) {
VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
.addImm(0);
}
}
auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
.addReg(Addr);
if (isSGPR(Addr))
MIB.addReg(VOffset);
MIB.add(MI.getOperand(4)) // offset
.add(MI.getOperand(5)); // cpol
MachineMemOperand *LoadMMO = *MI.memoperands_begin();
MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
LoadPtrI.Offset = MI.getOperand(4).getImm();
MachinePointerInfo StorePtrI = LoadPtrI;
LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
auto F = LoadMMO->getFlags() &
~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
Size, LoadMMO->getBaseAlign());
MachineMemOperand *StoreMMO =
MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
sizeof(int32_t), Align(4));
MIB.setMemRefs({LoadMMO, StoreMMO});
MI.eraseFromParent();
return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq 0x20(%rsi), %rax
movq 0x70(%rax), %r13
cmpl $0x1, %r13d
je 0xc30005
cmpl $0x4, %r13d
je 0xc2fffb
cmpl $0x2, %r13d
jne 0xc30148
movl $0xdcc, 0x30(%rsp) # imm = 0xDCC
jmp 0xc3000d
movl $0xdc4, 0x30(%rsp) # imm = 0xDC4
jmp 0xc3000d
movl $0xdca, 0x30(%rsp) # imm = 0xDCA
movq 0x18(%r14), %r15
movq 0x38(%r14), %rsi
movq %rsi, 0x68(%rsp)
testq %rsi, %rsi
je 0xc3002e
leaq 0x68(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x68(%rsp), %rsi
movq %rsi, 0x70(%rsp)
testq %rsi, %rsi
movq %r14, %rbp
je 0xc3005c
leaq 0x68(%rsp), %r14
leaq 0x70(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
movq %rbp, %r14
xorps %xmm0, %xmm0
movups %xmm0, 0x78(%rsp)
movq 0x58(%rbx), %rax
movq $-0x260, %rcx # imm = 0xFDA0
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%r14)
jne 0xc30092
leaq 0x70(%rsp), %rdx
movq %r15, %rdi
movq %r14, %rsi
movl $0x142, %r8d # imm = 0x142
callq 0x90f593
jmp 0xc300a8
leaq 0x70(%rsp), %rdx
movq %r15, %rdi
movq %r14, %rsi
movl $0x142, %r8d # imm = 0x142
callq 0x9105ce
movq %rdx, %rdi
movq 0x20(%r14), %rdx
addq $0x40, %rdx
movq %rax, %rsi
callq 0x1d3c22c
movq 0x70(%rsp), %rsi
testq %rsi, %rsi
je 0xc300cf
leaq 0x70(%rsp), %rdi
callq 0x2a758fc
movq %r15, 0x38(%rsp)
movq 0x68(%rsp), %rsi
testq %rsi, %rsi
je 0xc300e8
leaq 0x68(%rsp), %rdi
callq 0x2a758fc
movq 0x20(%r14), %rax
movl 0x24(%rax), %r12d
movq 0x68(%rbx), %rdi
movq 0x48(%rbx), %rdx
movq 0x60(%rbx), %rcx
movl %r12d, %esi
callq 0x1e294f4
xorl %r15d, %r15d
cmpl $0x1, (%rax)
je 0xc301ab
movq 0x48(%rbx), %rdx
leaq 0x70(%rsp), %r14
movq %r14, %rdi
movl %r12d, %esi
callq 0x15e5b42
movl 0x8(%r14), %esi
movq 0x68(%rbx), %rdi
movq 0x48(%rbx), %rdx
movq 0x60(%rbx), %rcx
callq 0x1e294f4
cmpl $0x1, (%rax)
jne 0xc3014f
xorl %r15d, %r15d
movl 0x78(%rsp), %r12d
jmp 0xc301a8
xorl %eax, %eax
jmp 0xc305b1
movq 0x70(%rsp), %rax
xorl %r15d, %r15d
cmpw $0xd4, 0x44(%rax)
jne 0xc301a8
movq 0x20(%rax), %rax
movl 0x24(%rax), %edi
movq 0x48(%rbx), %rsi
callq 0x15e5c32
movl %eax, %r14d
movq 0x68(%rbx), %rdi
movq 0x48(%rbx), %rdx
movq 0x60(%rbx), %rcx
movl %eax, %esi
callq 0x1e294f4
cmpl $0x1, (%rax)
jne 0xc301a8
movq 0x70(%rsp), %rax
movq 0x20(%rax), %rax
movl 0x44(%rax), %esi
movq 0x48(%rbx), %rdi
callq 0xc37064
movl %eax, %r15d
testl %eax, %eax
cmovnel %r14d, %r12d
movq %rbp, %r14
movq 0x68(%rbx), %rdi
movq 0x48(%rbx), %rdx
movq 0x60(%rbx), %rcx
movl %r12d, %esi
callq 0x1e294f4
cmpl $0x1, (%rax)
jne 0xc302d6
movl 0x30(%rsp), %edi
callq 0x191465d
movl %eax, 0x30(%rsp)
testl %r15d, %r15d
jne 0xc302d6
movq 0x48(%rbx), %rdi
leaq 0x4b37eef(%rip), %rsi # 0x57680d8
leaq 0x332b2c8(%rip), %rdx # 0x3f5b4b8
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, %r15d
movq 0x38(%r14), %rsi
movq %rsi, 0x60(%rsp)
testq %rsi, %rsi
je 0xc30217
leaq 0x60(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x60(%rsp), %rsi
movq %rsi, 0x40(%rsp)
testq %rsi, %rsi
je 0xc30242
leaq 0x60(%rsp), %r14
leaq 0x40(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
movq %rbp, %r14
xorps %xmm0, %xmm0
movups %xmm0, 0x48(%rsp)
movq 0x58(%rbx), %rax
movq $-0x41e60, %rcx # imm = 0xFFFBE1A0
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%r14)
jne 0xc30277
leaq 0x40(%rsp), %rdx
movq 0x38(%rsp), %rdi
movq %r14, %rsi
movl %r15d, %r8d
callq 0x90f593
jmp 0xc3028c
leaq 0x40(%rsp), %rdx
movq 0x38(%rsp), %rdi
movq %r14, %rsi
movl %r15d, %r8d
callq 0x9105ce
movq %rdx, %rdi
movl $0xfff00000, %ecx # imm = 0xFFF00000
leaq 0x70(%rsp), %rdx
andl (%rdx), %ecx
incl %ecx
movl %ecx, (%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq 0x40(%rsp), %rsi
testq %rsi, %rsi
je 0xc302c2
leaq 0x40(%rsp), %rdi
callq 0x2a758fc
movq 0x60(%rsp), %rsi
testq %rsi, %rsi
je 0xc302d6
leaq 0x60(%rsp), %rdi
callq 0x2a758fc
movq 0x38(%r14), %rsi
movq %rsi, 0x58(%rsp)
testq %rsi, %rsi
je 0xc302f3
leaq 0x58(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x58(%rsp), %rsi
movq %rsi, 0x40(%rsp)
testq %rsi, %rsi
je 0xc3031b
leaq 0x58(%rsp), %r14
leaq 0x40(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x40(%rsp), %r14
movups %xmm0, 0x8(%r14)
movq 0x58(%rbx), %rax
movq 0x8(%rax), %rcx
movl 0x30(%rsp), %eax
shlq $0x5, %rax
subq %rax, %rcx
movq 0x38(%rsp), %rdi
movq %rbp, %rsi
movq %r14, %rdx
callq 0x93f66e
movq %rdx, %rdi
leaq 0x70(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x0, (%rdx)
movl %r12d, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rdi, 0x30(%rsp)
movq %rax, 0x38(%rsp)
movq %rax, %rsi
callq 0x1d3c22c
movq (%r14), %rsi
testq %rsi, %rsi
je 0xc30390
leaq 0x40(%rsp), %rdi
callq 0x2a758fc
movq 0x58(%rsp), %rsi
testq %rsi, %rsi
je 0xc303a4
leaq 0x58(%rsp), %rdi
callq 0x2a758fc
movl %r13d, %eax
movq %rax, 0x90(%rsp)
movq 0x68(%rbx), %rdi
movq 0x48(%rbx), %rdx
movq 0x60(%rbx), %rcx
movl %r12d, %esi
callq 0x1e294f4
cmpl $0x1, (%rax)
movq %rbp, %r14
jne 0xc303f8
leaq 0x70(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x0, (%rdx)
movl %r15d, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq 0x30(%rsp), %rdi
movq 0x38(%rsp), %rsi
callq 0x1d3c22c
movq 0x20(%r14), %rdx
subq $-0x80, %rdx
movq 0x30(%rsp), %r15
movq %r15, %rdi
movq 0x38(%rsp), %r12
movq %r12, %rsi
callq 0x1d3c22c
movl $0xa0, %edx
addq 0x20(%r14), %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movq 0x30(%r14), %rax
movq %rbx, %r15
testb $0x7, %al
je 0xc30441
andq $-0x8, %rax
addq $0x10, %rax
movq %rax, %rcx
jmp 0xc30448
leaq 0x30(%r14), %rcx
movq %rax, (%rcx)
movq (%rcx), %rax
movq (%rax), %rbx
movl 0x14(%rax), %r8d
movl %r8d, 0x9c(%rsp)
movq 0x20(%r14), %rcx
movq 0x90(%rcx), %rbp
movzwl 0x20(%rax), %r12d
andl $0x1fc, %r12d # imm = 0x1FC
movq 0x18(%r15), %rdi
movl $0x1, %edx
movl %r12d, %esi
orl %edx, %esi
movzbl 0x22(%rax), %ecx
xorps %xmm0, %xmm0
leaq 0x70(%rsp), %r13
movaps %xmm0, 0x10(%r13)
movaps %xmm0, (%r13)
movq %rbx, 0x40(%rsp)
movq %rbp, 0x48(%rsp)
movl %edx, 0x50(%rsp)
movl %r8d, 0x54(%rsp)
movq 0x50(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x40(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, 0xa0(%rsp)
xorl %r14d, %r14d
movl %r14d, 0x28(%rsp)
movl %r14d, 0x20(%rsp)
movl %edx, 0x18(%rsp)
movq 0x90(%rsp), %rdx
movq %r13, %r8
xorl %r9d, %r9d
callq 0x1d3482e
movq %rax, 0x90(%rsp)
movq 0x18(%r15), %rdi
orl $0x2, %r12d
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%r13)
movaps %xmm0, (%r13)
movq %rbx, 0x40(%rsp)
movq %rbp, 0x48(%rsp)
movl $0x3, 0x50(%rsp)
movl 0x9c(%rsp), %eax
movl %eax, 0x54(%rsp)
movq 0x50(%rsp), %rax
movq %rax, 0x10(%rsp)
movups 0x40(%rsp), %xmm0
movups %xmm0, (%rsp)
movl %r14d, 0x28(%rsp)
movl %r14d, 0x20(%rsp)
movl $0x1, %eax
movl %eax, 0x18(%rsp)
leaq 0x70(%rsp), %r13
movl $0x4, %edx
movl %r12d, %esi
movl $0x2, %ecx
movq %r13, %r8
xorl %r9d, %r9d
callq 0x1d3482e
movq 0x90(%rsp), %rcx
movq %rcx, (%r13)
movq %rax, 0x8(%r13)
leaq 0x70(%rsp), %rdx
movl $0x2, %ecx
movq 0x30(%rsp), %rbx
movq %rbx, %rdi
movq 0x38(%rsp), %rsi
callq 0x1d3cb36
movq 0xa0(%rsp), %rdi
callq 0x1d3deba
movq 0x58(%r15), %rsi
movq 0x60(%r15), %rdx
movq 0x68(%r15), %rcx
movq %rbx, %rdi
callq 0x15e4a41
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
llvm::AMDGPUInstructionSelector::selectG_SELECT(llvm::MachineInstr&) const
|
bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
if (selectImpl(I, *CoverageInfo))
return true;
MachineBasicBlock *BB = I.getParent();
const DebugLoc &DL = I.getDebugLoc();
Register DstReg = I.getOperand(0).getReg();
unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
assert(Size <= 32 || Size == 64);
const MachineOperand &CCOp = I.getOperand(1);
Register CCReg = CCOp.getReg();
if (!isVCC(CCReg, *MRI)) {
unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
AMDGPU::S_CSELECT_B32;
MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
.addReg(CCReg);
// The generic constrainSelectedInstRegOperands doesn't work for the scc register
// bank, because it does not cover the register class that we used to represent
// for it. So we need to manually set the register class here.
if (!MRI->getRegClassOrNull(CCReg))
MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
.add(I.getOperand(2))
.add(I.getOperand(3));
bool Ret = false;
Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
I.eraseFromParent();
return Ret;
}
// Wide VGPR select should have been split in RegBankSelect.
if (Size > 32)
return false;
MachineInstr *Select =
BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
.addImm(0)
.add(I.getOperand(3))
.addImm(0)
.add(I.getOperand(2))
.add(I.getOperand(1));
bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
I.eraseFromParent();
return Ret;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq 0x8(%rdi), %rdx
callq 0xc22914
movb $0x1, %bpl
testb %al, %al
jne 0xc31518
movq 0x18(%rbx), %rax
movq %rax, (%rsp)
movq 0x20(%rbx), %rax
movl 0x4(%rax), %r13d
movq 0x68(%r14), %rdi
movq 0x48(%r14), %rdx
movq 0x60(%r14), %rcx
movl %r13d, %esi
callq 0x1e29b72
leaq 0x40(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
movq %rax, %rbp
movq 0x20(%rbx), %r15
movl 0x24(%r15), %r12d
movq 0x48(%r14), %rdx
movq %r14, %rdi
movl %r12d, %esi
callq 0xc270e2
testb %al, %al
je 0xc3113e
cmpl $0x20, %ebp
jbe 0xc311d1
xorl %ebp, %ebp
jmp 0xc31518
movl %r13d, 0x24(%rsp)
xorl %r13d, %r13d
cmpl $0x40, %ebp
sete %bpl
movq 0x38(%rbx), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0xc3116a
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x8(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0xc31194
leaq 0x8(%rsp), %rdi
leaq 0x28(%rsp), %rdx
callq 0x2a759cc
leaq 0x8(%rsp), %rax
movq $0x0, (%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0x30(%rsp)
movq 0x58(%r14), %rax
movq $-0x260, %rcx # imm = 0xFDA0
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%rbx)
jne 0xc31250
leaq 0x28(%rsp), %rdx
movq (%rsp), %rdi
movq %rbx, %rsi
movl $0x12, %r8d
callq 0x90f593
jmp 0xc31267
movq 0x38(%rbx), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0xc311ee
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x10(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0xc31216
leaq 0x10(%rsp), %r15
leaq 0x28(%rsp), %rdx
movq %r15, %rdi
callq 0x2a759cc
movq $0x0, (%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x30(%rsp)
movq 0x58(%r14), %rax
movq $-0x39aa0, %rcx # imm = 0xFFFC6560
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%rbx)
jne 0xc31431
leaq 0x28(%rsp), %rdx
movq (%rsp), %rdi
movq %rbx, %rsi
movl %r13d, %r8d
callq 0x90f593
jmp 0xc31445
leaq 0x28(%rsp), %rdx
movq (%rsp), %rdi
movq %rbx, %rsi
movl $0x12, %r8d
callq 0x9105ce
movq %rdx, %rcx
leaq 0x40(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x0, (%rdx)
movl %r12d, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rcx, 0x60(%rsp)
movq %rcx, %rdi
movq %rax, %rsi
callq 0x1d3c22c
movq 0x28(%rsp), %rsi
testq %rsi, %rsi
je 0xc312ac
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0xc312c0
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movb %bpl, %r13b
movq 0x48(%r14), %rbp
movl %r12d, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
movq 0x38(%rbp), %rcx
shlq $0x4, %rax
movq (%rcx,%rax), %rax
testb $0x4, %al
sete %cl
cmpq $0x8, %rax
setae %al
testb %cl, %al
jne 0xc3130c
addq $0x20, %r15
movq 0x60(%r14), %rdi
movq %r15, %rsi
movq %rbp, %rdx
callq 0xbef290
movq %rbp, %rdi
movl %r12d, %esi
movq %rax, %rdx
callq 0x1d82b04
movq 0x38(%rbx), %rsi
movq %rsi, 0x18(%rsp)
testq %rsi, %rsi
je 0xc31329
leaq 0x18(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
xorq $-0x117d, %r13 # imm = 0xEE83
movq 0x18(%rsp), %rsi
movq %rsi, 0x40(%rsp)
testq %rsi, %rsi
je 0xc31358
leaq 0x18(%rsp), %r15
leaq 0x40(%rsp), %rdx
movq %r15, %rdi
callq 0x2a759cc
movq $0x0, (%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x48(%rsp)
movq 0x58(%r14), %rax
shlq $0x5, %r13
addq 0x8(%rax), %r13
testb $0x4, 0x2c(%rbx)
jne 0xc3138d
leaq 0x40(%rsp), %rdx
movq (%rsp), %rdi
movq %rbx, %rsi
movq %r13, %rcx
movl 0x24(%rsp), %r8d
callq 0x90f593
jmp 0xc313a6
leaq 0x40(%rsp), %rdx
movq (%rsp), %rdi
movq %rbx, %rsi
movq %r13, %rcx
movl 0x24(%rsp), %r8d
callq 0x9105ce
movq %rax, %r12
movq %rdx, %r15
movq 0x20(%rbx), %rdx
addq $0x40, %rdx
movq %r15, %rdi
movq %rax, %rsi
callq 0x1d3c22c
movq 0x20(%rbx), %rdx
addq $0x60, %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movq 0x40(%rsp), %rsi
testq %rsi, %rsi
je 0xc313e6
leaq 0x40(%rsp), %rdi
callq 0x2a758fc
movq 0x18(%rsp), %rsi
testq %rsi, %rsi
je 0xc313fa
leaq 0x18(%rsp), %rdi
callq 0x2a758fc
movq 0x58(%r14), %rsi
movq 0x60(%r14), %rdx
movq 0x68(%r14), %rcx
movq %r15, %rdi
callq 0x15e4a41
movl %eax, %r15d
movq 0x58(%r14), %rsi
movq 0x60(%r14), %rdx
movq 0x68(%r14), %rcx
movq 0x60(%rsp), %rdi
callq 0x15e4a41
movl %eax, %ebp
orb %r15b, %bpl
jmp 0xc31510
leaq 0x28(%rsp), %rdx
movq (%rsp), %rdi
movq %rbx, %rsi
movl %r13d, %r8d
callq 0x9105ce
movq %rax, %r12
movq %rdx, %r15
movl $0xfff00000, %ebp # imm = 0xFFF00000
leaq 0x40(%rsp), %r13
movl (%r13), %eax
andl %ebp, %eax
incl %eax
movl %eax, (%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%r13)
movq %rdx, %rdi
movq %r12, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movq 0x20(%rbx), %rdx
addq $0x60, %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
andl (%r13), %ebp
incl %ebp
movl %ebp, (%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x8(%r13)
leaq 0x40(%rsp), %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movq 0x20(%rbx), %rdx
addq $0x40, %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movq 0x20(%rbx), %rdx
addq $0x20, %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movq 0x28(%rsp), %rsi
testq %rsi, %rsi
je 0xc314e6
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movq 0x10(%rsp), %rsi
testq %rsi, %rsi
je 0xc314fa
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movq 0x58(%r14), %rsi
movq 0x60(%r14), %rdx
movq 0x68(%r14), %rcx
movq %r15, %rdi
callq 0x15e4a41
movl %eax, %ebp
movq %rbx, %rdi
callq 0x1d3deba
movl %ebp, %eax
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
llvm::AMDGPUInstructionSelector::selectG_BRCOND(llvm::MachineInstr&) const
|
bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
MachineBasicBlock *BB = I.getParent();
MachineOperand &CondOp = I.getOperand(0);
Register CondReg = CondOp.getReg();
const DebugLoc &DL = I.getDebugLoc();
unsigned BrOpcode;
Register CondPhysReg;
const TargetRegisterClass *ConstrainRC;
// In SelectionDAG, we inspect the IR block for uniformity metadata to decide
// whether the branch is uniform when selecting the instruction. In
// GlobalISel, we should push that decision into RegBankSelect. Assume for now
// RegBankSelect knows what it's doing if the branch condition is scc, even
// though it currently does not.
if (!isVCC(CondReg, *MRI)) {
if (MRI->getType(CondReg) != LLT::scalar(32))
return false;
CondPhysReg = AMDGPU::SCC;
BrOpcode = AMDGPU::S_CBRANCH_SCC1;
ConstrainRC = &AMDGPU::SReg_32RegClass;
} else {
// FIXME: Should scc->vcc copies and with exec?
// Unless the value of CondReg is a result of a V_CMP* instruction then we
// need to insert an and with exec.
if (!isVCmpResult(CondReg, *MRI)) {
const bool Is64 = STI.isWave64();
const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
.addReg(CondReg)
.addReg(Exec)
.setOperandDead(3); // Dead scc
CondReg = TmpReg;
}
CondPhysReg = TRI.getVCC();
BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
ConstrainRC = TRI.getBoolRC();
}
if (!MRI->getRegClassOrNull(CondReg))
MRI->setRegClass(CondReg, ConstrainRC);
BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
.addReg(CondReg);
BuildMI(*BB, &I, DL, TII.get(BrOpcode))
.addMBB(I.getOperand(1).getMBB());
I.eraseFromParent();
return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rsi, %rbx
movq %rdi, %r15
movq 0x18(%rsi), %r12
movq 0x20(%rsi), %rax
movl 0x4(%rax), %r13d
movq 0x48(%rdi), %rdx
movl %r13d, %esi
callq 0xc270e2
movq 0x48(%r15), %rsi
testb %al, %al
je 0xc34d21
movl %r13d, %edi
callq 0xc35113
testb %al, %al
je 0xc34d69
jmp 0xc34ef5
testl %r13d, %r13d
jns 0xc34d62
movl %r13d, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpl %eax, 0x1d0(%rsi)
jbe 0xc34d62
movq 0x1c8(%rsi), %rcx
cmpq $0x101, (%rcx,%rax,8) # imm = 0x101
jne 0xc34d62
movq $-0x113a, 0x8(%rsp) # imm = 0xEEC6
leaq 0x4b33469(%rip), %rdx # 0x57681c0
movl $0x12, %r14d
jmp 0xc34f27
xorl %eax, %eax
jmp 0xc35104
movq %r12, 0x8(%rsp)
movq 0x78(%r15), %rax
movb 0x184(%rax), %bpl
xorl %r12d, %r12d
cmpb $0x6, %bpl
setne %al
movq 0x48(%r15), %rdi
movq 0x60(%r15), %rcx
cmpb $0x0, 0x141(%rcx)
je 0xc34d9d
leaq 0x4b33425(%rip), %rsi # 0x57681c0
jmp 0xc34da4
leaq 0x4b33fdc(%rip), %rsi # 0x5768d80
movb %al, %r12b
leaq 0x332670a(%rip), %rdx # 0x3f5b4b8
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, 0x4(%rsp)
movq 0x38(%rbx), %rsi
movq %rsi, 0x20(%rsp)
testq %rsi, %rsi
je 0xc34dd6
leaq 0x20(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x20(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0xc34dfe
leaq 0x20(%rsp), %r14
leaq 0x28(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
leal 0x1(,%r12,2), %eax
movl %eax, 0x44(%rsp)
xorl %ecx, %ecx
cmpb $0x6, %bpl
setne %cl
xorps %xmm0, %xmm0
movups %xmm0, 0x30(%rsp)
movq 0x58(%r15), %rax
shll $0x6, %ecx
orq $-0x1f0e0, %rcx # imm = 0xFFFE0F20
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%rbx)
jne 0xc34e4c
leaq 0x28(%rsp), %rdx
movq 0x8(%rsp), %rdi
movq %rbx, %rsi
movl 0x4(%rsp), %r8d
callq 0x90f593
jmp 0xc34e63
leaq 0x28(%rsp), %rdx
movq 0x8(%rsp), %rdi
movq %rbx, %rsi
movl 0x4(%rsp), %r8d
callq 0x9105ce
movq %rdx, %r14
xorl %r12d, %r12d
leaq 0x48(%rsp), %rbp
movq %r12, 0x8(%rbp)
movl %r12d, (%rbp)
movl %r13d, 0x4(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbp)
movq %rdx, %rdi
movq %rax, %r13
movq %rax, %rsi
movq %rbp, %rdx
callq 0x1d3c22c
movq %r12, 0x8(%rbp)
movl %r12d, (%rbp)
movl 0x44(%rsp), %eax
movl %eax, 0x4(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbp)
leaq 0x48(%rsp), %rdx
movq %r14, %rdi
movq %r13, %rsi
callq 0x1d3c22c
movq 0x20(%r14), %rax
orl $0x4000000, 0x60(%rax) # imm = 0x4000000
movq 0x28(%rsp), %rsi
testq %rsi, %rsi
je 0xc34ed7
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movq 0x20(%rsp), %rsi
testq %rsi, %rsi
movl 0x4(%rsp), %r13d
je 0xc34ef0
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
movq 0x8(%rsp), %r12
movq 0x60(%r15), %rdi
callq 0xbef36a
movl %eax, %r14d
movq 0x60(%r15), %rax
cmpb $0x0, 0x141(%rax)
je 0xc34f17
leaq 0x4b332ab(%rip), %rdx # 0x57681c0
jmp 0xc34f1e
leaq 0x4b33e62(%rip), %rdx # 0x5768d80
movq $-0x113c, 0x8(%rsp) # imm = 0xEEC4
movq 0x48(%r15), %rdi
movl %r13d, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
movq 0x38(%rdi), %rcx
shlq $0x4, %rax
movq (%rcx,%rax), %rax
testb $0x4, %al
sete %cl
cmpq $0x8, %rax
setae %al
testb %cl, %al
jne 0xc34f57
movl %r13d, %esi
callq 0x1d82b04
movq 0x38(%rbx), %rsi
movq %rsi, 0x18(%rsp)
testq %rsi, %rsi
je 0xc34f74
leaq 0x18(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x18(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0xc34f9d
leaq 0x18(%rsp), %rbp
leaq 0x28(%rsp), %rdx
movq %rbp, %rdi
callq 0x2a759cc
movq $0x0, (%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x30(%rsp)
movq 0x58(%r15), %rax
movq $-0x260, %rcx # imm = 0xFDA0
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%rbx)
jne 0xc34fcf
leaq 0x28(%rsp), %rdx
movq %r12, %rdi
movq %rbx, %rsi
movl %r14d, %r8d
callq 0x90f593
jmp 0xc34fe2
leaq 0x28(%rsp), %rdx
movq %r12, %rdi
movq %rbx, %rsi
movl %r14d, %r8d
callq 0x9105ce
movq %rdx, %rdi
leaq 0x48(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x0, (%rdx)
movl %r13d, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq 0x28(%rsp), %rsi
testq %rsi, %rsi
je 0xc3501f
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movq 0x18(%rsp), %rsi
testq %rsi, %rsi
je 0xc35033
leaq 0x18(%rsp), %rdi
callq 0x2a758fc
movq 0x38(%rbx), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0xc35050
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x10(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0xc35078
leaq 0x10(%rsp), %r14
leaq 0x28(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x28(%rsp), %r14
movups %xmm0, 0x8(%r14)
movq 0x58(%r15), %rax
movq 0x8(%rsp), %rcx
shlq $0x5, %rcx
addq 0x8(%rax), %rcx
movq %r12, %rdi
movq %rbx, %rsi
movq %r14, %rdx
callq 0x93f66e
movq %rdx, %rdi
movq 0x20(%rbx), %rcx
movq 0x30(%rcx), %rcx
movl $0xfff00000, %esi # imm = 0xFFF00000
leaq 0x48(%rsp), %rdx
andl (%rdx), %esi
orl $0x4, %esi
movl %esi, (%rdx)
movq $0x0, 0x8(%rdx)
movq %rcx, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq (%r14), %rsi
testq %rsi, %rsi
je 0xc350e6
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movq 0x10(%rsp), %rsi
testq %rsi, %rsi
je 0xc350fa
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rdi
callq 0x1d3deba
movb $0x1, %al
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
isVCmpResult(llvm::Register, llvm::MachineRegisterInfo&)
|
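// Recursively looks through COPY and G_AND/G_OR/G_XOR to decide whether Reg is ultimately defined by a compare (G_ICMP/G_FCMP) or the amdgcn.class intrinsic.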
static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
if (Reg.isPhysical())
return false;
MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
const unsigned Opcode = MI.getOpcode();
if (Opcode == AMDGPU::COPY)
return isVCmpResult(MI.getOperand(1).getReg(), MRI);
if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
Opcode == AMDGPU::G_XOR)
return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
isVCmpResult(MI.getOperand(2).getReg(), MRI);
if (auto *GI = dyn_cast<GIntrinsic>(&MI))
return GI->is(Intrinsic::amdgcn_class);
return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %rbx
movl %edi, %esi
leal -0x1(%rsi), %eax
cmpl $0x3fffffff, %eax # imm = 0x3FFFFFFF
jb 0xc3516b
movq %rbx, %rdi
callq 0x1d835be
movq %rax, %r14
movzwl 0x44(%rax), %r15d
leal -0x3c(%r15), %eax
cmpl $0x3, %eax
jae 0xc3515c
movq 0x20(%r14), %rax
movl 0x24(%rax), %edi
movq %rbx, %rsi
callq 0xc35113
testb %al, %al
je 0xc3516b
movq 0x20(%r14), %rax
movl 0x44(%rax), %esi
jmp 0xc3511d
cmpl $0x13, %r15d
jne 0xc35175
movq 0x20(%r14), %rax
movl 0x24(%rax), %esi
jmp 0xc3511d
xorl %eax, %eax
andb $0x1, %al
popq %rbx
popq %r14
popq %r15
retq
leal -0x7d(%r15), %eax
cmpl $-0x4, %eax
setb %al
testq %r14, %r14
sete %bl
orb %al, %bl
jne 0xc351a6
movq %r14, %rdi
callq 0x1d3dfae
movq 0x20(%r14), %rcx
movl %eax, %eax
shlq $0x5, %rax
cmpl $0x7a6, 0x10(%rcx,%rax) # imm = 0x7A6
sete %al
testb %bl, %bl
je 0xc3516d
addl $0xffffff73, %r15d # imm = 0xFFFFFF73
cmpl $0x2, %r15d
setb %al
jmp 0xc3516d
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
llvm::AMDGPUInstructionSelector::selectStackRestore(llvm::MachineInstr&) const
|
bool AMDGPUInstructionSelector::selectStackRestore(MachineInstr &MI) const {
Register SrcReg = MI.getOperand(0).getReg();
if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI))
return false;
MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
Register SP =
Subtarget->getTargetLowering()->getStackPointerRegisterToSaveRestore();
Register WaveAddr = getWaveAddress(DefMI);
MachineBasicBlock *MBB = MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
if (!WaveAddr) {
WaveAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), WaveAddr)
.addReg(SrcReg)
.addImm(Subtarget->getWavefrontSizeLog2())
.setOperandDead(3); // Dead scc
}
BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), SP)
.addReg(WaveAddr);
MI.eraseFromParent();
return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rsi, %rbx
movq %rdi, %r15
movq 0x20(%rsi), %rax
movl 0x4(%rax), %r12d
movq 0x48(%rdi), %rdx
leaq 0x4b30d6f(%rip), %rsi # 0x57681c0
movl %r12d, %edi
callq 0x1e296b2
movq %rax, %r13
testq %rax, %rax
je 0xc376d8
movq 0x48(%r15), %rdi
movl %r12d, %esi
callq 0x1d835a8
movq 0x50(%r15), %rcx
xorl %ebp, %ebp
cmpw $0xe45, 0x44(%rax) # imm = 0xE45
jne 0xc37486
movq 0x20(%rax), %rax
movl 0x24(%rax), %ebp
movl 0x688(%rcx), %eax
movl %eax, 0x1c(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x20(%rsp)
testl %ebp, %ebp
jne 0xc375ee
movq 0x48(%r15), %rdi
leaq 0x4b30d14(%rip), %rsi # 0x57681c0
leaq 0x3324005(%rip), %rdx # 0x3f5b4b8
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, %ebp
movq 0x38(%rbx), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0xc374d9
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x10(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0xc37501
leaq 0x10(%rsp), %r14
leaq 0x28(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
movq %r13, 0x40(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x30(%rsp)
movq 0x58(%r15), %rax
movq $-0x23ea0, %rcx # imm = 0xFFFDC160
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%rbx)
jne 0xc3753a
leaq 0x28(%rsp), %rdx
movq 0x20(%rsp), %rdi
movq %rbx, %rsi
movl %ebp, %r8d
callq 0x90f593
jmp 0xc3754f
leaq 0x28(%rsp), %rdx
movq 0x20(%rsp), %rdi
movq %rbx, %rsi
movl %ebp, %r8d
callq 0x9105ce
movq %rdx, %r13
xorl %ecx, %ecx
leaq 0x48(%rsp), %r14
movq %rcx, 0x8(%r14)
movl $0x0, (%r14)
movl %r12d, 0x4(%r14)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r14)
movq %rdx, %rdi
movq %rax, %r12
movq %rax, %rsi
movq %r14, %rdx
callq 0x1d3c22c
movq 0x50(%r15), %rax
movsbq 0x184(%rax), %rax
movl %eax, %eax
movl $0xfff00000, %ecx # imm = 0xFFF00000
andl (%r14), %ecx
incl %ecx
movl %ecx, (%r14)
xorl %ecx, %ecx
movq %rcx, 0x8(%r14)
movq %rax, 0x10(%r14)
leaq 0x48(%rsp), %rdx
movq %r13, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movq 0x20(%r13), %rax
orl $0x4000000, 0x60(%rax) # imm = 0x4000000
movq 0x28(%rsp), %rsi
testq %rsi, %rsi
je 0xc375d5
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movq 0x10(%rsp), %rsi
testq %rsi, %rsi
movq 0x40(%rsp), %r13
je 0xc375ee
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movq 0x38(%rbx), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0xc3760b
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x8(%rsp), %rsi
movq %rsi, 0x28(%rsp)
testq %rsi, %rsi
je 0xc37633
leaq 0x8(%rsp), %r14
leaq 0x28(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
movups %xmm0, 0x30(%rsp)
movq 0x58(%r15), %rax
movq $-0x260, %rcx # imm = 0xFDA0
addq 0x8(%rax), %rcx
testb $0x4, 0x2c(%rbx)
jne 0xc37669
leaq 0x28(%rsp), %rdx
movq 0x20(%rsp), %rdi
movq %rbx, %rsi
movl 0x1c(%rsp), %r8d
callq 0x90f593
jmp 0xc37680
leaq 0x28(%rsp), %rdx
movq 0x20(%rsp), %rdi
movq %rbx, %rsi
movl 0x1c(%rsp), %r8d
callq 0x9105ce
movq %rdx, %rdi
leaq 0x48(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x0, (%rdx)
movl %ebp, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq 0x28(%rsp), %rsi
testq %rsi, %rsi
je 0xc376bc
leaq 0x28(%rsp), %rdi
callq 0x2a758fc
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0xc376d0
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rdi
callq 0x1d3deba
testq %r13, %r13
setne %al
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
buildRegSequence(llvm::SmallVectorImpl<llvm::Register>&, llvm::MachineInstr*, llvm::MachineRegisterInfo&)
|
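// Emits a REG_SEQUENCE collecting 2, 4, or 8 registers into a 64-, 128-, or 256-bit VGPR tuple, tagging each element with its per-channel subregister index.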
static Register buildRegSequence(SmallVectorImpl<Register> &Elts,
MachineInstr *InsertPt,
MachineRegisterInfo &MRI) {
const TargetRegisterClass *DstRegClass;
switch (Elts.size()) {
case 8:
DstRegClass = &AMDGPU::VReg_256RegClass;
break;
case 4:
DstRegClass = &AMDGPU::VReg_128RegClass;
break;
case 2:
DstRegClass = &AMDGPU::VReg_64RegClass;
break;
default:
llvm_unreachable("unhandled Reg sequence size");
}
MachineIRBuilder B(*InsertPt);
auto MIB = B.buildInstr(AMDGPU::REG_SEQUENCE)
.addDef(MRI.createVirtualRegister(DstRegClass));
for (unsigned i = 0; i < Elts.size(); ++i) {
MIB.addReg(Elts[i]);
MIB.addImm(SIRegisterInfo::getSubRegFromChannel(i));
}
return MIB->getOperand(0).getReg();
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movq %rdx, %r12
movq %rdi, %rbx
movslq 0x8(%rdi), %rax
leaq 0x4b47d38(%rip), %rcx # 0x577fcb8
movq -0x10(%rcx,%rax,8), %r13
leaq 0x20(%rsp), %r14
movq %r14, %rdi
callq 0x94cecc
movq %r14, %rdi
movl $0x12, %esi
callq 0x15dd5c4
movq %r14, %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rax, %r15
movq %rdx, %r14
leaq 0x3323501(%rip), %rdx # 0x3f5b4b8
xorl %ebp, %ebp
movq %r12, %rdi
movq %r13, %rsi
xorl %ecx, %ecx
callq 0x1d82fd6
movq %rsp, %rdx
movq $0x0, 0x8(%rdx)
movl $0x1000000, (%rdx) # imm = 0x1000000
movl %eax, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %r14, %rdi
movq %r15, %rsi
callq 0x1d3c22c
cmpl $0x0, 0x8(%rbx)
je 0xc38066
movq %rsp, %r12
xorl %r13d, %r13d
movq (%rbx), %rax
movl (%rax,%r13,4), %eax
movq %rbp, 0x8(%rsp)
movl %ebp, (%rsp)
movl %eax, 0x4(%rsp)
xorps %xmm0, %xmm0
leaq 0x10(%rsp), %rax
movups %xmm0, (%rax)
movl %ebp, (%rsp)
movq %r14, %rdi
movq %r15, %rsi
movq %r12, %rdx
callq 0x1d3c22c
movl %r13d, %edi
movl $0x1, %esi
callq 0xbe6fc6
movl %eax, %eax
movl (%rsp), %ecx
movl $0xfff00000, %edx # imm = 0xFFF00000
andl %edx, %ecx
incl %ecx
movl %ecx, (%rsp)
movq %rbp, 0x8(%rsp)
movq %rax, 0x10(%rsp)
movq %r14, %rdi
movq %r15, %rsi
movq %r12, %rdx
callq 0x1d3c22c
incq %r13
cmpl %r13d, 0x8(%rbx)
ja 0xc37ff8
movq 0x20(%r14), %rax
movl 0x4(%rax), %ebx
leaq 0x4ba8adc(%rip), %rax # 0x57e0b50
addq $0x10, %rax
movq %rax, 0x20(%rsp)
movq 0x40(%rsp), %rsi
testq %rsi, %rsi
je 0xc38091
leaq 0x40(%rsp), %rdi
callq 0x2a758fc
movl %ebx, %eax
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
llvm::AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(llvm::Register, llvm::MachineRegisterInfo const&) const
|
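// Decomposes a G_PTR_ADD with a constant right-hand side into (base, offset); anything else comes back as (Root, 0).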
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
Register Root, const MachineRegisterInfo &MRI) const {
MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
return {Root, 0};
MachineOperand &RHS = RootI->getOperand(2);
std::optional<ValueAndVReg> MaybeOffset =
getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
if (!MaybeOffset)
return {Root, 0};
return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rdx, %r15
movl %esi, %ebx
movl %esi, %edi
movq %rdx, %rsi
callq 0x15e5c06
cmpw $0xd4, 0x44(%rax)
jne 0xc38556
movq %rax, %r14
movq 0x20(%rax), %rax
movl 0x44(%rax), %esi
leaq 0x8(%rsp), %r12
movq %r12, %rdi
movq %r15, %rdx
movl $0x1, %ecx
callq 0x15e51fb
movb 0x18(%r12), %al
cmpb $0x1, %al
jne 0xc3855b
movq 0x20(%r14), %rcx
movl 0x24(%rcx), %ebx
movl 0x10(%rsp), %edx
cmpl $0x40, %edx
ja 0xc38560
movq 0x8(%rsp), %rsi
movl %edx, %ecx
negb %cl
shlq %cl, %rsi
sarq %cl, %rsi
xorl %r14d, %r14d
testl %edx, %edx
cmovneq %rsi, %r14
jmp 0xc38568
xorl %r14d, %r14d
jmp 0xc38587
xorl %r14d, %r14d
jmp 0xc38568
movq 0x8(%rsp), %rcx
movq (%rcx), %r14
testb %al, %al
je 0xc38587
movb $0x0, 0x20(%rsp)
cmpl $0x41, 0x10(%rsp)
jb 0xc38587
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0xc38587
callq 0x7802b0
movl %ebx, %eax
movq %r14, %rdx
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
llvm::AMDGPUInstructionSelector::isFlatScratchBaseLegalSV(llvm::Register) const
|
bool AMDGPUInstructionSelector::isFlatScratchBaseLegalSV(Register Addr) const {
MachineInstr *AddrMI = getDefIgnoringCopies(Addr, *MRI);
if (isNoUnsignedWrap(AddrMI))
return true;
// Starting with GFX12, VADDR and SADDR fields in VSCRATCH can use negative
// values.
if (STI.hasSignedScratchOffsets())
return true;
Register LHS = AddrMI->getOperand(1).getReg();
Register RHS = AddrMI->getOperand(2).getReg();
return KB->signBitIsZero(RHS) && KB->signBitIsZero(LHS);
}
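
The signBitIsZero checks are what make the base legal: two 32-bit addends with clear sign bits cannot wrap an unsigned 32-bit add. A host-side sketch of that argument (illustrative helper, not part of the selector):

#include <cstdint>
// If both sign bits are clear, A and B are each <= 0x7fffffff, so
// A + B <= 0xfffffffe and the unsigned 32-bit sum cannot wrap.
bool addCannotWrapU32(uint32_t A, uint32_t B) {
  return A <= 0x7fffffffu && B <= 0x7fffffffu;
}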
|
pushq %rbp
pushq %rbx
pushq %rax
movl %esi, %eax
movq %rdi, %rbx
movq 0x48(%rdi), %rsi
movl %eax, %edi
callq 0x15e5c06
movq %rax, %rcx
movzwl 0x44(%rax), %edx
movb $0x1, %al
cmpl $0x3d, %edx
je 0xc38a56
cmpl $0xd4, %edx
jne 0xc38a1f
testb $0x8, 0x2d(%rcx)
jne 0xc38a56
movq 0x78(%rbx), %rdx
cmpl $0xa, 0x1f8(%rdx)
jg 0xc38a56
movq 0x20(%rcx), %rax
movl 0x24(%rax), %ebp
movl 0x44(%rax), %esi
movq 0x10(%rbx), %rdi
callq 0x155f902
testb %al, %al
je 0xc38a54
movq 0x10(%rbx), %rdi
movl %ebp, %esi
addq $0x8, %rsp
popq %rbx
popq %rbp
jmp 0x155f902
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUInstructionSelector.cpp
|
bool llvm::MIPatternMatch::BinaryOp_match<llvm::MIPatternMatch::bind_ty<llvm::Register>, llvm::MIPatternMatch::Or<llvm::MIPatternMatch::ConstantMatch<long>, llvm::MIPatternMatch::UnaryOp_match<llvm::MIPatternMatch::ConstantMatch<long>, 19u>>, 212u, false>::match<llvm::Register&>(llvm::MachineRegisterInfo const&, llvm::Register&)
|
bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
MachineInstr *TmpMI;
if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 3) {
return (L.match(MRI, TmpMI->getOperand(1).getReg()) &&
R.match(MRI, TmpMI->getOperand(2).getReg())) ||
(Commutable && (R.match(MRI, TmpMI->getOperand(1).getReg()) &&
L.match(MRI, TmpMI->getOperand(2).getReg())));
}
}
return false;
}
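
A hedged usage fragment for this matcher instantiation, using names from MIPatternMatch.h (opcode 212, the 0xd4 tested in the assembly, is G_PTR_ADD in this build): it binds a base register and constant when the pattern matches, and commutable instantiations would also try the swapped operand order.

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
using namespace llvm::MIPatternMatch;
// Usage sketch, not the code this template was instantiated from.
bool matchPtrAddOfConstant(Register Reg, const MachineRegisterInfo &MRI,
                           Register &Base, int64_t &Cst) {
  return mi_match(Reg, MRI, m_GPtrAdd(m_Reg(Base), m_ICst(Cst)));
}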
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
movl (%rdx), %esi
movq %rbx, %rdi
callq 0x1d835a8
testq %rax, %rax
je 0xc3c5ea
cmpw $0xd4, 0x44(%rax)
jne 0xc3c5ea
movzwl 0x28(%rax), %ecx
movzbl 0x2a(%rax), %edx
shll $0x10, %edx
orl %ecx, %edx
cmpl $0x3, %edx
jne 0xc3c5ea
movq 0x20(%rax), %rax
movl 0x24(%rax), %ecx
movq (%r14), %rdx
movl %ecx, (%rdx)
addq $0x8, %r14
movl 0x44(%rax), %eax
leaq 0x4(%rsp), %rdx
movl %eax, (%rdx)
movq %r14, %rdi
movq %rbx, %rsi
callq 0xc3c5f4
jmp 0xc3c5ec
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/llvm/CodeGen/GlobalISel/MIPatternMatch.h
|
AMDGPUDAGToDAGISel::getHi16Elt(llvm::SDValue) const
|
SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
if (In.isUndef())
return CurDAG->getUNDEF(MVT::i32);
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
SDLoc SL(In);
return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
}
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
SDLoc SL(In);
return CurDAG->getConstant(
C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
}
SDValue Src;
if (isExtractHiElt(In, Src))
return Src;
return SDValue();
}
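
Both constant paths simply reposition a 16-bit payload into the high half of a 32-bit immediate (the shlq $0x10 visible in the assembly); in plain C++:

#include <cstdint>
// Hi16 packing used for both the integer and FP-bitcast constant cases
// above (hypothetical helper; the DAG value is later typed as i32).
uint32_t packHi16(uint16_t Imm) { return (uint32_t)Imm << 16; }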
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movl 0x18(%rsi), %eax
cmpl $0xb, %eax
je 0xc3d7b5
cmpl $0x23, %eax
je 0xc3d7b5
cmpl $0x33, %eax
jne 0xc3d7ba
movq 0x38(%r14), %rdi
movl $0x7, %esi
xorl %edx, %edx
callq 0x9fbc02
movq %rax, %r12
movl %edx, %r15d
jmp 0xc3d934
movq %rbx, %rbp
jmp 0xc3d7bc
xorl %ebp, %ebp
testq %rbp, %rbp
je 0xc3d7f9
movq 0x48(%rbx), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0xc3d7de
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
movl %eax, 0x10(%rsp)
movq 0x38(%r14), %rdi
movq 0x58(%rbp), %rax
cmpl $0x41, 0x20(%rax)
jb 0xc3d7fb
movq 0x18(%rax), %rax
jmp 0xc3d7ff
jmp 0xc3d83e
addq $0x18, %rax
movq (%rax), %rsi
shlq $0x10, %rsi
movl $0x0, (%rsp)
leaq 0x8(%rsp), %r13
movq %r13, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r12
movl %edx, %r15d
movq (%r13), %rsi
testq %rsi, %rsi
je 0xc3d83e
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
testq %rbp, %rbp
jne 0xc3d934
movl 0x18(%rbx), %eax
cmpl $0x24, %eax
je 0xc3d854
cmpl $0xc, %eax
jne 0xc3d859
movq %rbx, %r13
jmp 0xc3d85c
xorl %r13d, %r13d
testq %r13, %r13
je 0xc3d904
movq 0x48(%rbx), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0xc3d882
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
movl %eax, 0x10(%rsp)
movq 0x38(%r14), %r14
movq 0x58(%r13), %rsi
addq $0x18, %rsi
leaq 0x18(%rsp), %r15
movq %r15, %rdi
callq 0x815f90
cmpl $0x41, 0x8(%r15)
jb 0xc3d8ae
movq 0x18(%rsp), %r15
movq (%r15), %rsi
shlq $0x10, %rsi
movl $0x0, (%rsp)
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r12
movl %edx, %r15d
cmpl $0x41, 0x20(%rsp)
jb 0xc3d8f0
movq 0x18(%rsp), %rdi
testq %rdi, %rdi
je 0xc3d8f0
callq 0x7802b0
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0xc3d904
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
testq %r13, %r13
jne 0xc3d934
xorl %r15d, %r15d
leaq 0x8(%rsp), %rsi
movq %r15, (%rsi)
movl %r15d, 0x8(%rsi)
movq %rbx, %rdi
callq 0xc457a8
movl $0x0, %r12d
testb %al, %al
je 0xc3d934
movl 0x10(%rsp), %r15d
movq 0x8(%rsp), %r12
movq %r12, %rax
movl %r15d, %edx
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
|
AMDGPUDAGToDAGISel::SelectMAD_64_32(llvm::SDNode*)
|
void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
SDLoc SL(N);
bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
unsigned Opc;
if (Subtarget->hasMADIntraFwdBug())
Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
: AMDGPU::V_MAD_U64_U32_gfx11_e64;
else
Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
Clamp };
CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}
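
The two nested selections compile to conditional moves; a branchless sketch using the opcode immediates visible in the assembly (which immediate corresponds to which enum name is inferred from the source, so treat the mapping as illustrative):

// Hypothetical helper mirroring the cmov pair in the compiled code.
unsigned pickMadOpcode(bool Signed, bool HasMADIntraFwdBug) {
  unsigned Gfx11 = Signed ? 0x1f8a : 0x1f9c; // gfx11_e64 candidates
  unsigned Plain = Signed ? 0x1f89 : 0x1f9b; // e64 candidates
  return HasMADIntraFwdBug ? Gfx11 : Plain;
}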
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x78, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq 0x48(%rsi), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0xc3fcaf
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
leaq 0x10(%rsp), %r15
movl %eax, 0x8(%r15)
cmpl $0x22d, 0x18(%rbx) # imm = 0x22D
movq 0x38(%r14), %rdi
movq 0x108(%r14), %rax
movl $0x1f8a, %ecx # imm = 0x1F8A
movl $0x1f9c, %edx # imm = 0x1F9C
cmovel %ecx, %edx
movl $0x1f89, %ecx # imm = 0x1F89
movl $0x1f9b, %ebp # imm = 0x1F9B
cmovel %ecx, %ebp
cmpb $0x0, 0x30b(%rax)
cmovnel %edx, %ebp
movl $0x0, (%rsp)
xorl %esi, %esi
movq %r15, %rdx
movl $0x2, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq 0x28(%rbx), %rcx
movl 0x8(%rcx), %esi
leaq 0x30(%rsp), %r9
movl %esi, 0x8(%r9)
movq (%rcx), %rsi
movq %rsi, (%r9)
movq 0x28(%rcx), %rsi
movq %rsi, 0x10(%r9)
movl 0x30(%rcx), %esi
movl %esi, 0x18(%r9)
movq 0x50(%rcx), %rsi
movq %rsi, 0x20(%r9)
movl 0x58(%rcx), %ecx
movl %ecx, 0x28(%r9)
movq %rax, 0x30(%r9)
movl %edx, 0x38(%r9)
movq 0x38(%r14), %rdi
movq 0x30(%rbx), %rcx
movzwl 0x42(%rbx), %r8d
movq %r9, 0x20(%rsp)
movq $0x4, 0x28(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rsi
movl %ebp, %edx
callq 0x178e7a0
movq (%r15), %rsi
testq %rsi, %rsi
je 0xc3fd8c
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
addq $0x78, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
|
AMDGPUDAGToDAGISel::SelectS_BFE(llvm::SDNode*)
|
void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
switch (N->getOpcode()) {
case ISD::AND:
if (N->getOperand(0).getOpcode() == ISD::SRL) {
// "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
// Predicate: isMask(mask)
const SDValue &Srl = N->getOperand(0);
ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (Shift && Mask) {
uint32_t ShiftVal = Shift->getZExtValue();
uint32_t MaskVal = Mask->getZExtValue();
if (isMask_32(MaskVal)) {
uint32_t WidthVal = llvm::popcount(MaskVal);
ReplaceNode(N, getBFE32(false, SDLoc(N), Srl.getOperand(0), ShiftVal,
WidthVal));
return;
}
}
}
break;
case ISD::SRL:
if (N->getOperand(0).getOpcode() == ISD::AND) {
// "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
// Predicate: isMask(mask >> b)
const SDValue &And = N->getOperand(0);
ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
if (Shift && Mask) {
uint32_t ShiftVal = Shift->getZExtValue();
uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
if (isMask_32(MaskVal)) {
uint32_t WidthVal = llvm::popcount(MaskVal);
ReplaceNode(N, getBFE32(false, SDLoc(N), And.getOperand(0), ShiftVal,
WidthVal));
return;
}
}
} else if (N->getOperand(0).getOpcode() == ISD::SHL) {
SelectS_BFEFromShifts(N);
return;
}
break;
case ISD::SRA:
if (N->getOperand(0).getOpcode() == ISD::SHL) {
SelectS_BFEFromShifts(N);
return;
}
break;
case ISD::SIGN_EXTEND_INREG: {
// sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
SDValue Src = N->getOperand(0);
if (Src.getOpcode() != ISD::SRL)
break;
const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
if (!Amt)
break;
unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
ReplaceNode(N, getBFE32(true, SDLoc(N), Src.getOperand(0),
Amt->getZExtValue(), Width));
return;
}
}
SelectCode(N);
}
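
The two AND/SRL folds share one scalar fact: if Mask is a run of low bits, (a >> Shift) & Mask is a bitfield extract at offset Shift of width popcount(Mask). A standalone form of the check (C++20 for std::popcount; the assembly inlines the equivalent SWAR popcount):

#include <bit>
#include <cstdint>
// Hypothetical helper: reports the BFE width when Mask is a low-bit run.
bool computeBFEWidth(uint32_t Mask, uint32_t &Width) {
  if (Mask == 0 || ((Mask + 1) & Mask) != 0) // isMask_32: low-bit run only
    return false;
  Width = std::popcount(Mask);
  return true;
}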
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movl 0x18(%rsi), %eax
cmpl $0xbe, %eax
jg 0xc40073
cmpl $0xb9, %eax
je 0xc400f4
cmpl $0xbe, %eax
jne 0xc40363
movq 0x28(%rbx), %rax
movq (%rax), %rax
cmpl $0xbd, 0x18(%rax)
je 0xc401cd
jmp 0xc40363
cmpl $0xbf, %eax
je 0xc4015d
cmpl $0xda, %eax
jne 0xc40363
movq 0x28(%rbx), %rax
movq (%rax), %r13
cmpl $0xbf, 0x18(%r13)
jne 0xc40363
movq 0x28(%r13), %rcx
movq 0x28(%rcx), %rbp
movl 0x18(%rbp), %ecx
cmpl $0x23, %ecx
je 0xc400b5
cmpl $0xb, %ecx
je 0xc400b5
xorl %ebp, %ebp
testq %rbp, %rbp
je 0xc40363
movq 0x28(%rax), %rax
movzbl 0x58(%rax), %ecx
movq 0x60(%rax), %rax
movb %cl, 0x18(%rsp)
movq %rax, 0x20(%rsp)
testq %rcx, %rcx
je 0xc40216
shll $0x4, %ecx
leaq 0x258ec5a(%rip), %rdx # 0x31ced40
movq -0x10(%rcx,%rdx), %rax
movb -0x8(%rcx,%rdx), %dl
jmp 0xc40220
movq 0x28(%rbx), %r12
movq (%r12), %rax
cmpl $0xbf, 0x18(%rax)
jne 0xc40363
movq 0x28(%rax), %rax
movq 0x28(%rax), %rax
movl 0x18(%rax), %ecx
cmpl $0x23, %ecx
je 0xc40120
cmpl $0xb, %ecx
je 0xc40120
xorl %eax, %eax
movq 0x28(%r12), %rcx
movl 0x18(%rcx), %edx
cmpl $0x23, %edx
je 0xc40134
cmpl $0xb, %edx
je 0xc40134
xorl %ecx, %ecx
testq %rax, %rax
je 0xc40363
testq %rcx, %rcx
je 0xc40363
movq 0x58(%rcx), %rcx
cmpl $0x41, 0x20(%rcx)
jb 0xc401e6
movq 0x18(%rcx), %rcx
jmp 0xc401ea
movq 0x28(%rbx), %r13
movq (%r13), %rcx
movl 0x18(%rcx), %eax
cmpl $0xbd, %eax
je 0xc401cd
cmpl $0xb9, %eax
jne 0xc40363
movq 0x28(%r13), %rax
movl 0x18(%rax), %edx
cmpl $0x23, %edx
je 0xc4018d
cmpl $0xb, %edx
je 0xc4018d
xorl %eax, %eax
movq 0x28(%rcx), %rcx
movq 0x28(%rcx), %rdx
movl 0x18(%rdx), %ecx
cmpl $0x23, %ecx
je 0xc401a4
cmpl $0xb, %ecx
je 0xc401a4
xorl %edx, %edx
testq %rax, %rax
je 0xc40363
testq %rdx, %rdx
je 0xc40363
movq 0x58(%rax), %rax
cmpl $0x41, 0x20(%rax)
jb 0xc40297
movq 0x18(%rax), %rax
jmp 0xc4029b
movq %r14, %rdi
movq %rbx, %rsi
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0xc4559e
addq $0x18, %rcx
movl (%rcx), %ecx
testl %ecx, %ecx
je 0xc40363
leal 0x1(%rcx), %edx
andl %ecx, %edx
jne 0xc40363
movq 0x58(%rax), %rax
cmpl $0x41, 0x20(%rax)
jb 0xc40388
movq 0x18(%rax), %rax
jmp 0xc4038c
leaq 0x18(%rsp), %rdi
callq 0x1e89e20
leaq 0x8(%rsp), %r12
movq %rax, (%r12)
movb %dl, 0x8(%r12)
movq %r12, %rdi
callq 0x2b60e74
movq %rax, %r15
movq 0x48(%rbx), %rsi
movq %rsi, (%r12)
testq %rsi, %rsi
je 0xc40255
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
movl %eax, 0x10(%rsp)
movq 0x28(%r13), %rax
movq (%rax), %rcx
movl 0x8(%rax), %r8d
movq 0x58(%rbp), %rax
cmpl $0x41, 0x20(%rax)
jb 0xc40277
movq 0x18(%rax), %rax
jmp 0xc4027b
addq $0x18, %rax
movl (%rax), %r9d
movl %r15d, (%rsp)
leaq 0x8(%rsp), %r15
movq %r14, %rdi
movl $0x1, %esi
movq %r15, %rdx
jmp 0xc40407
addq $0x18, %rax
movq (%rax), %rcx
movq 0x58(%rdx), %rax
cmpl $0x41, 0x20(%rax)
jb 0xc402ae
movq 0x18(%rax), %rax
jmp 0xc402b2
addq $0x18, %rax
movq (%rax), %rax
shrq %cl, %rax
testl %eax, %eax
je 0xc40363
leal 0x1(%rax), %edx
andl %eax, %edx
jne 0xc40363
movl %ecx, %r15d
movl %eax, %ecx
shrl %ecx
andl $0x55555555, %ecx # imm = 0x55555555
subl %ecx, %eax
movl $0x33333333, %ecx # imm = 0x33333333
movl %eax, %edx
andl %ecx, %edx
shrl $0x2, %eax
andl %ecx, %eax
addl %edx, %eax
movl %eax, %ecx
shrl $0x4, %ecx
addl %eax, %ecx
andl $0xf0f0f0f, %ecx # imm = 0xF0F0F0F
imull $0x1010101, %ecx, %ebp # imm = 0x1010101
shrl $0x18, %ebp
movq 0x48(%rbx), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0xc4031d
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
leaq 0x8(%rsp), %r12
movl %eax, 0x8(%r12)
movq (%r13), %rax
movq 0x28(%rax), %rax
movq (%rax), %rcx
movl 0x8(%rax), %r8d
movl %ebp, (%rsp)
movq %r14, %rdi
xorl %esi, %esi
movq %r12, %rdx
movl %r15d, %r9d
callq 0xc3f9e8
movq %r14, %rdi
movq %rbx, %rsi
movq %rax, %rdx
callq 0x9db81c
movq (%r12), %rsi
jmp 0xc4041d
leaq 0x2adbec6(%rip), %rdx # 0x371c230
movq %r14, %rdi
movq %rbx, %rsi
movl $0x7cf87, %ecx # imm = 0x7CF87
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x17a709c
addq $0x18, %rax
movl (%rax), %ebp
movl %ecx, %eax
shrl %eax
andl $0x55555555, %eax # imm = 0x55555555
subl %eax, %ecx
movl $0x33333333, %eax # imm = 0x33333333
movl %ecx, %edx
andl %eax, %edx
shrl $0x2, %ecx
andl %eax, %ecx
addl %edx, %ecx
movl %ecx, %eax
shrl $0x4, %eax
addl %ecx, %eax
andl $0xf0f0f0f, %eax # imm = 0xF0F0F0F
imull $0x1010101, %eax, %r13d # imm = 0x1010101
shrl $0x18, %r13d
movq 0x48(%rbx), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0xc403dd
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
leaq 0x8(%rsp), %r15
movl %eax, 0x8(%r15)
movq (%r12), %rax
movq 0x28(%rax), %rax
movq (%rax), %rcx
movl 0x8(%rax), %r8d
movl %r13d, (%rsp)
movq %r14, %rdi
xorl %esi, %esi
movq %r15, %rdx
movl %ebp, %r9d
callq 0xc3f9e8
movq %r14, %rdi
movq %rbx, %rsi
movq %rax, %rdx
callq 0x9db81c
movq (%r15), %rsi
testq %rsi, %rsi
je 0xc4042c
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
|
AMDGPUDAGToDAGISel::SelectScratchSVAddr(llvm::SDNode*, llvm::SDValue, llvm::SDValue&, llvm::SDValue&, llvm::SDValue&) const
|
bool AMDGPUDAGToDAGISel::SelectScratchSVAddr(SDNode *N, SDValue Addr,
SDValue &VAddr, SDValue &SAddr,
SDValue &Offset) const {
int64_t ImmOffset = 0;
SDValue LHS, RHS;
SDValue OrigAddr = Addr;
if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
const SIInstrInfo *TII = Subtarget->getInstrInfo();
if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true)) {
Addr = LHS;
ImmOffset = COffsetVal;
} else if (!LHS->isDivergent() && COffsetVal > 0) {
SDLoc SL(N);
// saddr + large_offset -> saddr + (vaddr = large_offset & ~MaxOffset) +
// (large_offset & MaxOffset);
int64_t SplitImmOffset, RemainderOffset;
std::tie(SplitImmOffset, RemainderOffset)
= TII->splitFlatOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true);
if (isUInt<32>(RemainderOffset)) {
SDNode *VMov = CurDAG->getMachineNode(
AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
VAddr = SDValue(VMov, 0);
SAddr = LHS;
if (!isFlatScratchBaseLegal(Addr))
return false;
if (checkFlatScratchSVSSwizzleBug(VAddr, SAddr, SplitImmOffset))
return false;
Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(), MVT::i16);
return true;
}
}
}
if (Addr.getOpcode() != ISD::ADD)
return false;
LHS = Addr.getOperand(0);
RHS = Addr.getOperand(1);
if (!LHS->isDivergent() && RHS->isDivergent()) {
SAddr = LHS;
VAddr = RHS;
} else if (!RHS->isDivergent() && LHS->isDivergent()) {
SAddr = RHS;
VAddr = LHS;
} else {
return false;
}
if (OrigAddr != Addr) {
if (!isFlatScratchBaseLegalSVImm(OrigAddr))
return false;
} else {
if (!isFlatScratchBaseLegalSV(OrigAddr))
return false;
}
if (checkFlatScratchSVSSwizzleBug(VAddr, SAddr, ImmOffset))
return false;
SAddr = SelectSAddrFI(CurDAG, SAddr);
Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
return true;
}
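
The isUInt<32>(RemainderOffset) guard compiles to a single shift-and-test (shrq $0x20; setne in the assembly); a scalar equivalent:

#include <cstdint>
// Remainder offsets are only materialized into a V_MOV_B32 when they fit
// in an unsigned 32-bit immediate (hypothetical helper).
bool fitsInUInt32(int64_t X) { return ((uint64_t)X >> 32) == 0; }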
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %r9, %rbp
movq %r8, 0x68(%rsp)
movl %ecx, %r14d
movq %rdx, %r12
movq %rsi, 0x38(%rsp)
movq 0xd0(%rsp), %r15
xorl %ebx, %ebx
leaq 0x50(%rsp), %r13
movq %rbx, (%r13)
movl %ebx, 0x8(%r13)
leaq 0x40(%rsp), %r8
movq %rbx, (%r8)
movl %ebx, 0x8(%r8)
movq %rdi, 0x18(%rsp)
movq %rdx, %rsi
movl %ecx, %edx
movq %r13, %rcx
callq 0xc4106c
movq %r12, %rcx
movl %r14d, 0x14(%rsp)
movl %r14d, %edx
testb %al, %al
je 0xc4489b
movq %rbp, 0x60(%rsp)
movq 0x40(%rsp), %rax
movq 0x58(%rax), %rcx
movl 0x20(%rcx), %eax
cmpl $0x40, %eax
ja 0xc4466e
movq 0x18(%rcx), %rdx
movl %eax, %ecx
negb %cl
shlq %cl, %rdx
sarq %cl, %rdx
xorl %ebp, %ebp
testl %eax, %eax
cmovneq %rdx, %rbp
jmp 0xc44675
movq 0x18(%rcx), %rax
movq (%rax), %rbp
movq 0x18(%rsp), %rax
movl $0x320, %r15d # imm = 0x320
addq 0x108(%rax), %r15
movl $0x1, %ecx
movq %r15, %rdi
movq %rbp, %rsi
movl $0x5, %edx
callq 0xb73182
movq 0x50(%rsp), %rcx
testb %al, %al
je 0xc446b4
movb $0x1, %sil
movl 0x58(%rsp), %edx
movq %rbp, %rbx
jmp 0xc44885
xorl %ebx, %ebx
movb $0x1, %sil
testq %rbp, %rbp
jle 0xc44804
movb 0x1e(%rcx), %al
movq %r12, %rcx
movl 0x14(%rsp), %edx
andb $0x4, %al
jne 0xc44885
movq 0x38(%rsp), %rbx
movq 0x48(%rbx), %rsi
movq %rsi, 0x20(%rsp)
testq %rsi, %rsi
je 0xc446f6
leaq 0x20(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
movl %eax, 0x28(%rsp)
movl $0x1, %ecx
movq %r15, %rdi
movq %rbp, %rsi
movl $0x5, %edx
callq 0xb7328a
movq %rax, %rbx
movq %rdx, %rax
shrq $0x20, %rax
setne 0x38(%rsp)
jne 0xc44863
movq 0x18(%rsp), %rax
movq 0x38(%rax), %r15
xorps %xmm0, %xmm0
leaq 0x70(%rsp), %r14
movaps %xmm0, (%r14)
movl $0x0, (%rsp)
movq %r15, %rdi
movq %rdx, %rsi
movq %r14, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x88(%rsp)
movl %edx, 0x90(%rsp)
movups 0x88(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x20(%rsp), %rdx
movq %r15, %rdi
movl $0x20f3, %esi # imm = 0x20F3
movl $0x7, %ecx
xorl %r8d, %r8d
callq 0x178f448
movq %rax, %r15
movq (%r14), %rsi
testq %rsi, %rsi
movq 0x60(%rsp), %r14
je 0xc447ae
leaq 0x70(%rsp), %rdi
callq 0x2a758fc
movq 0x68(%rsp), %rbp
movq %r15, (%rbp)
movl $0x0, 0x8(%rbp)
movl 0x58(%rsp), %eax
movl %eax, 0x8(%r14)
movq 0x50(%rsp), %rax
movq %rax, (%r14)
movq 0x18(%rsp), %r15
movq %r15, %rdi
movq %r12, %rsi
callq 0xc41d2a
testb %al, %al
je 0xc447ff
movq (%rbp), %rsi
movl 0x8(%rbp), %edx
movq (%r14), %rcx
movl 0x8(%r14), %r8d
movq %r15, %rdi
movq %rbx, %r9
callq 0xc443ec
testb %al, %al
je 0xc44810
xorl %r14d, %r14d
jmp 0xc44863
movq %r12, %rcx
movl 0x14(%rsp), %edx
jmp 0xc446af
movq 0x38(%r15), %rdi
xorps %xmm0, %xmm0
leaq 0x70(%rsp), %r14
movaps %xmm0, (%r14)
movl $0x0, (%rsp)
movq %rbx, %rsi
movq %r14, %rdx
movl $0x6, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq 0xd0(%rsp), %rcx
movq %rax, (%rcx)
movl %edx, 0x8(%rcx)
movq (%r14), %rsi
testq %rsi, %rsi
je 0xc44860
leaq 0x70(%rsp), %rdi
callq 0x2a758fc
movb $0x1, %r14b
movq 0x20(%rsp), %rsi
testq %rsi, %rsi
je 0xc44877
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
xorl %ebx, %ebx
movq %r12, %rcx
movl 0x14(%rsp), %edx
movb 0x38(%rsp), %sil
testb %sil, %sil
movq 0xd0(%rsp), %r15
movq 0x60(%rsp), %rbp
je 0xc44975
cmpl $0x38, 0x18(%rcx)
jne 0xc44972
movq 0x28(%rcx), %rax
movl 0x8(%rax), %esi
movl %esi, 0x58(%rsp)
movq (%rax), %rsi
movq %rsi, 0x50(%rsp)
movl 0x30(%rax), %edi
movl %edi, 0x48(%rsp)
movq 0x28(%rax), %rdi
movq %rdi, 0x40(%rsp)
movb 0x1e(%rsi), %sil
testb $0x4, %sil
jne 0xc448dd
movq 0x40(%rsp), %rdi
testb $0x4, 0x1e(%rdi)
jne 0xc448ff
xorl %r14d, %r14d
testb $0x4, %sil
je 0xc44975
movq 0x40(%rsp), %rsi
movb 0x1e(%rsi), %sil
andb $0x4, %sil
jne 0xc44975
addq $0x28, %rax
jmp 0xc44904
leaq 0x40(%rsp), %r13
movq 0x18(%rsp), %r14
movq (%rax), %rsi
movl 0x8(%rax), %eax
movl %eax, 0x8(%rbp)
movq %rsi, (%rbp)
movl 0x8(%r13), %eax
movq 0x68(%rsp), %rsi
movl %eax, 0x8(%rsi)
movq (%r13), %rax
movq %rsi, %r13
movq %rax, (%rsi)
cmpq %r12, %rcx
jne 0xc44944
cmpl 0x14(%rsp), %edx
jne 0xc44944
movq %r14, %rdi
movq %r12, %rsi
callq 0xc41de8
jmp 0xc4494f
movq %r14, %rdi
movq %r12, %rsi
callq 0xc41e6c
testb %al, %al
je 0xc44972
movq (%r13), %rsi
movl 0x8(%r13), %edx
movq (%rbp), %rcx
movl 0x8(%rbp), %r8d
movq %r14, %rdi
movq %rbx, %r9
callq 0xc443ec
testb %al, %al
je 0xc4498e
xorl %r14d, %r14d
andb $0x1, %r14b
movl %r14d, %eax
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x38(%r14), %rdi
movq (%rbp), %rsi
movl 0x8(%rbp), %edx
callq 0xc442ca
movq %rax, (%rbp)
movl %edx, 0x8(%rbp)
movq 0x38(%r14), %rdi
xorps %xmm0, %xmm0
leaq 0x20(%rsp), %r14
movaps %xmm0, (%r14)
movl $0x0, (%rsp)
movq %rbx, %rsi
movq %r14, %rdx
movl $0x6, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq %rax, (%r15)
movl %edx, 0x8(%r15)
movq (%r14), %rsi
testq %rsi, %rsi
je 0xc449ee
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
movb $0x1, %r14b
jmp 0xc44975
nop
|
/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
|
AMDGPUDAGToDAGISel::SelectMOVRELOffset(llvm::SDValue, llvm::SDValue&, llvm::SDValue&) const
|
bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
SDValue &Base,
SDValue &Offset) const {
SDLoc DL(Index);
if (CurDAG->isBaseWithConstantOffset(Index)) {
SDValue N0 = Index.getOperand(0);
SDValue N1 = Index.getOperand(1);
ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
// (add n0, c0)
    // Don't peel off the offset (c0) if doing so could leave
    // the base (n0) negative.
// (or n0, |c0|) can never change a sign given isBaseWithConstantOffset.
if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) ||
(Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
Base = N0;
Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
return true;
}
}
if (isa<ConstantSDNode>(Index))
return false;
Base = Index;
Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
return true;
}
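
The guard is easiest to see with a counterexample: Index = n0 + c0 with n0 = -4, c0 = 8 is a legal index 4, yet peeling would leave the out-of-range base -4; OR bases are safe because the operand bits are disjoint and stripping c0 cannot flip n0's sign. A scalar form of the condition (hypothetical helper):

#include <cstdint>
// Mirrors the three-way guard above: non-positive offset, provably
// non-negative base, or a disjoint-bits OR.
bool canPeelOffset(int64_t C0, bool N0SignBitZero, bool IsOr) {
  return C0 <= 0 || N0SignBitZero || (IsOr && C0 >= 0);
}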
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %r8, 0x20(%rsp)
movq %rcx, %r14
movl %edx, %ebp
movq %rsi, %r12
movq %rdi, %r15
movq 0x48(%rsi), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0xc45433
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r12), %eax
movl %eax, 0x18(%rsp)
movq 0x38(%r15), %rdi
movq %r12, %rsi
movl %ebp, %edx
callq 0x17782b0
testb %al, %al
je 0xc4551a
movq 0x28(%r12), %rax
movq 0x28(%rax), %r8
movq 0x58(%r8), %rcx
movl 0x20(%rcx), %edx
cmpl $0x40, %edx
ja 0xc4547f
movq 0x18(%rcx), %rsi
movl %edx, %ecx
negb %cl
shlq %cl, %rsi
sarq %cl, %rsi
xorl %ecx, %ecx
testl %edx, %edx
cmovneq %rsi, %rcx
jmp 0xc45486
movq 0x18(%rcx), %rcx
movq (%rcx), %rcx
movq (%rax), %rsi
movl 0x8(%rax), %r13d
testq %rcx, %rcx
jle 0xc454ff
movq 0x38(%r15), %rdi
movl %r13d, %edx
xorl %ecx, %ecx
movl %ebp, 0xc(%rsp)
movq %r14, %rbp
movq %r8, %r14
movq %r15, %rbx
movq %rsi, %r15
callq 0x176a148
movq %r15, %rsi
movq %rbx, %r15
movq %r14, %r8
movq %rbp, %r14
movl 0xc(%rsp), %ebp
testb %al, %al
jne 0xc454ff
cmpl $0xba, 0x18(%r12)
jne 0xc4551a
movq 0x58(%r8), %rcx
movl 0x20(%rcx), %eax
cmpl $0x40, %eax
ja 0xc454f3
movq 0x18(%rcx), %rdi
movl %eax, %ecx
negb %cl
shlq %cl, %rdi
sarq %cl, %rdi
xorl %ecx, %ecx
testl %eax, %eax
cmovneq %rdi, %rcx
jmp 0xc454fa
movq 0x18(%rcx), %rax
movq (%rax), %rcx
testq %rcx, %rcx
js 0xc4551a
movq %rsi, (%r14)
movl %r13d, 0x8(%r14)
movq 0x38(%r15), %rdi
movq 0x58(%r8), %rax
cmpl $0x41, 0x20(%rax)
jb 0xc45546
movq 0x18(%rax), %rax
jmp 0xc4554a
movl 0x18(%r12), %eax
xorl %ebx, %ebx
cmpl $0xb, %eax
je 0xc45579
cmpl $0x23, %eax
je 0xc45579
movq %r12, (%r14)
movl %ebp, 0x8(%r14)
movq 0x38(%r15), %rdi
movl $0x0, (%rsp)
leaq 0x10(%rsp), %rdx
xorl %esi, %esi
jmp 0xc45559
addq $0x18, %rax
movq (%rax), %rsi
movl $0x0, (%rsp)
leaq 0x10(%rsp), %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq 0x20(%rsp), %rcx
movq %rax, (%rcx)
movl %edx, 0x8(%rcx)
movb $0x1, %bl
movq 0x10(%rsp), %rsi
testq %rsi, %rsi
je 0xc4558d
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movl %ebx, %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
|
buildRegSequence16(llvm::SmallVectorImpl<llvm::SDValue>&, llvm::SelectionDAG*, llvm::SDLoc const&)
|
static MachineSDNode *buildRegSequence16(SmallVectorImpl<SDValue> &Elts,
llvm::SelectionDAG *CurDAG,
const SDLoc &DL) {
SmallVector<SDValue, 8> PackedElts;
assert("unhandled Reg sequence size" &&
(Elts.size() == 8 || Elts.size() == 16));
// Pack 16-bit elements in pairs into 32-bit register. If both elements are
// unpacked from 32-bit source use it, otherwise pack them using v_perm.
for (unsigned i = 0; i < Elts.size(); i += 2) {
SDValue LoSrc = stripExtractLoElt(stripBitcast(Elts[i]));
SDValue HiSrc;
if (isExtractHiElt(Elts[i + 1], HiSrc) && LoSrc == HiSrc) {
PackedElts.push_back(HiSrc);
} else {
SDValue PackLoLo = CurDAG->getTargetConstant(0x05040100, DL, MVT::i32);
MachineSDNode *Packed =
CurDAG->getMachineNode(AMDGPU::V_PERM_B32_e64, DL, MVT::i32,
{Elts[i + 1], Elts[i], PackLoLo});
PackedElts.push_back(SDValue(Packed, 0));
}
}
return buildRegSequence32(PackedElts, CurDAG, DL);
}
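
The 0x05040100 selector asks V_PERM_B32 for bytes {0, 1, 4, 5} of the {src0, src1} byte concatenation, i.e. the low halves of both inputs packed into one dword. A software model of that one selector (the real V_PERM also gives selector values >= 8 special meanings, ignored here):

#include <cstdint>
// Result: low half = lo16(Lo) (src1), high half = lo16(Hi) (src0).
uint32_t permPackLoLo(uint32_t Hi /*src0*/, uint32_t Lo /*src1*/) {
  uint64_t Bytes = ((uint64_t)Hi << 32) | Lo; // src1 supplies bytes 0..3
  uint32_t Sel = 0x05040100, R = 0;
  for (int I = 0; I < 4; ++I) {
    unsigned S = (Sel >> (8 * I)) & 0xff;   // selector byte for result byte I
    R |= (uint32_t)((Bytes >> (8 * S)) & 0xff) << (8 * I);
  }
  return R;
}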
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x108, %rsp # imm = 0x108
movq %rdx, 0x20(%rsp)
movq %rsi, 0x18(%rsp)
leaq 0x88(%rsp), %rcx
movq %rcx, -0x10(%rcx)
movabsq $0x800000000, %rax # imm = 0x800000000
movq %rax, -0x8(%rcx)
cmpl $0x0, 0x8(%rdi)
je 0xc47557
movq %rdi, %r15
movl $0x2, %r13d
xorl %ebx, %ebx
movq (%r15), %rcx
shlq $0x4, %rbx
movq (%rcx,%rbx), %rax
movl 0x8(%rcx,%rbx), %ecx
movq %rax, 0x38(%rsp)
movl %ecx, 0x40(%rsp)
cmpl $0xe6, 0x18(%rax)
leaq 0x38(%rsp), %rcx
jne 0xc4743d
movq 0x28(%rax), %rcx
movq (%rcx), %rdi
movl 0x8(%rcx), %esi
callq 0xc46d5c
movq %rax, %r12
movl %edx, %ebp
movq $0x0, 0x28(%rsp)
movl $0x0, 0x30(%rsp)
leal -0x1(%r13), %r14d
movq (%r15), %rax
shlq $0x4, %r14
movq (%rax,%r14), %rdi
leaq 0x28(%rsp), %rsi
callq 0xc457a8
testb %al, %al
je 0xc474a0
movq 0x28(%rsp), %rsi
cmpq %rsi, %r12
sete %al
movl 0x30(%rsp), %edx
cmpl %edx, %ebp
sete %cl
andb %al, %cl
cmpb $0x1, %cl
jne 0xc474a0
leaq 0x78(%rsp), %rdi
jmp 0xc4753e
movl $0x0, (%rsp)
movl $0x5040100, %esi # imm = 0x5040100
movq 0x18(%rsp), %rbp
movq %rbp, %rdi
movq 0x20(%rsp), %r12
movq %r12, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq (%r15), %rcx
movl 0x8(%rcx,%r14), %esi
movl %esi, 0x40(%rsp)
movq (%rcx,%r14), %rsi
movq %rsi, 0x38(%rsp)
movl 0x8(%rcx,%rbx), %esi
leaq 0x48(%rsp), %rdi
movl %esi, 0x8(%rdi)
movq (%rcx,%rbx), %rcx
movq %rcx, (%rdi)
movq %rax, 0x58(%rsp)
movl %edx, 0x60(%rsp)
leaq 0x38(%rsp), %rax
movq %rax, 0x68(%rsp)
movq $0x3, 0x70(%rsp)
movups 0x68(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbp, %rdi
movl $0x2168, %esi # imm = 0x2168
movq %r12, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
callq 0x178f5b0
leaq 0x78(%rsp), %rdi
movq %rax, %rsi
xorl %edx, %edx
callq 0x9e21b4
movl %r13d, %ebx
leal 0x2(%r13), %eax
cmpl %r13d, 0x8(%r15)
movl %eax, %r13d
ja 0xc47413
leaq 0x78(%rsp), %r15
movq %r15, %rdi
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %rdx
callq 0xc4759b
movq %rax, %rbx
movq (%r15), %rdi
leaq 0x88(%rsp), %rax
cmpq %rax, %rdi
je 0xc47586
callq 0x780910
movq %rbx, %rax
addq $0x108, %rsp # imm = 0x108
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
|
AMDGPUDAGToDAGISel::SelectWMMAModsF32NegAbs(llvm::SDValue, llvm::SDValue&, llvm::SDValue&) const
|
bool AMDGPUDAGToDAGISel::SelectWMMAModsF32NegAbs(SDValue In, SDValue &Src,
SDValue &SrcMods) const {
Src = In;
unsigned Mods = SISrcMods::OP_SEL_1;
SmallVector<SDValue, 8> EltsF32;
if (auto *BV = dyn_cast<BuildVectorSDNode>(stripBitcast(In))) {
assert(BV->getNumOperands() > 0);
    // Based on the first element, decide which modifier to match: neg or abs.
SDValue ElF32 = stripBitcast(BV->getOperand(0));
unsigned ModOpcode =
(ElF32.getOpcode() == ISD::FNEG) ? ISD::FNEG : ISD::FABS;
for (unsigned i = 0; i < BV->getNumOperands(); ++i) {
SDValue ElF32 = stripBitcast(BV->getOperand(i));
if (ElF32.getOpcode() != ModOpcode)
break;
EltsF32.push_back(ElF32.getOperand(0));
}
// All elements had ModOpcode modifier
if (BV->getNumOperands() == EltsF32.size())
selectWMMAModsNegAbs(ModOpcode, Mods, EltsF32, Src, CurDAG, SDLoc(In),
32);
}
SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
return true;
}
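
The loop breaks on the first element whose stripped opcode differs, so the size comparison afterwards is an all-elements-matched check; the same shape in scalar form (illustrative helper):

#include <cstddef>
// Returns true only if every entry carries the opcode chosen from entry 0.
bool allSameOpcode(const unsigned *Ops, size_t N) {
  for (size_t I = 1; I < N; ++I)
    if (Ops[I] != Ops[0])
      return false;
  return N != 0;
}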
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc8, %rsp
movq %r8, 0x30(%rsp)
movq %rsi, %rbx
movq %rdi, %r15
movq %rsi, (%rcx)
movl %edx, 0x8(%rcx)
movl $0x8, 0xc(%rsp)
leaq 0x48(%rsp), %r13
movq %r13, -0x10(%r13)
movabsq $0x800000000, %rax # imm = 0x800000000
movq %rax, -0x8(%r13)
cmpl $0xe6, 0x18(%rsi)
movq %rsi, %r14
jne 0xc47b8a
movq 0x28(%rbx), %rax
movq (%rax), %r14
testq %r14, %r14
je 0xc47ca8
cmpl $0x9b, 0x18(%r14)
jne 0xc47ca8
movq 0x28(%r14), %rax
movq (%rax), %rax
cmpl $0xe6, 0x18(%rax)
movq %r15, 0x28(%rsp)
movq %rcx, 0x20(%rsp)
jne 0xc47bc2
movq 0x28(%rax), %rax
movq (%rax), %rax
xorl %ebp, %ebp
cmpl $0xf0, 0x18(%rax)
sete %bpl
xorl $0xf1, %ebp
cmpw $0x0, 0x40(%r14)
je 0xc47c28
xorl %r15d, %r15d
leaq 0x38(%rsp), %r13
xorl %r12d, %r12d
movq 0x28(%r14), %rax
movq (%rax,%r15), %rax
cmpl $0xe6, 0x18(%rax)
jne 0xc47c00
movq 0x28(%rax), %rax
movq (%rax), %rax
cmpl %ebp, 0x18(%rax)
jne 0xc47c28
movq 0x28(%rax), %rax
movq (%rax), %rsi
movl 0x8(%rax), %edx
movq %r13, %rdi
callq 0x9e21b4
incq %r12
movzwl 0x40(%r14), %eax
addq $0x28, %r15
cmpq %rax, %r12
jb 0xc47be8
movzwl 0x40(%r14), %eax
cmpl %eax, 0x40(%rsp)
leaq 0x48(%rsp), %r13
movq 0x28(%rsp), %r15
movq 0x20(%rsp), %r14
jne 0xc47ca8
movq 0x38(%r15), %r12
movq 0x48(%rbx), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0xc47c63
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
leaq 0x10(%rsp), %r13
movl %eax, 0x8(%r13)
movl $0x20, (%rsp)
leaq 0xc(%rsp), %rsi
leaq 0x38(%rsp), %rdx
movl %ebp, %edi
movq %r14, %rcx
movq %r12, %r8
movq %r13, %r9
callq 0xc47a16
movq (%r13), %rsi
testq %rsi, %rsi
je 0xc47ca3
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
leaq 0x48(%rsp), %r13
movq 0x38(%r15), %r15
movl 0xc(%rsp), %r12d
movq 0x48(%rbx), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0xc47cce
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
leaq 0x10(%rsp), %r14
movl %eax, 0x8(%r14)
movl $0x0, (%rsp)
movq %r15, %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq 0x30(%rsp), %rcx
movq %rax, (%rcx)
movl %edx, 0x8(%rcx)
movq (%r14), %rsi
testq %rsi, %rsi
je 0xc47d1a
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movq 0x38(%rsp), %rdi
cmpq %r13, %rdi
je 0xc47d29
callq 0x780910
movb $0x1, %al
addq $0xc8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
|
llvm::AMDGPUTargetLowering::LowerOperation(llvm::SDValue, llvm::SelectionDAG&) const
|
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default:
Op->print(errs(), &DAG);
llvm_unreachable("Custom lowering code for this "
"instruction is not implemented yet!");
break;
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
case ISD::FREM: return LowerFREM(Op, DAG);
case ISD::FCEIL: return LowerFCEIL(Op, DAG);
case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
case ISD::FRINT: return LowerFRINT(Op, DAG);
case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
case ISD::FROUNDEVEN:
return LowerFROUNDEVEN(Op, DAG);
case ISD::FROUND: return LowerFROUND(Op, DAG);
case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
case ISD::FLOG2:
return LowerFLOG2(Op, DAG);
case ISD::FLOG:
case ISD::FLOG10:
return LowerFLOGCommon(Op, DAG);
case ISD::FEXP:
case ISD::FEXP10:
return lowerFEXP(Op, DAG);
case ISD::FEXP2:
return lowerFEXP2(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
return LowerFP_TO_INT(Op, DAG);
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
return LowerCTLZ_CTTZ(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
}
return Op;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rcx, %rbx
movq %rsi, %r14
movl 0x18(%rsi), %eax
cmpl $0xd7, %eax
jle 0xc4faa3
leal -0x101(%rax), %ecx
cmpl $0x22, %ecx
ja 0xc4faf6
leaq 0x2b498c8(%rip), %rax # 0x3799350
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc52e78
cmpl $0x9d, %eax
jle 0xc4fada
addl $0xffffff62, %eax # imm = 0xFFFFFF62
cmpl $0x2d, %eax
ja 0xc4fc9c
movabsq $0x318000000000, %rcx # imm = 0x318000000000
btq %rax, %rcx
jae 0xc4fb27
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc56816
cmpl $0x41, %eax
je 0xc4fb3e
cmpl $0x42, %eax
jne 0xc4fb50
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc5053e
leal -0xd8(%rax), %ecx
cmpl $0x7, %ecx
ja 0xc4fc83
leaq 0x2b49824(%rip), %rax # 0x3799330
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc56554
testq %rax, %rax
jne 0xc4fb6b
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc4feb8
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc50d14
cmpl $0x64, %eax
jne 0xc4fc9c
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc515de
cmpq $0x2, %rax
jne 0xc4fc9c
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc50270
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc53a7a
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc52206
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc527f0
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc52176
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc51a62
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc550e0
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc54d7a
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc4fcb0
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc5486e
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc4f8f4
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc51748
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc524d6
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc520d6
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc52b0a
cmpl $0xe9, %eax
jne 0xc4fc9c
movq %r14, %rsi
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0xc5543a
callq 0x2b7e53f
movq %r14, %rdi
movq %rax, %rsi
movq %rbx, %rdx
callq 0x17a0232
nop
|
/Target/AMDGPU/AMDGPUISelLowering.cpp
|
llvm::SmallPtrSetImpl<llvm::Value*>::insert(llvm::Value*)
|
std::pair<iterator, bool> insert(PtrType Ptr) {
auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
return std::make_pair(makeIterator(p.first), p.second);
}
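
A hedged usage sketch: as with the standard containers, the bool half of the returned pair reports whether the pointer was newly inserted, which backs the usual visit-each-value-once idiom.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Value.h"
using namespace llvm;
// Hypothetical helper: true only the first time V is seen.
bool markVisited(SmallPtrSet<Value *, 8> &Seen, Value *V) {
  return Seen.insert(V).second;
}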
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
movq %rdx, %rsi
callq 0x91dbe2
movq 0x8(%r14), %rcx
xorl %esi, %esi
cmpq (%r14), %rcx
sete %sil
movl 0x10(%r14,%rsi,4), %esi
leaq (%rcx,%rsi,8), %rcx
cmpq %rax, %rcx
je 0xc688cb
cmpq $-0x2, (%rax)
jb 0xc688cb
addq $0x8, %rax
cmpq %rcx, %rax
jne 0xc688bc
movq %rax, (%rbx)
movq %rcx, 0x8(%rbx)
movb %dl, 0x10(%rbx)
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/llvm/ADT/SmallPtrSet.h
|
bool llvm::DenseMapBase<llvm::DenseMap<llvm::Value const*, llvm::Value*, llvm::DenseMapInfo<llvm::Value const*, void>, llvm::detail::DenseMapPair<llvm::Value const*, llvm::Value*>>, llvm::Value const*, llvm::Value*, llvm::DenseMapInfo<llvm::Value const*, void>, llvm::detail::DenseMapPair<llvm::Value const*, llvm::Value*>>::LookupBucketFor<llvm::Value const*>(llvm::Value const* const&, llvm::detail::DenseMapPair<llvm::Value const*, llvm::Value*> const*&) const
|
unsigned getNumBuckets() const {
return NumBuckets;
}
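
The assembly that follows is the inlined LookupBucketFor probe loop: it advances the bucket index by 1, 2, 3, ... (triangular probing, the addl/incl pair) and wraps with NumBuckets - 1, which is why DenseMap bucket counts are powers of two. A sketch of one probe step (hypothetical helper):

// NumBuckets must be a power of two for the mask to wrap correctly.
unsigned nextBucket(unsigned Bucket, unsigned &ProbeAmt, unsigned NumBuckets) {
  Bucket += ProbeAmt++;
  return Bucket & (NumBuckets - 1);
}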
|
movl 0x10(%rdi), %ecx
testl %ecx, %ecx
je 0xc68a0d
pushq %rbx
movq (%rdi), %rdi
movl (%rsi), %r8d
movl %r8d, %eax
shrl $0x4, %eax
shrl $0x9, %r8d
xorl %eax, %r8d
decl %ecx
andl %ecx, %r8d
movl $0x1, %r10d
xorl %r9d, %r9d
movl %r8d, %ebx
shlq $0x4, %rbx
leaq (%rdi,%rbx), %r11
movq (%rdi,%rbx), %rbx
cmpq %rbx, (%rsi)
jne 0xc689d4
movq %r11, (%rdx)
movb $0x1, %al
xorl %r11d, %r11d
testb %r11b, %r11b
jne 0xc689b1
jmp 0xc68a0a
cmpq $-0x1000, %rbx # imm = 0xF000
jne 0xc689ee
testq %r9, %r9
cmovneq %r9, %r11
movq %r11, (%rdx)
xorl %r11d, %r11d
xorl %eax, %eax
jmp 0xc689cd
xorq $-0x2000, %rbx # imm = 0xE000
orq %r9, %rbx
cmoveq %r11, %r9
addl %r10d, %r8d
incl %r10d
andl %ecx, %r8d
movb $0x1, %r11b
jmp 0xc689cd
popq %rbx
jmp 0xc68a16
movq $0x0, (%rdx)
xorl %eax, %eax
andb $0x1, %al
retq
nop
|
/llvm/ADT/DenseMap.h
|
llvm::AMDGPULegalizerInfo::legalizeFlog2(llvm::MachineInstr&, llvm::MachineIRBuilder&) const
|
bool AMDGPULegalizerInfo::legalizeFlog2(MachineInstr &MI,
MachineIRBuilder &B) const {
// v_log_f32 is good enough for OpenCL, except it doesn't handle denormals.
// If we have to handle denormals, scale up the input and adjust the result.
// scaled = x * (is_denormal ? 0x1.0p+32 : 1.0)
// log2 = amdgpu_log2 - (is_denormal ? 32.0 : 0.0)
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
LLT Ty = B.getMRI()->getType(Dst);
unsigned Flags = MI.getFlags();
if (Ty == LLT::scalar(16)) {
const LLT F32 = LLT::scalar(32);
// Nothing in half is a denormal when promoted to f32.
auto Ext = B.buildFPExt(F32, Src, Flags);
auto Log2 = B.buildIntrinsic(Intrinsic::amdgcn_log, {F32})
.addUse(Ext.getReg(0))
.setMIFlags(Flags);
B.buildFPTrunc(Dst, Log2, Flags);
MI.eraseFromParent();
return true;
}
assert(Ty == LLT::scalar(32));
auto [ScaledInput, IsLtSmallestNormal] = getScaledLogInput(B, Src, Flags);
if (!ScaledInput) {
B.buildIntrinsic(Intrinsic::amdgcn_log, {MI.getOperand(0)})
.addUse(Src)
.setMIFlags(Flags);
MI.eraseFromParent();
return true;
}
auto Log2 = B.buildIntrinsic(Intrinsic::amdgcn_log, {Ty})
.addUse(ScaledInput)
.setMIFlags(Flags);
auto ThirtyTwo = B.buildFConstant(Ty, 32.0);
auto Zero = B.buildFConstant(Ty, 0.0);
auto ResultOffset =
B.buildSelect(Ty, IsLtSmallestNormal, ThirtyTwo, Zero, Flags);
B.buildFSub(Dst, Log2, ResultOffset, Flags);
MI.eraseFromParent();
return true;
}
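
The denormal rescale leans on an exact identity: multiplying a denormal float by 2^32 is exact (the product is representable as a normal), and log2(x * 2^32) = log2(x) + 32. A host-side check of the trick, assuming positive finite input (illustrative, not the GPU lowering itself):

#include <cmath>
float log2ViaScale(float X) {
  bool Denorm = X < 0x1.0p-126f;              // below the smallest normal
  float Scaled = Denorm ? X * 0x1.0p+32f : X; // exact scaling for denormals
  return std::log2(Scaled) - (Denorm ? 32.0f : 0.0f);
}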
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rdx, %rdi
movq %rsi, %rbx
movq 0x20(%rsi), %rax
movl 0x4(%rax), %esi
testl %esi, %esi
jns 0xc74301
movq 0x18(%rdi), %rcx
movl %esi, %edx
andl $0x7fffffff, %edx # imm = 0x7FFFFFFF
cmpl %edx, 0x1d0(%rcx)
jbe 0xc74301
movq 0x1c8(%rcx), %rcx
movq (%rcx,%rdx,8), %r13
jmp 0xc74304
xorl %r13d, %r13d
movq %rsi, 0x18(%rsp)
movl 0x24(%rax), %ebp
movl 0x2c(%rbx), %r14d
movl %r14d, %r15d
andl $0xffffff, %r15d # imm = 0xFFFFFF
cmpq $0x81, %r13
movq %rbx, 0x48(%rsp)
jne 0xc74431
btsq $0x20, %r15
movl $0x101, %ebx # imm = 0x101
leaq 0x20(%rsp), %r13
movq %rbx, (%r13)
xorl %eax, %eax
movl %eax, 0x8(%r13)
leaq 0x50(%rsp), %r12
movl %ebp, (%r12)
movl %eax, 0x10(%r12)
movq (%rdi), %rax
movq %r15, (%rsp)
movl $0x1, %ecx
movl $0x1, %r9d
movq %rdi, %rbp
movl $0xbe, %esi
movq %r13, %rdx
movq %r12, %r8
callq *0x20(%rax)
movq %rdx, 0x10(%rsp)
movq %rbx, (%r13)
xorl %eax, %eax
movl %eax, 0x8(%r13)
leaq 0x20(%rsp), %rdx
movl $0x1, %ecx
movq %rbp, %rdi
movl $0xb0e, %esi # imm = 0xB0E
callq 0x15e0a04
movq %rax, %rbx
movq %rdx, %r13
movq 0x10(%rsp), %rax
movq 0x20(%rax), %rax
movl 0x4(%rax), %eax
movq $0x0, 0x8(%r12)
xorl %ecx, %ecx
movl %ecx, (%r12)
movl %eax, 0x4(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r12)
leaq 0x50(%rsp), %r12
movq %rdx, %rdi
movq %rbx, %rsi
movq %r12, %rdx
callq 0x1d3c22c
andl $0xfffff3, %r14d # imm = 0xFFFFF3
movl $0xff00000c, %eax # imm = 0xFF00000C
andl 0x2c(%r13), %eax
orl %r14d, %eax
movl %eax, 0x2c(%r13)
movq 0x18(%rsp), %rax
leaq 0x20(%rsp), %rcx
movl %eax, (%rcx)
movl $0x1, %eax
movl %eax, 0x8(%rcx)
movq %rbx, (%r12)
movq %r13, 0x8(%r12)
movl %eax, 0x10(%r12)
leaq 0x20(%rsp), %rsi
leaq 0x50(%rsp), %rdx
movq %rbp, %rdi
movq %r15, %rcx
callq 0x15e0aa0
jmp 0xc7463e
movq %rdi, %rsi
movl %ebp, %edx
movl %r15d, %ecx
movq %rdi, 0x8(%rsp)
callq 0xc787ee
movq %rax, %r12
testl %r12d, %r12d
je 0xc745d5
movq %r12, %rax
shrq $0x20, %rax
movq %rax, 0x40(%rsp)
leaq 0x20(%rsp), %rax
movq %r13, (%rax)
xorl %ecx, %ecx
movl %ecx, 0x8(%rax)
movq %rax, %rdx
movl $0x1, %ecx
movq 0x8(%rsp), %rdi
movl $0xb0e, %esi # imm = 0xB0E
callq 0x15e0a04
movq %rax, 0x10(%rsp)
movq %rdx, %rbp
leaq 0x50(%rsp), %rbx
movq $0x0, 0x8(%rbx)
xorl %ecx, %ecx
movl %ecx, (%rbx)
movl %r12d, 0x4(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbx)
movq %rdx, %rdi
movq %rax, %rsi
movq %rbx, %rdx
callq 0x1d3c22c
andl $0xfffff3, %r14d # imm = 0xFFFFF3
movl $0xff00000c, %eax # imm = 0xFF00000C
movq %rbp, 0x38(%rsp)
andl 0x2c(%rbp), %eax
orl %r14d, %eax
movl %eax, 0x2c(%rbp)
movq %r13, (%rbx)
xorl %eax, %eax
movl %eax, 0x8(%rbx)
leaq 0x50(%rsp), %rbx
movsd 0x2b24f00(%rip), %xmm0 # 0x37993e0
movq 0x8(%rsp), %r14
movq %r14, %rdi
movq %rbx, %rsi
callq 0x15ded9a
movq %rax, %r12
movq %rdx, %rbp
movq %r13, (%rbx)
xorl %eax, %eax
movl %eax, 0x8(%rbx)
leaq 0x50(%rsp), %rbx
xorps %xmm0, %xmm0
movq %r14, %rdi
movq %rbx, %rsi
callq 0x15ded9a
leaq 0x80(%rsp), %rsi
movq %r13, (%rsi)
xorl %edi, %edi
movl %edi, 0x8(%rsi)
movq 0x40(%rsp), %rcx
movl %ecx, (%rbx)
movl %edi, 0x10(%rbx)
leaq 0x20(%rsp), %rcx
movq %r12, (%rcx)
movq %rbp, 0x8(%rcx)
movl $0x1, %r14d
movl %r14d, 0x10(%rcx)
leaq 0x90(%rsp), %r8
movq %rax, (%r8)
movq %rdx, 0x8(%r8)
movl %r14d, 0x10(%r8)
movl %r15d, %ebx
btsq $0x20, %rbx
leaq 0x50(%rsp), %r15
leaq 0x20(%rsp), %r12
movq 0x8(%rsp), %r13
movq %r13, %rdi
movq %r15, %rdx
movq %r12, %rcx
movq %rbx, %r9
callq 0x15e0c0c
movq 0x18(%rsp), %rcx
movl %ecx, (%r12)
movl %r14d, 0x8(%r12)
movq 0x10(%rsp), %rcx
movq %rcx, (%r15)
movq 0x38(%rsp), %rcx
movq %rcx, 0x8(%r15)
movl %r14d, 0x10(%r15)
movq %rax, 0x18(%r15)
movq %rdx, 0x20(%r15)
movl %r14d, 0x28(%r15)
movq (%r13), %rax
movq %rbx, (%rsp)
leaq 0x20(%rsp), %rdx
leaq 0x50(%rsp), %r8
movl $0x1, %ecx
movl $0x2, %r9d
movq %r13, %rdi
movl $0xad, %esi
callq *0x20(%rax)
jmp 0xc7463e
movq 0x20(%rbx), %rax
movl 0x4(%rax), %eax
leaq 0x20(%rsp), %rdx
movl %eax, (%rdx)
movl $0x1, 0x8(%rdx)
movl $0x1, %ecx
movq 0x8(%rsp), %rdi
movl $0xb0e, %esi # imm = 0xB0E
callq 0x15e0a04
movq %rdx, %rbx
leaq 0x50(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x0, (%rdx)
movl %ebp, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rbx, %rdi
movq %rax, %rsi
callq 0x1d3c22c
andl $0xfffff3, %r14d # imm = 0xFFFFF3
movl $0xff00000c, %eax # imm = 0xFF00000C
andl 0x2c(%rbx), %eax
orl %r14d, %eax
movl %eax, 0x2c(%rbx)
movq 0x48(%rsp), %rdi
callq 0x1d3deba
movb $0x1, %al
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPULegalizerInfo.cpp
|
llvm::AMDGPULibCalls::fold(llvm::CallInst*)
|
bool AMDGPULibCalls::fold(CallInst *CI) {
Function *Callee = CI->getCalledFunction();
// Ignore indirect calls.
if (!Callee || Callee->isIntrinsic() || CI->isNoBuiltin())
return false;
FuncInfo FInfo;
if (!parseFunctionName(Callee->getName(), FInfo))
return false;
// Further check the number of arguments to see if they match.
// TODO: Check calling convention matches too
if (!FInfo.isCompatibleSignature(CI->getFunctionType()))
return false;
LLVM_DEBUG(dbgs() << "AMDIC: try folding " << *CI << '\n');
if (TDOFold(CI, FInfo))
return true;
IRBuilder<> B(CI);
if (CI->isStrictFP())
B.setIsFPConstrained(true);
if (FPMathOperator *FPOp = dyn_cast<FPMathOperator>(CI)) {
// Under unsafe-math, evaluate calls if possible.
// According to Brian Sumner, we can do this for all f32 function calls
// using host's double function calls.
if (canIncreasePrecisionOfConstantFold(FPOp) && evaluateCall(CI, FInfo))
return true;
// Copy fast flags from the original call.
FastMathFlags FMF = FPOp->getFastMathFlags();
B.setFastMathFlags(FMF);
// Specialized optimizations for each function call.
//
// TODO: Handle native functions
switch (FInfo.getId()) {
case AMDGPULibFunc::EI_EXP:
if (FMF.none())
return false;
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::exp,
FMF.approxFunc());
case AMDGPULibFunc::EI_EXP2:
if (FMF.none())
return false;
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::exp2,
FMF.approxFunc());
case AMDGPULibFunc::EI_LOG:
if (FMF.none())
return false;
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::log,
FMF.approxFunc());
case AMDGPULibFunc::EI_LOG2:
if (FMF.none())
return false;
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::log2,
FMF.approxFunc());
case AMDGPULibFunc::EI_LOG10:
if (FMF.none())
return false;
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::log10,
FMF.approxFunc());
case AMDGPULibFunc::EI_FMIN:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::minnum,
true, true);
case AMDGPULibFunc::EI_FMAX:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::maxnum,
true, true);
case AMDGPULibFunc::EI_FMA:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::fma, true,
true);
case AMDGPULibFunc::EI_MAD:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::fmuladd,
true, true);
case AMDGPULibFunc::EI_FABS:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::fabs, true,
true, true);
case AMDGPULibFunc::EI_COPYSIGN:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::copysign,
true, true, true);
case AMDGPULibFunc::EI_FLOOR:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::floor, true,
true);
case AMDGPULibFunc::EI_CEIL:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::ceil, true,
true);
case AMDGPULibFunc::EI_TRUNC:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::trunc, true,
true);
case AMDGPULibFunc::EI_RINT:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::rint, true,
true);
case AMDGPULibFunc::EI_ROUND:
return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::round, true,
true);
case AMDGPULibFunc::EI_LDEXP: {
if (!shouldReplaceLibcallWithIntrinsic(CI, true, true))
return false;
Value *Arg1 = CI->getArgOperand(1);
if (VectorType *VecTy = dyn_cast<VectorType>(CI->getType());
VecTy && !isa<VectorType>(Arg1->getType())) {
Value *SplatArg1 = B.CreateVectorSplat(VecTy->getElementCount(), Arg1);
CI->setArgOperand(1, SplatArg1);
}
CI->setCalledFunction(Intrinsic::getDeclaration(
CI->getModule(), Intrinsic::ldexp,
{CI->getType(), CI->getArgOperand(1)->getType()}));
return true;
}
case AMDGPULibFunc::EI_POW: {
Module *M = Callee->getParent();
AMDGPULibFunc PowrInfo(AMDGPULibFunc::EI_POWR, FInfo);
FunctionCallee PowrFunc = getFunction(M, PowrInfo);
CallInst *Call = cast<CallInst>(FPOp);
// pow(x, y) -> powr(x, y) for x >= -0.0
// TODO: Account for flags on current call
if (PowrFunc &&
cannotBeOrderedLessThanZero(
FPOp->getOperand(0), /*Depth=*/0,
SimplifyQuery(M->getDataLayout(), TLInfo, DT, AC, Call))) {
Call->setCalledFunction(PowrFunc);
return fold_pow(FPOp, B, PowrInfo) || true;
}
// pow(x, y) -> pown(x, y) for known integral y
if (isKnownIntegral(FPOp->getOperand(1), M->getDataLayout(),
FPOp->getFastMathFlags())) {
FunctionType *PownType = getPownType(CI->getFunctionType());
AMDGPULibFunc PownInfo(AMDGPULibFunc::EI_POWN, PownType, true);
FunctionCallee PownFunc = getFunction(M, PownInfo);
if (PownFunc) {
// TODO: If the incoming integral value is an sitofp/uitofp, it won't
// fold out without a known range. We can probably take the source
// value directly.
Value *CastedArg =
B.CreateFPToSI(FPOp->getOperand(1), PownType->getParamType(1));
// Have to drop any nofpclass attributes on the original call site.
Call->removeParamAttrs(
1, AttributeFuncs::typeIncompatible(CastedArg->getType()));
Call->setCalledFunction(PownFunc);
Call->setArgOperand(1, CastedArg);
return fold_pow(FPOp, B, PownInfo) || true;
}
}
return fold_pow(FPOp, B, FInfo);
}
case AMDGPULibFunc::EI_POWR:
case AMDGPULibFunc::EI_POWN:
return fold_pow(FPOp, B, FInfo);
case AMDGPULibFunc::EI_ROOTN:
return fold_rootn(FPOp, B, FInfo);
case AMDGPULibFunc::EI_SQRT:
// TODO: Allow with strictfp + constrained intrinsic
return tryReplaceLibcallWithSimpleIntrinsic(
B, CI, Intrinsic::sqrt, true, true, /*AllowStrictFP=*/false);
case AMDGPULibFunc::EI_COS:
case AMDGPULibFunc::EI_SIN:
return fold_sincos(FPOp, B, FInfo);
default:
break;
}
} else {
// Specialized optimizations for each function call
switch (FInfo.getId()) {
case AMDGPULibFunc::EI_READ_PIPE_2:
case AMDGPULibFunc::EI_READ_PIPE_4:
case AMDGPULibFunc::EI_WRITE_PIPE_2:
case AMDGPULibFunc::EI_WRITE_PIPE_4:
return fold_read_write_pipe(CI, B, FInfo);
default:
break;
}
}
return false;
}
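
The repeated FMF.approxFunc() queries compile down to one bit test (andl $0x40; shrl $0x6 in the assembly), i.e. the afn flag sits at bit 6 of the fast-math flag word in this build; the bit position is read off the compiled code, not a stable ABI guarantee:

// Illustrative extraction of the approx-func fast-math bit.
bool approxFuncBit(unsigned FMFWord) { return (FMFWord & 0x40) != 0; }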
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq -0x20(%rsi), %rax
xorl %r12d, %r12d
testq %rax, %rax
je 0xc8a2e4
cmpb $0x0, (%rax)
cmoveq %rax, %r12
testq %r12, %r12
je 0xc8a2f4
movq 0x18(%r12), %rax
cmpq 0x50(%rbx), %rax
je 0xc8a2f7
xorl %r12d, %r12d
testq %r12, %r12
je 0xc8a310
testb $0x20, 0x21(%r12)
jne 0xc8a310
movq %rbx, %rdi
callq 0xc89c94
testb %al, %al
je 0xc8a326
xorl %ebp, %ebp
movl %ebp, %eax
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x10(%rsp), %r15
movq $0x0, (%r15)
movq %r12, %rdi
callq 0x2a9f76a
movq %rax, %rdi
movq %rdx, %rsi
movq %r15, %rdx
callq 0xc8faa6
testb %al, %al
je 0xc8a45f
movq 0x50(%rbx), %rsi
leaq 0x10(%rsp), %rdi
callq 0xc9063e
testb %al, %al
je 0xc8a45f
leaq 0x10(%rsp), %rdx
movq %rbx, %rsi
callq 0xc8aae8
movb $0x1, %bpl
testb %al, %al
jne 0xc8a461
leaq 0x28(%rsp), %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
xorl %r8d, %r8d
callq 0x92eb52
movq %rbx, %rdi
callq 0x93297e
testb %al, %al
je 0xc8a3a6
movb $0x1, 0x94(%rsp)
movq %rbx, %rdi
callq 0x931f84
xorb $0x1, %al
testq %rbx, %rbx
sete %cl
orb %al, %cl
jne 0xc8a47a
cmpb $0x0, 0x18(%r14)
jne 0xc8a3cb
cmpb $-0x2, 0x1(%rbx)
jb 0xc8a3e9
leaq 0x10(%rsp), %rdx
movq %r14, %rdi
movq %rbx, %rsi
callq 0xc8af5e
movb $0x1, %bpl
movb $0x1, %r15b
testb %al, %al
jne 0xc8a86f
movzbl 0x1(%rbx), %eax
shrl %eax
cmpl $0x7f, %eax
movl $0xffffffff, %r8d # imm = 0xFFFFFFFF
cmovnel %eax, %r8d
movl %r8d, 0x90(%rsp)
movq 0x10(%rsp), %rax
movl 0x8(%rax), %eax
xorl %ebp, %ebp
cmpl $0x58, %eax
jle 0xc8a4a5
cmpl $0x86, %eax
jle 0xc8a4f2
cmpl $0x87, %eax
je 0xc8a53f
cmpl $0x8c, %eax
je 0xc8a559
cmpl $0xa1, %eax
jne 0xc8a86f
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x157, %ecx # imm = 0x157
jmp 0xc8a858
xorl %ebp, %ebp
movq 0x10(%rsp), %rdi
testq %rdi, %rdi
je 0xc8a312
movq (%rdi), %rax
callq *0x8(%rax)
jmp 0xc8a312
movq 0x10(%rsp), %rax
movl $0xffffff3a, %ecx # imm = 0xFFFFFF3A
addl 0x8(%rax), %ecx
cmpl $0x3, %ecx
ja 0xc8a49e
leaq 0x28(%rsp), %rdx
movq %rbx, %rsi
callq 0xc89d24
jmp 0xc8a869
xorl %ebp, %ebp
jmp 0xc8a86f
cmpl $0x30, %eax
jle 0xc8a524
addl $-0x31, %eax
cmpl $0xc, %eax
ja 0xc8a4a0
leaq 0x2b112af(%rip), %rcx # 0x379b768
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movb $0x1, %bpl
testl %r8d, %r8d
je 0xc8a8de
andl $0x40, %r8d
shrl $0x6, %r8d
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x56, %ecx
jmp 0xc8a8d1
leal -0x72(%rax), %ecx
cmpl $0xe, %ecx
ja 0xc8a575
leaq 0x2b112c3(%rip), %rax # 0x379b7c4
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
leaq 0x28(%rsp), %rdx
leaq 0x10(%rsp), %rcx
movq %r14, %rdi
movq %rbx, %rsi
callq 0xc8b38e
jmp 0xc8a869
cmpl $0x20, %eax
je 0xc8a73f
cmpl $0x25, %eax
je 0xc8a75b
cmpl $0x26, %eax
jne 0xc8a86f
leaq 0x28(%rsp), %rdx
leaq 0x10(%rsp), %rcx
movq %r14, %rdi
movq %rbx, %rsi
callq 0xc8c604
jmp 0xc8a869
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x144, %ecx # imm = 0x144
jmp 0xc8a858
addl $-0x59, %eax
cmpl $0x9, %eax
ja 0xc8a4a0
leaq 0x2b11214(%rip), %rcx # 0x379b79c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
xorl %r15d, %r15d
movq %rbx, %rsi
movl $0x1, %edx
movl $0x1, %ecx
xorl %r8d, %r8d
callq 0xc8b2f2
testb %al, %al
je 0xc8a86c
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0xc8a624
movl $0xfe, %ecx
andl 0x8(%rax), %ecx
cmpl $0x12, %ecx
jne 0xc8a624
movl 0x4(%rbx), %ecx
shll $0x5, %ecx
movq %rbx, %rdx
subq %rcx, %rdx
movq 0x20(%rdx), %rdx
movq 0x8(%rdx), %rcx
movl $0xfe, %esi
andl 0x8(%rcx), %esi
cmpl $0x12, %esi
je 0xc8a624
movl 0x20(%rax), %ecx
xorl %esi, %esi
cmpb $0x13, 0x8(%rax)
sete %sil
shlq $0x20, %rsi
orq %rcx, %rsi
leaq 0xb8(%rsp), %rcx
movw $0x101, 0x20(%rcx) # imm = 0x101
leaq 0x28(%rsp), %rdi
callq 0x2a4d0e0
movq %rbx, %rdi
movl $0x1, %esi
movq %rax, %rdx
callq 0xaf6f7c
movq %rbx, %rdi
callq 0x2a51276
movq 0x8(%rbx), %rcx
leaq 0xb8(%rsp), %rdx
movq %rcx, (%rdx)
movl 0x4(%rbx), %ecx
shll $0x5, %ecx
movq %rbx, %rsi
subq %rcx, %rsi
movq 0x20(%rsi), %rcx
movq 0x8(%rcx), %rcx
movq %rcx, 0x8(%rdx)
movl $0x2, %ecx
movq %rax, %rdi
movl $0xcb, %esi
callq 0x2a43af5
movq 0x18(%rax), %rcx
movq %rcx, 0x50(%rbx)
movq %rbx, %rdi
movq %rax, %rsi
callq 0x93240c
movb $0x1, %bpl
movb $0x1, %r15b
jmp 0xc8a86f
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xa7, %ecx
jmp 0xc8a858
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xa6, %ecx
jmp 0xc8a858
movb $0x1, %bpl
testl %r8d, %r8d
je 0xc8a8de
andl $0x40, %r8d
shrl $0x6, %r8d
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x58, %ecx
jmp 0xc8a8d1
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xf0, %ecx
jmp 0xc8a858
movl $0x1, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xa5, %ecx
jmp 0xc8a858
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xe6, %ecx
jmp 0xc8a858
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x14, %ecx
jmp 0xc8a858
movl $0x1, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x19, %ecx
jmp 0xc8a858
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x12b, %ecx # imm = 0x12B
jmp 0xc8a858
movq 0x28(%r12), %r15
leaq 0x20(%rsp), %r12
leaq 0x10(%rsp), %rdx
movq %r12, %rdi
movl $0x74, %esi
callq 0xc90a14
movq %r15, %rsi
movq %r12, %rdx
callq 0xc896ea
testq %rdx, %rdx
je 0xc8a93a
movq %rax, %r13
movq %rdx, %r12
movl 0x4(%rbx), %eax
btl $0x1e, %eax
jb 0xc8a8e3
shll $0x5, %eax
movq %rbx, %rcx
subq %rax, %rcx
jmp 0xc8a8e7
leaq 0x28(%rsp), %rdx
leaq 0x10(%rsp), %rcx
movq %rbx, %rsi
callq 0xc8c2a4
jmp 0xc8a869
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0x12c, %ecx # imm = 0x12C
jmp 0xc8a858
movb $0x1, %bpl
testl %r8d, %r8d
je 0xc8a8de
andl $0x40, %r8d
shrl $0x6, %r8d
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xd6, %ecx
jmp 0xc8a8d1
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xa8, %ecx
movl $0x1, %r8d
movl $0x1, %r9d
callq 0xc8b2ac
movl %eax, %r15d
movb $0x1, %bpl
andb %r15b, %bpl
leaq 0x28(%rsp), %rdi
callq 0xa2b88a
jmp 0xc8a461
movb $0x1, %bpl
testl %r8d, %r8d
je 0xc8a8de
andl $0x40, %r8d
shrl $0x6, %r8d
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xd4, %ecx
jmp 0xc8a8d1
movb $0x1, %bpl
testl %r8d, %r8d
je 0xc8a8de
andl $0x40, %r8d
shrl $0x6, %r8d
movl $0x0, (%rsp)
leaq 0x28(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movl $0xd5, %ecx
xorl %r9d, %r9d
callq 0xc8b2ac
movl %eax, %r15d
jmp 0xc8a86f
xorl %r15d, %r15d
jmp 0xc8a86f
movq -0x8(%rbx), %rcx
movq (%rcx), %rdi
leaq 0x120(%r15), %rax
movq (%r14), %rdx
leaq 0xb8(%rsp), %rcx
movq %rax, (%rcx)
movq %rdx, 0x8(%rcx)
movdqu 0x8(%r14), %xmm0
pshufd $0x4e, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,1]
movdqu %xmm0, 0x10(%rcx)
movq %rbx, 0x20(%rcx)
pxor %xmm0, %xmm0
movdqu %xmm0, 0x28(%rcx)
movw $0x101, 0x38(%rcx) # imm = 0x101
movl $0x1c, %esi
xorl %edx, %edx
callq 0x26fb27b
testb $0x1c, %al
je 0xc8a9cb
movl 0x4(%rbx), %eax
btl $0x1e, %eax
jb 0xc8a94e
shll $0x5, %eax
movq %rbx, %rcx
subq %rax, %rcx
jmp 0xc8a952
movq -0x8(%rbx), %rcx
movq 0x20(%rcx), %rdi
leaq 0x120(%r15), %rsi
movzbl 0x1(%rbx), %eax
shrl %eax
cmpl $0x7f, %eax
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
cmovnel %eax, %edx
callq 0xc8c02b
testb %al, %al
je 0xc8aa04
movq 0x50(%rbx), %rdi
callq 0xc8c1fd
movq %rax, %r13
leaq 0x18(%rsp), %r12
movq %r12, %rdi
movl $0x73, %esi
movq %rax, %rdx
movl $0x1, %ecx
callq 0xc90a54
movq %r15, %rsi
movq %r12, %rdx
callq 0xc896ea
testq %rdx, %rdx
je 0xc8a9f4
movq %rax, %r12
movq %rdx, %r15
movl 0x4(%rbx), %eax
btl $0x1e, %eax
jb 0xc8aa21
shll $0x5, %eax
movq %rbx, %rcx
subq %rax, %rcx
jmp 0xc8aa25
movq %r13, 0x50(%rbx)
movq %rbx, %rdi
movq %r12, %rsi
callq 0x93240c
leaq 0x28(%rsp), %rdx
leaq 0x20(%rsp), %rcx
movq %r14, %rdi
movq %rbx, %rsi
callq 0xc8b38e
jmp 0xc8aacb
movq 0x18(%rsp), %rdi
testq %rdi, %rdi
je 0xc8aa04
movq (%rdi), %rax
callq *0x8(%rax)
leaq 0x28(%rsp), %rdx
leaq 0x10(%rsp), %rcx
movq %r14, %rdi
movq %rbx, %rsi
callq 0xc8b38e
movl %eax, %r15d
jmp 0xc8aace
movq -0x8(%rbx), %rcx
movq 0x20(%rcx), %rsi
movq 0x10(%r13), %rax
movq 0x10(%rax), %rdx
leaq 0xb8(%rsp), %rcx
movw $0x101, 0x20(%rcx) # imm = 0x101
leaq 0x28(%rsp), %rdi
callq 0xc10dec
movq %rax, %rbp
movq 0x8(%rax), %rsi
leaq 0xb8(%rsp), %r13
movq %r13, %rdi
movl $0x3, %edx
callq 0x29ad999
movq %rbx, %rdi
movl $0x1, %esi
movq %r13, %rdx
callq 0xc8c26a
leaq 0xc8(%rsp), %rdi
movq 0x10(%rdi), %rsi
callq 0xc8d9bc
movq %r12, 0x50(%rbx)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x93240c
movq %rbx, %rdi
movl $0x1, %esi
movq %rbp, %rdx
callq 0xaf6f7c
leaq 0x18(%rsp), %r15
movq %r14, %rdi
movq %rbx, %rsi
leaq 0x28(%rsp), %rdx
movq %r15, %rcx
callq 0xc8b38e
movq (%r15), %rdi
testq %rdi, %rdi
je 0xc8aacb
movq (%rdi), %rax
callq *0x8(%rax)
movb $0x1, %r15b
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0xc8a86c
movq (%rdi), %rax
callq *0x8(%rax)
jmp 0xc8a86c
nop
|
/Target/AMDGPU/AMDGPULibCalls.cpp
|
(anonymous namespace)::AMDGPUMachineCFGStructurizer::initFallthroughMap(llvm::MachineFunction&)
|
void AMDGPUMachineCFGStructurizer::initFallthroughMap(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "Fallthrough Map:\n");
for (auto &MBBI : MF) {
MachineBasicBlock *MBB = MBBI.getFallThrough();
if (MBB != nullptr) {
LLVM_DEBUG(dbgs() << "Fallthrough: " << MBBI.getNumber() << " -> "
<< MBB->getNumber() << "\n");
}
FallthroughMap[&MBBI] = MBB;
}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rsi, %rbx
movq 0x148(%rsi), %r15
addq $0x140, %rbx # imm = 0x140
cmpq %rbx, %r15
je 0xca310f
movq %rdi, %r14
addq $0x88, %r14
leaq 0x8(%rsp), %r12
movq %r15, %rdi
movl $0x1, %esi
callq 0x1cfd2e4
movq %rax, %r13
movq %r15, 0x8(%rsp)
movq %r14, %rdi
movq %r12, %rsi
callq 0xca32e4
movq %r13, 0x8(%rax)
movq 0x8(%r15), %r15
cmpq %rbx, %r15
jne 0xca30e2
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
nop
|
/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
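|
A minimal usage sketch for the map built above, assuming `FallthroughMap` is a `DenseMap<MachineBasicBlock *, MachineBasicBlock *>` member as the stores in the assembly suggest; the accessor itself is hypothetical, not part of the pass:

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineBasicBlock.h"

using namespace llvm;

// Hypothetical accessor (not in the pass): DenseMap::lookup returns the
// mapped block, or nullptr when initFallthroughMap recorded no fallthrough.
static MachineBasicBlock *
fallthroughOf(const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Map,
              MachineBasicBlock *MBB) {
  return Map.lookup(MBB);
}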
|
llvm::DenseMapBase<llvm::DenseMap<std::pair<unsigned int, llvm::MachineBasicBlock*>, llvm::detail::DenseSetEmpty, llvm::DenseMapInfo<std::pair<unsigned int, llvm::MachineBasicBlock*>, void>, llvm::detail::DenseSetPair<std::pair<unsigned int, llvm::MachineBasicBlock*>>>, std::pair<unsigned int, llvm::MachineBasicBlock*>, llvm::detail::DenseSetEmpty, llvm::DenseMapInfo<std::pair<unsigned int, llvm::MachineBasicBlock*>, void>, llvm::detail::DenseSetPair<std::pair<unsigned int, llvm::MachineBasicBlock*>>>::moveFromOldBuckets(llvm::detail::DenseSetPair<std::pair<unsigned int, llvm::MachineBasicBlock*>>*, llvm::detail::DenseSetPair<std::pair<unsigned int, llvm::MachineBasicBlock*>>*)
|
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
initEmpty();
// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
DestBucket->getFirst() = std::move(B->getFirst());
::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
incrementNumEntries();
// Free the value.
B->getSecond().~ValueT();
}
B->getFirst().~KeyT();
}
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movq $0x0, 0x8(%rdi)
movl 0x10(%rdi), %eax
testq %rax, %rax
je 0xca7c3c
movq (%r15), %rcx
shlq $0x4, %rax
addq %rcx, %rax
movl $0xffffffff, (%rcx) # imm = 0xFFFFFFFF
movq $-0x1000, 0x8(%rcx) # imm = 0xF000
addq $0x10, %rcx
cmpq %rax, %rcx
jne 0xca7c25
cmpq %rbx, %r14
je 0xca7ca1
movq %rsp, %r12
movl (%r14), %eax
cmpl $-0x1, %eax
sete %dl
movq 0x8(%r14), %rcx
cmpq $-0x1000, %rcx # imm = 0xF000
sete %sil
testb %sil, %dl
jne 0xca7c98
cmpl $-0x2, %eax
sete %al
cmpq $-0x2000, %rcx # imm = 0xE000
sete %cl
testb %cl, %al
jne 0xca7c98
movq %r15, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0xca79f2
movq (%rsp), %rax
movl (%r14), %ecx
movl %ecx, (%rax)
movq 0x8(%r14), %rcx
movq %rcx, 0x8(%rax)
incl 0x8(%r15)
addq $0x10, %r14
cmpq %rbx, %r14
jne 0xca7c44
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/DenseMap.h
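|
moveFromOldBuckets only runs when the table grows; a minimal sketch that forces it, assuming default DenseSet growth behavior (DenseSet is a DenseMap with an empty value type, as in the instantiation above):

#include "llvm/ADT/DenseSet.h"

using namespace llvm;

int main() {
  // Once insertions push the table past its load factor, grow() allocates
  // a larger bucket array and calls moveFromOldBuckets to rehash every
  // live entry into it, skipping empty and tombstone buckets.
  DenseSet<unsigned> S;
  for (unsigned I = 0; I < 100; ++I)
    S.insert(I); // triggers several grow()/moveFromOldBuckets cycles
  return S.size() == 100 ? 0 : 1;
}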
|
llvm::DenseMapBase<llvm::DenseMap<std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>, llvm::detail::DenseSetEmpty, llvm::DenseMapInfo<std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>, void>, llvm::detail::DenseSetPair<std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>>>, std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>, llvm::detail::DenseSetEmpty, llvm::DenseMapInfo<std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>, void>, llvm::detail::DenseSetPair<std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>>>::moveFromOldBuckets(llvm::detail::DenseSetPair<std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>>*, llvm::detail::DenseSetPair<std::pair<llvm::MachineBasicBlock*, llvm::MachineBasicBlock*>>*)
|
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
initEmpty();
// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
DestBucket->getFirst() = std::move(B->getFirst());
::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
incrementNumEntries();
// Free the value.
B->getSecond().~ValueT();
}
B->getFirst().~KeyT();
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movq $0x0, 0x8(%rdi)
movl 0x10(%rdi), %eax
testq %rax, %rax
je 0xca97a3
movq (%r15), %rcx
shlq $0x4, %rax
addq %rcx, %rax
movq $-0x1000, %rdx # imm = 0xF000
movq %rdx, (%rcx)
movq %rdx, 0x8(%rcx)
addq $0x10, %rcx
cmpq %rax, %rcx
jne 0xca9793
cmpq %rbx, %r14
je 0xca9809
movq $-0x1000, %r13 # imm = 0xF000
movq $-0x2000, %rbp # imm = 0xE000
movq %rsp, %r12
movq (%r14), %rax
movq 0x8(%r14), %rcx
movq %rax, %rdx
xorq %r13, %rdx
movq %rcx, %rsi
xorq %r13, %rsi
orq %rdx, %rsi
je 0xca9800
xorq %rbp, %rax
xorq %rbp, %rcx
orq %rax, %rcx
je 0xca9800
movq %r15, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0xca9544
movq (%rsp), %rax
movq (%r14), %rcx
movq %rcx, (%rax)
movq 0x8(%r14), %rcx
movq %rcx, 0x8(%rax)
incl 0x8(%r15)
addq $0x10, %r14
cmpq %rbx, %r14
jne 0xca97b9
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/llvm/ADT/DenseMap.h
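|
In the assembly above, -0x1000 and -0x2000 are the empty and tombstone patterns DenseMapInfo reserves for pointer keys (-1 and -2 shifted left by Log2MaxAlign, which is 12). A sketch of the same reserved-sentinel convention for a hypothetical NodeId key type; the specialization is illustrative, not LLVM code:

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include <cstdint>

struct NodeId { uint64_t V; };

namespace llvm {
// Illustrative specialization: reserve two bit patterns real keys never
// take, the same role the -0x1000/-0x2000 constants play above.
template <> struct DenseMapInfo<NodeId> {
  static inline NodeId getEmptyKey() { return {~0ULL}; }
  static inline NodeId getTombstoneKey() { return {~0ULL - 1}; }
  static unsigned getHashValue(NodeId K) {
    return DenseMapInfo<uint64_t>::getHashValue(K.V);
  }
  static bool isEqual(NodeId A, NodeId B) { return A.V == B.V; }
};
} // namespace llvm

int main() {
  llvm::DenseMap<NodeId, int> M;
  M[NodeId{42}] = 1;
  return M.count(NodeId{42}) == 1 ? 0 : 1;
}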
|
(anonymous namespace)::SchedGroup::canAddMI(llvm::MachineInstr const&) const
|
bool PipelineSolver::checkOptimal() {
if (static_cast<size_t>(CurrSyncGroupIdx) == PipelineInstrs.size()) {
if (BestCost == -1 || CurrCost < BestCost) {
BestPipeline = CurrPipeline;
BestCost = CurrCost;
LLVM_DEBUG(dbgs() << "Found Fit with cost " << BestCost << "\n");
}
assert(BestCost >= 0);
}
bool DoneExploring = false;
if (MaxBranchesExplored > 0 && BranchesExplored >= MaxBranchesExplored)
DoneExploring = true;
return (DoneExploring || BestCost == 0);
}
|
pushq %r14
pushq %rbx
pushq %rax
movq 0x10(%rsi), %rdx
testb $0x10, 0x10(%rdx)
jne 0xcac679
movq %rsi, %rbx
movq %rdi, %r14
movl (%rdi), %ecx
testb $0x1, %cl
jne 0xcac683
testb $0x2, %cl
je 0xcac528
movq 0x18(%rdx), %rsi
testb $0x2, %sil
jne 0xcac6c6
testb $0x4, %cl
je 0xcac539
movb $0x1, %al
testb $0x1, 0x18(%rdx)
jne 0xcac67b
testb $0x8, %cl
je 0xcac575
movq 0x18(%rdx), %rsi
movzwl 0x44(%rbx), %eax
addl $0xffffeb9a, %eax # imm = 0xFFFFEB9A
cmpw $-0x2, %ax
setb %dil
movabsq $-0x7800000000000000, %r8 # imm = 0x8800000000000000
movb $0x1, %al
testq %r8, %rsi
jne 0xcac67b
shrq $0x36, %rsi
andb %dil, %sil
jne 0xcac67b
testb $0x10, %cl
je 0xcac59d
movq 0x18(%rdx), %rax
testl $0x760000, %eax # imm = 0x760000
setne %dl
andl $0x3000000, %eax # imm = 0x3000000
cmpl $0x1000000, %eax # imm = 0x1000000
sete %al
orb %dl, %al
movb $0x1, %al
jne 0xcac67b
testb $0x20, %cl
je 0xcac5da
movq %rbx, %rdi
movl $0x1, %esi
callq 0x90f34a
testb %al, %al
je 0xcac5da
movq 0x10(%rbx), %rax
movq 0x18(%rax), %rax
testl $0x760000, %eax # imm = 0x760000
setne %cl
andl $0x3000000, %eax # imm = 0x3000000
cmpl $0x1000000, %eax # imm = 0x1000000
sete %al
orb %cl, %al
movb $0x1, %al
jne 0xcac67b
testb $0x40, (%r14)
je 0xcac614
movq %rbx, %rdi
movl $0x1, %esi
callq 0x90f36e
testb %al, %al
je 0xcac614
movq 0x10(%rbx), %rax
movq 0x18(%rax), %rax
testl $0x760000, %eax # imm = 0x760000
setne %cl
andl $0x3000000, %eax # imm = 0x3000000
cmpl $0x1000000, %eax # imm = 0x1000000
sete %al
orb %cl, %al
movb $0x1, %al
jne 0xcac67b
movl (%r14), %ecx
testb %cl, %cl
jns 0xcac627
movq 0x10(%rbx), %rdx
movb $0x1, %al
testb $0x2, 0x1b(%rdx)
jne 0xcac67b
btl $0x8, %ecx
jae 0xcac64a
movq %rbx, %rdi
movl $0x1, %esi
callq 0x90f34a
testb %al, %al
je 0xcac64a
movq 0x10(%rbx), %rcx
movb $0x1, %al
testb $0x2, 0x1b(%rcx)
jne 0xcac67b
testb $0x2, 0x1(%r14)
je 0xcac66e
movq %rbx, %rdi
movl $0x1, %esi
callq 0x90f36e
testb %al, %al
je 0xcac66e
movq 0x10(%rbx), %rcx
movb $0x1, %al
testb $0x2, 0x1b(%rcx)
jne 0xcac67b
testb $0x4, 0x1(%r14)
jne 0xcac700
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
movq 0x18(%rdx), %rsi
movb $0x1, %al
testb $0x2, %sil
jne 0xcac67b
movzwl 0x44(%rbx), %edi
addl $0xffffeb9a, %edi # imm = 0xFFFFEB9A
cmpw $-0x2, %di
setb %dil
movabsq $-0x7800000000000000, %r8 # imm = 0x8800000000000000
orq $0x10001, %r8 # imm = 0x10001
testq %r8, %rsi
jne 0xcac67b
shrq $0x36, %rsi
andb %dil, %sil
je 0xcac515
jmp 0xcac67b
movzwl 0x44(%rbx), %eax
addl $0xffffeb9a, %eax # imm = 0xFFFFEB9A
cmpw $-0x2, %ax
setb %dil
movabsq $-0x7800000000000000, %rax # imm = 0x8800000000000000
orq $0x10000, %rax # imm = 0x10000
testq %rax, %rsi
jne 0xcac528
shrq $0x36, %rsi
movb $0x1, %al
andb %dil, %sil
je 0xcac67b
jmp 0xcac528
movq 0x10(%rbx), %rax
movb 0x1a(%rax), %al
andb $0x1, %al
jmp 0xcac67b
|
/Target/AMDGPU/AMDGPUIGroupLP.cpp
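|
A minimal sketch of the bookkeeping the checkOptimal() source above performs; the struct is a hypothetical stand-in for the solver's members, not the pass's actual type:

struct SolverStateSketch {
  int BestCost = -1; // -1: no complete pipeline assignment found yet
  int CurrCost = 0;
  int BranchesExplored = 0;
  int MaxBranchesExplored = 0; // 0 disables the branch budget
};

static bool checkOptimalSketch(SolverStateSketch &S, bool AtLeaf) {
  if (AtLeaf && (S.BestCost == -1 || S.CurrCost < S.BestCost))
    S.BestCost = S.CurrCost; // record the new best pipeline cost
  bool DoneExploring = S.MaxBranchesExplored > 0 &&
                       S.BranchesExplored >= S.MaxBranchesExplored;
  // Stop the search on a perfect (zero-cost) fit or an exhausted budget.
  return DoneExploring || S.BestCost == 0;
}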
|
(anonymous namespace)::MFMASmallGemmSingleWaveOpt::EnablesInitialMFMA::~EnablesInitialMFMA()
|
bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
SmallVectorImpl<SchedGroup> &SyncPipe) override {
if (!SyncPipe.size())
return false;
int MFMAsFound = 0;
if (!Cache->size()) {
for (auto &Elt : SyncPipe[0].DAG->SUnits) {
if (TII->isMFMAorWMMA(*Elt.getInstr())) {
++MFMAsFound;
if (MFMAsFound > 4)
break;
Cache->push_back(&Elt);
}
}
}
assert(Cache->size());
auto DAG = SyncPipe[0].DAG;
for (auto &Elt : *Cache) {
if (DAG->IsReachable(Elt, const_cast<SUnit *>(SU)))
return true;
}
return false;
}
|
pushq %rbx
movq %rdi, %rbx
leaq 0x4ad1c8f(%rip), %rax # 0x5781f40
movq %rax, (%rdi)
cmpb $0x1, 0x48(%rdi)
jne 0xcb02d0
movb $0x0, 0x48(%rbx)
movq 0x18(%rbx), %rdi
leaq 0x28(%rbx), %rax
cmpq %rax, %rdi
je 0xcb02d0
callq 0x780910
movl $0x50, %esi
movq %rbx, %rdi
popq %rbx
jmp 0x7800d0
|
/Target/AMDGPU/AMDGPUIGroupLP.cpp
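|
A generic sketch of the cache-then-query pattern the apply() source above uses: scan once for up to four candidates, remember them, then answer reachability-style queries against the cached set. The template and its parameters are simplified stand-ins, not the pass's types:

#include <vector>

template <typename T, typename Pred, typename Reach>
bool anyCachedReaches(std::vector<T *> &Cache, std::vector<T> &All,
                      Pred IsCandidate, Reach Reaches, T *Target) {
  if (Cache.empty()) {
    int Found = 0;
    for (T &Elt : All)
      if (IsCandidate(Elt)) {
        if (++Found > 4) // same early cutoff as the source above
          break;
        Cache.push_back(&Elt);
      }
  }
  for (T *Elt : Cache)
    if (Reaches(Elt, Target))
      return true;
  return false;
}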
|
(anonymous namespace)::AMDGPUOpenCLEnqueuedBlockLowering::runOnModule(llvm::Module&)
|
bool AMDGPUOpenCLEnqueuedBlockLowering::runOnModule(Module &M) {
DenseSet<Function *> Callers;
auto &C = M.getContext();
bool Changed = false;
// ptr kernel_object, i32 private_segment_size, i32 group_segment_size
StructType *HandleTy = nullptr;
for (auto &F : M.functions()) {
if (F.hasFnAttribute("enqueued-block")) {
if (!F.hasName()) {
SmallString<64> Name;
Mangler::getNameWithPrefix(Name, "__amdgpu_enqueued_kernel",
M.getDataLayout());
F.setName(Name);
}
LLVM_DEBUG(dbgs() << "found enqueued kernel: " << F.getName() << '\n');
auto RuntimeHandle = (F.getName() + ".runtime_handle").str();
if (!HandleTy) {
Type *Int32 = Type::getInt32Ty(C);
HandleTy =
StructType::create(C, {PointerType::getUnqual(C), Int32, Int32},
"block.runtime.handle.t");
}
auto *GV = new GlobalVariable(
M, HandleTy,
/*isConstant=*/true, GlobalValue::ExternalLinkage,
/*Initializer=*/Constant::getNullValue(HandleTy), RuntimeHandle,
/*InsertBefore=*/nullptr, GlobalValue::NotThreadLocal,
AMDGPUAS::GLOBAL_ADDRESS,
/*isExternallyInitialized=*/true);
LLVM_DEBUG(dbgs() << "runtime handle created: " << *GV << '\n');
F.replaceAllUsesWith(ConstantExpr::getAddrSpaceCast(GV, F.getType()));
F.addFnAttr("runtime-handle", RuntimeHandle);
F.setLinkage(GlobalValue::ExternalLinkage);
Changed = true;
}
}
return Changed;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq 0x20(%rsi), %rbx
leaq 0x18(%rsi), %r13
cmpq %r13, %rbx
je 0xcb9d84
movq (%rsi), %rax
movq %rax, 0x8(%rsp)
movq %rsi, 0x20(%rsp)
leaq 0x120(%rsi), %rax
movq %rax, 0x10(%rsp)
xorl %r14d, %r14d
leaq 0x28(%rsp), %rbp
xorl %r15d, %r15d
movq %r13, 0x18(%rsp)
leaq -0x38(%rbx), %r12
testq %rbx, %rbx
cmoveq %rbx, %r12
movl $0xe, %edx
movq %r12, %rdi
leaq 0x2ae3bef(%rip), %rsi # 0x379d780
callq 0x2a40a70
testb %al, %al
je 0xcb9d75
testb $0x10, 0x7(%r12)
jne 0xcb9c12
leaq 0x68(%rsp), %r14
movq %r14, 0x50(%rsp)
movq $0x0, 0x58(%rsp)
movq $0x40, 0x60(%rsp)
leaq 0x2ae3bc6(%rip), %rax # 0x379d78f
movq %rax, 0x28(%rsp)
movw $0x103, 0x48(%rsp) # imm = 0x103
leaq 0x50(%rsp), %rdi
movq %rbp, %rsi
movq 0x10(%rsp), %rdx
callq 0x2a74440
movw $0x105, 0x48(%rsp) # imm = 0x105
movups 0x50(%rsp), %xmm0
movups %xmm0, 0x28(%rsp)
movq %r12, %rdi
movq %rbp, %rsi
callq 0x2a9f9d8
movq 0x50(%rsp), %rdi
cmpq %r14, %rdi
je 0xcb9c12
callq 0x780910
movq %r12, %rdi
callq 0x2a9f76a
movw $0x305, 0x70(%rsp) # imm = 0x305
movq %rax, 0x50(%rsp)
movq %rdx, 0x58(%rsp)
leaq 0x2ae3b76(%rip), %rax # 0x379d7a8
movq %rax, 0x60(%rsp)
movq %rbp, %rdi
movq %rbp, %r13
leaq 0x50(%rsp), %rbp
movq %rbp, %rsi
callq 0x2b6085c
testq %r15, %r15
jne 0xcb9c9b
movq 0x8(%rsp), %r15
movq %r15, %rdi
callq 0x2a9aad4
movq %rax, %r14
movq %r15, %rdi
xorl %esi, %esi
callq 0x2a9abe8
movq %rax, 0x50(%rsp)
movq %r14, 0x58(%rsp)
movq %r14, 0x60(%rsp)
movl $0x3, %edx
movl $0x16, %r8d
movq %r15, %rdi
movq %rbp, %rsi
leaq 0x2ae3b28(%rip), %rcx # 0x379d7b8
xorl %r9d, %r9d
callq 0x2a9b1fe
movq %rax, %r15
movl $0x58, %edi
movl $0x1, %esi
callq 0x2a9ec74
movq %rax, %r14
movq %r15, %rdi
callq 0x29e17e6
movw $0x104, 0x70(%rsp) # imm = 0x104
movq %rbp, %r10
movq %r13, %rbp
movq %r13, 0x50(%rsp)
subq $0x8, %rsp
movq %r14, %rdi
movq 0x28(%rsp), %rsi
movq %r15, %rdx
movl $0x1, %ecx
xorl %r8d, %r8d
movq %rax, %r9
pushq $0x1
movabsq $0x100000001, %rax # imm = 0x100000001
pushq %rax
pushq $0x0
pushq $0x0
pushq %r10
callq 0x2a47f46
addq $0x30, %rsp
movq 0x8(%r12), %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x29e71d8
movq %r12, %rdi
movq %rax, %rsi
callq 0x2a9fd6e
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %r8
movl $0xe, %edx
movq %r12, %rdi
leaq 0x27c758c(%rip), %rsi # 0x34812bc
callq 0x2a40808
movl 0x20(%r12), %eax
movl %eax, %ecx
andl $-0x10, %ecx
movl %ecx, %edx
orl $0x4000, %edx # imm = 0x4000
testb $0x30, %al
cmovel %ecx, %edx
movl %edx, 0x20(%r12)
movq 0x28(%rsp), %rdi
leaq 0x38(%rsp), %rax
cmpq %rax, %rdi
je 0xcb9d6d
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x7800d0
movb $0x1, %r14b
movq 0x18(%rsp), %r13
movq 0x8(%rbx), %rbx
cmpq %r13, %rbx
jne 0xcb9b77
jmp 0xcb9d87
xorl %r14d, %r14d
movl $0x8, %edx
xorl %edi, %edi
xorl %esi, %esi
callq 0x2b410f1
andb $0x1, %r14b
movl %r14d, %eax
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp
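|
The handle type is built once and reused for every enqueued kernel; a standalone sketch of that construction, using the same calls as the source above:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// The runtime-handle struct the pass materializes:
// { ptr kernel_object, i32 private_segment_size, i32 group_segment_size }.
static StructType *makeHandleTy(LLVMContext &C) {
  Type *Int32 = Type::getInt32Ty(C);
  return StructType::create(C, {PointerType::getUnqual(C), Int32, Int32},
                            "block.runtime.handle.t");
}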
|
llvm::DenseMapBase<llvm::DenseMap<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, llvm::AMDGPUPerfHintAnalysis::FuncInfo>>, llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, llvm::AMDGPUPerfHintAnalysis::FuncInfo>>::erase(llvm::DenseMapIterator<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Function const*, llvm::AMDGPUPerfHintAnalysis::FuncInfo, llvm::ValueMapConfig<llvm::Function const*, llvm::sys::SmartMutex<false>>>, llvm::AMDGPUPerfHintAnalysis::FuncInfo>, false>)
|
void erase(iterator I) {
BucketT *TheBucket = &*I;
TheBucket->getSecond().~ValueT();
TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rsi, %r15
movq %rdi, %rbx
leaq 0x8(%rsp), %r14
movq $0x2, (%r14)
xorl %eax, %eax
movq %rax, 0x8(%r14)
movq $-0x2000, %r12 # imm = 0xE000
movq %r12, 0x10(%r14)
leaq 0x4ac77b7(%rip), %rcx # 0x5782928
movq %rcx, -0x8(%r14)
movq %rax, 0x18(%r14)
leaq 0x8(%rsi), %rdi
movq %r14, %rsi
callq 0xb1e8aa
movq 0x18(%r14), %rax
movq %rax, 0x20(%r15)
movq 0x10(%r14), %rax
cmpq %r12, %rax
je 0xcbb1ab
cmpq $-0x1000, %rax # imm = 0xF000
je 0xcbb1ab
testq %rax, %rax
je 0xcbb1ab
movq %r14, %rdi
callq 0x2aa1dc0
movq 0x8(%rbx), %xmm0
paddd 0x26e8498(%rip), %xmm0 # 0x33a3650
movq %xmm0, 0x8(%rbx)
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/DenseMap.h
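|
A minimal usage sketch of the erase path shown above: find the bucket, then erase(iterator) destroys the value and overwrites the key with the tombstone sentinel, so the bucket is skipped during probing but can be reused by later insertions:

#include "llvm/ADT/DenseMap.h"

using namespace llvm;

int main() {
  DenseMap<int, int> M;
  M[1] = 10;
  M[2] = 20;
  auto It = M.find(1);
  if (It != M.end())
    M.erase(It); // value destroyed, key replaced by the tombstone
  return M.count(1) == 0 && M.count(2) == 1 ? 0 : 1;
}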
|
llvm::DenseMapBase<llvm::DenseMap<llvm::BasicBlock*, llvm::SmallDenseSet<llvm::Instruction*, 4u, llvm::DenseMapInfo<llvm::Instruction*, void>>, llvm::DenseMapInfo<llvm::BasicBlock*, void>, llvm::detail::DenseMapPair<llvm::BasicBlock*, llvm::SmallDenseSet<llvm::Instruction*, 4u, llvm::DenseMapInfo<llvm::Instruction*, void>>>>, llvm::BasicBlock*, llvm::SmallDenseSet<llvm::Instruction*, 4u, llvm::DenseMapInfo<llvm::Instruction*, void>>, llvm::DenseMapInfo<llvm::BasicBlock*, void>, llvm::detail::DenseMapPair<llvm::BasicBlock*, llvm::SmallDenseSet<llvm::Instruction*, 4u, llvm::DenseMapInfo<llvm::Instruction*, void>>>>::moveFromOldBuckets(llvm::detail::DenseMapPair<llvm::BasicBlock*, llvm::SmallDenseSet<llvm::Instruction*, 4u, llvm::DenseMapInfo<llvm::Instruction*, void>>>*, llvm::detail::DenseMapPair<llvm::BasicBlock*, llvm::SmallDenseSet<llvm::Instruction*, 4u, llvm::DenseMapInfo<llvm::Instruction*, void>>>*)
|
void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
initEmpty();
// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
DestBucket->getFirst() = std::move(B->getFirst());
::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
incrementNumEntries();
// Free the value.
B->getSecond().~ValueT();
}
B->getFirst().~KeyT();
}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movq $0x0, 0x8(%rdi)
movl 0x10(%rdi), %eax
testq %rax, %rax
je 0xcd1f7c
movq (%r15), %rcx
leaq (%rax,%rax,2), %rax
shlq $0x4, %rax
addq $-0x30, %rax
movabsq $-0x5555555555555555, %rdx # imm = 0xAAAAAAAAAAAAAAAB
mulq %rdx
movq %rdx, %xmm0
shrq $0x5, %rdx
addq $0x2, %rdx
andq $-0x2, %rdx
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
psrlq $0x5, %xmm0
xorl %eax, %eax
movdqa 0x1ee311b(%rip), %xmm1 # 0x2bb5020
movdqa 0x1ee3123(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rax, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0xcd1f58
movq $-0x1000, (%rcx) # imm = 0xF000
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0xcd1f6f
movq $-0x1000, 0x30(%rcx) # imm = 0xF000
addq $0x2, %rax
addq $0x60, %rcx
cmpq %rax, %rdx
jne 0xcd1f15
cmpq %rbx, %r14
je 0xcd2014
movl $0x1000, %r13d # imm = 0x1000
leaq 0x8(%rsp), %r12
movq (%r14), %rax
orq %r13, %rax
cmpq $-0x1000, %rax # imm = 0xF000
jne 0xcd1fa9
addq $0x30, %r14
cmpq %rbx, %r14
jne 0xcd1f90
jmp 0xcd2014
movq %r15, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0xcd1c32
movdqa 0x2ad2ab1(%rip), %xmm0 # 0x37a4a70
movq 0x8(%rsp), %rax
movq (%r14), %rcx
movq %rcx, (%rax)
leaq 0x8(%rax), %rdi
movq $0x1, 0x8(%rax)
xorl %ecx, %ecx
movdqu %xmm0, 0x10(%rax,%rcx,8)
addq $0x2, %rcx
cmpq $0x4, %rcx
jne 0xcd1fd8
leaq 0x8(%r14), %rsi
callq 0xcd2128
incl 0x8(%r15)
testb $0x1, 0x8(%r14)
jne 0xcd1f9e
movq 0x10(%r14), %rdi
movl 0x18(%r14), %esi
shlq $0x3, %rsi
movl $0x8, %edx
callq 0x2b410f1
jmp 0xcd1f9e
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/llvm/ADT/DenseMap.h
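|
This instantiation has a non-trivial value type, so the rehash above move-constructs each value into its new bucket and then destroys the old one. A minimal sketch showing that values only need to be movable, using a move-only value type for illustration:

#include "llvm/ADT/DenseMap.h"
#include <memory>

using namespace llvm;

int main() {
  // During a grow, moveFromOldBuckets move-constructs each value into the
  // new bucket array and then runs the old value's destructor.
  DenseMap<int, std::unique_ptr<int>> M;
  for (int I = 0; I < 64; ++I)
    M[I] = std::make_unique<int>(I); // growth moves the unique_ptrs
  return *M[13] == 13 ? 0 : 1;
}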
|
llvm::AMDGPURegisterBankInfo::executeInWaterfallLoop(llvm::MachineIRBuilder&, llvm::iterator_range<llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>>, llvm::SmallSet<llvm::Register, 4u, std::less<llvm::Register>>&) const
|
bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
MachineIRBuilder &B, iterator_range<MachineBasicBlock::iterator> Range,
SmallSet<Register, 4> &SGPROperandRegs) const {
// Track use registers which have already been expanded with a readfirstlane
// sequence. This may have multiple uses if moving a sequence.
DenseMap<Register, Register> WaterfalledRegMap;
MachineBasicBlock &MBB = B.getMBB();
MachineFunction *MF = &B.getMF();
const TargetRegisterClass *WaveRC = TRI->getWaveMaskRegClass();
const unsigned MovExecOpc =
Subtarget.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
const unsigned MovExecTermOpc =
Subtarget.isWave32() ? AMDGPU::S_MOV_B32_term : AMDGPU::S_MOV_B64_term;
const unsigned XorTermOpc = Subtarget.isWave32() ?
AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
const unsigned AndSaveExecOpc = Subtarget.isWave32() ?
AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
const unsigned ExecReg = Subtarget.isWave32() ?
AMDGPU::EXEC_LO : AMDGPU::EXEC;
#ifndef NDEBUG
const int OrigRangeSize = std::distance(Range.begin(), Range.end());
#endif
MachineRegisterInfo &MRI = *B.getMRI();
Register SaveExecReg = MRI.createVirtualRegister(WaveRC);
Register InitSaveExecReg = MRI.createVirtualRegister(WaveRC);
// Don't bother using generic instructions/registers for the exec mask.
B.buildInstr(TargetOpcode::IMPLICIT_DEF)
.addDef(InitSaveExecReg);
Register PhiExec = MRI.createVirtualRegister(WaveRC);
Register NewExec = MRI.createVirtualRegister(WaveRC);
// To insert the loop we need to split the block. Move everything before this
// point to a new block, and insert a new empty block before this instruction.
MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
MachineBasicBlock *BodyBB = MF->CreateMachineBasicBlock();
MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
MachineBasicBlock *RestoreExecBB = MF->CreateMachineBasicBlock();
MachineFunction::iterator MBBI(MBB);
++MBBI;
MF->insert(MBBI, LoopBB);
MF->insert(MBBI, BodyBB);
MF->insert(MBBI, RestoreExecBB);
MF->insert(MBBI, RemainderBB);
LoopBB->addSuccessor(BodyBB);
BodyBB->addSuccessor(RestoreExecBB);
BodyBB->addSuccessor(LoopBB);
// Move the rest of the block into a new block.
RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
RemainderBB->splice(RemainderBB->begin(), &MBB, Range.end(), MBB.end());
MBB.addSuccessor(LoopBB);
RestoreExecBB->addSuccessor(RemainderBB);
B.setInsertPt(*LoopBB, LoopBB->end());
B.buildInstr(TargetOpcode::PHI)
.addDef(PhiExec)
.addReg(InitSaveExecReg)
.addMBB(&MBB)
.addReg(NewExec)
.addMBB(BodyBB);
const DebugLoc &DL = B.getDL();
MachineInstr &FirstInst = *Range.begin();
// Move the instruction into the loop body. Note we moved everything after
// Range.end() already into a new block, so Range.end() is no longer valid.
BodyBB->splice(BodyBB->end(), &MBB, Range.begin(), MBB.end());
// Figure out the iterator range after splicing the instructions.
MachineBasicBlock::iterator NewBegin = FirstInst.getIterator();
auto NewEnd = BodyBB->end();
B.setMBB(*LoopBB);
LLT S1 = LLT::scalar(1);
Register CondReg;
assert(std::distance(NewBegin, NewEnd) == OrigRangeSize);
for (MachineInstr &MI : make_range(NewBegin, NewEnd)) {
for (MachineOperand &Op : MI.all_uses()) {
Register OldReg = Op.getReg();
if (!SGPROperandRegs.count(OldReg))
continue;
// See if we already processed this register in another instruction in the
// sequence.
auto OldVal = WaterfalledRegMap.find(OldReg);
if (OldVal != WaterfalledRegMap.end()) {
Op.setReg(OldVal->second);
continue;
}
Register OpReg = Op.getReg();
LLT OpTy = MRI.getType(OpReg);
const RegisterBank *OpBank = getRegBank(OpReg, MRI, *TRI);
if (OpBank != &AMDGPU::VGPRRegBank) {
// Insert copy from AGPR to VGPR before the loop.
B.setMBB(MBB);
OpReg = B.buildCopy(OpTy, OpReg).getReg(0);
MRI.setRegBank(OpReg, AMDGPU::VGPRRegBank);
B.setMBB(*LoopBB);
}
Register CurrentLaneReg = buildReadFirstLane(B, MRI, OpReg);
// Build the comparison(s).
unsigned OpSize = OpTy.getSizeInBits();
bool Is64 = OpSize % 64 == 0;
unsigned PartSize = Is64 ? 64 : 32;
LLT PartTy = LLT::scalar(PartSize);
unsigned NumParts = OpSize / PartSize;
SmallVector<Register, 8> OpParts;
SmallVector<Register, 8> CurrentLaneParts;
if (NumParts == 1) {
OpParts.push_back(OpReg);
CurrentLaneParts.push_back(CurrentLaneReg);
} else {
auto UnmergeOp = B.buildUnmerge(PartTy, OpReg);
auto UnmergeCurrentLane = B.buildUnmerge(PartTy, CurrentLaneReg);
for (unsigned i = 0; i < NumParts; ++i) {
OpParts.push_back(UnmergeOp.getReg(i));
CurrentLaneParts.push_back(UnmergeCurrentLane.getReg(i));
MRI.setRegBank(OpParts[i], AMDGPU::VGPRRegBank);
MRI.setRegBank(CurrentLaneParts[i], AMDGPU::SGPRRegBank);
}
}
for (unsigned i = 0; i < NumParts; ++i) {
auto CmpReg = B.buildICmp(CmpInst::ICMP_EQ, S1, CurrentLaneParts[i],
OpParts[i]).getReg(0);
MRI.setRegBank(CmpReg, AMDGPU::VCCRegBank);
if (!CondReg) {
CondReg = CmpReg;
} else {
CondReg = B.buildAnd(S1, CondReg, CmpReg).getReg(0);
MRI.setRegBank(CondReg, AMDGPU::VCCRegBank);
}
}
Op.setReg(CurrentLaneReg);
// Make sure we don't re-process this register again.
WaterfalledRegMap.insert(std::pair(OldReg, Op.getReg()));
}
}
// The ballot becomes a no-op during instruction selection.
CondReg = B.buildIntrinsic(Intrinsic::amdgcn_ballot,
{LLT::scalar(Subtarget.isWave32() ? 32 : 64)})
.addReg(CondReg)
.getReg(0);
MRI.setRegClass(CondReg, WaveRC);
// Update EXEC, save the original EXEC value to VCC.
B.buildInstr(AndSaveExecOpc)
.addDef(NewExec)
.addReg(CondReg, RegState::Kill);
MRI.setSimpleHint(NewExec, CondReg);
B.setInsertPt(*BodyBB, BodyBB->end());
// Update EXEC, switch all done bits to 0 and all todo bits to 1.
B.buildInstr(XorTermOpc)
.addDef(ExecReg)
.addReg(ExecReg)
.addReg(NewExec);
// XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
// s_cbranch_scc0?
// Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
B.buildInstr(AMDGPU::SI_WATERFALL_LOOP).addMBB(LoopBB);
// Save the EXEC mask before the loop.
BuildMI(MBB, MBB.end(), DL, TII->get(MovExecOpc), SaveExecReg)
.addReg(ExecReg);
// Restore the EXEC mask after the loop.
B.setMBB(*RestoreExecBB);
B.buildInstr(MovExecTermOpc)
.addDef(ExecReg)
.addReg(SaveExecReg);
// Set the insert point after the original instruction, so any new
// instructions will be in the remainder.
B.setInsertPt(*RemainderBB, RemainderBB->begin());
return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1e8, %rsp # imm = 0x1E8
movq %r8, 0x100(%rsp)
movq %rcx, 0x78(%rsp)
movq %rdx, 0x60(%rsp)
movq %rsi, %r12
xorps %xmm0, %xmm0
movaps %xmm0, 0x110(%rsp)
movl $0x0, 0x120(%rsp)
movq 0x38(%rsi), %rax
movq %rax, 0xa0(%rsp)
movq 0xa0(%rdi), %rax
movq %rdi, 0xb0(%rsp)
movq 0xa8(%rdi), %rcx
cmpb $0x0, 0x141(%rcx)
je 0xcdcd04
leaq 0x4a8b7f6(%rip), %r14 # 0x57684f8
jmp 0xcdcd0b
leaq 0x4a8c0fd(%rip), %r14 # 0x5768e08
movb 0x184(%rax), %al
movb %al, 0x13(%rsp)
movq 0x8(%r12), %r13
movq 0x18(%r12), %rbx
leaq 0x327e792(%rip), %r15 # 0x3f5b4b8
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, 0x90(%rsp)
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, %ebp
movq %r12, %rdi
movl $0xa, %esi
callq 0x15dd5c4
movq %r12, 0x18(%rsp)
movq %r12, %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rdx, %rdi
leaq 0x30(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x1000000, (%rdx) # imm = 0x1000000
movl %ebp, 0x28(%rsp)
movl %ebp, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, 0x68(%rsp)
movq %rbx, 0x70(%rsp)
movq %rbx, %rdi
movq %r14, 0x130(%rsp)
movq %r14, %rsi
movq %r15, %rdx
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, 0x8c(%rsp)
movq %r13, %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq 0x1d346fa
movq %rax, %r14
movq %r13, %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq 0x1d346fa
movq %rax, %r12
movq %r13, %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq 0x1d346fa
movq %rax, 0xf0(%rsp)
movq %r13, %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq 0x1d346fa
movq %rax, %rbp
movq 0xa0(%rsp), %rbx
movq 0x8(%rbx), %r15
addq $0x140, %r13 # imm = 0x140
movq %r13, %rdi
movq %r14, %rsi
callq 0x1cfab94
movq (%r15), %rax
movq %r15, 0x8(%r14)
movq %rax, (%r14)
movq %r14, 0x8(%rax)
movq %r14, (%r15)
movq %r13, %rdi
movq %r12, %rsi
callq 0x1cfab94
movq (%r15), %rax
movq %r15, 0x8(%r12)
movq %rax, (%r12)
movq %r12, 0x8(%rax)
movq %r12, (%r15)
movq %r13, %rdi
movq %rbp, %rsi
callq 0x1cfab94
movq (%r15), %rax
movq %r15, 0x8(%rbp)
movq %rax, (%rbp)
movq %rbp, 0x8(%rax)
movq %rbp, (%r15)
movq %r13, %rdi
movq %rbx, %r13
movq 0xf0(%rsp), %rbx
movq %rbx, %rsi
callq 0x1cfab94
movq (%r15), %rax
movq %r15, 0x8(%rbx)
movq %rax, (%rbx)
movq %rbx, 0x8(%rax)
movq %rbx, (%r15)
movq %r14, %rdi
movq %r12, %rsi
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
callq 0x1cfcd48
movq %r12, %rdi
movq %rbp, 0xe8(%rsp)
movq %rbp, %rsi
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
callq 0x1cfcd48
movq %r12, 0xe0(%rsp)
movq %r12, %rdi
movq %r14, 0xa8(%rsp)
movq %r14, %rsi
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
callq 0x1cfcd48
movq %rbx, %rdi
movq %r13, %rsi
callq 0x1cfd150
leaq 0x30(%r13), %rbp
movq 0x78(%rsp), %rcx
cmpq %rcx, %rbp
je 0xcdcf1a
movq 0x38(%rbx), %rsi
leaq 0x28(%r13), %rdx
movq %rbx, %rdi
addq $0x28, %rdi
movq %rbp, %r8
callq 0x9a5456
movq %r13, %rdi
movq 0xa8(%rsp), %r14
movq %r14, %rsi
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
callq 0x1cfcd48
movq 0xe8(%rsp), %rdi
movq %rbx, %rsi
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
callq 0x1cfcd48
movq %r14, %rax
addq $0x30, %rax
movq 0x18(%rsp), %r12
movq %r14, 0x38(%r12)
movq %rax, 0xf8(%rsp)
movq %rax, 0x40(%r12)
movq %r12, %rdi
xorl %esi, %esi
callq 0x15dd5c4
movq %r12, %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rax, %r14
movq %r13, %rbx
movq %rdx, %r13
leaq 0x30(%rsp), %r15
xorl %eax, %eax
movq %rax, 0x8(%r15)
movl $0x1000000, (%r15) # imm = 0x1000000
movl 0x68(%rsp), %eax
movl %eax, 0x4(%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r15)
movq %rdx, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x1d3c22c
xorl %eax, %eax
movq %rax, 0x8(%r15)
movl %eax, (%r15)
movl 0x28(%rsp), %eax
movl %eax, 0x4(%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r15)
leaq 0x30(%rsp), %r15
movq %r13, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x1d3c22c
movq %rbp, 0xb8(%rsp)
movl $0xfff00000, %ebp # imm = 0xFFF00000
movl (%r15), %eax
andl %ebp, %eax
orl $0x4, %eax
movl %eax, (%r15)
xorl %eax, %eax
movq %rax, 0x8(%r15)
movq %rbx, 0x10(%r15)
leaq 0x30(%rsp), %r15
movq %r13, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x1d3c22c
xorl %eax, %eax
movq %rax, 0x8(%r15)
movl %eax, (%r15)
xorl %ebx, %ebx
movl 0x8c(%rsp), %eax
movl %eax, 0x4(%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r15)
leaq 0x30(%rsp), %r15
movq %r13, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x1d3c22c
andl (%r15), %ebp
orl $0x4, %ebp
movl %ebp, (%r15)
movq 0xb8(%rsp), %rbp
movq %rbx, 0x8(%r15)
movq 0xe0(%rsp), %rax
movq %rax, 0x10(%r15)
leaq 0x30(%rsp), %rdx
movq %r13, %rdi
movq %r14, %rsi
movq %rax, %r14
callq 0x1d3c22c
leaq 0x30(%r14), %rcx
movq 0x60(%rsp), %rsi
cmpq %rsi, %rbp
movq %rcx, 0x98(%rsp)
je 0xcdd0bf
movq 0xa0(%rsp), %rax
leaq 0x28(%rax), %rdx
movq %r14, %rdi
addq $0x28, %rdi
movq %rcx, %rsi
movq 0x60(%rsp), %rcx
movq %rbp, %r8
callq 0x9a5456
movq 0x98(%rsp), %rcx
movq 0x60(%rsp), %rsi
movq 0xa8(%rsp), %rax
movq %rax, 0x38(%r12)
movq 0xf8(%rsp), %rax
movq %rax, 0x40(%r12)
movl $0x0, 0x14(%rsp)
cmpq %rsi, %rcx
movq 0x70(%rsp), %rbx
movq 0x100(%rsp), %r14
je 0xcdd670
leaq 0x24(%rsp), %r15
movl $0x0, 0x14(%rsp)
leaq 0x178(%rsp), %rdi
movq %rsi, 0x60(%rsp)
callq 0xb74da2
movq 0x178(%rsp), %rbp
movq 0x190(%rsp), %r13
cmpq %r13, %rbp
je 0xcdd637
movq 0x180(%rsp), %rax
movq %rax, 0x28(%rsp)
movq 0x188(%rsp), %rax
movq %rax, 0x78(%rsp)
movq %r13, 0x140(%rsp)
movl 0x4(%rbp), %eax
movl %eax, 0x24(%rsp)
movq %r14, %rdi
movq %r15, %rsi
callq 0xcddaf4
testq %rax, %rax
je 0xcdd618
leaq 0x110(%rsp), %rdi
movq %r15, %rsi
callq 0xcddb30
movl 0x120(%rsp), %ecx
shlq $0x3, %rcx
addq 0x110(%rsp), %rcx
cmpq %rcx, %rax
je 0xcdd1a4
movl 0x4(%rax), %esi
movq %rbp, %rdi
callq 0x1d531ce
jmp 0xcdd618
movl 0x4(%rbp), %r15d
testl %r15d, %r15d
jns 0xcdd1ca
movl %r15d, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
cmpl %eax, 0x1d0(%rbx)
jbe 0xcdd1ca
movq 0x1c8(%rbx), %rcx
movq (%rcx,%rax,8), %rax
jmp 0xcdd1cc
xorl %eax, %eax
movq %rax, 0x108(%rsp)
movq 0xb0(%rsp), %rdi
movq 0xa8(%rdi), %rcx
movl %r15d, %esi
movq %rbx, %rdx
callq 0x1e294f4
leaq 0x4aa5e8b(%rip), %r14 # 0x5783080
cmpq %r14, %rax
je 0xcdd284
movq 0xa0(%rsp), %rax
movq %rax, 0x38(%r12)
movq 0xb8(%rsp), %rax
movq %rax, 0x40(%r12)
movq 0x108(%rsp), %rax
movq %rax, 0x148(%rsp)
xorl %eax, %eax
movl %eax, 0x150(%rsp)
movl %r15d, 0x30(%rsp)
movl %eax, 0x40(%rsp)
movq %r12, %rdi
leaq 0x148(%rsp), %rsi
leaq 0x30(%rsp), %r13
movq %r13, %rdx
callq 0x15de680
movq 0x20(%rdx), %rax
movl 0x4(%rax), %r15d
movq %rbx, %rdi
movl %r15d, %esi
movq %r14, %rdx
callq 0x1d82b1c
movq 0xa8(%rsp), %rax
movq %rax, 0x38(%r12)
movq 0xf8(%rsp), %rax
movq %rax, 0x40(%r12)
jmp 0xcdd289
leaq 0x30(%rsp), %r13
movq 0xb0(%rsp), %rdi
movq %r12, %rsi
movq %rbx, %rdx
movl %r15d, %ecx
callq 0xcdc9d4
movl %eax, %r14d
leaq 0x108(%rsp), %rdi
callq 0x94022c
movq %rax, 0x30(%rsp)
movb %dl, 0x38(%rsp)
movq %r13, %rdi
callq 0x2b60e74
movl %eax, %ecx
andl $0x3f, %ecx
cmpl $0x1, %ecx
movl $0x0, %ecx
adcb $0x5, %cl
movl %eax, %ebx
shrl %cl, %ebx
leaq 0x158(%rsp), %rcx
movq %rcx, 0x148(%rsp)
movabsq $0x800000000, %rdx # imm = 0x800000000
movq %rdx, 0x150(%rsp)
leaq 0x1c8(%rsp), %rcx
movq %rcx, 0x1b8(%rsp)
movq %rdx, 0x1c0(%rsp)
cmpl $0x1, %ebx
movl %r14d, 0xc4(%rsp)
jne 0xcdd347
movl %ebx, 0x94(%rsp)
leaq 0x148(%rsp), %rdi
movl %r15d, %esi
callq 0x962e12
leaq 0x1b8(%rsp), %rdi
movl %r14d, %esi
callq 0x962e12
jmp 0xcdd438
movl %r14d, %ecx
xorl %r14d, %r14d
testb $0x3f, %al
sete %r14b
shll $0x8, %r14d
addq $0x101, %r14 # imm = 0x101
movl %r15d, 0x30(%rsp)
movl %ecx, %r15d
xorl %eax, %eax
movl %eax, 0x40(%rsp)
movq %r12, %rdi
movq %r14, %rsi
movq %r13, %rdx
callq 0x15de33a
movq %rdx, 0x138(%rsp)
movl %r15d, 0x30(%rsp)
xorl %eax, %eax
movl %eax, 0x40(%rsp)
movq %r12, %rdi
movq %r14, %rsi
movq %r13, %rdx
callq 0x15de33a
testl %ebx, %ebx
je 0xcdd579
movl %ebx, 0x94(%rsp)
movl %ebx, %ecx
shlq $0x2, %rcx
movq %rcx, 0x68(%rsp)
xorl %r12d, %r12d
movq 0x70(%rsp), %r15
leaq 0x4aa5c8c(%rip), %rbx # 0x5783050
movq 0x138(%rsp), %r13
movq %rdx, %r14
movq 0x20(%r13), %rax
movl 0x4(%rax,%r12,8), %esi
leaq 0x148(%rsp), %rdi
callq 0x962e12
movq 0x20(%r14), %rax
movl 0x4(%rax,%r12,8), %esi
leaq 0x1b8(%rsp), %rdi
callq 0x962e12
movq 0x148(%rsp), %rax
movl (%rax,%r12), %esi
movq %r15, %rdi
leaq 0x4aa5c6f(%rip), %rdx # 0x5783080
callq 0x1d82b1c
movq 0x1b8(%rsp), %rax
movl (%rax,%r12), %esi
movq %r15, %rdi
movq %rbx, %rdx
callq 0x1d82b1c
addq $0x4, %r12
cmpq %r12, 0x68(%rsp)
jne 0xcdd3cf
movl 0x94(%rsp), %eax
testl %eax, %eax
je 0xcdd574
movl %eax, %eax
movq %rax, 0x68(%rsp)
xorl %r15d, %r15d
movl 0x14(%rsp), %ebx
movq 0x18(%rsp), %r12
leaq 0x30(%rsp), %r14
movq $0x9, 0x1a8(%rsp)
xorl %ecx, %ecx
movl %ecx, 0x1b0(%rsp)
movq 0x1b8(%rsp), %rax
movl (%rax,%r15,4), %eax
movl %eax, 0x30(%rsp)
movl %ecx, 0x40(%rsp)
movq 0x148(%rsp), %rax
movl (%rax,%r15,4), %eax
movl %eax, 0xc8(%rsp)
movl %ecx, 0xd8(%rsp)
movq %r12, %rdi
movl $0x20, %esi
leaq 0x1a8(%rsp), %rdx
movq %r14, %rcx
leaq 0xc8(%rsp), %r8
callq 0x15e0ac4
movq 0x20(%rdx), %rax
movl 0x4(%rax), %r12d
movq 0x70(%rsp), %r14
movq %r14, %rdi
movl %r12d, %esi
leaq 0x4aa5b8c(%rip), %r13 # 0x5783068
movq %r13, %rdx
callq 0x1d82b1c
testl %ebx, %ebx
je 0xcdd554
movq $0x9, 0xc8(%rsp)
xorl %eax, %eax
movl %eax, 0xd0(%rsp)
movl %ebx, 0x30(%rsp)
movl %eax, 0x40(%rsp)
movl %r12d, 0x48(%rsp)
movl %eax, 0x58(%rsp)
movq 0x18(%rsp), %rdi
movq (%rdi), %rax
movq $0x0, (%rsp)
movl $0x1, %ecx
movl $0x2, %r9d
movl $0x3c, %esi
leaq 0xc8(%rsp), %rdx
leaq 0x30(%rsp), %r8
callq *0x20(%rax)
movq 0x20(%rdx), %rax
movl 0x4(%rax), %r12d
movq %r14, %rdi
movl %r12d, %esi
movq %r13, %rdx
callq 0x1d82b1c
incq %r15
movl %r12d, %eax
movl %r12d, %ebx
cmpq %r15, 0x68(%rsp)
movq 0x18(%rsp), %r12
leaq 0x30(%rsp), %r14
jne 0xcdd45f
jmp 0xcdd582
movq 0x18(%rsp), %r12
leaq 0x30(%rsp), %r14
movl 0x14(%rsp), %eax
movl %eax, 0x14(%rsp)
movq %rbp, %rdi
movl 0xc4(%rsp), %esi
callq 0x1d531ce
movl 0x4(%rbp), %eax
movl 0x24(%rsp), %ecx
movl %ecx, 0xc8(%rsp)
movl %eax, 0xcc(%rsp)
movq %r14, %rdi
leaq 0x110(%rsp), %rsi
leaq 0xc8(%rsp), %rdx
leaq 0xcc(%rsp), %rcx
callq 0xceb5b0
movq 0x1b8(%rsp), %rdi
leaq 0x1c8(%rsp), %rax
cmpq %rax, %rdi
je 0xcdd5e4
callq 0x780910
movq 0x148(%rsp), %rdi
leaq 0x158(%rsp), %r13
cmpq %r13, %rdi
movq 0x70(%rsp), %rbx
movq 0x100(%rsp), %r14
leaq 0x24(%rsp), %r15
je 0xcdd610
callq 0x780910
movq 0x140(%rsp), %r13
addq $0x20, %rbp
cmpq 0x28(%rsp), %rbp
je 0xcdd62e
movq %rbp, %rdi
callq *0x78(%rsp)
testb %al, %al
je 0xcdd618
cmpq %r13, %rbp
jne 0xcdd151
movq 0x60(%rsp), %rsi
testb $0x4, (%rsi)
jne 0xcdd65b
testb $0x8, 0x2c(%rsi)
movq 0x98(%rsp), %rax
je 0xcdd663
movq 0x8(%rsi), %rsi
testb $0x8, 0x2c(%rsi)
jne 0xcdd64f
jmp 0xcdd663
movq 0x98(%rsp), %rax
movq 0x8(%rsi), %rsi
cmpq %rax, %rsi
jne 0xcdd104
xorl %eax, %eax
xorl %ecx, %ecx
cmpb $0x5, 0x13(%rsp)
setne %al
sete %cl
leal 0x1(%rcx,%rcx), %ecx
movl %ecx, 0x28(%rsp)
leal 0xf89(%rax,%rax), %r15d
leal 0x12ae(%rax,%rax), %eax
movl %eax, 0x78(%rsp)
movq 0xb0(%rsp), %rax
movq 0xa0(%rax), %rax
xorl %ecx, %ecx
cmpb $0x5, 0x184(%rax)
setne %cl
shll $0x8, %ecx
addq $0x101, %rcx # imm = 0x101
leaq 0x178(%rsp), %rdx
movq %rcx, (%rdx)
xorl %eax, %eax
movl %eax, 0x8(%rdx)
movl $0x1, %ecx
movq %r12, %rdi
movl $0x7a2, %esi # imm = 0x7A2
callq 0x15e0a04
movq %rdx, %r14
leaq 0x30(%rsp), %rbp
xorl %ecx, %ecx
movq %rcx, 0x8(%rbp)
movl %ecx, (%rbp)
movl 0x14(%rsp), %ecx
movl %ecx, 0x4(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbp)
movq %rdx, %rdi
movq %rax, %rsi
movq %rbp, %rdx
callq 0x1d3c22c
movq 0x20(%r14), %rax
movl 0x4(%rax), %r14d
movq %rbx, %rdi
movl %r14d, %esi
movq 0x130(%rsp), %rdx
callq 0x1d82b04
movq %r12, %rdi
movl %r15d, %esi
callq 0x15dd5c4
movq %r12, %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rax, %r13
movq %rdx, %r15
xorl %r12d, %r12d
movq %r12, 0x8(%rbp)
movl $0x1000000, %eax # imm = 0x1000000
movl %eax, (%rbp)
movl 0x8c(%rsp), %ebx
movl %ebx, 0x4(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbp)
leaq 0x30(%rsp), %rbp
movq %rdx, %rdi
movq %r13, %rsi
movq %rbp, %rdx
callq 0x1d3c22c
movq %r12, 0x8(%rbp)
movl $0x4000000, (%rbp) # imm = 0x4000000
movl %r14d, 0x4(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbp)
leaq 0x30(%rsp), %rbp
movq %r15, %rdi
movq %r13, %rsi
movq %rbp, %rdx
callq 0x1d3c22c
movl %ebx, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
movq 0x70(%rsp), %rcx
movq 0xf0(%rcx), %rcx
leaq (%rax,%rax,4), %rax
leaq (%rcx,%rax,8), %rdi
addq $0x8, %rdi
xorl %eax, %eax
movl %eax, -0x8(%rdi)
movl %eax, 0x8(%rdi)
xorl %r12d, %r12d
movl %r14d, %esi
callq 0x962e12
movq 0x18(%rsp), %rax
movq 0xe0(%rsp), %rcx
movq %rcx, 0x38(%rax)
movq 0x18(%rsp), %rax
movq 0x98(%rsp), %rcx
movq %rcx, 0x40(%rax)
movq 0x18(%rsp), %rdi
movl 0x78(%rsp), %esi
callq 0x15dd5c4
movq 0x18(%rsp), %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rax, %r14
movq %rdx, %r15
movq %r12, 0x8(%rbp)
movl $0x1000000, %eax # imm = 0x1000000
movl %eax, (%rbp)
movl 0x28(%rsp), %r13d
movl %r13d, 0x4(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbp)
leaq 0x30(%rsp), %r12
movq %rdx, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0x1d3c22c
xorl %eax, %eax
movq %rax, 0x8(%r12)
movl %eax, (%r12)
movl %r13d, %ebp
movl %r13d, 0x4(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r12)
leaq 0x30(%rsp), %r12
movq %r15, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0x1d3c22c
xorl %eax, %eax
movq %rax, 0x8(%r12)
movl %eax, (%r12)
xorl %r13d, %r13d
movl %ebx, 0x4(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r12)
leaq 0x30(%rsp), %r12
movq %r15, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq 0x1d3c22c
movq 0x18(%rsp), %rdi
movl $0xf6a, %esi # imm = 0xF6A
callq 0x15dd5c4
movq 0x18(%rsp), %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rdx, %rdi
movl $0xfff00000, %ecx # imm = 0xFFF00000
andl (%r12), %ecx
orl $0x4, %ecx
movl %ecx, (%r12)
movq %r13, 0x8(%r12)
movq 0xa8(%rsp), %rcx
movq %rcx, 0x10(%r12)
movq 0x18(%rsp), %r12
leaq 0x30(%rsp), %rdx
movq %rax, %rsi
callq 0x1d3c22c
movq 0x20(%r12), %rsi
movq %rsi, 0x80(%rsp)
testq %rsi, %rsi
je 0xcdd91a
leaq 0x80(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x80(%rsp), %rsi
movq %rsi, 0x178(%rsp)
testq %rsi, %rsi
movq 0xa0(%rsp), %r15
movq 0xb8(%rsp), %rbx
je 0xcdd95e
leaq 0x80(%rsp), %r14
leaq 0x178(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
cmpb $0x5, 0x13(%rsp)
xorps %xmm0, %xmm0
leaq 0x178(%rsp), %r14
movups %xmm0, 0x8(%r14)
movq 0xb0(%rsp), %rax
movq 0xb0(%rax), %rax
movq $-0x24160, %rdx # imm = 0xFFFDBEA0
movq $-0x241c0, %rcx # imm = 0xFFFDBE40
cmoveq %rdx, %rcx
addq 0x8(%rax), %rcx
movq %r15, %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl 0x90(%rsp), %r8d
callq 0x90f593
movq %rdx, %rdi
leaq 0x30(%rsp), %rdx
movq $0x0, 0x8(%rdx)
movl $0x0, (%rdx)
movl %ebp, %ebx
movl %ebp, 0x4(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
movq (%r14), %rsi
testq %rsi, %rsi
je 0xcdd9ed
leaq 0x178(%rsp), %rdi
callq 0x2a758fc
movq 0x80(%rsp), %rsi
testq %rsi, %rsi
je 0xcdda07
leaq 0x80(%rsp), %rdi
callq 0x2a758fc
xorl %eax, %eax
cmpb $0x5, 0x13(%rsp)
setne %al
leal (%rax,%rax,2), %esi
addl $0x120d, %esi # imm = 0x120D
movq 0xe8(%rsp), %rax
movq %rax, 0x38(%r12)
addq $0x30, %rax
movq %rax, 0x40(%r12)
movq %r12, %rdi
callq 0x15dd5c4
movq %r12, %rdi
movq %rax, %rsi
callq 0x15dd6ec
movq %rax, %r14
movq %rdx, %r15
xorl %r13d, %r13d
movq %r12, %rbp
leaq 0x30(%rsp), %r12
movq %r13, 0x8(%r12)
movl $0x1000000, (%r12) # imm = 0x1000000
movl %ebx, 0x4(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r12)
movq %rdx, %rdi
movq %rax, %rsi
movq %r12, %rdx
callq 0x1d3c22c
movq %r13, 0x8(%r12)
movl $0x0, (%r12)
movl 0x90(%rsp), %eax
movl %eax, 0x4(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r12)
leaq 0x30(%rsp), %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x1d3c22c
movq 0xf0(%rsp), %rcx
movq 0x38(%rcx), %rax
movq %rcx, 0x38(%rbp)
movq %rax, 0x40(%rbp)
movq 0x110(%rsp), %rdi
movl 0x120(%rsp), %esi
shlq $0x3, %rsi
movl $0x4, %edx
callq 0x2b410f1
movb $0x1, %al
addq $0x1e8, %rsp # imm = 0x1E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
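|
The comparison-building step above splits each operand into 32- or 64-bit pieces; this standalone sketch reproduces just that arithmetic from the source:

#include <cassert>

struct PartInfo {
  unsigned PartSize;
  unsigned NumParts;
};

// Mirrors: Is64 = OpSize % 64 == 0; PartSize = Is64 ? 64 : 32;
// NumParts = OpSize / PartSize.
static PartInfo splitForCompare(unsigned OpSizeInBits) {
  bool Is64 = OpSizeInBits % 64 == 0;
  unsigned PartSize = Is64 ? 64 : 32;
  return {PartSize, OpSizeInBits / PartSize};
}

int main() {
  assert(splitForCompare(32).NumParts == 1);
  assert(splitForCompare(64).NumParts == 1 && splitForCompare(64).PartSize == 64);
  assert(splitForCompare(96).NumParts == 3); // 96 % 64 != 0 -> three s32 parts
  return 0;
}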
|
substituteSimpleCopyRegs(llvm::RegisterBankInfo::OperandsMapper const&, unsigned int)
|
static bool substituteSimpleCopyRegs(
const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper, unsigned OpIdx) {
SmallVector<unsigned, 1> SrcReg(OpdMapper.getVRegs(OpIdx));
if (!SrcReg.empty()) {
assert(SrcReg.size() == 1);
OpdMapper.getMI().getOperand(OpIdx).setReg(SrcReg[0]);
return true;
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x20, %rsp
movl %esi, %ebx
movq %rdi, %r14
xorl %edx, %edx
callq 0x1e2a2f8
leaq 0x18(%rsp), %r12
movq %r12, -0x10(%r12)
movabsq $0x100000000, %rcx # imm = 0x100000000
movq %rcx, -0x8(%r12)
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movq %rax, %rsi
callq 0xceb366
movl 0x8(%r15), %ebp
testl %ebp, %ebp
je 0xce6152
movq 0x68(%r14), %rax
movl %ebx, %edi
shlq $0x5, %rdi
addq 0x20(%rax), %rdi
movq 0x8(%rsp), %rax
movl (%rax), %esi
callq 0x1d531ce
movq 0x8(%rsp), %rdi
cmpq %r12, %rdi
je 0xce6161
callq 0x780910
testl %ebp, %ebp
setne %al
addq $0x20, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
|
HazardFnResult llvm::function_ref<HazardFnResult (llvm::GCNHazardRecognizer::fixVALUPartialForwardingHazard(llvm::MachineInstr*)::StateType&, llvm::MachineInstr const&)>::callback_fn<llvm::GCNHazardRecognizer::fixVALUPartialForwardingHazard(llvm::MachineInstr*)::$_0>(long, llvm::GCNHazardRecognizer::fixVALUPartialForwardingHazard(llvm::MachineInstr*)::StateType&, llvm::MachineInstr const&)
|
static Ret callback_fn(intptr_t callable, Params ...params) {
return (*reinterpret_cast<Callable*>(callable))(
std::forward<Params>(params)...);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movl $0x1, %ebp
cmpl $0x8, 0x2c(%rsi)
jg 0xcff539
movq %rdx, %r14
movq 0x10(%rdx), %rax
movzwl 0x1a(%rax), %eax
testl $0x3f6, %eax # imm = 0x3F6
je 0xcff54a
movl %ebp, %eax
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %rbx
movq %rdi, %r15
movq (%rdi), %rax
movq %rax, 0x8(%rsp)
cmpw $0x128c, 0x44(%r14) # imm = 0x128C
jne 0xcff571
movq 0x20(%r14), %rax
movl 0x10(%rax), %edi
callq 0x1918480
testl %eax, %eax
je 0xcff539
movq 0x10(%r14), %rax
movq 0x18(%rax), %rax
testb $0x2, %al
jne 0xcff5d6
testb $0x1, %al
je 0xcff654
xorl %r15d, %r15d
cmpl $0x7fffffff, 0x28(%rbx) # imm = 0x7FFFFFFF
jne 0xcff657
cmpl $0x2, (%rbx)
jb 0xcff657
movq 0x8(%rsp), %rax
movq 0x48(%rax), %rdx
xorl %r15d, %r15d
movq %r14, %rdi
movl $0x1, %esi
xorl %ecx, %ecx
movl $0x1, %r8d
callq 0x1d3e7a8
cmpl $-0x1, %eax
je 0xcff657
movl 0x2c(%rbx), %eax
movl %eax, 0x28(%rbx)
movb $0x1, %r15b
jmp 0xcff657
movq 0x8(%r15), %rax
movl 0x20(%rax), %ebp
testq %rbp, %rbp
je 0xcff654
movq 0x18(%rax), %r13
shlq $0x2, %rbp
xorl %r12d, %r12d
xorl %r15d, %r15d
movl (%r13,%r12), %eax
movl %eax, 0x4(%rsp)
movq %rbx, %rdi
leaq 0x4(%rsp), %rsi
leaq 0x10(%rsp), %rdx
callq 0xcff7aa
testb %al, %al
jne 0xcff649
movl 0x4(%rsp), %esi
movq 0x8(%rsp), %rax
movq 0x48(%rax), %rdx
movq %r14, %rdi
xorl %ecx, %ecx
movl $0x1, %r8d
callq 0x1d3e7a8
cmpl $-0x1, %eax
je 0xcff649
movl 0x2c(%rbx), %r15d
movq %rbx, %rdi
leaq 0x4(%rsp), %rsi
callq 0xcff83c
movl %r15d, 0x4(%rax)
movb $0x1, %r15b
addq $0x4, %r12
cmpq %r12, %rbp
jne 0xcff5f0
jmp 0xcff657
xorl %r15d, %r15d
cmpl $0x5, 0x2c(%rbx)
jl 0xcff674
xorl %ebp, %ebp
cmpl $0x2, (%rbx)
setae %al
testb %r15b, %al
jne 0xcff683
movb %al, %bpl
incl %ebp
jmp 0xcff539
movl $0x2, %ebp
testb $0x1, %r15b
je 0xcff539
movl 0x28(%rbx), %eax
movl $0x2, %ebp
cmpl $0x7fffffff, %eax # imm = 0x7FFFFFFF
je 0xcff539
movl (%rbx), %esi
leaq 0x8(%rbx), %rcx
cmpl $0x1, %esi
ja 0xcff6ba
testl %esi, %esi
cmoveq 0x8(%rbx), %rcx
movl $0x4, %edx
cmovel 0x10(%rbx), %edx
leaq (%rcx,%rdx,8), %rcx
movq %rcx, %rdx
jmp 0xcff6eb
testb $0x1, %sil
cmoveq 0x8(%rbx), %rcx
movl $0x4, %edi
cmovel 0x10(%rbx), %edi
leaq (%rcx,%rdi,8), %rdx
testl %edi, %edi
je 0xcff6eb
leaq (,%rdi,8), %rdi
cmpl $-0x2, (%rcx)
jb 0xcff6eb
addq $0x8, %rcx
addq $-0x8, %rdi
jne 0xcff6dc
leaq 0x8(%rbx), %rdi
testb $0x1, %sil
cmoveq 0x8(%rbx), %rdi
movl $0x4, %esi
cmovel 0x10(%rbx), %esi
leaq (%rdi,%rsi,8), %r8
cmpq %r8, %rcx
je 0xcff74b
movl $0x7fffffff, %edi # imm = 0x7FFFFFFF
movl $0x7fffffff, %esi # imm = 0x7FFFFFFF
movl 0x4(%rcx), %r9d
cmpl $0x7fffffff, %r9d # imm = 0x7FFFFFFF
je 0xcff736
cmpl %eax, %r9d
jge 0xcff72f
cmpl %esi, %r9d
cmovll %r9d, %esi
jmp 0xcff736
cmpl %edi, %r9d
cmovll %r9d, %edi
addq $0x8, %rcx
cmpq %rdx, %rcx
je 0xcff744
cmpl $-0x2, (%rcx)
jae 0xcff736
cmpq %r8, %rcx
jne 0xcff714
jmp 0xcff755
movl $0x7fffffff, %esi # imm = 0x7FFFFFFF
movl $0x7fffffff, %edi # imm = 0x7FFFFFFF
cmpl $0x7fffffff, %esi # imm = 0x7FFFFFFF
je 0xcff539
cmpl $0x5, %esi
setge %cl
movl %esi, %edx
subl %eax, %edx
cmpl $-0x3, %edx
setl %dl
orb %cl, %dl
movl $0x1, %ebp
jne 0xcff539
movl $0x2, %ebp
cmpl $0x7fffffff, %edi # imm = 0x7FFFFFFF
je 0xcff539
subl %edi, %esi
subl %eax, %edi
cmpl $0x3, %edi
setge %al
cmpl $-0x3, %esi
setl %cl
orb %al, %cl
movzbl %cl, %ebp
jmp 0xcff539
nop
|
/llvm/ADT/STLFunctionalExtras.h
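|
A minimal sketch of the type-erasure trick callback_fn implements above: the callable's address is erased to an integer, and a per-type static function reconstitutes the type to forward the call. This is a simplified stand-in, not LLVM's full function_ref:

#include <cstdint>
#include <type_traits>
#include <utility>

template <typename Fn> class FuncRef;

template <typename Ret, typename... Params> class FuncRef<Ret(Params...)> {
  Ret (*Callback)(std::intptr_t, Params...) = nullptr;
  std::intptr_t Callable;

  // Same trick as callback_fn above: recover the erased callable type,
  // then forward the parameters to it.
  template <typename C>
  static Ret callback(std::intptr_t Ptr, Params... Ps) {
    return (*reinterpret_cast<C *>(Ptr))(std::forward<Params>(Ps)...);
  }

public:
  // As with llvm::function_ref, the callable must outlive the FuncRef.
  template <typename C>
  FuncRef(C &&Obj)
      : Callback(callback<std::remove_reference_t<C>>),
        Callable(reinterpret_cast<std::intptr_t>(&Obj)) {}

  Ret operator()(Params... Ps) const {
    return Callback(Callable, std::forward<Params>(Ps)...);
  }
};

int main() {
  int X = 2;
  auto Add = [&X](int Y) { return X + Y; };
  FuncRef<int(int)> F(Add);
  return F(3) == 5 ? 0 : 1; // exits 0
}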
|
llvm::R600SchedStrategy::LoadAlu()
|
void R600SchedStrategy::LoadAlu() {
std::vector<SUnit *> &QSrc = Pending[IDAlu];
for (SUnit *SU : QSrc) {
AluKind AK = getAluKind(SU);
AvailableAlus[AK].push_back(SU);
}
QSrc.clear();
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
movq 0x70(%rdi), %r15
movq 0x78(%rdi), %r12
cmpq %r12, %r15
je 0xd03cf2
leaq 0xb8(%rbx), %r13
leaq 0x8(%rsp), %r14
movq (%r15), %rsi
movq %rsi, 0x8(%rsp)
movq %rbx, %rdi
callq 0xd037f8
movl %eax, %eax
leaq (%rax,%rax,2), %rax
leaq (,%rax,8), %rdi
addq %r13, %rdi
movq 0x8(%rdi), %rsi
cmpq 0x10(%rdi), %rsi
je 0xd03ce1
movq 0x8(%rsp), %rax
movq %rax, (%rsi)
addq $0x8, 0x8(%rdi)
jmp 0xd03ce9
movq %r14, %rdx
callq 0xbd1d96
addq $0x8, %r15
cmpq %r12, %r15
jne 0xd03ca7
movq 0x70(%rbx), %rax
cmpq %rax, 0x78(%rbx)
je 0xd03d00
movq %rax, 0x78(%rbx)
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/Target/AMDGPU/R600MachineScheduler.cpp
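|
A generic sketch of the drain-and-bucket pattern LoadAlu() implements above, with illustrative stand-in types (the real pass classifies SUnits into AluKind buckets):

#include <vector>

static void drainIntoBuckets(std::vector<int> &Pending,
                             std::vector<std::vector<int>> &Available,
                             unsigned (*Classify)(int)) {
  for (int SU : Pending)
    Available[Classify(SU)].push_back(SU); // AvailableAlus[AK].push_back(SU)
  Pending.clear(); // QSrc.clear()
}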
|
llvm::ARMConstantPoolMBB::print(llvm::raw_ostream&) const
|
void ARMConstantPoolMBB::print(raw_ostream &O) const {
O << printMBBReference(*MBB);
ARMConstantPoolValue::print(O);
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x20, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq 0x28(%rdi), %rsi
movq %rsp, %r15
movq %r15, %rdi
callq 0x1cfab69
cmpq $0x0, 0x10(%r15)
je 0xd2aef3
movq %rsp, %r15
movq %r15, %rdi
movq %rbx, %rsi
callq *0x18(%r15)
movq 0x10(%r15), %rax
testq %rax, %rax
je 0xd2aede
movq %rsp, %rdi
movq %rdi, %rsi
movl $0x3, %edx
callq *%rax
movq %r14, %rdi
movq %rbx, %rsi
callq 0xd2a412
addq $0x20, %rsp
popq %rbx
popq %r14
popq %r15
retq
callq 0x7807b0
|
/Target/ARM/ARMConstantPoolValue.cpp
|
llvm::ARMTTIImpl::getShuffleCost(llvm::TargetTransformInfo::ShuffleKind, llvm::VectorType*, llvm::ArrayRef<int>, llvm::TargetTransformInfo::TargetCostKind, int, llvm::VectorType*, llvm::ArrayRef<llvm::Value const*>, llvm::Instruction const*)
|
InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
VectorType *Tp, ArrayRef<int> Mask,
TTI::TargetCostKind CostKind,
int Index, VectorType *SubTp,
ArrayRef<const Value *> Args,
const Instruction *CxtI) {
Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);
// Treat extractsubvector as single op permutation.
bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
if (IsExtractSubvector)
Kind = TTI::SK_PermuteSingleSrc;
if (ST->hasNEON()) {
if (Kind == TTI::SK_Broadcast) {
static const CostTblEntry NEONDupTbl[] = {
// VDUP handles these cases.
{ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
{ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
{ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
{ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
if (const auto *Entry =
CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
return LT.first * Entry->Cost;
}
if (Kind == TTI::SK_Reverse) {
static const CostTblEntry NEONShuffleTbl[] = {
// Reverse shuffle cost one instruction if we are shuffling within a
// double word (vrev) or two if we shuffle a quad word (vrev, vext).
{ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
{ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
{ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
{ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
{ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
{ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
if (const auto *Entry =
CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
return LT.first * Entry->Cost;
}
if (Kind == TTI::SK_Select) {
static const CostTblEntry NEONSelShuffleTbl[] = {
// Select shuffle cost table for ARM. Cost is the number of
// instructions
// required to create the shuffled vector.
{ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
{ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
{ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
{ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
{ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
ISD::VECTOR_SHUFFLE, LT.second))
return LT.first * Entry->Cost;
}
}
if (ST->hasMVEIntegerOps()) {
if (Kind == TTI::SK_Broadcast) {
static const CostTblEntry MVEDupTbl[] = {
// VDUP handles these cases.
{ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
{ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
{ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
{ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
LT.second))
return LT.first * Entry->Cost *
ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
}
if (!Mask.empty()) {
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
if (LT.second.isVector() &&
Mask.size() <= LT.second.getVectorNumElements() &&
(isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
isVREVMask(Mask, LT.second, 64)))
return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
}
}
// Restore optimal kind.
if (IsExtractSubvector)
Kind = TTI::SK_ExtractSubvector;
int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
: 1;
return BaseCost *
BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movl %r9d, 0x5c(%rsp)
movq %rdx, %r12
movq %rdi, %r13
leaq 0xc8(%rsp), %rax
movq %rax, (%rsp)
leaq 0xc0(%rsp), %r9
movq %rcx, 0x38(%rsp)
movq %rcx, %rdx
movq %r8, 0x30(%rsp)
movq %r8, %rcx
movq %r12, %r8
callq 0xd4ed3e
cmpl $0x5, %eax
movl $0x7, %ebp
movl %eax, 0x58(%rsp)
cmovnel %eax, %ebp
movq 0x10(%r13), %rax
cmpb $0x1, 0x155(%rax)
jne 0xd4e891
testl %ebp, %ebp
je 0xd4e896
jmp 0xd4e91a
jmp 0xd4ea32
leaq 0x40(%rsp), %rbx
movq %rbx, %rdi
movq %r13, %rsi
movq %r12, %rdx
callq 0xd4a04c
movzbl 0x10(%rbx), %edx
shlq $0x20, %rdx
orq $0xa4, %rdx
leaq 0x2a71fc1(%rip), %rdi # 0x37c0880
leaq 0x2a72032(%rip), %rbx # 0x37c08f8
movq %rbx, %rsi
callq 0x9346d9
cmpq %rbx, %rax
sete %dl
testq %rax, %rax
sete %cl
orb %dl, %cl
je 0xd4e8e0
jmp 0xd4e912
movl 0x8(%rax), %eax
movl %eax, %ebx
movabsq $0x7fffffffffffffff, %rdx # imm = 0x7FFFFFFFFFFFFFFF
leaq 0x1(%rdx), %rsi
testl %eax, %eax
cmoveq %rsi, %rdx
movq 0x40(%rsp), %rax
testq %rax, %rax
cmovleq %rsi, %rdx
movl 0x48(%rsp), %r15d
imulq %rax, %rbx
cmovoq %rdx, %rbx
testb %cl, %cl
je 0xd4ed26
cmpl $0x1, %ebp
jne 0xd4e9a6
leaq 0x40(%rsp), %r14
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rdx
callq 0xd4a04c
movzbl 0x10(%r14), %edx
shlq $0x20, %rdx
orq $0xa4, %rdx
leaq 0x2a71fb3(%rip), %rdi # 0x37c0900
leaq 0x2a72024(%rip), %r14 # 0x37c0978
movq %r14, %rsi
callq 0x9346d9
cmpq %r14, %rax
sete %dl
testq %rax, %rax
sete %cl
orb %dl, %cl
jne 0xd4e99e
movl 0x8(%rax), %eax
movl %eax, %ebx
movabsq $0x7fffffffffffffff, %rdx # imm = 0x7FFFFFFFFFFFFFFF
leaq 0x1(%rdx), %rsi
testl %eax, %eax
cmoveq %rsi, %rdx
movq 0x40(%rsp), %rax
testq %rax, %rax
cmovleq %rsi, %rdx
movl 0x48(%rsp), %r15d
imulq %rax, %rbx
cmovoq %rdx, %rbx
testb %cl, %cl
je 0xd4ed26
cmpl $0x2, %ebp
jne 0xd4ea32
leaq 0x40(%rsp), %r14
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rdx
callq 0xd4a04c
movzbl 0x10(%r14), %edx
shlq $0x20, %rdx
orq $0xa4, %rdx
leaq 0x2a71fa7(%rip), %rdi # 0x37c0980
leaq 0x2a7200c(%rip), %r14 # 0x37c09ec
movq %r14, %rsi
callq 0x9346d9
cmpq %r14, %rax
sete %dl
testq %rax, %rax
sete %cl
orb %dl, %cl
jne 0xd4ea2a
movl 0x8(%rax), %eax
movl %eax, %ebx
movabsq $0x7fffffffffffffff, %rdx # imm = 0x7FFFFFFFFFFFFFFF
leaq 0x1(%rdx), %rsi
testl %eax, %eax
cmoveq %rsi, %rdx
movq 0x40(%rsp), %rax
testq %rax, %rax
cmovleq %rsi, %rdx
movl 0x48(%rsp), %r15d
imulq %rax, %rbx
cmovoq %rdx, %rbx
testb %cl, %cl
je 0xd4ed26
movq 0x10(%r13), %rax
cmpb $0x1, 0x152(%rax)
jne 0xd4ec80
testl %ebp, %ebp
jne 0xd4eaf2
leaq 0x40(%rsp), %r14
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rdx
callq 0xd4a04c
movzbl 0x10(%r14), %edx
shlq $0x20, %rdx
orq $0xa4, %rdx
leaq 0x2a71f7b(%rip), %rdi # 0x37c09f0
leaq 0x2a71fb0(%rip), %r14 # 0x37c0a2c
movq %r14, %rsi
callq 0x9346d9
cmpq %r14, %rax
sete %dl
testq %rax, %rax
sete %cl
orb %dl, %cl
jne 0xd4eaea
movl 0x8(%rax), %eax
movl %eax, %edx
movabsq $0x7fffffffffffffff, %rsi # imm = 0x7FFFFFFFFFFFFFFF
leaq 0x1(%rsi), %rdi
testl %eax, %eax
movq %rdi, %rax
cmovneq %rsi, %rax
movq 0x40(%rsp), %r8
testq %r8, %r8
cmovleq %rdi, %rax
imulq %r8, %rdx
cmovoq %rax, %rdx
movq 0x10(%r13), %rax
movl 0x1f4(%rax), %eax
testl %eax, %eax
cmoveq %rdi, %rsi
movl 0x48(%rsp), %r15d
testq %rdx, %rdx
cmovleq %rdi, %rsi
movl %eax, %ebx
imulq %rdx, %rbx
cmovoq %rsi, %rbx
testb %cl, %cl
je 0xd4ed26
cmpq $0x0, 0x30(%rsp)
je 0xd4ec80
leaq 0x40(%rsp), %r14
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rdx
callq 0xd4a04c
movb 0x10(%r14), %al
leal -0x11(%rax), %ecx
movb $0x1, %bpl
cmpb $-0x54, %cl
ja 0xd4ec77
addb $0x77, %al
cmpb $0x34, %al
ja 0xd4eb36
leaq 0x264cc59(%rip), %rdi # 0x339b78a
callq 0x2b60def
movzbl 0x50(%rsp), %eax
leaq 0x264e25e(%rip), %rcx # 0x339cda0
movzwl -0x2(%rcx,%rax,2), %ecx
cmpq 0x30(%rsp), %rcx
jb 0xd4ec77
leaq 0x78(%rsp), %rdi
movb %al, (%rdi)
movq $0x0, 0x8(%rdi)
callq 0x9254d2
movq %rax, %rsi
cmpl $0x20, %esi
ja 0xd4ebf8
movl %esi, %eax
movabsq $0x100010100, %rcx # imm = 0x100010100
btq %rax, %rcx
jae 0xd4ebf8
movq 0x38(%rsp), %rax
movl (%rax), %ecx
testl %ecx, %ecx
js 0xd4eb93
incl %ecx
jmp 0xd4eb9e
movl $0x10, %eax
xorl %edx, %edx
divl %esi
movl %eax, %ecx
cmpl $0xf, %esi
ja 0xd4ebf8
imull %ecx, %esi
cmpl $0x10, %esi
jne 0xd4ebf8
movl $0xffffffff, %edi # imm = 0xFFFFFFFF
andq 0x30(%rsp), %rdi
sete %r9b
je 0xd4ebf2
movl %ecx, %r8d
xorl %esi, %esi
xorl %r9d, %r9d
movq 0x38(%rsp), %rax
movl (%rax,%rsi,4), %r10d
testl %r10d, %r10d
js 0xd4ebe6
movl %esi, %eax
xorl %edx, %edx
divl %ecx
leal (%r8,%rsi), %eax
subl %edx, %eax
notl %edx
addl %eax, %edx
cmpl %edx, %r10d
jne 0xd4ebf2
incq %rsi
cmpq %rdi, %rsi
setae %r9b
jne 0xd4ebc3
testb $0x1, %r9b
jne 0xd4ec36
movl 0x50(%rsp), %edx
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %rsi
xorl %ecx, %ecx
movl $0x20, %r8d
callq 0xd4eea4
testb %al, %al
jne 0xd4ec36
movl 0x50(%rsp), %edx
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %rsi
xorl %ecx, %ecx
movl $0x40, %r8d
callq 0xd4eea4
testb %al, %al
je 0xd4ec77
movq 0x10(%r13), %rax
movq 0x40(%rsp), %rcx
movabsq $0x7fffffffffffffff, %rdx # imm = 0x7FFFFFFFFFFFFFFF
leaq 0x1(%rdx), %rsi
testq %rcx, %rcx
cmovleq %rsi, %rdx
movl 0x1f4(%rax), %ebx
testq %rbx, %rbx
cmoveq %rsi, %rdx
imulq %rcx, %rbx
cmovoq %rdx, %rbx
xorl %r15d, %r15d
cmpl $0x1, 0x48(%rsp)
sete %r15b
xorl %ebp, %ebp
testb %bpl, %bpl
je 0xd4ed26
movq 0x10(%r13), %rax
movl $0x1, %r14d
cmpb $0x1, 0x152(%rax)
jne 0xd4eca9
movl $0xfe, %ecx
andl 0x8(%r12), %ecx
cmpl $0x12, %ecx
jne 0xd4eca9
movslq 0x1f4(%rax), %r14
movl 0xc0(%rsp), %eax
movq 0xc8(%rsp), %rcx
xorps %xmm0, %xmm0
movaps %xmm0, 0x60(%rsp)
movups %xmm0, 0x10(%rsp)
movq %rcx, 0x8(%rsp)
movl %eax, (%rsp)
movq $0x0, 0x20(%rsp)
movq %r13, %rdi
movl 0x58(%rsp), %esi
movq %r12, %rdx
movq 0x38(%rsp), %rcx
movq 0x30(%rsp), %r8
movl 0x5c(%rsp), %r9d
callq 0xd4ef46
movq %r14, %rbx
imulq %rax, %rbx
jno 0xd4ed1c
movabsq $0x7fffffffffffffff, %rbx # imm = 0x7FFFFFFFFFFFFFFF
testq %r14, %r14
jle 0xd4ed11
testq %rax, %rax
jg 0xd4ed1c
leaq 0x1(%rbx), %rcx
testq %rax, %r14
cmovnsq %rcx, %rbx
xorl %r15d, %r15d
cmpl $0x1, %edx
sete %r15b
movq %rbx, %rax
movl %r15d, %edx
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/ARM/ARMTargetTransformInfo.cpp
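
All of the shuffle cost tables above are consulted the same way: legalize the type, scan a static (opcode, type) table, and scale the matched per-entry cost by the legalization factor LT.first. A minimal standalone sketch of that lookup pattern follows; the CostEntry struct, the table contents, and the numeric opcode/type values are illustrative stand-ins, not LLVM's actual CostTblEntry or MVT encodings.

#include <cstdio>

// Illustrative stand-ins for llvm::CostTblEntry / llvm::CostTableLookup.
struct CostEntry { int ISD; int Type; unsigned Cost; };

static const CostEntry ShuffleTbl[] = {
    {/*VECTOR_SHUFFLE*/ 1, /*v4i32*/ 42, 2},
    {/*VECTOR_SHUFFLE*/ 1, /*v8i16*/ 43, 2},
};

// Linear scan over the table, as CostTableLookup does.
static const CostEntry *lookupCost(int ISD, int Type) {
  for (const CostEntry &E : ShuffleTbl)
    if (E.ISD == ISD && E.Type == Type)
      return &E;
  return nullptr; // no entry: fall back to the generic cost model
}

int main() {
  unsigned LegalizationFactor = 1; // LT.first in the real code
  if (const CostEntry *E = lookupCost(1, 42))
    std::printf("cost = %u\n", LegalizationFactor * E->Cost); // prints: cost = 2
}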
|
llvm::DenseMapBase<llvm::DenseMap<llvm::ValueMapCallbackVH<llvm::Value*, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::ValueMapConfig<llvm::Value*, llvm::sys::SmartMutex<false>>>, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Value*, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::ValueMapConfig<llvm::Value*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Value*, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::ValueMapConfig<llvm::Value*, llvm::sys::SmartMutex<false>>>, llvm::SCEVWrapPredicate::IncrementWrapFlags>>, llvm::ValueMapCallbackVH<llvm::Value*, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::ValueMapConfig<llvm::Value*, llvm::sys::SmartMutex<false>>>, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Value*, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::ValueMapConfig<llvm::Value*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Value*, llvm::SCEVWrapPredicate::IncrementWrapFlags, llvm::ValueMapConfig<llvm::Value*, llvm::sys::SmartMutex<false>>>, llvm::SCEVWrapPredicate::IncrementWrapFlags>>::destroyAll()
|
unsigned getNumBuckets() const {
return NumBuckets;
}
|
movl 0x10(%rdi), %eax
testq %rax, %rax
je 0xd54ecf
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
leaq 0x38(%rsp), %rbx
movl $0x2, %ecx
movq %rcx, (%rbx)
xorl %r15d, %r15d
movq %r15, 0x8(%rbx)
movq $-0x1000, 0x10(%rbx) # imm = 0xF000
leaq 0x4a35e73(%rip), %rdx # 0x578ac90
movq %rdx, -0x8(%rbx)
movq %r15, 0x18(%rbx)
leaq 0x10(%rsp), %r14
movq %rcx, (%r14)
movq %r15, 0x8(%r14)
movq $-0x2000, 0x10(%r14) # imm = 0xE000
movq %rdx, -0x8(%r14)
movq %r15, 0x18(%r14)
movq (%rdi), %r12
shlq $0x4, %rax
leaq (%rax,%rax,2), %r13
leaq 0x8(%r12), %rbp
movq 0x18(%r12,%r15), %rax
cmpq $-0x2000, %rax # imm = 0xE000
je 0xd54e74
cmpq $-0x1000, %rax # imm = 0xF000
je 0xd54e74
testq %rax, %rax
je 0xd54e74
leaq (%r15,%rbp), %rdi
callq 0x2aa1dc0
addq $0x30, %r15
cmpq %r15, %r13
jne 0xd54e51
movq 0x20(%rsp), %rax
cmpq $-0x2000, %rax # imm = 0xE000
je 0xd54e9f
cmpq $-0x1000, %rax # imm = 0xF000
je 0xd54e9f
testq %rax, %rax
je 0xd54e9f
movq %r14, %rdi
callq 0x2aa1dc0
movq 0x48(%rsp), %rax
cmpq $-0x2000, %rax # imm = 0xE000
je 0xd54ec1
cmpq $-0x1000, %rax # imm = 0xF000
je 0xd54ec1
testq %rax, %rax
je 0xd54ec1
movq %rbx, %rdi
callq 0x2aa1dc0
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/llvm/ADT/DenseMap.h
|
void llvm::cl::apply<llvm::cl::opt<llvm::TailPredication::Mode, false, llvm::cl::parser<llvm::TailPredication::Mode>>, char [17], llvm::cl::desc, llvm::cl::initializer<llvm::TailPredication::Mode>, llvm::cl::ValuesClass>(llvm::cl::opt<llvm::TailPredication::Mode, false, llvm::cl::parser<llvm::TailPredication::Mode>>*, char const (&) [17], llvm::cl::desc const&, llvm::cl::initializer<llvm::TailPredication::Mode> const&, llvm::cl::ValuesClass const&)
|
void apply(Opt *O, const Mod &M, const Mods &... Ms) {
applicator<Mod>::opt(M, *O);
apply(O, Ms...);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %r8, %rbx
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %r13
movq %rsi, %rdi
callq 0x7802c0
movq %r13, %rdi
movq %r12, %rsi
movq %rax, %rdx
callq 0x2b1f336
movups (%r15), %xmm0
movups %xmm0, 0x20(%r13)
movq (%r14), %rax
movl (%rax), %ecx
movl %ecx, 0x80(%r13)
movb $0x1, 0x94(%r13)
movl (%rax), %eax
movl %eax, 0x90(%r13)
movq %rbx, %rdi
movq %r13, %rsi
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
jmp 0xd6018a
nop
|
/llvm/Support/CommandLine.h
|
LowerEXTRACT_SUBVECTOR(llvm::SDValue, llvm::SelectionDAG&, llvm::ARMSubtarget const*)
|
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
SDLoc dl(Op);
EVT VT = Op.getValueType();
EVT Op1VT = V1.getValueType();
unsigned NumElts = VT.getVectorNumElements();
unsigned Index = V2->getAsZExtVal();
assert(VT.getScalarSizeInBits() == 1 &&
"Unexpected custom EXTRACT_SUBVECTOR lowering");
assert(ST->hasMVEIntegerOps() &&
"EXTRACT_SUBVECTOR lowering only supported for MVE");
SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
// We now have Op1 promoted to a vector of integers, where v8i1 gets
// promoted to v8i16, etc.
MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
if (NumElts == 2) {
EVT SubVT = MVT::v4i32;
SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j += 2) {
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
DAG.getIntPtrConstant(i, dl));
SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
DAG.getConstant(j, dl, MVT::i32));
SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
DAG.getConstant(j + 1, dl, MVT::i32));
}
SDValue Cmp = DAG.getNode(ARMISD::VCMPZ, dl, MVT::v4i1, SubVec,
DAG.getConstant(ARMCC::NE, dl, MVT::i32));
return DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v2i1, Cmp);
}
EVT SubVT = MVT::getVectorVT(ElType, NumElts);
SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
DAG.getIntPtrConstant(i, dl));
SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
DAG.getConstant(j, dl, MVT::i32));
}
// Now return the result of comparing the subvector with zero,
// which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec,
DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movq %rdx, 0x40(%rsp)
movl %esi, %ebx
movq %rdi, %r12
movq 0x28(%rdi), %rax
movq (%rax), %r14
movl 0x8(%rax), %r15d
movq 0x28(%rax), %r13
movq 0x48(%rdi), %rsi
movq %rsi, 0x30(%rsp)
testq %rsi, %rsi
je 0xde7ac1
leaq 0x30(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r12), %eax
movl %eax, 0x38(%rsp)
movq 0x30(%r12), %rax
movl %ebx, %ecx
shlq $0x4, %rcx
movb (%rax,%rcx), %dl
movq 0x8(%rax,%rcx), %rax
leaq 0x78(%rsp), %rdi
movb %dl, (%rdi)
movq %rax, 0x8(%rdi)
movq 0x30(%r14), %rax
movq %r15, %rcx
shlq $0x4, %rcx
movb (%rax,%rcx), %bpl
callq 0x92c1fe
movl %eax, %r12d
movq 0x58(%r13), %rax
cmpl $0x41, 0x20(%rax)
jb 0xde7b0f
movq 0x18(%rax), %rax
jmp 0xde7b13
addq $0x18, %rax
movq (%rax), %r13
movq 0x30(%rsp), %rsi
movq %rsi, 0x68(%rsp)
testq %rsi, %rsi
je 0xde7b34
leaq 0x68(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x38(%rsp), %eax
leaq 0x68(%rsp), %rbx
movl %eax, 0x8(%rbx)
movzbl %bpl, %ecx
movq %rbx, %rdi
movq %r14, %rsi
movl %r15d, %edx
movq 0x40(%rsp), %r14
movq %r14, %r8
callq 0xe16075
movq %rax, %r15
movl %edx, 0x4c(%rsp)
movq (%rbx), %rsi
testq %rsi, %rsi
je 0xde7b73
leaq 0x68(%rsp), %rdi
callq 0x2a758fc
movb 0x78(%rsp), %al
addb $-0x12, %al
movzbl %al, %ecx
shll $0x3, %ecx
movabsq $0x27323c8080, %rax # imm = 0x27323C8080
shrq %cl, %rax
leaq 0x1a8(%rsp), %rdi
movb %al, (%rdi)
movq $0x0, 0x8(%rdi)
callq 0x931436
cmpl $0x2, %r12d
jne 0xde7e4b
leaq 0x30(%rsp), %rdx
movq %r14, %rdi
movl $0x33, %esi
movl $0x3c, %ecx
xorl %r8d, %r8d
callq 0x1778aaa
movl %edx, %ebp
movq %r13, %rcx
cmpl $-0x3, %ecx
ja 0xde7d98
movq %r15, 0x60(%rsp)
movl %ecx, %r15d
addl $0x2, %ecx
xorl %r13d, %r13d
movq %rcx, 0x50(%rsp)
movq 0x40(%rsp), %r14
movq %rax, 0x58(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0x198(%rsp)
movl 0x4c(%rsp), %eax
movl %eax, 0x1a0(%rsp)
movq %r14, %rdi
movq %r15, %rsi
leaq 0x30(%rsp), %rbx
movq %rbx, %rdx
xorl %ecx, %ecx
callq 0x17638c6
movq %rax, 0x188(%rsp)
movl %edx, 0x190(%rsp)
movups 0x188(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x198(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x9d, %esi
movq %rbx, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movq %rax, %r12
xorl %eax, %eax
movl %edx, %ebx
movq 0x58(%rsp), %rcx
movq %rcx, 0x178(%rsp)
movl %ebp, 0x180(%rsp)
movq %r12, 0x168(%rsp)
movl %edx, 0x170(%rsp)
movl %eax, (%rsp)
movq %r14, %rdi
movq %r13, %rsi
leaq 0x30(%rsp), %rdx
movq %rdx, %rbp
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, 0x158(%rsp)
movl %edx, 0x160(%rsp)
movups 0x158(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0x168(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x178(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x9c, %esi
movq %rbp, %rdx
movl $0x3c, %ecx
xorl %r8d, %r8d
callq 0x1764b02
movq %rax, 0x148(%rsp)
movl %edx, 0x150(%rsp)
movq %r12, 0x138(%rsp)
movl %ebx, 0x140(%rsp)
xorl %eax, %eax
leaq 0x1(%r13), %rsi
movl %eax, (%rsp)
movq %r14, %rdi
movq %rbp, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, 0x128(%rsp)
movl %edx, 0x130(%rsp)
movups 0x128(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0x138(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x148(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x9c, %esi
movq %rbp, %rdx
movl $0x3c, %ecx
xorl %r8d, %r8d
callq 0x1764b02
movq 0x50(%rsp), %rcx
movl %edx, %ebp
incq %r15
addq $0x2, %r13
cmpq %rcx, %r15
jb 0xde7bed
movq %rax, 0x118(%rsp)
movl %ebp, 0x120(%rsp)
movl $0x0, (%rsp)
leaq 0x30(%rsp), %rbx
movl $0x1, %esi
movq 0x40(%rsp), %r14
movq %r14, %rdi
movq %rbx, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, 0x108(%rsp)
movl %edx, 0x110(%rsp)
movups 0x108(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x118(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x224, %esi # imm = 0x224
movq %rbx, %rdx
movl $0x14, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movq %rax, 0xf8(%rsp)
movl %edx, 0x100(%rsp)
movups 0xf8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x21e, %esi # imm = 0x21E
movq %rbx, %rdx
movl $0x12, %ecx
xorl %r8d, %r8d
callq 0x176388a
jmp 0xde8037
movzbl %al, %edi
movl %r12d, %esi
callq 0x920720
movzbl %al, %ebx
leaq 0x30(%rsp), %rdx
movq %r14, %rdi
movl $0x33, %esi
movl %ebx, %ecx
xorl %r8d, %r8d
callq 0x1778aaa
movq %rax, %r14
movl %edx, %ebp
leal (%r12,%r13), %eax
cmpl %r13d, %eax
jbe 0xde7fb3
movl %r13d, %eax
movq %rax, 0x58(%rsp)
movq %r15, 0x60(%rsp)
movl %r12d, %eax
movq %rax, 0x50(%rsp)
xorl %r12d, %r12d
leaq 0x30(%rsp), %r13
movl %ebx, %r15d
movq 0x40(%rsp), %rbx
movq 0x58(%rsp), %rax
leaq (%rax,%r12), %rsi
movq 0x60(%rsp), %rax
movq %rax, 0xe8(%rsp)
movl 0x4c(%rsp), %eax
movl %eax, 0xf0(%rsp)
movq %rbx, %rdi
movq %r13, %rdx
xorl %ecx, %ecx
callq 0x17638c6
movq %rax, 0xd8(%rsp)
movl %edx, 0xe0(%rsp)
movups 0xd8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xe8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x9d, %esi
movq %r13, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
callq 0x17638a8
movq %r14, 0xc8(%rsp)
movl %ebp, 0xd0(%rsp)
movq %rax, 0xb8(%rsp)
movl %edx, 0xc0(%rsp)
movl $0x0, (%rsp)
movq %rbx, %rdi
movq %r12, %rsi
movq %r13, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, 0xa8(%rsp)
movl %edx, 0xb0(%rsp)
movups 0xa8(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xc8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x9c, %esi
movq %r13, %rdx
movl %r15d, %ecx
xorl %r8d, %r8d
callq 0x1764b02
movq %rax, %r14
movl %edx, %ebp
incq %r12
cmpl %r12d, 0x50(%rsp)
jne 0xde7ea7
jmp 0xde7fb8
movq 0x40(%rsp), %rbx
movq 0x80(%rsp), %r15
movq %r14, 0x98(%rsp)
movl %ebp, 0xa0(%rsp)
movl 0x78(%rsp), %ebp
movl $0x0, (%rsp)
leaq 0x30(%rsp), %r14
movl $0x1, %esi
movq %rbx, %rdi
movq %r14, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, 0x88(%rsp)
movl %edx, 0x90(%rsp)
movups 0x88(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x98(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x224, %esi # imm = 0x224
movq %r14, %rdx
movl %ebp, %ecx
movq %r15, %r8
callq 0x17638a8
movq %rax, %rbx
movl %edx, %ebp
movq 0x30(%rsp), %rsi
testq %rsi, %rsi
je 0xde8050
leaq 0x30(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rax
movl %ebp, %edx
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/ARM/ARMISelLowering.cpp
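
In the NumElts == 2 path above, each boolean lane is duplicated into two adjacent i32 lanes of a v4i32 before VCMPZ rebuilds a v2i1 predicate. The indexing of that duplication loop can be checked with plain arrays; the sketch below is purely illustrative, with ints standing in for SDValues.

#include <cstdio>

int main() {
  // Promoted predicate as integer lanes (true lanes are all-ones).
  int NewV1[8] = {0, -1, 0, -1, 0, 0, 0, 0};
  unsigned Index = 2, NumElts = 2;

  // Mirror the loop: i walks the source lanes, j advances by 2 so each
  // extracted element fills two adjacent lanes of the v4i32 SubVec.
  int SubVec[4] = {};
  for (unsigned i = Index, j = 0; i < Index + NumElts; ++i, j += 2) {
    SubVec[j] = NewV1[i];
    SubVec[j + 1] = NewV1[i];
  }

  // VCMPZ ... NE then turns each nonzero lane back into a predicate bit.
  for (int Lane : SubVec)
    std::printf("%d ", Lane != 0); // prints: 0 0 1 1
  std::printf("\n");
}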
|
PerformVMOVrhCombine(llvm::SDNode*, llvm::SelectionDAG&)
|
static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (VMOVrh (fpconst x)) -> const x
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N0)) {
APFloat V = C->getValueAPF();
return DAG.getConstant(V.bitcastToAPInt().getZExtValue(), SDLoc(N), VT);
}
// fold (VMOVrh (load x)) -> (zextload (i16*)x)
if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue Load =
DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(),
LN0->getBasePtr(), MVT::i16, LN0->getMemOperand());
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
return Load;
}
// Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n)
if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
isa<ConstantSDNode>(N0->getOperand(1)))
return DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0),
N0->getOperand(1));
return SDValue();
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq 0x28(%rdi), %rax
movq 0x30(%rdi), %rdx
movq (%rax), %r12
movl 0x18(%r12), %ecx
cmpl $0x24, %ecx
je 0xe06ade
cmpl $0xc, %ecx
jne 0xe06ae3
movq %r12, %rsi
jmp 0xe06ae5
xorl %esi, %esi
movb (%rdx), %bpl
movq 0x8(%rdx), %r15
testq %rsi, %rsi
je 0xe06bb0
movq 0x58(%rsi), %rsi
leaq 0x38(%rsp), %r12
addq $0x20, %rsi
movq %r12, %rdi
callq 0x81e6da
leaq 0x60(%rsp), %r13
leaq 0x30(%rsp), %rsi
movq %r13, %rdi
callq 0x815f90
cmpl $0x41, 0x8(%r13)
jb 0xe06b28
movq 0x60(%rsp), %r13
movq (%r13), %r13
movq 0x48(%r14), %rsi
movq %rsi, 0x50(%rsp)
testq %rsi, %rsi
je 0xe06b49
leaq 0x50(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
leaq 0x50(%rsp), %r14
movl %eax, 0x8(%r14)
movl $0x0, (%rsp)
movzbl %bpl, %ecx
movq %rbx, %rdi
movq %r13, %rsi
movq %r14, %rdx
movq %r15, %r8
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r13
movl %edx, %r15d
movq (%r14), %rsi
testq %rsi, %rsi
je 0xe06b8d
leaq 0x50(%rsp), %rdi
callq 0x2a758fc
cmpl $0x41, 0x68(%rsp)
jb 0xe06ba3
movq 0x60(%rsp), %rdi
testq %rdi, %rdi
je 0xe06ba3
callq 0x7802b0
movq %r12, %rdi
callq 0x81603c
jmp 0xe06d5b
cmpl $0x121, %ecx # imm = 0x121
setne %cl
testq %r12, %r12
sete %dl
orb %cl, %dl
jne 0xe06cc0
movzwl 0x1e(%r12), %ecx
testl $0xf80, %ecx # imm = 0xF80
jne 0xe06cc0
movl 0x8(%rax), %edx
movq %r12, %rdi
movl $0x1, %esi
callq 0x179219e
testb %al, %al
je 0xe06cc0
movq 0x48(%r14), %rsi
movq %rsi, 0x30(%rsp)
testq %rsi, %rsi
je 0xe06c0e
leaq 0x30(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
leaq 0x30(%rsp), %rcx
movl %eax, 0x8(%rcx)
movq %rcx, %rdx
movq 0x28(%r12), %rax
movups (%rax), %xmm0
movaps %xmm0, 0x80(%rsp)
movb $0x6, 0x70(%rsp)
movq $0x0, 0x78(%rsp)
movq 0x68(%r12), %r9
movups 0x70(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0x28(%rax), %xmm0
movups %xmm0, 0x10(%rsp)
movaps 0x80(%rsp), %xmm0
movups %xmm0, (%rsp)
movzbl %bpl, %ecx
movq %rbx, %rdi
movl $0x3, %esi
movq %rdx, %rbp
movq %r15, %r8
callq 0x1786f64
movq %rax, %r13
movl %edx, %r15d
movq (%rbp), %rsi
testq %rsi, %rsi
je 0xe06c8f
leaq 0x30(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rdi
movq %r14, %rsi
xorl %edx, %edx
movq %r13, %rcx
xorl %r8d, %r8d
callq 0x178f032
movq %rbx, %rdi
movq %r12, %rsi
movl $0x1, %edx
movq %r13, %rcx
movl $0x1, %r8d
callq 0x178f032
jmp 0xe06d5b
cmpl $0x9d, 0x18(%r12)
jne 0xe06d55
movq 0x28(%r12), %rax
movq 0x28(%rax), %rax
movl 0x18(%rax), %eax
cmpl $0x23, %eax
je 0xe06ce5
cmpl $0xb, %eax
jne 0xe06d55
movq 0x48(%r14), %rsi
movq %rsi, 0x30(%rsp)
testq %rsi, %rsi
je 0xe06d02
leaq 0x30(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
leaq 0x30(%rsp), %r14
movl %eax, 0x8(%r14)
movq 0x28(%r12), %rax
movups (%rax), %xmm0
movups 0x28(%rax), %xmm1
movups %xmm1, 0x10(%rsp)
movups %xmm0, (%rsp)
movzbl %bpl, %ecx
movq %rbx, %rdi
movl $0x239, %esi # imm = 0x239
movq %r14, %rdx
movq %r15, %r8
callq 0x17638a8
movq %rax, %r13
movl %edx, %r15d
movq (%r14), %rsi
testq %rsi, %rsi
je 0xe06d5b
leaq 0x30(%rsp), %rdi
callq 0x2a758fc
jmp 0xe06d5b
xorl %r15d, %r15d
xorl %r13d, %r13d
movq %r13, %rax
movl %r15d, %edx
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/ARM/ARMISelLowering.cpp
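
The first fold above replaces VMOVrh of a floating-point constant with the constant's raw bit pattern (bitcastToAPInt().getZExtValue()). The same reinterpretation can be sketched portably with memcpy; float is used below only because standard C++ has no half-precision type, so treat it as an analogy for the f16 case.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Model of bitcastToAPInt().getZExtValue(): reinterpret the FP constant's
// storage as an integer without conversion. (std::bit_cast in C++20.)
static uint32_t bitsOf(float F) {
  uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof Bits);
  return Bits;
}

int main() {
  std::printf("0x%08x\n", bitsOf(1.0f)); // prints: 0x3f800000
}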
|
(anonymous namespace)::HexagonBitSimplify::replaceRegWithSub(llvm::Register, llvm::Register, unsigned int, llvm::MachineRegisterInfo&)
|
bool HexagonBitSimplify::replaceRegWithSub(Register OldR, Register NewR,
unsigned NewSR,
MachineRegisterInfo &MRI) {
if (!OldR.isVirtual() || !NewR.isVirtual())
return false;
if (hasTiedUse(OldR, MRI, NewSR))
return false;
auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
decltype(End) NextI;
for (auto I = Begin; I != End; I = NextI) {
NextI = std::next(I);
I->setReg(NewR);
I->setSubReg(NewSR);
}
return Begin != End;
}
|
testl %esi, %edi
jns 0xeda59f
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %r14
movl %edx, %ebx
movl %esi, %ebp
movl %edi, %r15d
movq %rcx, %rsi
callq 0xed9552
testb %al, %al
je 0xeda5a2
xorl %eax, %eax
jmp 0xeda5fa
xorl %eax, %eax
retq
movq %r14, %rdi
movl %r15d, %esi
callq 0x9827f4
movq %rax, %r14
testq %rax, %rax
je 0xeda5f4
shll $0x8, %ebx
andl $0xfff00, %ebx # imm = 0xFFF00
movl $0xfff000ff, %r12d # imm = 0xFFF000FF
movq %r14, %r13
movq %r13, %r15
movq 0x18(%r13), %r13
testq %r13, %r13
je 0xeda5da
testb $0x1, 0x3(%r13)
jne 0xeda5ca
movq %r15, %rdi
movl %ebp, %esi
callq 0x1d531ce
movl (%r15), %eax
andl %r12d, %eax
orl %ebx, %eax
movl %eax, (%r15)
testq %r13, %r13
jne 0xeda5c7
testq %r14, %r14
setne %al
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/Hexagon/HexagonBitSimplify.cpp
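
Note how the loop above saves NextI before calling setReg: rewriting the operand re-links it onto NewR's use list, so advancing the iterator afterwards would walk the wrong chain. The same save-the-successor discipline, shown on a std::list as a hypothetical stand-in for the machine-operand use list:

#include <cstdio>
#include <list>

int main() {
  std::list<int> Uses = {1, 2, 3};

  // Save the successor before mutating the current node, exactly as
  // replaceRegWithSub captures NextI before setReg re-links the operand.
  for (auto I = Uses.begin(), E = Uses.end(); I != E;) {
    auto NextI = std::next(I);
    if (*I == 2)
      Uses.erase(I); // analogous to the operand leaving OldR's use list
    I = NextI;
  }

  for (int U : Uses)
    std::printf("%d ", U); // prints: 1 3
  std::printf("\n");
}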
|
llvm::HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(llvm::SDValue, llvm::SelectionDAG&) const
|
SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
SDValue Align = Op.getOperand(2);
SDLoc dl(Op);
ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
unsigned A = AlignConst->getSExtValue();
auto &HFI = *Subtarget.getFrameLowering();
// "Zero" means natural stack alignment.
if (A == 0)
A = HFI.getStackAlign().value();
LLVM_DEBUG({
    dbgs() << __func__ << " Align: " << A << " Size: ";
Size.getNode()->dump(&DAG);
dbgs() << "\n";
});
SDValue AC = DAG.getConstant(A, dl, MVT::i32);
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
DAG.ReplaceAllUsesOfValueWith(Op, AA);
return AA;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movq %rcx, %r14
movl %edx, %ebx
movq %rsi, %r15
movq %rdi, %r12
movq 0x28(%rsi), %rax
movups (%rax), %xmm0
movaps %xmm0, 0x60(%rsp)
movups 0x28(%rax), %xmm0
movaps %xmm0, 0x50(%rsp)
movq 0x50(%rax), %r13
movq 0x48(%rsi), %rsi
movq %rsi, 0x30(%rsp)
testq %rsi, %rsi
je 0xf3e02d
leaq 0x30(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r15), %eax
movl %eax, 0x38(%rsp)
movq 0x58(%r13), %rcx
movl 0x20(%rcx), %eax
cmpl $0x40, %eax
ja 0xf3e05a
movq 0x18(%rcx), %rdx
movl %eax, %ecx
negb %cl
shlq %cl, %rdx
sarq %cl, %rdx
xorl %r13d, %r13d
testl %eax, %eax
cmovneq %rdx, %r13
jmp 0xf3e061
movq 0x18(%rcx), %rax
movl (%rax), %r13d
movq 0x4e100(%r12), %rdi
movq (%rdi), %rax
callq *0x88(%rax)
testl %r13d, %r13d
jne 0xf3e083
movb 0xc(%rax), %cl
movl $0x1, %r13d
shlq %cl, %r13
movl %r13d, %esi
movl $0x0, (%rsp)
leaq 0x30(%rsp), %r12
movq %r14, %rdi
movq %r12, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r13
movl %edx, %ebp
movq %r14, %rdi
movl $0x7, %esi
xorl %edx, %edx
movl $0x1, %ecx
xorl %r8d, %r8d
callq 0x1762ed2
movl %edx, %r8d
movq %r13, 0x40(%rsp)
movl %ebp, 0x48(%rsp)
movups 0x40(%rsp), %xmm0
movups %xmm0, 0x20(%rsp)
movaps 0x50(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movaps 0x60(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x1e9, %esi # imm = 0x1E9
movq %r12, %rdx
movq %rax, %rcx
callq 0x178dc8a
movq %rax, %r13
movl %edx, %ebp
movq %r14, %rdi
movq %r15, %rsi
movl %ebx, %edx
movq %rax, %rcx
movl %ebp, %r8d
callq 0x178f032
movq (%r12), %rsi
testq %rsi, %rsi
je 0xf3e12b
leaq 0x30(%rsp), %rdi
callq 0x2a758fc
movq %r13, %rax
movl %ebp, %edx
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/Hexagon/HexagonISelLowering.cpp
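
The compiled code above materializes the natural alignment as 1 << Log2(StackAlign) (the shlq %cl, %r13), matching how llvm::Align stores only a log2 shift amount. A minimal sketch of the zero-means-natural rule; the AlignModel struct and the 8-byte natural alignment are assumptions for illustration:

#include <cstdint>
#include <cstdio>

// llvm::Align stores log2 of the alignment; value() is 1 << ShiftValue.
struct AlignModel {
  uint8_t ShiftValue;
  uint64_t value() const { return 1ULL << ShiftValue; }
};

static uint64_t resolveAlign(uint64_t Requested, AlignModel Natural) {
  return Requested == 0 ? Natural.value() : Requested; // "Zero" = natural
}

int main() {
  AlignModel StackAlign{3}; // assume an 8-byte natural stack alignment
  std::printf("%llu\n", (unsigned long long)resolveAlign(0, StackAlign)); // 8
}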
|
llvm::HexagonTargetLowering::getTargetNodeName(unsigned int) const
|
const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch ((HexagonISD::NodeType)Opcode) {
case HexagonISD::ADDC: return "HexagonISD::ADDC";
case HexagonISD::SUBC: return "HexagonISD::SUBC";
case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL";
case HexagonISD::BARRIER: return "HexagonISD::BARRIER";
case HexagonISD::CALL: return "HexagonISD::CALL";
case HexagonISD::CALLnr: return "HexagonISD::CALLnr";
case HexagonISD::CALLR: return "HexagonISD::CALLR";
case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
case HexagonISD::CONST32: return "HexagonISD::CONST32";
case HexagonISD::CP: return "HexagonISD::CP";
case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH";
case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT";
case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU";
case HexagonISD::INSERT: return "HexagonISD::INSERT";
case HexagonISD::JT: return "HexagonISD::JT";
case HexagonISD::RET_GLUE: return "HexagonISD::RET_GLUE";
case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
case HexagonISD::VASL: return "HexagonISD::VASL";
case HexagonISD::VASR: return "HexagonISD::VASR";
case HexagonISD::VLSR: return "HexagonISD::VLSR";
case HexagonISD::MFSHL: return "HexagonISD::MFSHL";
case HexagonISD::MFSHR: return "HexagonISD::MFSHR";
case HexagonISD::SSAT: return "HexagonISD::SSAT";
case HexagonISD::USAT: return "HexagonISD::USAT";
case HexagonISD::SMUL_LOHI: return "HexagonISD::SMUL_LOHI";
case HexagonISD::UMUL_LOHI: return "HexagonISD::UMUL_LOHI";
case HexagonISD::USMUL_LOHI: return "HexagonISD::USMUL_LOHI";
case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW";
case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0";
case HexagonISD::VROR: return "HexagonISD::VROR";
case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
case HexagonISD::READTIMER: return "HexagonISD::READTIMER";
case HexagonISD::PTRUE: return "HexagonISD::PTRUE";
case HexagonISD::PFALSE: return "HexagonISD::PFALSE";
case HexagonISD::D2P: return "HexagonISD::D2P";
case HexagonISD::P2D: return "HexagonISD::P2D";
case HexagonISD::V2Q: return "HexagonISD::V2Q";
case HexagonISD::Q2V: return "HexagonISD::Q2V";
case HexagonISD::QCAT: return "HexagonISD::QCAT";
case HexagonISD::QTRUE: return "HexagonISD::QTRUE";
case HexagonISD::QFALSE: return "HexagonISD::QFALSE";
case HexagonISD::TL_EXTEND: return "HexagonISD::TL_EXTEND";
case HexagonISD::TL_TRUNCATE: return "HexagonISD::TL_TRUNCATE";
case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST";
case HexagonISD::VALIGN: return "HexagonISD::VALIGN";
case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR";
case HexagonISD::ISEL: return "HexagonISD::ISEL";
case HexagonISD::OP_END: break;
}
return nullptr;
}
|
addl $0xfffffe1b, %esi # imm = 0xFFFFFE1B
cmpl $0x32, %esi
ja 0xf41bb4
movl %esi, %eax
leaq 0x2b5cc68(%rip), %rcx # 0x3a9e814
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
retq
xorl %eax, %eax
retq
nop
|
/Target/Hexagon/HexagonISelLowering.cpp
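
The assembly shows what a dense switch like this compiles to: one rebase (the addl $0xfffffe1b), one bounds check, and a relative-offset table load. An equivalent hand-rolled mapping, with a deliberately tiny illustrative table:

#include <cstdio>

// A dense opcode-to-name switch typically lowers to one bounds check
// plus a table indexed by the rebased opcode, equivalent to:
static const char *const Names[] = {"ADDC", "SUBC", "ALLOCA"}; // illustrative

static const char *name(unsigned Opcode, unsigned FirstOpcode) {
  unsigned Idx = Opcode - FirstOpcode;          // the addl rebase in the asm
  return Idx < sizeof(Names) / sizeof(Names[0]) // the cmpl/ja bounds check
             ? Names[Idx]
             : nullptr;
}

int main() { std::printf("%s\n", name(1, 0)); } // prints: SUBC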
|
llvm::HexagonTargetLowering::isExtractSubvectorCheap(llvm::EVT, llvm::EVT, unsigned int) const
|
bool HexagonTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const {
assert(ResVT.getVectorElementType() == SrcVT.getVectorElementType());
if (!ResVT.isSimple() || !SrcVT.isSimple())
return false;
MVT ResTy = ResVT.getSimpleVT(), SrcTy = SrcVT.getSimpleVT();
if (ResTy.getVectorElementType() != MVT::i1)
return true;
// Non-HVX bool vectors are relatively cheap.
return SrcTy.getVectorNumElements() <= 8;
}
|
pushq %rbx
movl %ecx, %ebx
testb %sil, %sil
setne %al
testb %bl, %bl
setne %cl
andb %al, %cl
cmpb $0x1, %cl
jne 0xf42257
movzbl %sil, %ecx
leaq 0x228d81a(%rip), %rdx # 0x31cfa40
movb $0x1, %al
cmpb $0x2, -0x1(%rcx,%rdx)
jne 0xf42259
leal 0x77(%rbx), %eax
cmpb $0x34, %al
ja 0xf42242
leaq 0x245954d(%rip), %rdi # 0x339b78a
callq 0x2b60def
movzbl %bl, %eax
leaq 0x245ab54(%rip), %rcx # 0x339cda0
cmpw $0x9, -0x2(%rcx,%rax,2)
setb %al
jmp 0xf42259
xorl %eax, %eax
popq %rbx
retq
nop
|
/Target/Hexagon/HexagonISelLowering.cpp
|
llvm::HexagonTargetLowering::LowerHvxFpToInt(llvm::SDValue, llvm::SelectionDAG&) const
|
SDValue
HexagonTargetLowering::LowerHvxFpToInt(SDValue Op, SelectionDAG &DAG) const {
// Catch invalid conversion ops (just in case).
assert(Op.getOpcode() == ISD::FP_TO_SINT ||
Op.getOpcode() == ISD::FP_TO_UINT);
MVT ResTy = ty(Op);
MVT FpTy = ty(Op.getOperand(0)).getVectorElementType();
MVT IntTy = ResTy.getVectorElementType();
if (Subtarget.useHVXIEEEFPOps()) {
// There are only conversions from f16.
if (FpTy == MVT::f16) {
// Other int types aren't legal in HVX, so we shouldn't see them here.
assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
// Conversions to i8 and i16 are legal.
if (IntTy == MVT::i8 || IntTy == MVT::i16)
return Op;
}
}
if (IntTy.getSizeInBits() != FpTy.getSizeInBits())
return EqualizeFpIntConversion(Op, DAG);
return ExpandHvxFpToInt(Op, DAG);
}
|
pushq %rax
movq %rsi, %rax
movq 0x28(%rsi), %rsi
movq 0x30(%rax), %r8
movl %edx, %r9d
shlq $0x4, %r9
movzbl (%r8,%r9), %r9d
movq (%rsi), %r8
movl 0x8(%rsi), %esi
movq 0x30(%r8), %r8
shlq $0x4, %rsi
movzbl (%r8,%rsi), %esi
leaq 0x227ab8e(%rip), %r10 # 0x31cfa40
movb -0x1(%rsi,%r10), %r8b
movzbl -0x1(%r9,%r10), %esi
movq 0x4e100(%rdi), %r9
cmpb $0x1, 0x12c(%r9)
jne 0xf54ee8
cmpl $0x0, 0x13c(%r9)
jle 0xf54ee8
cmpb $0xb, %r8b
jne 0xf54ee8
leal -0x5(%rsi), %r9d
cmpb $0x2, %r9b
jb 0xf54f24
movzbl %r8b, %r8d
shll $0x4, %esi
leaq 0x2279e4a(%rip), %r9 # 0x31ced40
movq -0x10(%rsi,%r9), %r10
shll $0x4, %r8d
cmpq -0x10(%r8,%r9), %r10
jne 0xf54f1c
movb -0x8(%r8,%r9), %r8b
cmpb %r8b, -0x8(%rsi,%r9)
jne 0xf54f1c
movq %rax, %rsi
callq 0xf5506e
jmp 0xf54f24
movq %rax, %rsi
callq 0xf54f26
popq %rcx
retq
|
/Target/Hexagon/HexagonISelLoweringHVX.cpp
|
llvm::SDValue lowerVectorBitRevImm<3u>(llvm::SDNode*, llvm::SelectionDAG&)
|
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG) {
SDLoc DL(Node);
EVT ResTy = Node->getValueType(0);
auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
// Check the unsigned ImmArg.
if (!isUInt<N>(CImm->getZExtValue())) {
DAG.getContext()->emitError(Node->getOperationName(0) +
": argument out of range.");
return DAG.getNode(ISD::UNDEF, DL, ResTy);
}
APInt Imm = APInt(ResTy.getScalarSizeInBits(), 1) << CImm->getAPIntValue();
SDValue BitImm = DAG.getConstant(Imm, DL, ResTy);
return DAG.getNode(ISD::XOR, DL, ResTy, Node->getOperand(1), BitImm);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0xc0, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq 0x48(%rdi), %rsi
movq %rsi, 0x38(%rsp)
testq %rsi, %rsi
je 0xfdf84d
leaq 0x38(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
movl %eax, 0x40(%rsp)
movq 0x30(%r14), %rax
movb (%rax), %cl
movq 0x8(%rax), %rax
movb %cl, 0x28(%rsp)
movq %rax, 0x30(%rsp)
movq 0x28(%r14), %rax
movq 0x50(%rax), %r12
movq 0x58(%r12), %rax
cmpl $0x41, 0x20(%rax)
jb 0xfdf881
movq 0x18(%rax), %rax
jmp 0xfdf885
addq $0x18, %rax
cmpq $0x8, (%rax)
jae 0xfdf966
leaq 0x28(%rsp), %rdi
callq 0x9254d2
leaq 0x48(%rsp), %r15
movl $0x1, %edx
movq %r15, %rdi
movl %eax, %esi
xorl %ecx, %ecx
callq 0x91d2c6
movq 0x58(%r12), %rdx
addq $0x18, %rdx
leaq 0x78(%rsp), %rdi
movq %r15, %rsi
callq 0xa501e8
cmpl $0x41, 0x8(%r15)
jb 0xfdf8db
movq 0x48(%rsp), %rdi
testq %rdi, %rdi
je 0xfdf8db
callq 0x7802b0
movq 0x30(%rsp), %r8
movl 0x28(%rsp), %ecx
movl $0x0, (%rsp)
leaq 0x78(%rsp), %r15
leaq 0x38(%rsp), %r12
movq %rbx, %rdi
movq %r15, %rsi
movq %r12, %rdx
xorl %r9d, %r9d
callq 0x17642f0
movq 0x30(%rsp), %r8
movq 0x28(%r14), %rsi
movq %rax, 0x68(%rsp)
movl %edx, 0x70(%rsp)
movl 0x28(%rsp), %ecx
movups 0x68(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x28(%rsi), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0xbb, %esi
movq %r12, %rdx
callq 0x17638a8
movq %rax, %rbx
movl %edx, %ebp
cmpl $0x41, 0x8(%r15)
jb 0xfdfa46
movq 0x78(%rsp), %rdi
testq %rdi, %rdi
je 0xfdfa46
callq 0x7802b0
jmp 0xfdfa46
movq 0x40(%rbx), %r15
leaq 0xa0(%rsp), %r12
movq %r12, %rdi
movq %r14, %rsi
xorl %edx, %edx
callq 0x179af2a
leaq 0x2b0430d(%rip), %rsi # 0x3ae3c93
movq %r12, %rdi
callq 0x780c80
leaq 0x58(%rsp), %r14
movq %r14, -0x10(%r14)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0xfdf9b5
movq %rdx, 0x48(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x58(%rsp)
jmp 0xfdf9bc
movups (%rcx), %xmm0
movups %xmm0, (%r14)
movq 0x8(%rax), %rdx
leaq 0x48(%rsp), %r12
movq %rdx, 0x8(%r12)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0x78(%rsp), %rsi
movw $0x104, 0x20(%rsi) # imm = 0x104
movq %r12, (%rsi)
movq %r15, %rdi
callq 0x2a603f6
movq (%r12), %rdi
cmpq %r14, %rdi
je 0xfdfa05
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x7800d0
leaq 0xb0(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0xfdfa26
movq 0xb0(%rsp), %rsi
incq %rsi
callq 0x7800d0
movq 0x30(%rsp), %r8
movl 0x28(%rsp), %ecx
leaq 0x38(%rsp), %rdx
movq %rbx, %rdi
movl $0x33, %esi
callq 0x1778aaa
movq %rax, %rbx
movl %edx, %ebp
movq 0x38(%rsp), %rsi
testq %rsi, %rsi
je 0xfdfa5a
leaq 0x38(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rax
movl %ebp, %edx
addq $0xc0, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/Target/LoongArch/LoongArchISelLowering.cpp
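
Two details above are worth making concrete: the guard is isUInt<3> on the immediate, and the emitted node XORs every lane with 1 << Imm, i.e. it flips exactly one bit per element. A scalar model of both steps, with illustrative values:

#include <cstdint>
#include <cstdio>

// llvm::isUInt<N>: the immediate must fit in N unsigned bits.
template <unsigned N> static bool isUIntN(uint64_t X) { return X < (1ULL << N); }

int main() {
  uint64_t Imm = 5;       // illustrative immediate
  if (!isUIntN<3>(Imm)) { // the unsigned ImmArg check in the lowering
    std::puts("argument out of range.");
    return 1;
  }
  uint32_t Elt = 0xF0;              // illustrative lane value
  uint32_t Res = Elt ^ (1u << Imm); // flip bit Imm in the lane
  std::printf("0x%02x\n", Res);     // prints: 0xd0
}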
|
llvm::MipsAsmPrinter::runOnMachineFunction(llvm::MachineFunction&)
|
bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
Subtarget = &MF.getSubtarget<MipsSubtarget>();
MipsFI = MF.getInfo<MipsFunctionInfo>();
if (Subtarget->inMips16Mode())
for (const auto &I : MipsFI->StubsNeeded) {
const char *Symbol = I.first;
const Mips16HardFloatInfo::FuncSignature *Signature = I.second;
if (StubsNeeded.find(Symbol) == StubsNeeded.end())
StubsNeeded[Symbol] = Signature;
}
MCP = MF.getConstantPool();
// In NaCl, all indirect jump targets must be aligned to bundle size.
if (Subtarget->isTargetNaCl())
NaClAlignIndirectJumpTargets(MF);
AsmPrinter::runOnMachineFunction(MF);
emitXRayTable();
return true;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq 0x10(%rsi), %rax
movq %rax, 0x358(%rdi)
movq 0x30(%rsi), %rbp
movq %rbp, 0x360(%rdi)
cmpb $0x1, 0x13d(%rax)
jne 0xfe091a
movq 0x20(%rbp), %r15
addq $0x10, %rbp
cmpq %rbp, %r15
je 0xfe091a
leaq 0x328(%rbx), %rax
movq %rax, 0x8(%rsp)
leaq 0x330(%rbx), %r13
movq 0x20(%r15), %rax
movq %rax, 0x10(%rsp)
movq 0x338(%rbx), %rdx
movq %r13, %rcx
testq %rdx, %rdx
je 0xfe08dc
movq %r13, %rcx
xorl %esi, %esi
cmpq %rax, 0x20(%rdx)
setb %sil
cmovaeq %rdx, %rcx
movq 0x10(%rdx,%rsi,8), %rdx
testq %rdx, %rdx
jne 0xfe08c4
movq %r13, %rdx
cmpq %r13, %rcx
je 0xfe08ef
cmpq 0x20(%rcx), %rax
cmovbq %r13, %rcx
movq %rcx, %rdx
cmpq %r13, %rdx
jne 0xfe090a
movq 0x28(%r15), %r12
movq 0x8(%rsp), %rdi
leaq 0x10(%rsp), %rsi
callq 0xfe096e
movq %r12, (%rax)
movq %r15, %rdi
callq 0x780710
movq %rax, %r15
cmpq %rbp, %rax
jne 0xfe08a9
movq 0x40(%r14), %rax
movq %rax, 0x318(%rbx)
movq 0x358(%rbx), %rax
cmpl $0x12, 0x1fc(%rax)
jne 0xfe093d
movq %r14, %rsi
callq 0xfe09d4
movq (%rbx), %rax
movq %rbx, %rdi
movq %r14, %rsi
callq *0xb8(%rax)
movq %rbx, %rdi
callq 0x160a8c2
movq %rbx, %rdi
callq 0x1611e10
movb $0x1, %al
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/Mips/MipsAsmPrinter.cpp
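
The stub bookkeeping in the Mips16 path above is a deliberate find-before-insert so an already-recorded signature is never overwritten. The same guard, using std::map as a stand-in for the printer's stub map (names below are hypothetical):

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> StubsNeeded; // int stands in for the signature
  auto record = [&](const std::string &Sym, int Sig) {
    // Insert only if absent, mirroring the loop in runOnMachineFunction.
    if (StubsNeeded.find(Sym) == StubsNeeded.end())
      StubsNeeded[Sym] = Sig;
  };
  record("__mips16_fn", 1);
  record("__mips16_fn", 2); // ignored: a signature is already recorded
  std::printf("%d\n", StubsNeeded["__mips16_fn"]); // prints: 1
}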
|
llvm::MipsAsmPrinter::isLongBranchPseudo(int) const
|
bool MipsAsmPrinter::isLongBranchPseudo(int Opcode) const {
return (Opcode == Mips::LONG_BRANCH_LUi
|| Opcode == Mips::LONG_BRANCH_LUi2Op
|| Opcode == Mips::LONG_BRANCH_LUi2Op_64
|| Opcode == Mips::LONG_BRANCH_ADDiu
|| Opcode == Mips::LONG_BRANCH_ADDiu2Op
|| Opcode == Mips::LONG_BRANCH_DADDiu
|| Opcode == Mips::LONG_BRANCH_DADDiu2Op);
}
|
addl $0xfffffdf8, %esi # imm = 0xFFFFFDF8
cmpl $0x7, %esi
setb %al
retq
nop
|
/Target/Mips/MipsAsmPrinter.cpp
|
llvm::MipsAsmPrinter::PrintAsmMemoryOperand(llvm::MachineInstr const*, unsigned int, char const*, llvm::raw_ostream&)
|
bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
unsigned OpNum,
const char *ExtraCode,
raw_ostream &O) {
assert(OpNum + 1 < MI->getNumOperands() && "Insufficient operands");
const MachineOperand &BaseMO = MI->getOperand(OpNum);
const MachineOperand &OffsetMO = MI->getOperand(OpNum + 1);
assert(BaseMO.isReg() &&
"Unexpected base pointer for inline asm memory operand.");
assert(OffsetMO.isImm() &&
"Unexpected offset for inline asm memory operand.");
int Offset = OffsetMO.getImm();
  // Currently we expect either no ExtraCode or one of 'D', 'M', or 'L'.
if (ExtraCode) {
switch (ExtraCode[0]) {
case 'D':
Offset += 4;
break;
case 'M':
if (Subtarget->isLittle())
Offset += 4;
break;
case 'L':
if (!Subtarget->isLittle())
Offset += 4;
break;
default:
return true; // Unknown modifier.
}
}
O << Offset << "($" << MipsInstPrinter::getRegisterName(BaseMO.getReg())
<< ")";
return false;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq 0x20(%rsi), %r14
leal 0x1(%rdx), %eax
shlq $0x5, %rax
movq 0x10(%r14,%rax), %rsi
testq %rcx, %rcx
je 0xfe2c0e
movzbl (%rcx), %ecx
cmpl $0x4d, %ecx
je 0xfe2bf8
cmpl $0x4c, %ecx
je 0xfe2be0
movb $0x1, %al
cmpl $0x44, %ecx
jne 0xfe2c62
addq $0x4, %rsi
jmp 0xfe2c0e
movq 0x358(%rdi), %rax
leaq 0x4(%rsi), %rcx
cmpb $0x0, 0x128(%rax)
cmoveq %rcx, %rsi
jmp 0xfe2c0e
movq 0x358(%rdi), %rax
leaq 0x4(%rsi), %rcx
cmpb $0x0, 0x128(%rax)
cmovneq %rcx, %rsi
movl %edx, %r15d
movslq %esi, %rsi
movq %r8, %rdi
callq 0x2b7d122
leaq 0x3f72d0a(%rip), %rsi # 0x4f5592d
movl $0x2, %edx
movq %rax, %rdi
callq 0x7f9ba8
movq %rax, %rbx
shlq $0x5, %r15
movl 0x4(%r14,%r15), %edi
callq 0x1976e58
movq %rbx, %rdi
movq %rax, %rsi
callq 0x7f9b78
leaq 0x2b28d17(%rip), %rsi # 0x3b0b96a
movl $0x1, %edx
movq %rax, %rdi
callq 0x7f9ba8
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
|
/Target/Mips/MipsAsmPrinter.cpp
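
The 'D'/'M'/'L' modifiers address the two words of a 64-bit memory operand: 'D' always takes the second word, while 'M' and 'L' pick the most- and least-significant word, whose placement at the higher address depends on endianness, hence the conditional +4. A table-style sketch of that offset logic (IsLittle models Subtarget->isLittle()):

#include <cstdio>

static int adjustOffset(int Offset, char Modifier, bool IsLittle) {
  switch (Modifier) {
  case 'D': return Offset + 4;                     // second word, always
  case 'M': return IsLittle ? Offset + 4 : Offset; // most-significant word
  case 'L': return IsLittle ? Offset : Offset + 4; // least-significant word
  default:  return Offset;                         // unknown modifier
  }
}

int main() {
  std::printf("%d %d\n", adjustOffset(8, 'M', true),
              adjustOffset(8, 'M', false)); // prints: 12 8
}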
|
llvm::MipsAsmPrinter::EmitJal(llvm::MCSubtargetInfo const&, llvm::MCSymbol*)
|
void MipsAsmPrinter::EmitJal(const MCSubtargetInfo &STI, MCSymbol *Symbol) {
MCInst I;
I.setOpcode(Mips::JAL);
I.addOperand(
MCOperand::createExpr(MCSymbolRefExpr::create(Symbol, OutContext)));
OutStreamer->emitInstruction(I, STI);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq %rdx, %rax
movq %rsi, %rbx
movq %rdi, %r14
leaq 0x10(%rsp), %r15
leaq 0x20(%rsp), %r12
xorps %xmm0, %xmm0
movaps %xmm0, -0x20(%r12)
movq %r12, -0x10(%r12)
movabsq $0x600000000, %rcx # imm = 0x600000000
movq %rcx, -0x8(%r12)
movl $0x71a, -0x20(%r12) # imm = 0x71A
movq 0x48(%rdi), %rdx
movq %rax, %rdi
xorl %esi, %esi
xorl %ecx, %ecx
callq 0x28e1d28
movq %r15, %rdi
movl $0x5, %esi
movq %rax, %rdx
callq 0x821b42
movq 0x50(%r14), %rdi
movq (%rdi), %rax
movq %rsp, %r14
movq %r14, %rsi
movq %rbx, %rdx
callq *0x4b8(%rax)
movq 0x10(%r14), %rdi
cmpq %r12, %rdi
je 0xfe3347
callq 0x780910
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/Target/Mips/MipsAsmPrinter.cpp
|
llvm::MipsRegisterInfo::getFrameRegister(llvm::MachineFunction const&) const
|
Register MipsRegisterInfo::
getFrameRegister(const MachineFunction &MF) const {
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
bool IsN64 =
static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI().IsN64();
if (Subtarget.inMips16Mode())
return TFI->hasFP(MF) ? Mips::S0 : Mips::SP;
else
return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) :
(IsN64 ? Mips::SP_64 : Mips::SP);
}
|
pushq %rbp
pushq %r14
pushq %rbx
movq %rsi, %rbx
movq 0x10(%rsi), %r14
movq (%r14), %rax
movq %r14, %rdi
callq *0x88(%rax)
cmpb $0x1, 0x13d(%r14)
jne 0xfe5b37
movq (%rax), %rcx
movq %rax, %rdi
movq %rbx, %rsi
callq *0xc8(%rcx)
testb %al, %al
movl $0x146, %eax # imm = 0x146
movl $0x14, %ecx
cmovnel %eax, %ecx
jmp 0xfe5b72
movq 0x8(%rbx), %rcx
movl 0x4a0(%rcx), %ebp
movq (%rax), %rcx
movq %rax, %rdi
movq %rbx, %rsi
callq *0xc8(%rcx)
cmpl $0x3, %ebp
movl $0xdb, %ecx
movl $0x8, %edx
cmovel %ecx, %edx
movl $0x14e, %esi # imm = 0x14E
movl $0x14, %ecx
cmovel %esi, %ecx
testb %al, %al
cmovnel %edx, %ecx
movl %ecx, %eax
popq %rbx
popq %r14
popq %rbp
retq
nop
|
/Target/Mips/MipsRegisterInfo.cpp
|
void llvm::StringMapEntry<std::unique_ptr<llvm::MipsSubtarget, std::default_delete<llvm::MipsSubtarget>>>::Destroy<llvm::MallocAllocator>(llvm::MallocAllocator&)
|
void Destroy(AllocatorTy &allocator) {
// Free memory referenced by the item.
size_t AllocSize = sizeof(StringMapEntry) + this->getKeyLength() + 1;
this->~StringMapEntry();
allocator.Deallocate(static_cast<void *>(this), AllocSize,
alignof(StringMapEntry));
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
movq (%rdi), %r14
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0xfe8317
movq (%rdi), %rax
callq *0x8(%rax)
addq $0x11, %r14
movq $0x0, 0x8(%rbx)
movl $0x8, %edx
movq %rbx, %rdi
movq %r14, %rsi
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0x2b410f1
|
/llvm/ADT/StringMapEntry.h
|
(anonymous namespace)::MipsIncomingValueHandler::getStackAddress(unsigned long, long, llvm::MachinePointerInfo&, llvm::ISD::ArgFlagsTy)
|
Register MipsIncomingValueHandler::getStackAddress(uint64_t Size,
int64_t Offset,
MachinePointerInfo &MPO,
ISD::ArgFlagsTy Flags) {
MachineFunction &MF = MIRBuilder.getMF();
MachineFrameInfo &MFI = MF.getFrameInfo();
// FIXME: This should only be immutable for non-byval memory arguments.
int FI = MFI.CreateFixedObject(Size, Offset, true);
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
return MIRBuilder.buildFrameIndex(LLT::pointer(0, 32), FI).getReg(0);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x18, %rsp
movq %rcx, %rbx
movq %rdi, %r14
movq 0x8(%rdi), %rax
movq 0x8(%rax), %rax
movq 0x38(%rax), %rdi
movl $0x1, %ecx
xorl %r8d, %r8d
callq 0x1d32a60
movl %eax, %ebp
movq 0x8(%r14), %rax
movq 0x8(%rax), %rsi
movq %rsp, %r15
movq %r15, %rdi
movl %ebp, %edx
xorl %ecx, %ecx
callq 0x1d55ea2
movq 0xd(%r15), %rax
movq %rax, 0xd(%rbx)
movups (%r15), %xmm0
movups %xmm0, (%rbx)
movq 0x8(%r14), %rdi
movq $0x102, (%r15) # imm = 0x102
movl $0x0, 0x8(%r15)
movq %rsp, %rsi
movl %ebp, %edx
callq 0x15ddc04
movq 0x20(%rdx), %rax
movl 0x4(%rax), %eax
addq $0x18, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/Target/Mips/MipsCallLowering.cpp
|
llvm::MipsTargetLowering::lowerSETCC(llvm::SDValue, llvm::SelectionDAG&) const
|
SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
SDValue Cond = createFPCmp(DAG, Op);
assert(Cond.getOpcode() == MipsISD::FPCmp &&
"Floating point operand expected.");
SDLoc DL(Op);
SDValue True = DAG.getConstant(1, DL, MVT::i32);
SDValue False = DAG.getConstant(0, DL, MVT::i32);
return createCMovFP(DAG, Cond, True, False, DL);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rcx, %rbx
movq %rsi, %r15
leaq 0x38(%rsp), %rsi
movq %r15, (%rsi)
movl %edx, 0x8(%rsi)
movq %rcx, %rdi
callq 0x1021268
movq %rax, 0x10(%rsp)
movl %edx, %ebp
movq 0x48(%r15), %rsi
movq %rsi, 0x18(%rsp)
testq %rsi, %rsi
je 0x10166db
leaq 0x18(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r15), %eax
leaq 0x18(%rsp), %r15
movl %eax, 0x8(%r15)
xorl %r14d, %r14d
movl %r14d, (%rsp)
movl $0x1, %esi
movq %rbx, %rdi
movq %r15, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, %r12
movl %edx, %r13d
movl %r14d, (%rsp)
movq %rbx, %rdi
xorl %esi, %esi
movq %r15, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %rax, 0x28(%rsp)
movl %edx, 0x30(%rsp)
movups 0x28(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movq 0x10(%rsp), %rsi
movl %ebp, %edx
movq %r12, %rcx
movl %r13d, %r8d
movq %r15, %r9
callq 0x10213d6
movq %rax, %rbx
movl %edx, %ebp
movq (%r15), %rsi
testq %rsi, %rsi
je 0x101676d
leaq 0x18(%rsp), %rdi
callq 0x2a758fc
movq %rbx, %rax
movl %ebp, %edx
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/Mips/MipsISelLowering.cpp
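
Without a setcc-style FP compare producing a GPR result, the lowering above materializes the constants 1 and 0 and selects between them on the FP condition. A scalar picture of that shape; the specific compare predicate below is illustrative:

#include <cstdio>

// Pre-R6 MIPS has no FP compare that writes a GPR; the lowering builds
// the constants 1 and 0 and conditionally moves between them.
static int setccOLT(double A, double B) {
  bool Cond = A < B;   // stands in for the FPCmp node (predicate illustrative)
  return Cond ? 1 : 0; // the createCMovFP select between True and False
}

int main() { std::printf("%d\n", setccOLT(1.0, 2.0)); } // prints: 1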
|
llvm::SDValue llvm::MipsTargetLowering::getAddrNonPICSym64<llvm::ExternalSymbolSDNode>(llvm::ExternalSymbolSDNode*, llvm::SDLoc const&, llvm::EVT, llvm::SelectionDAG&) const
|
SDValue getAddrNonPICSym64(NodeTy *N, const SDLoc &DL, EVT Ty,
SelectionDAG &DAG) const {
SDValue Hi = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_HI);
SDValue Lo = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_LO);
SDValue Highest =
DAG.getNode(MipsISD::Highest, DL, Ty,
getTargetNode(N, Ty, DAG, MipsII::MO_HIGHEST));
SDValue Higher = getTargetNode(N, Ty, DAG, MipsII::MO_HIGHER);
SDValue HigherPart =
DAG.getNode(ISD::ADD, DL, Ty, Highest,
DAG.getNode(MipsISD::Higher, DL, Ty, Higher));
SDValue Cst = DAG.getConstant(16, DL, MVT::i32);
SDValue Shift = DAG.getNode(ISD::SHL, DL, Ty, HigherPart, Cst);
SDValue Add = DAG.getNode(ISD::ADD, DL, Ty, Shift,
DAG.getNode(MipsISD::Hi, DL, Ty, Hi));
SDValue Shift2 = DAG.getNode(ISD::SHL, DL, Ty, Add, Cst);
return DAG.getNode(ISD::ADD, DL, Ty, Shift2,
DAG.getNode(MipsISD::Lo, DL, Ty, Lo));
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x128, %rsp # imm = 0x128
movq %r9, %r14
movq %r8, %rbx
movl %ecx, %ebp
movq %rdx, %r15
movq %rdx, 0x30(%rsp)
movq %rsi, %r12
movq 0x58(%rsi), %rsi
movq %r9, %rdi
movl %ecx, %edx
movq %r8, %rcx
movl $0x4, %r8d
callq 0x1766cc2
movq %rax, 0x38(%rsp)
movl %edx, 0x28(%rsp)
movq 0x58(%r12), %rsi
movq %r14, %rdi
movl %ebp, %edx
movq %rbx, %rcx
movl $0x5, %r8d
callq 0x1766cc2
movq %rax, 0x40(%rsp)
movl %edx, 0x2c(%rsp)
movq 0x58(%r12), %rsi
movq %r14, %rdi
movl %ebp, %edx
movq %rbx, %rcx
movl $0x13, %r8d
callq 0x1766cc2
movq %rax, 0x118(%rsp)
movl %edx, 0x120(%rsp)
movups 0x118(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x1e8, %esi # imm = 0x1E8
movq %r15, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x176388a
movq %rax, %r13
movl %edx, %r15d
movq 0x58(%r12), %rsi
movq %r14, %rdi
movl %ebp, %edx
movq %rbx, %rcx
movl $0x12, %r8d
callq 0x1766cc2
movq %r13, 0x108(%rsp)
movl %r15d, 0x110(%rsp)
movq %rax, 0xe8(%rsp)
movl %edx, 0xf0(%rsp)
movups 0xe8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x1e9, %esi # imm = 0x1E9
movq 0x30(%rsp), %r13
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x176388a
movq %rax, 0xf8(%rsp)
movl %edx, 0x100(%rsp)
movups 0xf8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x108(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x38, %esi
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x17638a8
movq %rax, %r15
movl %edx, %r12d
movl $0x0, (%rsp)
movl $0x10, %esi
movq %r14, %rdi
movq %r13, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %r15, 0xd8(%rsp)
movl %r12d, 0xe0(%rsp)
movq %rax, 0xc8(%rsp)
movq %rax, %r15
movl %edx, 0xd0(%rsp)
movl %edx, %r12d
movups 0xc8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xd8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0xbd, %esi
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x17638a8
movq %rax, 0xb8(%rsp)
movl %edx, 0xc0(%rsp)
movq 0x38(%rsp), %rax
movq %rax, 0x98(%rsp)
movl 0x28(%rsp), %eax
movl %eax, 0xa0(%rsp)
movups 0x98(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x1ea, %esi # imm = 0x1EA
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x176388a
movq %rax, 0xa8(%rsp)
movl %edx, 0xb0(%rsp)
movups 0xa8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x38, %esi
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x17638a8
movq %rax, 0x88(%rsp)
movl %edx, 0x90(%rsp)
movq %r15, 0x78(%rsp)
movl %r12d, 0x80(%rsp)
movups 0x78(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x88(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0xbd, %esi
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x17638a8
movq %rax, 0x68(%rsp)
movl %edx, 0x70(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x48(%rsp)
movl 0x2c(%rsp), %eax
movl %eax, 0x50(%rsp)
movups 0x48(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x1eb, %esi # imm = 0x1EB
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x176388a
movq %rax, 0x58(%rsp)
movl %edx, 0x60(%rsp)
movups 0x58(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x68(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movl $0x38, %esi
movq %r13, %rdx
movl %ebp, %ecx
movq %rbx, %r8
callq 0x17638a8
addq $0x128, %rsp # imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/Mips/MipsISelLowering.h
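
The shift/add chain above reassembles a 64-bit symbol address from four 16-bit relocation pieces. Modeling the Highest wrapper's lui as an explicit << 16, and ignoring the sign-extension carries that real %hi/%lo relocations must compensate for, the arithmetic is:

#include <cstdint>
#include <cstdio>

// (((((Highest << 16) + Higher) << 16) + Hi) << 16) + Lo, following the
// SHL/ADD chain built above (chunk values below are illustrative).
static uint64_t combine(uint64_t Highest, uint64_t Higher,
                        uint64_t Hi, uint64_t Lo) {
  uint64_t HigherPart = (Highest << 16) + Higher;
  uint64_t Add = (HigherPart << 16) + Hi;
  return (Add << 16) + Lo;
}

int main() {
  std::printf("0x%016llx\n", (unsigned long long)
              combine(0x0123, 0x4567, 0x89ab, 0xcdef));
  // prints: 0x0123456789abcdef
}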
|
llvm::DenseMap<llvm::PointerUnion<llvm::Value const*, llvm::PseudoSourceValue const*>, llvm::ScopedHashTableVal<llvm::PointerUnion<llvm::Value const*, llvm::PseudoSourceValue const*>, std::pair<unsigned int, unsigned int>>*, llvm::DenseMapInfo<llvm::PointerUnion<llvm::Value const*, llvm::PseudoSourceValue const*>, void>, llvm::detail::DenseMapPair<llvm::PointerUnion<llvm::Value const*, llvm::PseudoSourceValue const*>, llvm::ScopedHashTableVal<llvm::PointerUnion<llvm::Value const*, llvm::PseudoSourceValue const*>, std::pair<unsigned int, unsigned int>>*>>::grow(unsigned int)
|
void grow(unsigned AtLeast) {
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;
allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
assert(Buckets);
if (!OldBuckets) {
this->BaseT::initEmpty();
return;
}
this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
// Free the old table.
deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
alignof(BucketT));
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %r15
movl 0x10(%rdi), %ebx
movq (%rdi), %r14
leal -0x1(%rsi), %eax
movl %eax, %ecx
shrl %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x2, %eax
orl %ecx, %eax
movl %eax, %ecx
shrl $0x4, %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x8, %eax
orl %ecx, %eax
movl %eax, %ecx
shrl $0x10, %ecx
orl %eax, %ecx
incl %ecx
cmpl $0x41, %ecx
movl $0x40, %edi
cmovael %ecx, %edi
movl %edi, 0x10(%r15)
shlq $0x4, %rdi
movl $0x8, %esi
callq 0x2b410ec
movq %rax, (%r15)
testq %r14, %r14
je 0x10340d0
shlq $0x4, %rbx
leaq (%r14,%rbx), %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x1034198
movl $0x8, %edx
movq %r14, %rdi
movq %rbx, %rsi
popq %rbx
popq %r14
popq %r15
jmp 0x2b410f1
movq $0x0, 0x8(%r15)
movl 0x10(%r15), %ecx
testq %rcx, %rcx
je 0x1034191
movabsq $0xfffffffffffffff, %rdx # imm = 0xFFFFFFFFFFFFFFF
addq %rdx, %rcx
andq %rcx, %rdx
andl $0x1, %ecx
negq %rcx
addq %rdx, %rcx
addq $0x2, %rcx
movq %rdx, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
addq $0x10, %rax
xorl %edx, %edx
movdqa 0x1b80f06(%rip), %xmm1 # 0x2bb5020
movdqa 0x1b80f0e(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rdx, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0x103416e
movq $-0x1000, -0x10(%rax) # imm = 0xF000
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0x1034184
movq $-0x1000, (%rax) # imm = 0xF000
addq $0x2, %rdx
addq $0x20, %rax
cmpq %rdx, %rcx
jne 0x103412a
popq %rbx
popq %r14
popq %r15
retq
nop
|
/llvm/ADT/DenseMap.h
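
The shift/or cascade at the top of the compiled grow() is llvm::NextPowerOf2: smear the highest set bit of AtLeast-1 rightward, then add one; the result is clamped to a minimum of 64 buckets. A standalone version of both steps:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// llvm::NextPowerOf2(A): smallest power of two strictly greater than A,
// via the shift/or cascade seen in the compiled grow().
static uint32_t nextPowerOf2(uint32_t A) {
  A |= A >> 1;  A |= A >> 2;  A |= A >> 4;
  A |= A >> 8;  A |= A >> 16;
  return A + 1;
}

static unsigned newBucketCount(unsigned AtLeast) {
  // grow() rounds AtLeast-1 up and never allocates fewer than 64 buckets.
  return std::max(64u, nextPowerOf2(AtLeast - 1));
}

int main() {
  std::printf("%u %u\n", newBucketCount(3), newBucketCount(100)); // 64 128
}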
|
llvm::DenseMapBase<llvm::DenseMap<llvm::MachineInstr const*, llvm::MipsRegisterBankInfo::InstType, llvm::DenseMapInfo<llvm::MachineInstr const*, void>, llvm::detail::DenseMapPair<llvm::MachineInstr const*, llvm::MipsRegisterBankInfo::InstType>>, llvm::MachineInstr const*, llvm::MipsRegisterBankInfo::InstType, llvm::DenseMapInfo<llvm::MachineInstr const*, void>, llvm::detail::DenseMapPair<llvm::MachineInstr const*, llvm::MipsRegisterBankInfo::InstType>>::clear()
|
unsigned getNumEntries() const {
return NumEntries;
}
|
movl 0x8(%rdi), %eax
testl %eax, %eax
jne 0x103aa37
cmpl $0x0, 0xc(%rdi)
je 0x103ab10
shll $0x2, %eax
movl 0x10(%rdi), %ecx
cmpl %ecx, %eax
setae %al
cmpl $0x41, %ecx
setb %dl
orb %al, %dl
je 0x103e34a
testq %rcx, %rcx
je 0x103ab08
movq (%rdi), %rax
movabsq $0xfffffffffffffff, %rdx # imm = 0xFFFFFFFFFFFFFFF
addq %rdx, %rcx
andq %rcx, %rdx
andl $0x1, %ecx
negq %rcx
addq %rdx, %rcx
addq $0x2, %rcx
movq %rdx, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
addq $0x10, %rax
xorl %edx, %edx
movdqa 0x1b7a58f(%rip), %xmm1 # 0x2bb5020
movdqa 0x1b7a597(%rip), %xmm2 # 0x2bb5030
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movq %rdx, %xmm4
pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1]
por %xmm1, %xmm4
pxor %xmm2, %xmm4
movdqa %xmm4, %xmm5
pcmpgtd %xmm0, %xmm5
pcmpeqd %xmm0, %xmm4
pshufd $0xf5, %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
pand %xmm5, %xmm6
pshufd $0xf5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
por %xmm6, %xmm4
movd %xmm4, %esi
notl %esi
testb $0x1, %sil
je 0x103aae5
movq $-0x1000, -0x10(%rax) # imm = 0xF000
pxor %xmm3, %xmm4
pextrw $0x4, %xmm4, %esi
testb $0x1, %sil
je 0x103aafb
movq $-0x1000, (%rax) # imm = 0xF000
addq $0x2, %rdx
addq $0x20, %rax
cmpq %rdx, %rcx
jne 0x103aaa1
movq $0x0, 0x8(%rdi)
retq
nop
|
/llvm/ADT/DenseMap.h
|
llvm::DenseMapBase<llvm::DenseMap<llvm::MachineInstr const*, llvm::SmallVector<llvm::MachineInstr const*, 2u>, llvm::DenseMapInfo<llvm::MachineInstr const*, void>, llvm::detail::DenseMapPair<llvm::MachineInstr const*, llvm::SmallVector<llvm::MachineInstr const*, 2u>>>, llvm::MachineInstr const*, llvm::SmallVector<llvm::MachineInstr const*, 2u>, llvm::DenseMapInfo<llvm::MachineInstr const*, void>, llvm::detail::DenseMapPair<llvm::MachineInstr const*, llvm::SmallVector<llvm::MachineInstr const*, 2u>>>::erase(llvm::MachineInstr const* const&)
|
bool erase(const KeyT &Val) {
BucketT *TheBucket;
if (!LookupBucketFor(Val, TheBucket))
return false; // not in map.
TheBucket->getSecond().~ValueT();
TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
return true;
}
|
pushq %rbp
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
leaq 0x8(%rsp), %rdx
callq 0x103bdd4
movl %eax, %ebp
testb %al, %al
je 0x103c5f3
movq 0x8(%rsp), %r14
movq 0x8(%r14), %rdi
leaq 0x18(%r14), %rax
cmpq %rax, %rdi
je 0x103c5da
callq 0x780910
movq $-0x2000, (%r14) # imm = 0xE000
movq 0x8(%rbx), %xmm0
paddd 0x2367062(%rip), %xmm0 # 0x33a3650
movq %xmm0, 0x8(%rbx)
movl %ebp, %eax
addq $0x10, %rsp
popq %rbx
popq %r14
popq %rbp
retq
|
/llvm/ADT/DenseMap.h
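
erase() never rehashes: the slot's key is overwritten with the tombstone sentinel (the movq $-0x2000 store in the assembly) so later probe chains still walk past the dead slot. A toy open-addressed table with the same tombstone discipline; the sentinel values and fixed 8-slot size are illustrative:

#include <cstdio>

// Toy open-addressed set with DenseMap-style empty/tombstone sentinels.
enum : int { Empty = -1, Tombstone = -2 };

struct Table {
  int Slots[8] = {Empty, Empty, Empty, Empty, Empty, Empty, Empty, Empty};

  int *find(int Key) {
    for (unsigned I = 0, P = Key & 7; I < 8; ++I, P = (P + 1) & 7) {
      if (Slots[P] == Key) return &Slots[P];
      if (Slots[P] == Empty) return nullptr; // tombstones do NOT stop probing
    }
    return nullptr;
  }

  void insert(int Key) {
    for (unsigned P = Key & 7;; P = (P + 1) & 7)
      if (Slots[P] == Empty || Slots[P] == Tombstone) { Slots[P] = Key; return; }
  }

  bool erase(int Key) {
    if (int *S = find(Key)) { *S = Tombstone; return true; } // chain stays intact
    return false;
  }
};

int main() {
  Table T;
  T.insert(1); T.insert(9); // 9 collides with 1 (9 & 7 == 1)
  T.erase(1);
  std::printf("%d\n", T.find(9) != nullptr); // prints 1: probe walks the tombstone
}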
|
llvm::MipsSEDAGToDAGISel::getMSACtrlReg(llvm::SDValue) const
|
uint64_t getZExtValue() const { return Value->getZExtValue(); }
|
movq 0x58(%rsi), %rax
cmpl $0x41, 0x20(%rax)
jb 0x10467cc
movq 0x18(%rax), %rax
jmp 0x10467d0
addq $0x18, %rax
movl (%rax), %eax
leaq 0x475cdd7(%rip), %rcx # 0x57a35b0
movq (%rcx), %rcx
movq (%rcx), %rcx
movzwl (%rcx,%rax,2), %eax
retq
|
/llvm/CodeGen/SelectionDAGNodes.h
|
llvm::Mips16InstrInfo::storeRegToStack(llvm::MachineBasicBlock&, llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>, llvm::Register, bool, int, llvm::TargetRegisterClass const*, llvm::TargetRegisterInfo const*, long) const
|
void Mips16InstrInfo::storeRegToStack(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
int64_t Offset) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
unsigned Opc = 0;
if (Mips::CPU16RegsRegClass.hasSubClassEq(RC))
Opc = Mips::SwRxSpImmX16;
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)).
addFrameIndex(FI).addImm(Offset)
.addMemOperand(MMO);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movl %r9d, %ebx
movl %r8d, 0x10(%rsp)
movl %ecx, 0x14(%rsp)
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %r14
movq 0xa0(%rsp), %r15
movq $0x0, 0x8(%rsp)
leaq 0x30(%rsi), %rax
cmpq %rdx, %rax
je 0x105be1c
leaq 0x38(%r12), %rsi
leaq 0x8(%rsp), %rdi
callq 0x91058c
movq %r14, %rdi
movq %r13, %rsi
movl %ebx, %edx
movl $0x2, %ecx
callq 0x1006f38
movq (%r15), %rcx
movzwl 0x18(%rcx), %ecx
leaq 0x474788c(%rip), %rdx # 0x57a36c8
movq 0x8(%rdx), %rdx
movl %ecx, %esi
shrl $0x5, %esi
movl (%rdx,%rsi,4), %edx
xorl %esi, %esi
btl %ecx, %edx
movq $-0xaf1, %r15 # imm = 0xF50F
cmovaeq %rsi, %r15
movq %rax, 0x18(%rsp)
movq 0x8(%rsp), %rsi
movq %rsi, (%rsp)
testq %rsi, %rsi
je 0x105be78
movq %rsp, %rdi
movl $0x1, %edx
callq 0x2a757d8
movq (%rsp), %rsi
movq %rsi, 0x20(%rsp)
testq %rsi, %rsi
je 0x105be9e
movq %rsp, %rbp
leaq 0x20(%rsp), %rdx
movq %rbp, %rdi
callq 0x2a759cc
movq $0x0, (%rbp)
xorps %xmm0, %xmm0
leaq 0x20(%rsp), %rbp
movups %xmm0, 0x8(%rbp)
shlq $0x5, %r15
addq 0x8(%r14), %r15
movq %r13, %rdi
movq %r12, %rsi
movq %rbp, %rdx
movq %r15, %rcx
callq 0x93f73a
leaq 0x38(%rsp), %rdi
movq %rax, (%rdi)
movq %rdx, 0x8(%rdi)
movzbl 0x10(%rsp), %edx
shll $0x3, %edx
xorl %r12d, %r12d
movl 0x14(%rsp), %esi
xorl %ecx, %ecx
callq 0x93f5ac
movq %rax, %r14
movq (%rax), %rsi
movq 0x8(%rax), %rdi
movl $0xfff00000, %r13d # imm = 0xFFF00000
leaq 0x48(%rsp), %r15
movl (%r15), %eax
andl %r13d, %eax
orl $0x5, %eax
movl %eax, (%r15)
movq %r12, 0x8(%r15)
movl %ebx, 0x10(%r15)
movq %r15, %rdx
callq 0x1d3c22c
movq (%r14), %rsi
movq 0x8(%r14), %rdi
andl (%r15), %r13d
incl %r13d
movl %r13d, (%r15)
movq %r12, 0x8(%r15)
movq 0xb0(%rsp), %rax
movq %rax, 0x10(%r15)
leaq 0x48(%rsp), %rdx
callq 0x1d3c22c
movq (%r14), %rsi
movq 0x8(%r14), %rdi
movq 0x18(%rsp), %rdx
callq 0x1d3cbe2
movq (%rbp), %rsi
testq %rsi, %rsi
je 0x105bf64
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
movq (%rsp), %rsi
testq %rsi, %rsi
je 0x105bf75
movq %rsp, %rdi
callq 0x2a758fc
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x105bf89
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/Mips/Mips16InstrInfo.cpp
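|
The body is a straight-line expansion of the BuildMI() chain: materialize the DebugLoc, select SwRxSpImmX16 when the register class is CPU16Regs, then append the register, frame-index, immediate, and memory operands. A condensed sketch of the same chain, with the descriptor and offset passed in rather than hard-coded (a sketch, not the Mips16 implementation):
|
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

// Sketch of the operand chain compiled above.
void emitStackStore(llvm::MachineBasicBlock &MBB,
                    llvm::MachineBasicBlock::iterator I,
                    const llvm::MCInstrDesc &Desc, llvm::Register SrcReg,
                    bool IsKill, int FI, int64_t Offset,
                    llvm::MachineMemOperand *MMO) {
  llvm::BuildMI(MBB, I, llvm::DebugLoc(), Desc)
      .addReg(SrcReg, llvm::getKillRegState(IsKill))
      .addFrameIndex(FI)
      .addImm(Offset)
      .addMemOperand(MMO);
}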
|
llvm::Mips16InstrInfo::adjustStackPtrBigUnrestricted(unsigned int, long, llvm::MachineBasicBlock&, llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>) const
|
void Mips16InstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
if (Amount == 0)
return;
if (isInt<16>(Amount)) // need to change to addiu sp, ....and isInt<16>
BuildAddiuSpImm(MBB, I, Amount);
else
adjustStackPtrBigUnrestricted(SP, Amount, MBB, I);
}
|
testq %rdx, %rdx
je 0x105cc6b
movq %rcx, %rsi
movq %rdx, %rcx
movq %r8, %rdx
jmp 0x105c588
retq
|
/Target/Mips/Mips16InstrInfo.cpp
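|
The public adjustStackPtr() shown above compiles to this short dispatcher: a zero amount returns immediately, a 16-bit amount takes the single addiu path, and everything else tail-calls the big-adjustment routine. A sketch of the range test:
|
#include "llvm/Support/MathExtras.h"

// Sketch: only amounts outside the signed 16-bit immediate range
// (-32768 .. 32767) need the multi-instruction path.
bool needsBigAdjust(int64_t Amount) {
  return Amount != 0 && !llvm::isInt<16>(Amount);
}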
|
(anonymous namespace)::MSP430DAGToDAGISel::Select(llvm::SDNode*)
|
void MSP430DAGToDAGISel::Select(SDNode *Node) {
SDLoc dl(Node);
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
return;
}
// A few custom selection cases.
switch (Node->getOpcode()) {
default: break;
case ISD::FrameIndex: {
assert(Node->getValueType(0) == MVT::i16);
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i16);
if (Node->hasOneUse()) {
CurDAG->SelectNodeTo(Node, MSP430::ADDframe, MVT::i16, TFI,
CurDAG->getTargetConstant(0, dl, MVT::i16));
return;
}
ReplaceNode(Node, CurDAG->getMachineNode(
MSP430::ADDframe, dl, MVT::i16, TFI,
CurDAG->getTargetConstant(0, dl, MVT::i16)));
return;
}
case ISD::LOAD:
if (tryIndexedLoad(Node))
return;
// Other cases are autogenerated.
break;
case ISD::ADD:
if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
MSP430::ADD8rp, MSP430::ADD16rp))
return;
else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
MSP430::ADD8rp, MSP430::ADD16rp))
return;
// Other cases are autogenerated.
break;
case ISD::SUB:
if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
MSP430::SUB8rp, MSP430::SUB16rp))
return;
// Other cases are autogenerated.
break;
case ISD::AND:
if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
MSP430::AND8rp, MSP430::AND16rp))
return;
else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
MSP430::AND8rp, MSP430::AND16rp))
return;
// Other cases are autogenerated.
break;
case ISD::OR:
if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
MSP430::BIS8rp, MSP430::BIS16rp))
return;
else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
MSP430::BIS8rp, MSP430::BIS16rp))
return;
// Other cases are autogenerated.
break;
case ISD::XOR:
if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
MSP430::XOR8rp, MSP430::XOR16rp))
return;
else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
MSP430::XOR8rp, MSP430::XOR16rp))
return;
// Other cases are autogenerated.
break;
}
// Select the default instruction
SelectCode(Node);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq 0x48(%rsi), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x106f1b8
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
movl %eax, 0x10(%rsp)
movl 0x18(%rbx), %eax
testl %eax, %eax
js 0x106f250
cmpl $0xb8, %eax
jle 0x106f25c
cmpl $0xba, %eax
jg 0x106f29e
cmpl $0xb9, %eax
je 0x106f39a
cmpl $0xba, %eax
jne 0x106f4a9
movq 0x28(%rbx), %rax
movq (%rax), %rdx
movl 0x8(%rax), %ecx
movq 0x28(%rax), %r8
movl 0x30(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x194 # imm = 0x194
pushq $0x1a0 # imm = 0x1A0
callq 0x106f7d2
addq $0x10, %rsp
testb %al, %al
jne 0x106f4c0
movq 0x28(%rbx), %rax
movq 0x28(%rax), %rdx
movl 0x30(%rax), %ecx
movq (%rax), %r8
movl 0x8(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x194 # imm = 0x194
pushq $0x1a0 # imm = 0x1A0
jmp 0x106f49c
movl $0xffffffff, 0x24(%rbx) # imm = 0xFFFFFFFF
jmp 0x106f4c0
cmpl $0xf, %eax
je 0x106f2e7
cmpl $0x38, %eax
je 0x106f44b
cmpl $0x39, %eax
jne 0x106f4a9
movq 0x28(%rbx), %rax
movq (%rax), %rdx
movl 0x8(%rax), %ecx
movq 0x28(%rax), %r8
movl 0x30(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x233 # imm = 0x233
pushq $0x23f # imm = 0x23F
jmp 0x106f49c
cmpl $0xbb, %eax
je 0x106f3f4
cmpl $0x121, %eax # imm = 0x121
jne 0x106f4a9
movq %rbx, %rdi
callq 0x106f963
testb %al, %al
je 0x106f4a9
movzbl 0x58(%rbx), %r13d
cmpl $0x5, %r13d
je 0x106f55b
cmpl $0x6, %r13d
jne 0x106f4a9
movl $0x1fd, %ebp # imm = 0x1FD
jmp 0x106f560
movl 0x58(%rbx), %esi
movq 0x38(%r14), %rdi
movl $0x6, %edx
xorl %ecx, %ecx
movl $0x1, %r8d
callq 0x1765d42
movq 0x38(%rbx), %rcx
testq %rcx, %rcx
je 0x106f314
cmpq $0x0, 0x20(%rcx)
je 0x106f4e6
movq 0x38(%r14), %r15
movq %rax, 0x38(%rsp)
movl %edx, 0x40(%rsp)
subq $0x8, %rsp
leaq 0x10(%rsp), %r12
movq %r15, %rdi
xorl %esi, %esi
movq %r12, %rdx
movl $0x6, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
pushq $0x0
callq 0x17645fe
addq $0x10, %rsp
movq %rax, 0x28(%rsp)
movl %edx, 0x30(%rsp)
subq $0x20, %rsp
movups 0x48(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x58(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r15, %rdi
movl $0x157, %esi # imm = 0x157
movq %r12, %rdx
movl $0x6, %ecx
xorl %r8d, %r8d
callq 0x178f4ac
addq $0x20, %rsp
movq %r14, %rdi
movq %rbx, %rsi
movq %rax, %rdx
callq 0x9db81c
jmp 0x106f4c0
movq 0x28(%rbx), %rax
movq (%rax), %rdx
movl 0x8(%rax), %ecx
movq 0x28(%rax), %r8
movl 0x30(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x164 # imm = 0x164
pushq $0x170 # imm = 0x170
callq 0x106f7d2
addq $0x10, %rsp
testb %al, %al
jne 0x106f4c0
movq 0x28(%rbx), %rax
movq 0x28(%rax), %rdx
movl 0x30(%rax), %ecx
movq (%rax), %r8
movl 0x8(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x164 # imm = 0x164
pushq $0x170 # imm = 0x170
jmp 0x106f49c
movq 0x28(%rbx), %rax
movq (%rax), %rdx
movl 0x8(%rax), %ecx
movq 0x28(%rax), %r8
movl 0x30(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x26f # imm = 0x26F
pushq $0x27b # imm = 0x27B
callq 0x106f7d2
addq $0x10, %rsp
testb %al, %al
jne 0x106f4c0
movq 0x28(%rbx), %rax
movq 0x28(%rax), %rdx
movl 0x30(%rax), %ecx
movq (%rax), %r8
movl 0x8(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x26f # imm = 0x26F
pushq $0x27b # imm = 0x27B
jmp 0x106f49c
movq 0x28(%rbx), %rax
movq (%rax), %rdx
movl 0x8(%rax), %ecx
movq 0x28(%rax), %r8
movl 0x30(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x131 # imm = 0x131
pushq $0x13d # imm = 0x13D
callq 0x106f7d2
addq $0x10, %rsp
testb %al, %al
jne 0x106f4c0
movq 0x28(%rbx), %rax
movq 0x28(%rax), %rdx
movl 0x30(%rax), %ecx
movq (%rax), %r8
movl 0x8(%rax), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq $0x131 # imm = 0x131
pushq $0x13d # imm = 0x13D
callq 0x106f7d2
addq $0x10, %rsp
testb %al, %al
jne 0x106f4c0
leaq 0x2a995e0(%rip), %rdx # 0x3b08a90
movq %r14, %rdi
movq %rbx, %rsi
movl $0x1f85, %ecx # imm = 0x1F85
callq 0x17a709c
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x106f4d4
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x38(%r14), %r14
movq %rax, 0x58(%rsp)
movl %edx, 0x60(%rsp)
subq $0x8, %rsp
leaq 0x10(%rsp), %rdx
movq %r14, %rdi
xorl %esi, %esi
movl $0x6, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
pushq $0x0
callq 0x17645fe
addq $0x10, %rsp
movq %rax, 0x48(%rsp)
movl %edx, 0x50(%rsp)
subq $0x20, %rsp
movups 0x68(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x78(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movq %rbx, %rsi
movl $0x157, %edx # imm = 0x157
movl $0x6, %ecx
xorl %r8d, %r8d
callq 0x178e85a
addq $0x20, %rsp
jmp 0x106f4c0
movl $0x208, %ebp # imm = 0x208
movq 0x38(%r14), %r15
movq 0x48(%rbx), %rsi
movq %rsi, 0x18(%rsp)
testq %rsi, %rsi
je 0x106f581
leaq 0x18(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
leaq 0x18(%rsp), %r12
movl %eax, 0x8(%r12)
movb $0x6, 0x78(%rsp)
xorl %eax, %eax
movq %rax, 0x80(%rsp)
movb $0x1, 0x68(%rsp)
movq %rax, 0x70(%rsp)
movq 0x28(%rbx), %rax
subq $0x40, %rsp
movups (%rax), %xmm0
movups 0x28(%rax), %xmm1
movups %xmm0, 0x30(%rsp)
movups %xmm1, 0x20(%rsp)
movups 0xa8(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, (%rsp)
movzbl %r13b, %ecx
movq %r15, %rdi
movl %ebp, %esi
movq %r12, %rdx
xorl %r8d, %r8d
callq 0x178f75c
addq $0x40, %rsp
movq %r14, %rdi
movq %rbx, %rsi
movq %rax, %rdx
callq 0x9db81c
movq (%r12), %rsi
testq %rsi, %rsi
je 0x106f4c0
leaq 0x18(%rsp), %rdi
callq 0x2a758fc
jmp 0x106f4c0
nop
|
/Target/MSP430/MSP430ISelDAGToDAG.cpp
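|
Commutative opcodes (ADD, AND, OR, XOR) are tried twice with the operands swapped, because only one operand can bind to the post-increment @Rn+ addressing mode; SUB is not commutative and gets a single attempt. A hypothetical condensation of that retry pattern (the helper name and signature are illustrative, not from the source):
|
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

// Hypothetical helper: try operand order (0,1), then (1,0) if commutative.
static bool selectIndexed(
    llvm::SDNode *N, bool Commutative,
    llvm::function_ref<bool(llvm::SDValue, llvm::SDValue)> Try) {
  if (Try(N->getOperand(0), N->getOperand(1)))
    return true;
  return Commutative && Try(N->getOperand(1), N->getOperand(0));
}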
|
void llvm::cl::parser<llvm::MSP430Subtarget::HWMultEnum>::addLiteralOption<int>(llvm::StringRef, int const&, llvm::StringRef)
|
void addLiteralOption(StringRef Name, const DT &V, StringRef HelpStr) {
#ifndef NDEBUG
if (findOption(Name) != Values.size())
report_fatal_error("Option '" + Name + "' already exists!");
#endif
OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
Values.push_back(X);
AddLiteralOption(Owner, Name);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x30, %rsp
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movl (%rcx), %eax
movq %rsp, %rsi
movq %r14, (%rsi)
movq %rdx, 0x8(%rsi)
movq %r8, 0x10(%rsi)
movq %r9, 0x18(%rsi)
leaq 0x473c207(%rip), %r12 # 0x57ae270
leaq 0x473c1e0(%rip), %r13 # 0x57ae250
movq %r13, 0x20(%rsi)
movb $0x1, 0x2c(%rsi)
movl %eax, 0x28(%rsi)
addq $0x10, %rdi
movl $0x1, %edx
callq 0x10720e2
movq 0x10(%r15), %rcx
movl 0x18(%r15), %edx
leaq (%rdx,%rdx,2), %rdx
shlq $0x4, %rdx
movups (%rax), %xmm0
movups 0x10(%rax), %xmm1
movups %xmm0, (%rcx,%rdx)
movups %xmm1, 0x10(%rcx,%rdx)
movq %r12, 0x20(%rcx,%rdx)
movl 0x28(%rax), %esi
movl %esi, 0x28(%rcx,%rdx)
movb 0x2c(%rax), %al
movb %al, 0x2c(%rcx,%rdx)
movq %r13, 0x20(%rcx,%rdx)
incl 0x18(%r15)
movq 0x8(%r15), %rdi
movq %r14, %rsi
movq %rbx, %rdx
callq 0x2b1f1ab
addq $0x30, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/llvm/Support/CommandLine.h
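|
This instantiation backs enum-valued command-line options: each literal registered here becomes one accepted spelling of the flag. A hypothetical option (the flag name and values are illustrative) showing the declaration pattern that ends up calling addLiteralOption():
|
#include "llvm/Support/CommandLine.h"

enum class HWMult { None, HW16 };

// Hypothetical flag: each clEnumValN literal is routed through
// addLiteralOption() at static-initialization time.
static llvm::cl::opt<HWMult> ExampleHWMult(
    "example-mhwmult", llvm::cl::desc("Hardware multiplier use"),
    llvm::cl::init(HWMult::None),
    llvm::cl::values(
        clEnumValN(HWMult::None, "none", "Do not use a hardware multiplier"),
        clEnumValN(HWMult::HW16, "16bit", "Use the 16-bit multiplier")));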
|
llvm::MSP430TargetLowering::LowerRETURNADDR(llvm::SDValue, llvm::SelectionDAG&) const
|
SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
MFI.setReturnAddressIsTaken(true);
if (verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
unsigned Depth = Op.getConstantOperandVal(0);
SDLoc dl(Op);
EVT PtrVT = Op.getValueType();
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
DAG.getConstant(PtrVT.getStoreSize(), dl, MVT::i16);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
MachinePointerInfo());
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
MachinePointerInfo());
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x118, %rsp # imm = 0x118
movq %rcx, %rbx
movl %edx, %ebp
movq %rsi, %r14
movq %rdi, %r15
movq 0x28(%rcx), %rax
movq 0x38(%rax), %rax
movb $0x1, 0x26(%rax)
callq 0x17d3a3a
testb %al, %al
je 0x1073327
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp 0x1073570
movq 0x28(%r14), %rax
movq (%rax), %rax
movq 0x58(%rax), %rax
cmpl $0x41, 0x20(%rax)
jb 0x107333e
movq 0x18(%rax), %rax
jmp 0x1073342
addq $0x18, %rax
movl (%rax), %r12d
movq 0x48(%r14), %rsi
movq %rsi, 0x48(%rsp)
testq %rsi, %rsi
je 0x1073362
leaq 0x48(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%r14), %eax
movl %eax, 0x50(%rsp)
movq 0x30(%r14), %rax
movl %ebp, %ecx
shlq $0x4, %rcx
movb (%rax,%rcx), %dl
movq 0x8(%rax,%rcx), %rax
movb %dl, 0x58(%rsp)
movq %rax, 0x60(%rsp)
testq %r12, %r12
je 0x10734c7
movq %r14, %rsi
movl %ebp, %edx
movq %rbx, %rcx
callq 0x1073588
movq %rax, %rbp
movl %edx, %r12d
leaq 0x58(%rsp), %rdi
movq %rdi, %r13
callq 0x9ecd2a
leaq 0xf0(%rsp), %r14
movq %rax, (%r14)
movb %dl, 0x8(%r14)
movq %r14, %rdi
callq 0x2b60e74
xorl %r15d, %r15d
movl %r15d, (%rsp)
leaq 0x48(%rsp), %rdx
movq %rbx, %rdi
movq %rax, %rsi
movl $0x6, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x17645fe
movq %r13, %rsi
movq 0x8(%r13), %r13
leaq 0xe8(%rbx), %rcx
movq %rcx, 0x68(%rsp)
movq %rbp, 0x90(%rsp)
movl %r12d, 0x98(%rsp)
movq %rax, 0x80(%rsp)
movl %edx, 0x88(%rsp)
movl (%rsi), %ebp
movups 0x80(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x90(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rdi
movl $0x38, %esi
leaq 0x48(%rsp), %rdx
movl %ebp, %ecx
movq %r13, %r8
callq 0x17638a8
movq %rax, 0xa0(%rsp)
movl %edx, 0xa8(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0xd0(%rsp)
movq %r15, 0xdd(%rsp)
movaps %xmm0, 0x10(%r14)
movaps %xmm0, (%r14)
movq 0xe0(%rsp), %rax
movq %rax, 0x20(%rsp)
movaps 0xd0(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0xa0(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r15, 0x40(%rsp)
movq %r14, 0x38(%rsp)
movl %r15d, 0x30(%rsp)
movl %r15d, 0x28(%rsp)
movq %rbx, %rdi
movl %ebp, %esi
movq %r13, %rdx
leaq 0x48(%rsp), %rcx
movq 0x68(%rsp), %r8
jmp 0x107354f
movq %r15, %rdi
movq %rbx, %rsi
callq 0x107613c
movq 0x60(%rsp), %r9
leaq 0xe8(%rbx), %r8
movq %rax, 0x70(%rsp)
movl %edx, 0x78(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0xb0(%rsp)
xorl %eax, %eax
movq %rax, 0xbd(%rsp)
movl 0x58(%rsp), %esi
leaq 0xf0(%rsp), %rcx
movaps %xmm0, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0xc0(%rsp), %rdx
movq %rdx, 0x20(%rsp)
movaps 0xb0(%rsp), %xmm0
movups %xmm0, 0x10(%rsp)
movups 0x70(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rax, 0x40(%rsp)
movq %rcx, 0x38(%rsp)
movl %eax, 0x30(%rsp)
movl %eax, 0x28(%rsp)
leaq 0x48(%rsp), %rcx
movq %rbx, %rdi
movq %r9, %rdx
xorl %r9d, %r9d
callq 0x17695d6
movq %rax, %r14
movl %edx, %ebx
movq 0x48(%rsp), %rsi
testq %rsi, %rsi
je 0x1073570
leaq 0x48(%rsp), %rdi
callq 0x2a758fc
movq %r14, %rax
movl %ebx, %edx
addq $0x118, %rsp # imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/MSP430/MSP430ISelLowering.cpp
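|
This hook is reached from the llvm.returnaddress intrinsic: depth 0 loads the saved return address directly, while a nonzero depth first lowers FRAMEADDR and loads through the frame address plus the pointer store size. A sketch of emitting the intrinsic that triggers this lowering, assuming the public IRBuilder::CreateIntrinsic API:
|
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"

// Sketch: build a call to llvm.returnaddress(Depth).
llvm::Value *emitReturnAddress(llvm::IRBuilder<> &B, unsigned Depth) {
  return B.CreateIntrinsic(llvm::Intrinsic::returnaddress, {},
                           {B.getInt32(Depth)});
}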
|
llvm::NVPTXAsmPrinter::printFPConstant(llvm::ConstantFP const*, llvm::raw_ostream&)
|
void NVPTXAsmPrinter::printFPConstant(const ConstantFP *Fp, raw_ostream &O) {
APFloat APF = APFloat(Fp->getValueAPF()); // make a copy
bool ignored;
unsigned int numHex;
const char *lead;
if (Fp->getType()->getTypeID() == Type::FloatTyID) {
numHex = 8;
lead = "0f";
APF.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &ignored);
} else if (Fp->getType()->getTypeID() == Type::DoubleTyID) {
numHex = 16;
lead = "0d";
APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &ignored);
} else
llvm_unreachable("unsupported fp type");
APInt API = APF.bitcastToAPInt();
O << lead << format_hex_no_prefix(API.getZExtValue(), numHex, /*Upper=*/true);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rdx, %r14
movq %rsi, %r15
leaq 0x28(%rsp), %rbx
addq $0x20, %rsi
movq %rbx, %rdi
callq 0x81e6da
movq 0x8(%r15), %rax
cmpb $0x2, 0x8(%rax)
jne 0x107f326
leaq 0x3f0b092(%rip), %r15 # 0x4f8a3ac
movl $0x8, %ebp
callq 0x2b061ee
jmp 0x107f337
leaq 0x4029617(%rip), %r15 # 0x50a8944
movl $0x10, %ebp
callq 0x2b061f6
leaq 0x20(%rsp), %r13
leaq 0xf(%rsp), %rcx
movq %r13, %rdi
movq %rax, %rsi
movl $0x1, %edx
callq 0x2b109d0
leaq 0x10(%rsp), %r12
movq %r12, %rdi
movq %r13, %rsi
callq 0x815f90
movl $0x2, %edx
movq %r14, %rdi
movq %r15, %rsi
callq 0x7f9ba8
cmpl $0x41, 0x8(%r12)
jb 0x107f37e
movq 0x10(%rsp), %r12
movq (%r12), %rcx
leaq 0x40(%rsp), %rsi
movq %rcx, (%rsi)
movq $0x0, 0x8(%rsi)
movl %ebp, 0x10(%rsi)
movw $0x101, 0x14(%rsi) # imm = 0x101
movb $0x0, 0x16(%rsi)
movq %rax, %rdi
callq 0x2b7d9fc
cmpl $0x41, 0x18(%rsp)
jb 0x107f3bd
movq 0x10(%rsp), %rdi
testq %rdi, %rdi
je 0x107f3bd
callq 0x7802b0
movq %rbx, %rdi
callq 0x81603c
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/NVPTX/NVPTXAsmPrinter.cpp
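|
PTX spells float literals as 0f followed by eight upper-case hex digits and doubles as 0d followed by sixteen, so 1.0f prints as 0f3F800000 and 1.0 as 0d3FF0000000000000. A sketch of the same encoding for the float case, using the APFloat and format_hex_no_prefix calls visible in the source:
|
#include "llvm/ADT/APFloat.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"

// Sketch: bitcast the float and print its 32 bits as "0f" + 8 hex digits.
void printPtxFloat(float F, llvm::raw_ostream &O) {
  llvm::APInt Bits = llvm::APFloat(F).bitcastToAPInt();
  O << "0f"
    << llvm::format_hex_no_prefix(Bits.getZExtValue(), 8, /*Upper=*/true);
}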
|
llvm::NVPTXTargetLowering::NVPTXTargetLowering(llvm::NVPTXTargetMachine const&, llvm::NVPTXSubtarget const&)
|
NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
const NVPTXSubtarget &STI)
: TargetLowering(TM), nvTM(&TM), STI(STI) {
// Always lower memset, memcpy, and memmove intrinsics to load/store
// instructions, rather than generating calls to memset, memcpy, or
// memmove.
MaxStoresPerMemset = MaxStoresPerMemsetOptSize = (unsigned)0xFFFFFFFF;
MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = (unsigned) 0xFFFFFFFF;
MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = (unsigned) 0xFFFFFFFF;
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// Jump is Expensive. Don't create extra control flow for 'and', 'or'
// condition branches.
setJumpIsExpensive(true);
// Wide divides are _very_ slow. Try to reduce the width of the divide if
// possible.
addBypassSlowDiv(64, 32);
// By default, use the Source scheduling
if (sched4reg)
setSchedulingPreference(Sched::RegPressure);
else
setSchedulingPreference(Sched::Source);
auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
LegalizeAction NoF16Action) {
setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
};
auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
LegalizeAction NoBF16Action) {
bool IsOpSupported = STI.hasBF16Math();
// A few of these instructions are available on sm_90 only.
switch(Op) {
case ISD::FADD:
case ISD::FMUL:
case ISD::FSUB:
case ISD::SELECT:
case ISD::SELECT_CC:
case ISD::SETCC:
case ISD::FEXP2:
case ISD::FCEIL:
case ISD::FFLOOR:
case ISD::FNEARBYINT:
case ISD::FRINT:
case ISD::FROUNDEVEN:
case ISD::FTRUNC:
IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 78;
break;
}
setOperationAction(
Op, VT, IsOpSupported ? Action : NoBF16Action);
};
auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
LegalizeAction NoI16x2Action) {
bool IsOpSupported = false;
// These instructions are available on sm_90 only.
switch (Op) {
case ISD::ADD:
case ISD::SMAX:
case ISD::SMIN:
case ISD::UMIN:
case ISD::UMAX:
IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
break;
}
setOperationAction(Op, VT, IsOpSupported ? Action : NoI16x2Action);
};
addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
addRegisterClass(MVT::v2i16, &NVPTX::Int32RegsRegClass);
addRegisterClass(MVT::v4i8, &NVPTX::Int32RegsRegClass);
addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
addRegisterClass(MVT::f16, &NVPTX::Int16RegsRegClass);
addRegisterClass(MVT::v2f16, &NVPTX::Int32RegsRegClass);
addRegisterClass(MVT::bf16, &NVPTX::Int16RegsRegClass);
addRegisterClass(MVT::v2bf16, &NVPTX::Int32RegsRegClass);
// Conversion to/from FP16/FP16x2 is always legal.
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f16, Expand);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Legal);
setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
// Conversion to/from BF16/BF16x2 is always legal.
setOperationAction(ISD::BUILD_VECTOR, MVT::v2bf16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2bf16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2bf16, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2bf16, Expand);
setBF16OperationAction(ISD::SETCC, MVT::v2bf16, Legal, Expand);
setBF16OperationAction(ISD::SETCC, MVT::bf16, Legal, Promote);
if (getOperationAction(ISD::SETCC, MVT::bf16) == Promote)
AddPromotedToType(ISD::SETCC, MVT::bf16, MVT::f32);
// Conversion to/from i16/i16x2 is always legal.
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i16, Expand);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i8, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
// Only logical ops can be done on v4i8 directly, others must be done
// elementwise.
setOperationAction(
{ISD::ABS, ISD::ADD, ISD::ADDC, ISD::ADDE,
ISD::BITREVERSE, ISD::CTLZ, ISD::CTPOP, ISD::CTTZ,
ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FSHL, ISD::FSHR,
ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::PARITY,
ISD::ROTL, ISD::ROTR, ISD::SADDO, ISD::SADDO_CARRY,
ISD::SADDSAT, ISD::SDIV, ISD::SDIVREM, ISD::SELECT_CC,
ISD::SETCC, ISD::SHL, ISD::SINT_TO_FP, ISD::SMAX,
ISD::SMIN, ISD::SMULO, ISD::SMUL_LOHI, ISD::SRA,
ISD::SREM, ISD::SRL, ISD::SSHLSAT, ISD::SSUBO,
ISD::SSUBO_CARRY, ISD::SSUBSAT, ISD::SUB, ISD::SUBC,
ISD::SUBE, ISD::UADDO, ISD::UADDO_CARRY, ISD::UADDSAT,
ISD::UDIV, ISD::UDIVREM, ISD::UINT_TO_FP, ISD::UMAX,
ISD::UMIN, ISD::UMULO, ISD::UMUL_LOHI, ISD::UREM,
ISD::USHLSAT, ISD::USUBO, ISD::USUBO_CARRY, ISD::VSELECT,
ISD::USUBSAT},
MVT::v4i8, Expand);
// Operations not directly supported by NVPTX.
for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::v4i8,
MVT::i32, MVT::i64}) {
setOperationAction(ISD::SELECT_CC, VT, Expand);
setOperationAction(ISD::BR_CC, VT, Expand);
}
// Some SIGN_EXTEND_INREG can be done using cvt instruction.
// For others we will expand to a SHL/SRA pair.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
// TODO: we may consider expanding ROTL/ROTR on older GPUs. Currently on GPUs
// that don't have h/w rotation we lower them to multi-instruction assembly.
// See ROT*_sw in NVPTXIntrInfo.td
setOperationAction(ISD::ROTL, MVT::i64, Legal);
setOperationAction(ISD::ROTR, MVT::i64, Legal);
setOperationAction(ISD::ROTL, MVT::i32, Legal);
setOperationAction(ISD::ROTR, MVT::i32, Legal);
setOperationAction(ISD::ROTL, MVT::i16, Expand);
setOperationAction(ISD::ROTL, MVT::v2i16, Expand);
setOperationAction(ISD::ROTR, MVT::i16, Expand);
setOperationAction(ISD::ROTR, MVT::v2i16, Expand);
setOperationAction(ISD::ROTL, MVT::i8, Expand);
setOperationAction(ISD::ROTR, MVT::i8, Expand);
setOperationAction(ISD::BSWAP, MVT::i16, Expand);
// Indirect branch is not supported.
// This also disables Jump Table creation.
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BRIND, MVT::Other, Expand);
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
// We want to legalize constant-related memmove and memcpy
// intrinsics.
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
// Turn FP extload into load/fpextend
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::bf16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::bf16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2bf16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2bf16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4bf16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4bf16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8bf16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8bf16, Expand);
// Turn FP truncstore into trunc + store.
// FIXME: vector types should also be expanded
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
setTruncStoreAction(MVT::f64, MVT::bf16, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// PTX does not support load/store of predicate registers.
setOperationAction(ISD::LOAD, MVT::i1, Custom);
setOperationAction(ISD::STORE, MVT::i1, Custom);
for (MVT VT : MVT::integer_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
setTruncStoreAction(VT, MVT::i1, Expand);
}
// expand extload of vector of integers.
setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v2i16,
MVT::v2i8, Expand);
setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
// This is legal in NVPTX
setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
setOperationAction(ISD::ConstantFP, MVT::bf16, Legal);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
// TRAP can be lowered to PTX trap
setOperationAction(ISD::TRAP, MVT::Other, Legal);
// Register custom handling for vector loads/stores
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
if (IsPTXVectorType(VT)) {
setOperationAction(ISD::LOAD, VT, Custom);
setOperationAction(ISD::STORE, VT, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
}
}
// Support varargs.
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
// Custom handling for i8 intrinsics
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
setOperationAction(ISD::ABS, Ty, Legal);
setOperationAction(ISD::SMIN, Ty, Legal);
setOperationAction(ISD::SMAX, Ty, Legal);
setOperationAction(ISD::UMIN, Ty, Legal);
setOperationAction(ISD::UMAX, Ty, Legal);
setOperationAction(ISD::CTPOP, Ty, Legal);
setOperationAction(ISD::CTLZ, Ty, Legal);
}
setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Custom);
setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Custom);
// Other arithmetic and logic ops are unsupported.
setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SRA, ISD::SRL, ISD::MULHS,
ISD::MULHU, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
ISD::SINT_TO_FP, ISD::UINT_TO_FP},
MVT::v2i16, Expand);
setOperationAction(ISD::ADDC, MVT::i32, Legal);
setOperationAction(ISD::ADDE, MVT::i32, Legal);
setOperationAction(ISD::SUBC, MVT::i32, Legal);
setOperationAction(ISD::SUBE, MVT::i32, Legal);
if (STI.getPTXVersion() >= 43) {
setOperationAction(ISD::ADDC, MVT::i64, Legal);
setOperationAction(ISD::ADDE, MVT::i64, Legal);
setOperationAction(ISD::SUBC, MVT::i64, Legal);
setOperationAction(ISD::SUBE, MVT::i64, Legal);
}
setOperationAction(ISD::CTTZ, MVT::i16, Expand);
setOperationAction(ISD::CTTZ, MVT::v2i16, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i64, Expand);
// PTX does not directly support SELP of i1, so promote to i32 first
setOperationAction(ISD::SELECT, MVT::i1, Custom);
// PTX cannot multiply two i64s in a single instruction.
setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
// We have some custom DAG combine patterns for these nodes
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT, ISD::FADD,
ISD::LOAD, ISD::MUL, ISD::SHL, ISD::SREM, ISD::UREM,
ISD::VSELECT});
// setcc for f16x2 and bf16x2 needs special handling to prevent the
// legalizer from scalarizing it, since v2i1 is not legal.
if (STI.allowFP16Math() || STI.hasBF16Math())
setTargetDAGCombine(ISD::SETCC);
// Promote fp16 arithmetic if fp16 hardware isn't available or the
// user passed --nvptx-no-fp16-math. The flag is useful because,
// although sm_53+ GPUs have some sort of FP16 support in
// hardware, only sm_53 and sm_60 have a full implementation. Others
// only have a token amount of hardware and are likely to run faster
// by using fp32 units instead.
for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
setFP16OperationAction(Op, MVT::f16, Legal, Promote);
setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
// bf16 must be promoted to f32.
setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
if (getOperationAction(Op, MVT::bf16) == Promote)
AddPromotedToType(Op, MVT::bf16, MVT::f32);
}
// f16/f16x2 neg was introduced in PTX 60, SM_53.
const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
STI.getPTXVersion() >= 60 &&
STI.allowFP16Math();
for (const auto &VT : {MVT::f16, MVT::v2f16})
setOperationAction(ISD::FNEG, VT,
IsFP16FP16x2NegAvailable ? Legal : Expand);
setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
// (would be) Library functions.
// These map to conversion instructions for scalar FP types.
for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
ISD::FROUNDEVEN, ISD::FTRUNC}) {
setOperationAction(Op, MVT::f16, Legal);
setOperationAction(Op, MVT::f32, Legal);
setOperationAction(Op, MVT::f64, Legal);
setOperationAction(Op, MVT::v2f16, Expand);
setOperationAction(Op, MVT::v2bf16, Expand);
setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
if (getOperationAction(Op, MVT::bf16) == Promote)
AddPromotedToType(Op, MVT::bf16, MVT::f32);
}
if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
setOperationAction(ISD::BF16_TO_FP, MVT::f32, Expand);
}
if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
setOperationAction(ISD::FP_EXTEND, VT, Custom);
setOperationAction(ISD::FP_ROUND, VT, Custom);
}
}
// sm_80 only has conversions between f32 and bf16. Custom lower all other
// bf16 conversions.
if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
setOperationAction(
{ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
VT, Custom);
}
setOperationAction(
{ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
MVT::bf16, Custom);
}
setOperationAction(ISD::FROUND, MVT::f16, Promote);
setOperationAction(ISD::FROUND, MVT::v2f16, Expand);
setOperationAction(ISD::FROUND, MVT::v2bf16, Expand);
setOperationAction(ISD::FROUND, MVT::f32, Custom);
setOperationAction(ISD::FROUND, MVT::f64, Custom);
setOperationAction(ISD::FROUND, MVT::bf16, Promote);
AddPromotedToType(ISD::FROUND, MVT::bf16, MVT::f32);
// 'Expand' implements FCOPYSIGN without calling an external library.
setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::bf16, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::v2bf16, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
// These map to corresponding instructions for f32/f64. f16 must be
// promoted to f32. v2f16 is expanded to f16, which is then promoted
// to f32.
for (const auto &Op :
{ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS}) {
setOperationAction(Op, MVT::f16, Promote);
setOperationAction(Op, MVT::f32, Legal);
setOperationAction(Op, MVT::f64, Legal);
setOperationAction(Op, MVT::v2f16, Expand);
setOperationAction(Op, MVT::v2bf16, Expand);
setOperationAction(Op, MVT::bf16, Promote);
AddPromotedToType(Op, MVT::bf16, MVT::f32);
}
for (const auto &Op : {ISD::FABS}) {
setOperationAction(Op, MVT::f16, Promote);
setOperationAction(Op, MVT::f32, Legal);
setOperationAction(Op, MVT::f64, Legal);
setOperationAction(Op, MVT::v2f16, Expand);
setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
if (getOperationAction(Op, MVT::bf16) == Promote)
AddPromotedToType(Op, MVT::bf16, MVT::f32);
}
// max.f16, max.f16x2 and max.NaN are supported on sm_80+.
auto GetMinMaxAction = [&](LegalizeAction NotSm80Action) {
bool IsAtLeastSm80 = STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
return IsAtLeastSm80 ? Legal : NotSm80Action;
};
for (const auto &Op : {ISD::FMINNUM, ISD::FMAXNUM}) {
setFP16OperationAction(Op, MVT::f16, GetMinMaxAction(Promote), Promote);
setOperationAction(Op, MVT::f32, Legal);
setOperationAction(Op, MVT::f64, Legal);
setFP16OperationAction(Op, MVT::v2f16, GetMinMaxAction(Expand), Expand);
setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
if (getOperationAction(Op, MVT::bf16) == Promote)
AddPromotedToType(Op, MVT::bf16, MVT::f32);
}
for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
setFP16OperationAction(Op, MVT::f16, GetMinMaxAction(Expand), Expand);
setFP16OperationAction(Op, MVT::bf16, Legal, Expand);
setOperationAction(Op, MVT::f32, GetMinMaxAction(Expand));
setFP16OperationAction(Op, MVT::v2f16, GetMinMaxAction(Expand), Expand);
setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
}
// Custom lowering for inline asm with 128-bit operands
setOperationAction(ISD::CopyToReg, MVT::i128, Custom);
setOperationAction(ISD::CopyFromReg, MVT::i128, Custom);
// No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
// No FPOW or FREM in PTX.
// Now deduce the information based on the above-mentioned actions.
computeRegisterProperties(STI.getRegisterInfo());
setMinCmpXchgSizeInBits(32);
setMaxAtomicSizeInBitsSupported(64);
setMaxDivRemBitWidthSupported(64);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r12
callq 0x17b1abc
leaq 0x472e153(%rip), %rax # 0x57af620
movq %rax, (%r12)
movq %r15, 0x4e0f8(%r12)
movq %rbx, 0x4e100(%r12)
movq $-0x1, 0x4e0e8(%r12)
pcmpeqd %xmm0, %xmm0
movdqu %xmm0, 0x4e0cc(%r12)
movabsq $0x200000002, %rax # imm = 0x200000002
movq %rax, 0x34(%r12)
movl $0x2, 0x3c(%r12)
movq %r12, %rdi
movl $0x1, %esi
callq 0x1e6d31e
leaq 0x10(%rsp), %rsi
movl $0x40, (%rsi)
leaq 0x18(%r12), %rdi
callq 0xb10a08
movl $0x20, 0x4(%rax)
movb 0x48a7cfe(%rip), %al # 0x5929240
incb %al
movb %al, 0x40(%r12)
leaq 0x47308c0(%rip), %rax # 0x57b1e10
movq %rax, 0x78(%r12)
leaq 0x473087c(%rip), %rax # 0x57b1dd8
movq %rax, 0x98(%r12)
leaq 0x4730835(%rip), %rcx # 0x57b1da0
movq %rcx, 0x1e0(%r12)
movq %rcx, 0x190(%r12)
movq %rcx, 0xa0(%r12)
leaq 0x47307de(%rip), %rdx # 0x57b1d68
movq %rdx, 0xa8(%r12)
leaq 0x4730727(%rip), %rdx # 0x57b1cc0
movq %rdx, 0xc8(%r12)
leaq 0x4730750(%rip), %rdx # 0x57b1cf8
movq %rdx, 0xd0(%r12)
movq %rax, 0xc0(%r12)
movq %rcx, 0x330(%r12)
movq %rax, 0xb8(%r12)
movq %rcx, 0x380(%r12)
movw $0x204, 0xba22(%r12) # imm = 0x204
movb $0x4, 0xba24(%r12)
movb $0x2, 0xba2b(%r12)
movb $0x0, 0x214f(%r12)
movq %rbx, 0x8(%rsp)
cmpl $0x12c, 0x144(%rbx) # imm = 0x12C
jb 0x108161e
movq 0x8(%rsp), %rax
cmpl $0x20, 0x140(%rax)
jb 0x108161e
movb $0x0, 0x2150(%r12)
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
callq 0x10924a6
xorb $0x1, %al
movb %al, 0x2690(%r12)
movq %rbx, %rdi
callq 0x10924a6
xorb $0x1, %al
addb %al, %al
movb %al, 0xba56(%r12)
movw $0x204, 0xcd14(%r12) # imm = 0x204
movb $0x4, 0xcd16(%r12)
movb $0x2, 0xcd1d(%r12)
cmpl $0x384, 0x144(%rbx) # imm = 0x384
setb %al
cmpl $0x4e, 0x140(%rbx)
setb %cl
orb %al, %cl
addb %cl, %cl
movb %cl, 0xcd48(%r12)
cmpl $0x4e, 0x140(%rbx)
setb %cl
orb %al, %cl
movb %cl, 0x24ab(%r12)
cmpb $0x1, %cl
jne 0x10816ca
leaq 0x4b570(%r12), %rdi
leaq 0x10(%rsp), %rsi
movabsq $0xa000000cf, %rax # imm = 0xA000000CF
movl %eax, (%rsi)
shrq $0x20, %rax
movb %al, 0x4(%rsi)
callq 0xa4e406
movb $0xc, (%rax)
leaq 0x10ea(%r12), %r13
movw $0x204, 0x6a90(%r12) # imm = 0x204
movb $0x4, %al
movb %al, 0x6a92(%r12)
movb $0x2, 0x6a99(%r12)
movw $0x404, 0x579e(%r12) # imm = 0x404
movb %al, 0x57a0(%r12)
movb %al, 0x57a7(%r12)
xorl %eax, %eax
leaq 0x2a8e9b8(%rip), %rcx # 0x3b100cc
movl (%rax,%rcx), %edx
movb $0x2, 0x4619(%r13,%rdx)
addq $0x4, %rax
cmpq $0xe4, %rax
jne 0x1081714
movabsq $0x5020d0c59630b0a, %rax # imm = 0x5020D0C59630B0A
movq %rax, 0x10(%rsp)
movl $0x7252f06, 0x18(%rsp) # imm = 0x7252F06
movb $0x8, 0x1c(%rsp)
xorl %eax, %eax
movb $0x2, %cl
movzbl 0x10(%rsp,%rax), %edx
imulq $0x1e5, %rdx, %rdx # imm = 0x1E5
movb %cl, 0xce(%r13,%rdx)
movb %cl, 0x129(%r13,%rdx)
incq %rax
cmpq $0xd, %rax
jne 0x108174c
xorl %edx, %edx
movb %dl, 0x20ec(%r12)
movb %dl, 0x1f07(%r12)
movb %dl, 0x1d22(%r12)
movb %dl, 0x1b3d(%r12)
movb $0x2, %al
movb %al, 0x158e(%r12)
movb %al, 0x6acf(%r12)
movw $0x404, %cx # imm = 0x404
movw %cx, 0x1efe(%r12)
movb $0x4, %sil
movb %sil, 0x1f00(%r12)
movw %cx, 0x20e3(%r12)
movb %sil, 0x20e5(%r12)
movb %dl, 0x1ef5(%r12)
movb %dl, 0x20da(%r12)
movw %dx, 0x20d2(%r12)
movw %dx, 0x1eed(%r12)
movw $0x202, %dx # imm = 0x202
movw %dx, 0x1d08(%r12)
movw %dx, 0x6ab5(%r12)
movw %dx, 0x1b23(%r12)
movb %al, 0x1d0c(%r12)
movb %al, 0x13f5(%r12)
movb %al, 0x13f4(%r12)
movb %sil, 0x1e3a(%r12)
movb %sil, 0x201f(%r12)
movb %sil, 0x12fe(%r12)
movss 0x1a0a4(%r12), %xmm0
movaps 0x27179a0(%rip), %xmm1 # 0x37991f0
andps %xmm1, %xmm0
movaps 0x27179a6(%rip), %xmm2 # 0x3799200
orps %xmm2, %xmm0
movss %xmm0, 0x1a0a4(%r12)
movss 0x1a236(%r12), %xmm0
andps %xmm1, %xmm0
orps %xmm2, %xmm0
movss %xmm0, 0x1a236(%r12)
movzwl 0x1a23a(%r12), %esi
movl $0xffffff0f, %edx # imm = 0xFFFFFF0F
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x1a23a(%r12)
movzwl 0x23802(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x23802(%r12)
movzwl 0x2576a(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x2576a(%r12)
movzwl 0x23816(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x23816(%r12)
movzwl 0x2577e(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x2577e(%r12)
movl 0x25790(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x25790(%r12)
movzwl 0x23b2a(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x23b2a(%r12)
movzwl 0x25a92(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x25a92(%r12)
movzwl 0x23b3e(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x23b3e(%r12)
movzwl 0x25aa6(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x25aa6(%r12)
movl 0x25ab8(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x25ab8(%r12)
movl 0x24174(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x24174(%r12)
movzwl 0x25c26(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x25c26(%r12)
movl 0x24188(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x24188(%r12)
movzwl 0x25c3a(%r12), %esi
andl %edx, %esi
orl $0x20, %esi
movw %si, 0x25c3a(%r12)
movb %al, 0x40e73(%r12)
movb %al, 0x40f3c(%r12)
movb %al, 0x40e72(%r12)
movb %al, 0x40f3b(%r12)
movb %al, 0x40f3d(%r12)
movw %cx, 0x15d5(%r12)
movq $-0x648, %rax # imm = 0xF9B8
movzwl 0x19d70(%r12,%rax,2), %ecx
andl $0xf, %ecx
orl $0x1110, %ecx # imm = 0x1110
movw %cx, 0x19d70(%r12,%rax,2)
movb $0x2, 0x40cd8(%r12,%rax)
addq $0xc9, %rax
jne 0x1081a13
movabsq $0x200000001, %rax # imm = 0x200000001
movq %rax, 0x10(%rsp)
movl $0x3, 0x18(%rsp)
movzwl 0x1d7cc(%r12), %eax
xorl %edx, %edx
movb 0x10(%rsp,%rdx), %cl
shlb $0x2, %cl
movl $0xf, %esi
shll %cl, %esi
notl %esi
andl %esi, %eax
movl $0x2, %esi
shll %cl, %esi
orl %eax, %esi
movl %esi, %eax
addq $0x4, %rdx
cmpq $0xc, %rdx
jne 0x1081a61
movw %ax, 0x1d7cc(%r12)
movb $0x2, 0x42a06(%r12)
xorl %eax, %eax
movb %al, 0x2997(%r12)
movb %al, 0x27b2(%r12)
movb %al, 0x25cd(%r12)
movb %al, 0x23e8(%r12)
movb $0x4, %cl
movb %cl, 0x1f50(%r12)
movb %cl, 0x2135(%r12)
movb %al, 0x1411(%r12)
movl $0x3241, %ebx # imm = 0x3241
movb $0x11, %bpl
movzbl %bpl, %edi
callq 0x108255d
testb %al, %al
je 0x1081afc
movw $0x404, -0x1(%r12,%rbx) # imm = 0x404
movb $0x4, -0xf3(%r12,%rbx)
addq $0x1e5, %rbx # imm = 0x1E5
incb %bpl
cmpq $0x11599, %rbx # imm = 0x11599
jne 0x1081ade
movl $0x4020204, 0x1403(%r12) # imm = 0x4020204
movb $0x4, 0x1a92(%r12)
movw $0x706, 0x10(%rsp) # imm = 0x706
movb $0x8, 0x12(%rsp)
xorl %eax, %eax
movzbl 0x10(%rsp,%rax), %ecx
imulq $0x1e5, %rcx, %rcx # imm = 0x1E5
movb $0x0, 0xbc(%r13,%rcx)
movw $0x0, 0xc6(%r13,%rcx)
incq %rax
movl $0x0, 0xb3(%r13,%rcx)
cmpq $0x3, %rax
jne 0x1081b32
movb $0x4, %al
movb %al, 0x6ab1(%r12)
movq 0x8(%rsp), %rsi
cmpl $0x384, 0x144(%rsi) # imm = 0x384
setb %cl
cmpl $0x50, 0x140(%rsi)
setb %dl
orb %cl, %dl
shlb $0x2, %dl
movb %dl, 0x6aa8(%r12)
cmpl $0x50, 0x140(%rsi)
setb %dl
orb %cl, %dl
shlb $0x2, %dl
movb %dl, 0x6aa9(%r12)
cmpl $0x50, 0x140(%rsi)
setb %dl
orb %cl, %dl
shlb $0x2, %dl
movb %dl, 0x6aaa(%r12)
cmpl $0x50, 0x140(%rsi)
setb %dl
orb %cl, %dl
shlb $0x2, %dl
movb %dl, 0x6aab(%r12)
movw $0x202, 0x6abb(%r12) # imm = 0x202
cmpl $0x50, 0x140(%rsi)
setb %dl
orb %cl, %dl
shlb $0x2, %dl
movb %dl, 0x6a2d(%r12)
movw $0x404, %cx # imm = 0x404
movw %cx, 0x6a2e(%r12)
movb %al, 0x6ab2(%r12)
movw %cx, 0x6a32(%r12)
xorl %eax, %eax
leaq 0x2a8e588(%rip), %rcx # 0x3b101b0
movl (%rax,%rcx), %edx
movb $0x2, 0x590b(%r13,%rdx)
addq $0x4, %rax
cmpq $0x28, %rax
jne 0x1081c28
movl $0x0, 0x1e71(%r12)
movq 0x8(%rsp), %rax
cmpl $0x2a, 0x140(%rax)
jbe 0x1081c64
movl $0x0, 0x2056(%r12)
movb $0x2, %al
movb %al, 0x1d0d(%r12)
movb %al, 0x6aba(%r12)
movb %al, 0x1ef2(%r12)
movb %al, 0x20d7(%r12)
movb $0x4, 0x1580(%r12)
movw $0x202, 0x2051(%r12) # imm = 0x202
xorl %eax, %eax
leaq 0x2a8e535(%rip), %rdx # 0x3b101d8
movl (%rax,%rdx), %esi
movl %esi, %ecx
andb $0x7, %cl
movl $0x1, %edi
shll %cl, %edi
sarl $0x3, %esi
movslq %esi, %rcx
orb %dil, 0x4b531(%r12,%rcx)
addq $0x4, %rax
cmpq $0x28, %rax
jne 0x1081ca3
movq 0x8(%rsp), %rdi
callq 0x10924a6
testb %al, %al
jne 0x1081ce6
movq 0x8(%rsp), %rax
cmpl $0x50, 0x148(%rax)
jb 0x1081cef
orb $-0x80, 0x4b54a(%r12)
movabsq $0xa00000000, %rax # imm = 0xA00000000
movaps 0x2a89d70(%rip), %xmm0 # 0x3b0ba70
movaps %xmm0, 0x10(%rsp)
movq %r12, 0x28(%rsp)
leaq 0x4b570(%r12), %rcx
movq %rcx, 0x38(%rsp)
xorl %r15d, %r15d
shrq $0x20, %rax
movq %rax, 0x30(%rsp)
leaq 0x20(%rsp), %r12
movq 0x8(%rsp), %r14
movl 0x10(%rsp,%r15), %ebx
movq %r14, %rdi
callq 0x10924a6
xorb $0x1, %al
movb %al, 0x14d7(%r13,%rbx)
movl 0x10(%rsp,%r15), %ebp
movq %r14, %rdi
callq 0x10924a6
xorb $0x1, %al
addb %al, %al
movb %al, 0xa89d(%r13,%rbp)
cmpl $0x50, 0x148(%r14)
setae %al
leal -0xce(%rbp), %ecx
cmpl $0x3f, %ecx
ja 0x1081d83
movabsq $-0x217ffffffffffffd, %rdx # imm = 0xDE80000000000003
btq %rcx, %rdx
jb 0x1081d95
leal -0x60(%rbp), %ecx
cmpl $0x3, %ecx
jb 0x1081d95
movl %eax, %ecx
cmpl $0xcc, %ebp
jne 0x1081db9
movq 0x8(%rsp), %rcx
cmpl $0x384, 0x144(%rcx) # imm = 0x384
jb 0x1081db7
movq 0x8(%rsp), %rcx
cmpl $0x4e, 0x140(%rcx)
setae %cl
jmp 0x1081db9
xorl %ecx, %ecx
xorb $0x1, %cl
addb %cl, %cl
movb %cl, 0xbb8f(%r13,%rbp)
leal -0xce(%rbp), %ecx
cmpl $0x3f, %ecx
ja 0x1081de1
movabsq $-0x217ffffffffffffd, %rdx # imm = 0xDE80000000000003
btq %rcx, %rdx
jb 0x1081df1
leal -0x60(%rbp), %ecx
cmpl $0x3, %ecx
jb 0x1081df1
cmpl $0xcc, %ebp
jne 0x1081e15
movq 0x8(%rsp), %rax
cmpl $0x384, 0x144(%rax) # imm = 0x384
jb 0x1081e13
movq 0x8(%rsp), %rax
cmpl $0x4e, 0x140(%rax)
setae %al
jmp 0x1081e15
xorl %eax, %eax
movl %eax, %ecx
xorb $0x1, %cl
movb %cl, 0x12f2(%r13,%rbp)
cmpl $0x1e5, %ebp # imm = 0x1E5
setae %cl
orb %al, %cl
testb $0x1, %cl
jne 0x1081e5c
movabsq $0xa00000000, %rax # imm = 0xA00000000
orq %rax, %rbp
movq 0x30(%rsp), %rax
movb %al, 0x24(%rsp)
movl %ebp, 0x20(%rsp)
movq 0x38(%rsp), %rdi
movq %r12, %rsi
callq 0xa4e406
movb $0xc, (%rax)
addq $0x4, %r15
cmpq $0x10, %r15
jne 0x1081d2d
movq 0x8(%rsp), %rax
cmpl $0x212, 0x144(%rax) # imm = 0x212
movb $0x2, %al
jb 0x1081ea0
movq 0x8(%rsp), %rcx
cmpl $0x3b, 0x140(%rcx)
movq 0x28(%rsp), %rbx
jbe 0x1081ea5
movq 0x8(%rsp), %rdi
callq 0x10924a6
xorb $0x1, %al
addb %al, %al
jmp 0x1081ea5
movq 0x28(%rsp), %rbx
movw $0x590b, 0x10(%rsp) # imm = 0x590B
xorl %ecx, %ecx
movzbl 0x10(%rsp,%rcx), %edx
imulq $0x1e5, %rdx, %rdx # imm = 0x1E5
movb %al, 0xf0(%r13,%rdx)
incq %rcx
cmpq $0x2, %rcx
jne 0x1081eae
movq 0x8(%rsp), %rax
cmpl $0x50, 0x148(%rax)
setb %al
addb %al, %al
movb %al, 0x24cc(%rbx)
movb %al, 0xcd69(%rbx)
xorl %r15d, %r15d
leaq 0x2a8e30e(%rip), %rbx # 0x3b10200
movb $0x2, %r12b
leaq 0x10(%rsp), %r14
xorl %ebp, %ebp
movl (%rbp,%rbx), %eax
movb %r15b, 0x14d7(%r13,%rax)
movb %r15b, 0x16bc(%r13,%rax)
movb %r15b, 0x18a1(%r13,%rax)
movb %r12b, 0xa89d(%r13,%rax)
movb %r12b, 0xbb8f(%r13,%rax)
movq 0x8(%rsp), %rcx
cmpl $0x50, 0x148(%rcx)
setae %cl
leal -0xce(%rax), %edx
cmpl $0x3f, %edx
ja 0x1081f52
movabsq $-0x217ffffffffffffd, %rsi # imm = 0xDE80000000000003
btq %rdx, %rsi
jb 0x1081f61
leal -0x60(%rax), %edx
cmpl $0x3, %edx
jb 0x1081f61
cmpl $0xcc, %eax
jne 0x1081f85
movq 0x8(%rsp), %rcx
cmpl $0x384, 0x144(%rcx) # imm = 0x384
jb 0x1081f83
movq 0x8(%rsp), %rcx
cmpl $0x4e, 0x140(%rcx)
setae %cl
jmp 0x1081f85
xorl %ecx, %ecx
movl %ecx, %edx
xorb $0x1, %dl
movb %dl, 0x12f2(%r13,%rax)
cmpl $0x1e5, %eax # imm = 0x1E5
setae %dl
orb %cl, %dl
testb $0x1, %dl
jne 0x1081fcb
movabsq $0xa00000000, %rcx # imm = 0xA00000000
orq %rcx, %rax
movq 0x30(%rsp), %rcx
movb %cl, 0x14(%rsp)
movl %eax, 0x10(%rsp)
movq 0x38(%rsp), %rdi
movq %r14, %rsi
callq 0xa4e406
movb $0xc, (%rax)
addq $0x4, %rbp
cmpq $0x18, %rbp
jne 0x1081efc
movq 0x8(%rsp), %rax
movl 0x144(%rax), %eax
cmpl $0x320, %eax # imm = 0x320
movq 0x28(%rsp), %rbx
jb 0x1081ffe
movq 0x8(%rsp), %rcx
cmpl $0x47, 0x140(%rcx)
jae 0x1082005
movb $0x2, 0x2892(%rbx)
cmpl $0x384, %eax # imm = 0x384
jb 0x108201a
movq 0x8(%rsp), %rcx
cmpl $0x4d, 0x140(%rcx)
ja 0x108204f
movw $0xc0a, 0x10(%rsp) # imm = 0xC0A
movb $0xd, 0x12(%rsp)
xorl %ecx, %ecx
movb $0x4, %dl
movzbl 0x10(%rsp,%rcx), %esi
imulq $0x1e5, %rsi, %rsi # imm = 0x1E5
movb %dl, 0xe5(%r13,%rsi)
movb %dl, 0xe2(%r13,%rsi)
incq %rcx
cmpq $0x3, %rcx
jne 0x108202a
cmpl $0x384, %eax # imm = 0x384
jb 0x1082064
movq 0x8(%rsp), %rax
cmpl $0x4d, 0x140(%rax)
ja 0x10820c4
movl $0x8070602, 0x20(%rsp) # imm = 0x8070602
xorl %eax, %eax
movaps 0x2a89a0b(%rip), %xmm0 # 0x3b0ba80
movzbl 0x20(%rsp,%rax), %ecx
movaps %xmm0, 0x10(%rsp)
imulq $0x1e5, %rcx, %rcx # imm = 0x1E5
addq %r13, %rcx
xorl %edx, %edx
movl 0x10(%rsp,%rdx), %esi
movb $0x4, (%rsi,%rcx)
addq $0x4, %rdx
cmpq $0x10, %rdx
jne 0x108208b
incq %rax
cmpq $0x4, %rax
jne 0x1082075
movaps %xmm0, 0x10(%rsp)
xorl %eax, %eax
movl 0x10(%rsp,%rax), %ecx
movb $0x4, 0x12f2(%r13,%rcx)
addq $0x4, %rax
cmpq $0x10, %rax
jne 0x10820ad
movb $0x1, %cl
movb %cl, 0x26cc(%rbx)
movb $0x2, %r15b
movb %r15b, 0xba92(%rbx)
movb %r15b, 0xcd84(%rbx)
movb $0x4, %al
movb %al, 0x28b1(%rbx)
movb %al, 0x2a96(%rbx)
movb %cl, 0x24e7(%rbx)
movabsq $0xa000000cf, %rax # imm = 0xA000000CF
addq $0x3c, %rax
leaq 0x10(%rsp), %rsi
movl %eax, (%rsi)
shrq $0x20, %rax
movb %al, 0x4(%rsi)
movq 0x38(%rsp), %rdi
movq %rdi, %r14
callq 0xa4e406
movb $0xc, (%rax)
movb %r15b, 0x2658(%rbx)
movb %r15b, 0xba1e(%rbx)
movb %r15b, 0x2473(%rbx)
movb %r15b, 0xcd10(%rbx)
movb %r15b, 0x283d(%rbx)
movb %r15b, 0x2a22(%rbx)
xorl %r12d, %r12d
xorl %ebx, %ebx
movq 0x30(%rsp), %rbp
leaq 0x2a8e0c0(%rip), %rax # 0x3b10218
movl (%rbx,%rax), %eax
movb $0x1, %cl
movb %cl, 0x14d7(%r13,%rax)
movb %r12b, 0x16bc(%r13,%rax)
movb %r12b, 0x18a1(%r13,%rax)
movb %r15b, 0xa89d(%r13,%rax)
movb %r15b, 0xbb8f(%r13,%rax)
movb %cl, 0x12f2(%r13,%rax)
movabsq $0xa00000000, %rcx # imm = 0xA00000000
orq %rcx, %rax
movb %bpl, 0x14(%rsp)
movl %eax, 0x10(%rsp)
movq %r14, %rdi
leaq 0x10(%rsp), %rsi
callq 0xa4e406
movb $0xc, (%rax)
addq $0x4, %rbx
cmpq $0x14, %rbx
jne 0x1082151
movq %r13, 0x40(%rsp)
movq 0x28(%rsp), %rdx
movb $0x1, 0x26b2(%rdx)
xorl %eax, %eax
movb %al, 0x2897(%rdx)
movb %al, 0x2a7c(%rdx)
movb $0x2, 0xba78(%rdx)
movq 0x8(%rsp), %rax
movl 0x148(%rax), %eax
cmpl $0x50, %eax
setb 0x24cd(%rdx)
setb %cl
addb %cl, %cl
movb %cl, 0xcd6a(%rdx)
cmpl $0x4f, %eax
ja 0x108222f
movabsq $0xa000000cf, %rax # imm = 0xA000000CF
addq $0x22, %rax
leaq 0x10(%rsp), %rsi
movl %eax, (%rsi)
shrq $0x20, %rax
movb %al, 0x4(%rsi)
movq %r14, %rdi
callq 0xa4e406
movb $0xc, (%rax)
movabsq $0x11300000112, %rax # imm = 0x11300000112
movq %rax, 0x20(%rsp)
xorl %ebp, %ebp
movq 0x8(%rsp), %r14
movl 0x20(%rsp,%rbp), %ebx
movl $0x320, %r13d # imm = 0x320
cmpl %r13d, 0x144(%r14)
setb %r12b
cmpl $0x46, 0x140(%r14)
setb %r15b
movq %r14, %rdi
callq 0x10924a6
xorb $0x1, %al
orb %r12b, %al
orb %r15b, %al
movq 0x40(%rsp), %rcx
movb %al, 0x14d7(%rcx,%rbx)
movl 0x20(%rsp,%rbp), %ebx
xorl %eax, %eax
movb %al, 0x16bc(%rcx,%rbx)
movb %al, 0x18a1(%rcx,%rbx)
cmpl %r13d, 0x144(%r14)
movq %rcx, %r13
setb %r15b
cmpl $0x46, 0x140(%r14)
setb %r12b
movq %r14, %rdi
callq 0x10924a6
xorb $0x1, %al
orb %r15b, %al
orb %r12b, %al
addb %al, %al
movb %al, 0xa89d(%r13,%rbx)
cmpl $0x50, 0x148(%r14)
movl 0x20(%rsp,%rbp), %eax
setae %cl
leal -0xce(%rax), %edx
cmpl $0x3f, %edx
ja 0x10822f4
movabsq $-0x217ffffffffffffd, %rsi # imm = 0xDE80000000000003
btq %rdx, %rsi
jb 0x1082305
leal -0x60(%rax), %edx
cmpl $0x3, %edx
jb 0x1082305
movl %ecx, %edx
cmpl $0xcc, %eax
jne 0x1082329
movq 0x8(%rsp), %rdx
cmpl $0x384, 0x144(%rdx) # imm = 0x384
jb 0x1082327
movq 0x8(%rsp), %rdx
cmpl $0x4e, 0x140(%rdx)
setae %dl
jmp 0x1082329
xorl %edx, %edx
xorb $0x1, %dl
addb %dl, %dl
movb %dl, 0xbb8f(%r13,%rax)
leal -0xce(%rax), %edx
cmpl $0x3f, %edx
movabsq $0xa00000000, %r8 # imm = 0xA00000000
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %r9
ja 0x1082365
movabsq $-0x217ffffffffffffd, %rsi # imm = 0xDE80000000000003
btq %rdx, %rsi
jb 0x1082374
leal -0x60(%rax), %edx
cmpl $0x3, %edx
jb 0x1082374
cmpl $0xcc, %eax
jne 0x1082398
movq 0x8(%rsp), %rcx
cmpl $0x384, 0x144(%rcx) # imm = 0x384
jb 0x1082396
movq 0x8(%rsp), %rcx
cmpl $0x4e, 0x140(%rcx)
setae %cl
jmp 0x1082398
xorl %ecx, %ecx
movl %ecx, %edx
xorb $0x1, %dl
movb %dl, 0x12f2(%r13,%rax)
cmpl $0x1e5, %eax # imm = 0x1E5
setae %dl
orb %cl, %dl
testb $0x1, %dl
jne 0x10823cd
orq %r8, %rax
movb %r9b, 0x14(%rsp)
movl %eax, 0x10(%rsp)
leaq 0x10(%rsp), %rsi
callq 0xa4e406
movb $0xc, (%rax)
addq $0x4, %rbp
cmpq $0x8, %rbp
jne 0x1082245
movabsq $0x11700000116, %rax # imm = 0x11700000116
movq %rax, 0x10(%rsp)
xorl %r15d, %r15d
movq 0x40(%rsp), %r13
movq 0x8(%rsp), %r14
movl 0x10(%rsp,%r15), %ebx
movl $0x320, %eax # imm = 0x320
cmpl %eax, 0x144(%r14)
setb %r12b
cmpl $0x46, 0x140(%r14)
setb %bpl
movq %r14, %rdi
callq 0x10924a6
xorb $0x1, %al
orb %r12b, %al
orb %bpl, %al
addb %al, %al
movb %al, 0x14d7(%r13,%rbx)
movl 0x10(%rsp,%r15), %r12d
movq %r14, %rdi
callq 0x10924a6
xorb $0x1, %al
addb %al, %al
movb %al, 0x12f2(%r13,%r12)
movl $0x320, %eax # imm = 0x320
cmpl %eax, 0x144(%r14)
setae %bl
setb %al
cmpl $0x46, 0x140(%r14)
setb %cl
orb %al, %cl
addb %cl, %cl
movb %cl, 0x16bc(%r13,%r12)
cmpl $0x46, 0x140(%r14)
setb %bpl
movq %r14, %rdi
callq 0x10924a6
andb %bl, %al
xorb $0x1, %al
orb %bpl, %al
addb %al, %al
movb %al, 0xa89d(%r13,%r12)
cmpl $0x50, 0x148(%r14)
movl 0x10(%rsp,%r15), %eax
setae %cl
leal -0xce(%rax), %edx
cmpl $0x3f, %edx
ja 0x10824c4
movabsq $-0x217ffffffffffffd, %rsi # imm = 0xDE80000000000003
btq %rdx, %rsi
jb 0x10824d3
leal -0x60(%rax), %edx
cmpl $0x3, %edx
jb 0x10824d3
cmpl $0xcc, %eax
jne 0x10824f7
movq 0x8(%rsp), %rcx
cmpl $0x384, 0x144(%rcx) # imm = 0x384
jb 0x10824f5
movq 0x8(%rsp), %rcx
cmpl $0x4e, 0x140(%rcx)
setae %cl
jmp 0x10824f7
xorl %ecx, %ecx
xorb $0x1, %cl
addb %cl, %cl
movb %cl, 0xbb8f(%r13,%rax)
addq $0x4, %r15
cmpq $0x8, %r15
jne 0x10823f7
movq 0x28(%rsp), %rbx
movw $0x404, 0x2228(%rbx) # imm = 0x404
movq 0x8(%rsp), %rdi
movq (%rdi), %rax
callq *0xc8(%rax)
movq %rbx, %rdi
movq %rax, %rsi
callq 0x1e6df66
movl $0x20, 0x58(%rbx)
movabsq $0x4000000040, %rax # imm = 0x4000000040
movq %rax, 0x4c(%rbx)
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/NVPTX/NVPTXISelLowering.cpp
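|
The constructor is one long sequence of setOperationAction/setLoadExtAction writes into the legalization tables, with the set*OperationAction lambdas gating each entry on subtarget features (allowFP16Math, hasBF16Math, SM and PTX versions). A sketch of how the tables are consumed afterwards, using the public query API:
|
#include "llvm/CodeGen/TargetLowering.h"

// Sketch: entries marked Custom in the table route back to the target's
// LowerOperation() during legalization.
bool froundIsCustom(const llvm::TargetLoweringBase &TLI) {
  return TLI.getOperationAction(llvm::ISD::FROUND, llvm::MVT::f32) ==
         llvm::TargetLoweringBase::Custom;
}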
|
PromoteScalarIntegerPTX(llvm::EVT const&, llvm::MVT*)
|
static bool PromoteScalarIntegerPTX(const EVT &VT, MVT *PromotedVT) {
if (VT.isScalarInteger()) {
switch (PowerOf2Ceil(VT.getFixedSizeInBits())) {
default:
llvm_unreachable(
"Promotion is not suitable for scalars of size larger than 64-bits");
case 1:
*PromotedVT = MVT::i1;
break;
case 2:
case 4:
case 8:
*PromotedVT = MVT::i8;
break;
case 16:
*PromotedVT = MVT::i16;
break;
case 32:
*PromotedVT = MVT::i32;
break;
case 64:
*PromotedVT = MVT::i64;
break;
}
return EVT(*PromotedVT) != VT;
}
return false;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movzbl (%rdi), %r15d
testq %r15, %r15
je 0x1086bbc
leal -0x2(%r15), %eax
cmpb $0x8, %al
setb %al
jmp 0x1086bc4
movq %rbx, %rdi
callq 0x1e89dd2
testb %al, %al
je 0x1086bdf
testq %r15, %r15
je 0x1086be3
shll $0x4, %r15d
leaq 0x2148168(%rip), %rax # 0x31ced40
movq -0x10(%r15,%rax), %rax
jmp 0x1086beb
xorl %eax, %eax
jmp 0x1086c3e
movq %rbx, %rdi
callq 0x1e89e20
testq %rax, %rax
jle 0x1086bff
decq %rax
je 0x1086c03
bsrq %rax, %rcx
xorq $0x3f, %rcx
jmp 0x1086c08
xorl %eax, %eax
jmp 0x1086c12
movl $0x40, %ecx
negb %cl
movl $0x1, %eax
shlq %cl, %rax
bsfq %rax, %rcx
shll $0x3, %ecx
movabsq $0x8070605050502, %rdx # imm = 0x8070605050502
shrq %cl, %rdx
movb %dl, (%r14)
movb $0x1, %al
cmpb %dl, (%rbx)
jne 0x1086c3e
testb %dl, %dl
sete %cl
cmpq $0x0, 0x8(%rbx)
setne %al
andb %cl, %al
popq %rbx
popq %r14
popq %r15
retq
|
/Target/NVPTX/NVPTXISelLowering.cpp
|
llvm::cl::opt<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, false, llvm::cl::parser<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>>::opt<char [32], llvm::cl::desc, llvm::cl::initializer<char [1]>, llvm::cl::OptionHidden>(char const (&) [32], llvm::cl::desc const&, llvm::cl::initializer<char [1]> const&, llvm::cl::OptionHidden const&)
|
explicit opt(const Mods &... Ms)
: Option(llvm::cl::Optional, NotHidden), Parser(*this) {
apply(this, Ms...);
done();
}
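
A hedged usage sketch for this instantiation: the template arguments in the symbol (char [32], desc, initializer<char [1]>, OptionHidden) correspond to a 31-character option name literal, a description, an empty string default from cl::init(""), and cl::Hidden. The option name below is made up for illustration.

#include "llvm/Support/CommandLine.h"
#include <string>

// 31 characters + NUL gives the char[32] seen in the mangled name.
static llvm::cl::opt<std::string> SampleOpt(
    "nvptx-sample-string-option-name",
    llvm::cl::desc("Illustrative hidden string option"),
    llvm::cl::init(""), llvm::cl::Hidden);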
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %r8, %rbx
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %r14
xorl %ebp, %ebp
xorl %esi, %esi
xorl %edx, %edx
callq 0x7fca88
leaq 0x90(%r14), %rax
movq %rax, 0x80(%r14)
movq %rbp, 0x88(%r14)
movb %bpl, 0x90(%r14)
leaq 0xb8(%r14), %rax
movq %rax, 0xa8(%r14)
movq %rbp, 0xb0(%r14)
movb %bpl, 0xb8(%r14)
movb %bpl, 0xc8(%r14)
leaq 0x485638d(%rip), %rax # 0x58f7988
addq $0x10, %rax
movq %rax, 0xa0(%r14)
leaq 0x485672b(%rip), %rax # 0x58f7d38
addq $0x10, %rax
movq %rax, (%r14)
leaq 0x48565ed(%rip), %rax # 0x58f7c08
addq $0x10, %rax
movq %rax, 0xd0(%r14)
xorps %xmm0, %xmm0
movups %xmm0, 0xd8(%r14)
leaq -0x8a4a36(%rip), %rax # 0x7fcc02
movq %rax, 0xf0(%r14)
leaq -0x8a4a42(%rip), %rax # 0x7fcc04
movq %rax, 0xe8(%r14)
movq %r14, %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movq %rbx, %r8
callq 0x10a2a59
movq %r14, %rdi
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2b1e814
nop
|
/llvm/Support/CommandLine.h
|
llvm::NVPTXAAResult::getModRefInfoMask(llvm::MemoryLocation const&, llvm::AAQueryInfo&, bool)
|
ModRefInfo NVPTXAAResult::getModRefInfoMask(const MemoryLocation &Loc,
AAQueryInfo &AAQI,
bool IgnoreLocals) {
if (isConstOrParam(Loc.Ptr->getType()->getPointerAddressSpace()))
return ModRefInfo::NoModRef;
const Value *Base = getUnderlyingObject(Loc.Ptr);
if (isConstOrParam(Base->getType()->getPointerAddressSpace()))
return ModRefInfo::NoModRef;
return ModRefInfo::ModRef;
}
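
A sketch of the isConstOrParam predicate used above, assuming NVPTX's address-space numbering (ADDRESS_SPACE_CONST = 4, ADDRESS_SPACE_PARAM = 101); the compiled code compares against 0x4 and 0x65, which is consistent with that numbering.

// Pointers in the const or param address spaces cannot be modified,
// so accesses through them are NoModRef.
static bool isConstOrParam(unsigned AS) {
  return AS == 4 /*const*/ || AS == 101 /*param*/;
}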
|
movq (%rsi), %rdi
movq 0x8(%rdi), %rax
movl $0xfe, %ecx
andl 0x8(%rax), %ecx
cmpl $0x12, %ecx
jne 0x10a2d57
movq 0x10(%rax), %rax
movq (%rax), %rax
movl 0x8(%rax), %ecx
shrl $0x8, %ecx
xorl %eax, %eax
cmpl $0x4, %ecx
je 0x10a2da9
cmpl $0x65, %ecx
je 0x10a2da9
pushq %rax
movl $0x6, %esi
callq 0x26fbb24
movq 0x8(%rax), %rax
movl $0xfe, %ecx
andl 0x8(%rax), %ecx
cmpl $0x12, %ecx
jne 0x10a2d8c
movq 0x10(%rax), %rax
movq (%rax), %rax
movl 0x8(%rax), %eax
shrl $0x8, %eax
xorl %ecx, %ecx
cmpl $0x65, %eax
setne %cl
xorl %edx, %edx
cmpl $0x4, %eax
leal (%rcx,%rcx,2), %eax
cmovel %edx, %eax
addq $0x8, %rsp
retq
|
/Target/NVPTX/NVPTXAliasAnalysis.cpp
|
(anonymous namespace)::GenericToNVVMLegacyPass::runOnModule(llvm::Module&)
|
bool GenericToNVVMLegacyPass::runOnModule(Module &M) {
return GenericToNVVM().runOnModule(M);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x80, %rsp
movq %rsi, %r15
xorps %xmm0, %xmm0
leaq 0x40(%rsp), %r14
movaps %xmm0, -0x30(%r14)
movaps %xmm0, 0x30(%r14)
movaps %xmm0, 0x20(%r14)
movaps %xmm0, 0x10(%r14)
movaps %xmm0, (%r14)
movaps %xmm0, -0x10(%r14)
movaps %xmm0, -0x20(%r14)
movaps %xmm0, -0x40(%r14)
movl $0x80, %ebp
movl %ebp, -0x30(%r14)
movl $0x1800, %edi # imm = 0x1800
movl $0x8, %esi
callq 0x2b410ec
movq %rax, -0x40(%r14)
movq %rsp, %rbx
movq %rbx, %rdi
callq 0x10a43f0
xorl %r12d, %r12d
movb %r12b, 0x30(%rbx)
movl %ebp, 0x50(%rbx)
movl $0x1800, %edi # imm = 0x1800
movl $0x8, %esi
callq 0x2b410ec
movq %rax, 0x40(%rbx)
movq %r14, %rdi
callq 0x10a4cf2
movb %r12b, 0x70(%rbx)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x10a3d38
movl %eax, %ebp
leaq 0x58(%rsp), %r15
movq %r15, %rdi
callq 0xb1ef32
movq %r14, %rdi
callq 0x10a5f0c
movq -0x18(%r15), %rdi
movl -0x8(%r15), %eax
shlq $0x4, %rax
leaq (%rax,%rax,2), %rsi
movl $0x8, %edx
callq 0x2b410f1
leaq 0x18(%rsp), %r14
movq %r14, %rdi
callq 0xb1ef32
movq %rbx, %rdi
callq 0x10a5ffc
movq -0x18(%r14), %rdi
movl -0x8(%r14), %eax
shlq $0x4, %rax
leaq (%rax,%rax,2), %rsi
movl $0x8, %edx
callq 0x2b410f1
movl %ebp, %eax
addq $0x80, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/Target/NVPTX/NVPTXGenericToNVVM.cpp
|
std::pair<llvm::DenseMapIterator<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*>, false>, bool> llvm::DenseMapBase<llvm::DenseMap<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*>>, llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*>>::try_emplace<llvm::GlobalVariable*>(llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>&&, llvm::GlobalVariable*&&)
|
std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return std::make_pair(makeIterator(TheBucket,
shouldReverseIterate<KeyT>()
? getBuckets()
: getBucketsEnd(),
*this, true),
false); // Already in map.
// Otherwise, insert the new element.
TheBucket =
InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
return std::make_pair(makeIterator(TheBucket,
shouldReverseIterate<KeyT>()
? getBuckets()
: getBucketsEnd(),
*this, true),
true);
}
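
A usage sketch of try_emplace on a simpler int-keyed DenseMap (this instantiation uses ValueMapCallbackVH keys, but the contract is the same): the bool in the returned pair reports whether a new entry was created, and the value argument is ignored when the key already exists.

#include "llvm/ADT/DenseMap.h"
#include <cassert>

int main() {
  llvm::DenseMap<int, int> M;
  auto R1 = M.try_emplace(1, 10);
  assert(R1.second && R1.first->second == 10);  // inserted
  auto R2 = M.try_emplace(1, 99);
  assert(!R2.second && R2.first->second == 10); // key existed; 99 unused
}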
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rcx, %r12
movq %rdx, %r13
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x8(%rsp), %r15
movq %rsi, %rdi
movq %rdx, %rsi
movq %r15, %rdx
callq 0x10a469a
movq (%r15), %r15
testb %al, %al
je 0x10a49e7
xorl %eax, %eax
jmp 0x10a4a1a
movq %r14, %rdi
movq %r13, %rsi
movq %r13, %rdx
movq %r15, %rcx
callq 0x10a4a44
movq %rax, %r15
leaq 0x8(%rax), %rdi
leaq 0x8(%r13), %rsi
callq 0xb1e8aa
movq 0x20(%r13), %rax
movq %rax, 0x20(%r15)
movq (%r12), %rax
movq %rax, 0x28(%r15)
movb $0x1, %al
movl 0x10(%r14), %ecx
leaq (%rcx,%rcx,2), %rcx
shlq $0x4, %rcx
addq (%r14), %rcx
movq %r15, (%rbx)
movq %rcx, 0x8(%rbx)
movb %al, 0x10(%rbx)
movq %rbx, %rax
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/llvm/ADT/DenseMap.h
|
llvm::DenseMap<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::GlobalVariable*, llvm::GlobalVariable*, llvm::ValueMapConfig<llvm::GlobalVariable*, llvm::sys::SmartMutex<false>>>, llvm::GlobalVariable*>>::grow(unsigned int)
|
void grow(unsigned AtLeast) {
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;
allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
assert(Buckets);
if (!OldBuckets) {
this->BaseT::initEmpty();
return;
}
this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
// Free the old table.
deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
alignof(BucketT));
}
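
A standalone sketch of the bucket-count computation: NextPowerOf2(AtLeast - 1) implemented with the shift-and-or cascade visible in the assembly below, clamped to a minimum of 64 buckets.

#include <cassert>
#include <cstdint>

static unsigned newBucketCount(unsigned AtLeast) {
  uint32_t A = AtLeast - 1;
  // Smear the highest set bit into every lower position, then add one
  // to reach the next power of two.
  A |= A >> 1;
  A |= A >> 2;
  A |= A >> 4;
  A |= A >> 8;
  A |= A >> 16;
  ++A;
  return A < 64 ? 64 : A; // std::max<unsigned>(64, ...)
}

int main() {
  assert(newBucketCount(3) == 64);    // small requests are clamped
  assert(newBucketCount(100) == 128); // 99 smears to 127, +1 gives 128
}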
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdi, %rbx
movl 0x10(%rdi), %r15d
decl %esi
movl %esi, %eax
shrl %eax
orl %esi, %eax
movl %eax, %ecx
shrl $0x2, %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x4, %eax
orl %ecx, %eax
movl %eax, %ecx
shrl $0x8, %ecx
orl %eax, %ecx
movl %ecx, %eax
shrl $0x10, %eax
orl %ecx, %eax
incl %eax
cmpl $0x41, %eax
movl $0x40, %ecx
cmovael %eax, %ecx
movq (%rdi), %r14
movl %ecx, 0x10(%rdi)
shlq $0x4, %rcx
leaq (%rcx,%rcx,2), %rdi
movl $0x8, %esi
callq 0x2b410ec
movq %rax, (%rbx)
testq %r14, %r14
je 0x10a4b3f
shlq $0x4, %r15
leaq (%r15,%r15,2), %r15
leaq (%r14,%r15), %rdx
movq %rbx, %rdi
movq %r14, %rsi
callq 0x10a4b4c
movl $0x8, %edx
movq %r14, %rdi
movq %r15, %rsi
popq %rbx
popq %r14
popq %r15
jmp 0x2b410f1
movq %rbx, %rdi
popq %rbx
popq %r14
popq %r15
jmp 0x10a43f0
|
/llvm/ADT/DenseMap.h
|
std::pair<llvm::DenseMapIterator<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, llvm::Value*, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, llvm::Value*>, false>, bool> llvm::DenseMapBase<llvm::DenseMap<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, llvm::Value*, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, llvm::Value*>>, llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, llvm::Value*, llvm::DenseMapInfo<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, void>, llvm::detail::DenseMapPair<llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>, llvm::Value*>>::try_emplace<llvm::Value*>(llvm::ValueMapCallbackVH<llvm::Constant*, llvm::Value*, llvm::ValueMapConfig<llvm::Constant*, llvm::sys::SmartMutex<false>>>&&, llvm::Value*&&)
|
std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return std::make_pair(makeIterator(TheBucket,
shouldReverseIterate<KeyT>()
? getBuckets()
: getBucketsEnd(),
*this, true),
false); // Already in map.
// Otherwise, insert the new element.
TheBucket =
InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
return std::make_pair(makeIterator(TheBucket,
shouldReverseIterate<KeyT>()
? getBuckets()
: getBucketsEnd(),
*this, true),
true);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq %rcx, %r12
movq %rdx, %r13
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x8(%rsp), %r15
movq %rsi, %rdi
movq %rdx, %rsi
movq %r15, %rdx
callq 0x10a4f9c
movq (%r15), %r15
testb %al, %al
je 0x10a5265
xorl %eax, %eax
jmp 0x10a5298
movq %r14, %rdi
movq %r13, %rsi
movq %r13, %rdx
movq %r15, %rcx
callq 0x10a52c2
movq %rax, %r15
leaq 0x8(%rax), %rdi
leaq 0x8(%r13), %rsi
callq 0xb1e8aa
movq 0x20(%r13), %rax
movq %rax, 0x20(%r15)
movq (%r12), %rax
movq %rax, 0x28(%r15)
movb $0x1, %al
movl 0x10(%r14), %ecx
leaq (%rcx,%rcx,2), %rcx
shlq $0x4, %rcx
addq (%r14), %rcx
movq %r15, (%rbx)
movq %rcx, 0x8(%rbx)
movb %al, 0x10(%rbx)
movq %rbx, %rax
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/llvm/ADT/DenseMap.h
|
llvm::NVPTXDAGToDAGISel::tryStoreRetval(llvm::SDNode*)
|
bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Offset = N->getOperand(1);
unsigned OffsetVal = Offset->getAsZExtVal();
MemSDNode *Mem = cast<MemSDNode>(N);
// How many elements do we have?
unsigned NumElts = 1;
switch (N->getOpcode()) {
default:
return false;
case NVPTXISD::StoreRetval:
NumElts = 1;
break;
case NVPTXISD::StoreRetvalV2:
NumElts = 2;
break;
case NVPTXISD::StoreRetvalV4:
NumElts = 4;
break;
}
// Build vector of operands
SmallVector<SDValue, 6> Ops;
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(N->getOperand(i + 2));
Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
Ops.push_back(Chain);
// Determine target opcode
// If we have an i1, use an 8-bit store. The lowering code in
// NVPTXISelLowering will have already emitted an upcast.
std::optional<unsigned> Opcode = 0;
switch (NumElts) {
default:
return false;
case 1:
Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
NVPTX::StoreRetvalI8, NVPTX::StoreRetvalI16,
NVPTX::StoreRetvalI32, NVPTX::StoreRetvalI64,
NVPTX::StoreRetvalF32, NVPTX::StoreRetvalF64);
if (Opcode == NVPTX::StoreRetvalI8) {
// Fine tune the opcode depending on the size of the operand.
// This helps to avoid creating redundant COPY instructions in
// InstrEmitter::AddRegisterOperand().
switch (Ops[0].getSimpleValueType().SimpleTy) {
default:
break;
case MVT::i32:
Opcode = NVPTX::StoreRetvalI8TruncI32;
break;
case MVT::i64:
Opcode = NVPTX::StoreRetvalI8TruncI64;
break;
}
}
break;
case 2:
Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
NVPTX::StoreRetvalV2I8, NVPTX::StoreRetvalV2I16,
NVPTX::StoreRetvalV2I32, NVPTX::StoreRetvalV2I64,
NVPTX::StoreRetvalV2F32, NVPTX::StoreRetvalV2F64);
break;
case 4:
Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
NVPTX::StoreRetvalV4I8, NVPTX::StoreRetvalV4I16,
NVPTX::StoreRetvalV4I32, std::nullopt,
NVPTX::StoreRetvalV4F32, std::nullopt);
break;
}
if (!Opcode)
return false;
SDNode *Ret = CurDAG->getMachineNode(*Opcode, DL, MVT::Other, Ops);
MachineMemOperand *MemRef = cast<MemSDNode>(N)->getMemOperand();
CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ret), {MemRef});
ReplaceNode(N, Ret);
return true;
}
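
A minimal sketch of the pickOpcodeForVT-style selection used above, with plain enums standing in for MVT::SimpleValueType and the real opcode tables (per the comment in the source, the real helper also serves i1 through the i8 opcode): one candidate opcode per element type, with std::nullopt for combinations a variant does not provide, such as the 64-bit v4 StoreRetval.

#include <optional>

enum SimpleTy { i8, i16, i32, i64, f32, f64 };

static std::optional<unsigned>
pickOpcode(SimpleTy T, std::optional<unsigned> Op8,
           std::optional<unsigned> Op16, std::optional<unsigned> Op32,
           std::optional<unsigned> Op64, std::optional<unsigned> OpF32,
           std::optional<unsigned> OpF64) {
  switch (T) {
  case i8:  return Op8;
  case i16: return Op16;
  case i32: return Op32;
  case i64: return Op64;
  case f32: return OpF32;
  case f64: return OpF64;
  }
  return std::nullopt; // unsupported type/variant combination
}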
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc8, %rsp
movq %rsi, %rbx
movq %rdi, %r13
movq 0x48(%rsi), %rsi
movq %rsi, 0x20(%rsp)
testq %rsi, %rsi
je 0x10aa4d4
leaq 0x20(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
movl %eax, 0x28(%rsp)
movq 0x28(%rbx), %rax
movq 0x28(%rax), %rcx
movq 0x58(%rcx), %rcx
cmpl $0x41, 0x20(%rcx)
jb 0x10aa4f3
movq 0x18(%rcx), %rcx
jmp 0x10aa4f7
addq $0x18, %rcx
movl $0xfffffc17, %edx # imm = 0xFFFFFC17
addl 0x18(%rbx), %edx
cmpl $0x2, %edx
ja 0x10aa5fa
movq (%rax), %rsi
movq %rsi, 0x38(%rsp)
movl 0x8(%rax), %eax
movl %eax, 0x1c(%rsp)
movl (%rcx), %eax
movq %rax, 0x30(%rsp)
movl %edx, %eax
leaq 0x2a92e29(%rip), %rcx # 0x3b3d350
movl (%rcx,%rax,4), %ebp
leaq 0x68(%rsp), %rcx
movq %rcx, -0x10(%rcx)
movabsq $0x600000000, %rax # imm = 0x600000000
movq %rax, -0x8(%rcx)
movl $0x58, %r12d
leaq 0x58(%rsp), %r15
movq %rbp, %r14
movq 0x28(%rbx), %rax
movq -0x8(%rax,%r12), %rsi
movl (%rax,%r12), %edx
movq %r15, %rdi
callq 0x9e21b4
addq $0x28, %r12
decq %r14
jne 0x10aa54f
movq %r13, %r14
movq 0x38(%r13), %rdi
movl $0x0, (%rsp)
xorl %r15d, %r15d
leaq 0x20(%rsp), %rdx
movq 0x30(%rsp), %rsi
movl $0x7, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
leaq 0x58(%rsp), %r13
movq %r13, %rdi
movq %rax, %rsi
callq 0x9e21b4
movq %r13, %rdi
movq 0x38(%rsp), %rsi
movl 0x1c(%rsp), %edx
callq 0x9e21b4
cmpq $0x1, %rbp
je 0x10aa6e9
cmpl $0x2, %ebp
je 0x10aa602
cmpl $0x4, %ebp
jne 0x10aa6ac
movzbl 0x58(%rbx), %edi
movq $0x0, (%rsp)
movl $0xca3, %esi # imm = 0xCA3
movl $0xca1, %edx # imm = 0xCA1
movl $0xca2, %ecx # imm = 0xCA2
xorl %r8d, %r8d
movl $0xca0, %r9d # imm = 0xCA0
jmp 0x10aa631
xorl %r15d, %r15d
jmp 0x10aa6c0
movzbl 0x58(%rbx), %edi
movabsq $0x100000c93, %r8 # imm = 0x100000C93
leaq 0x8(%r8), %rax
addq $0xb, %r8
movq %rax, (%rsp)
movl $0xc9f, %esi # imm = 0xC9F
movl $0xc9c, %edx # imm = 0xC9C
movl $0xc9d, %ecx # imm = 0xC9D
movl $0xc9a, %r9d # imm = 0xC9A
callq 0x10ac592
movq %rax, %rcx
shrq $0x20, %rcx
testb $0x1, %cl
je 0x10aa6a9
movq 0x38(%r14), %rdi
movq 0x58(%rsp), %rcx
movq %rcx, 0x40(%rsp)
movl 0x60(%rsp), %ecx
movq %rcx, 0x48(%rsp)
movups 0x40(%rsp), %xmm0
movups %xmm0, (%rsp)
leaq 0x20(%rsp), %rdx
movl %eax, %esi
movl $0x1, %ecx
xorl %r8d, %r8d
callq 0x178f5b0
movq %rax, %r15
movq 0x68(%rbx), %rax
movq 0x38(%r14), %rdi
leaq 0x50(%rsp), %rdx
movq %rax, (%rdx)
movl $0x1, %ecx
movq %r15, %rsi
callq 0x178e6d8
movq %r14, %rdi
movq %rbx, %rsi
movq %r15, %rdx
callq 0x9db81c
movb $0x1, %r15b
jmp 0x10aa6ac
xorl %r15d, %r15d
movq 0x58(%rsp), %rdi
leaq 0x68(%rsp), %rax
cmpq %rax, %rdi
je 0x10aa6c0
callq 0x780910
movq 0x20(%rsp), %rsi
testq %rsi, %rsi
je 0x10aa6d4
leaq 0x20(%rsp), %rdi
callq 0x2a758fc
movl %r15d, %eax
addq $0xc8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movzbl 0x58(%rbx), %edi
movabsq $0x100000c93, %r8 # imm = 0x100000C93
movq %r8, (%rsp)
addq $0x3, %r8
movl $0xc97, %esi # imm = 0xC97
movl $0xc94, %edx # imm = 0xC94
movl $0xc95, %ecx # imm = 0xC95
movl $0xc92, %r9d # imm = 0xC92
callq 0x10ac592
movq %rax, %rcx
shrq $0x20, %rcx
movl %ecx, %edx
notb %dl
cmpl $0xc97, %eax # imm = 0xC97
setne %sil
orb %dl, %sil
testb $0x1, %sil
jne 0x10aa63d
movq 0x58(%rsp), %rdx
movq (%rdx), %rsi
movl 0x8(%rdx), %edx
movq 0x30(%rsi), %rsi
shlq $0x4, %rdx
movzbl (%rsi,%rdx), %edx
cmpl $0x8, %edx
je 0x10aa769
cmpl $0x7, %edx
jne 0x10aa63d
movl $0xc98, %eax # imm = 0xC98
jmp 0x10aa63d
movl $0xc99, %eax # imm = 0xC99
jmp 0x10aa63d
nop
|
/Target/NVPTX/NVPTXISelDAGToDAG.cpp
|
llvm::NVPTXInstrInfo::copyPhysReg(llvm::MachineBasicBlock&, llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>, llvm::DebugLoc const&, llvm::MCRegister, llvm::MCRegister, bool) const
|
void NVPTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg,
MCRegister SrcReg, bool KillSrc) const {
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
if (RegInfo.getRegSizeInBits(*DestRC) != RegInfo.getRegSizeInBits(*SrcRC))
report_fatal_error("Copy one register into another with a different width");
unsigned Op;
if (DestRC == &NVPTX::Int1RegsRegClass) {
Op = NVPTX::IMOV1rr;
} else if (DestRC == &NVPTX::Int16RegsRegClass) {
Op = NVPTX::IMOV16rr;
} else if (DestRC == &NVPTX::Int32RegsRegClass) {
Op = (SrcRC == &NVPTX::Int32RegsRegClass ? NVPTX::IMOV32rr
: NVPTX::BITCONVERT_32_F2I);
} else if (DestRC == &NVPTX::Int64RegsRegClass) {
Op = (SrcRC == &NVPTX::Int64RegsRegClass ? NVPTX::IMOV64rr
: NVPTX::BITCONVERT_64_F2I);
} else if (DestRC == &NVPTX::Int128RegsRegClass) {
Op = NVPTX::IMOV128rr;
} else if (DestRC == &NVPTX::Float32RegsRegClass) {
Op = (SrcRC == &NVPTX::Float32RegsRegClass ? NVPTX::FMOV32rr
: NVPTX::BITCONVERT_32_I2F);
} else if (DestRC == &NVPTX::Float64RegsRegClass) {
Op = (SrcRC == &NVPTX::Float64RegsRegClass ? NVPTX::FMOV64rr
: NVPTX::BITCONVERT_64_I2F);
} else {
llvm_unreachable("Bad register copy");
}
BuildMI(MBB, I, DL, get(Op), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
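
A condensed sketch of the opcode table above, with an enum standing in for the real register-class pointers: the BITCONVERT opcodes are chosen whenever the source class differs from the destination class in the 32- and 64-bit cases.

enum RC { Int1, Int16, Int32, Int64, Int128, Float32, Float64 };
enum Opc {
  IMOV1rr, IMOV16rr, IMOV32rr, IMOV64rr, IMOV128rr, FMOV32rr, FMOV64rr,
  BITCONVERT_32_F2I, BITCONVERT_64_F2I, BITCONVERT_32_I2F,
  BITCONVERT_64_I2F
};

static Opc copyOpcode(RC Dest, RC Src) {
  switch (Dest) {
  case Int1:    return IMOV1rr;
  case Int16:   return IMOV16rr;
  case Int32:   return Src == Int32 ? IMOV32rr : BITCONVERT_32_F2I;
  case Int64:   return Src == Int64 ? IMOV64rr : BITCONVERT_64_F2I;
  case Int128:  return IMOV128rr;
  case Float32: return Src == Float32 ? FMOV32rr : BITCONVERT_32_I2F;
  case Float64: return Src == Float64 ? FMOV64rr : BITCONVERT_64_I2F;
  }
  __builtin_unreachable(); // "Bad register copy"
}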
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rsi, %r14
movq %rdi, %r12
movq 0x20(%rsi), %rax
movq 0x28(%rax), %rax
movl $0x7fffffff, %r10d # imm = 0x7FFFFFFF
movl %r8d, %esi
andl %r10d, %esi
movq 0x38(%rax), %rdi
shlq $0x4, %rsi
movq (%rdi,%rsi), %rax
andq $-0x8, %rax
andl %r9d, %r10d
shlq $0x4, %r10
movq (%rdi,%r10), %r11
movq 0x160(%r12), %rsi
subq 0x158(%r12), %rsi
andq $-0x8, %r11
shrq $0x3, %rsi
imull 0x180(%r12), %esi
movq 0x170(%r12), %rdi
movq (%rax), %r10
movzwl 0x18(%r10), %r10d
addl %esi, %r10d
shlq $0x4, %r10
movl (%rdi,%r10), %ebx
movq (%r11), %r10
movzwl 0x18(%r10), %r10d
addl %esi, %r10d
shlq $0x4, %r10
cmpl (%rdi,%r10), %ebx
jne 0x10ae3ee
movq $-0x349, %r13 # imm = 0xFCB7
cmpq 0x484d84e(%rip), %rax # 0x58fbad0
je 0x10ae2da
movq $-0x347, %r13 # imm = 0xFCB9
cmpq 0x484da3e(%rip), %rax # 0x58fbcd0
je 0x10ae2da
cmpq 0x484d645(%rip), %rax # 0x58fb8e0
jne 0x10ae2b4
cmpq 0x484d63c(%rip), %r11 # 0x58fb8e0
movq $-0x34b, %rax # imm = 0xFCB5
movq $-0x161, %r13 # imm = 0xFE9F
jmp 0x10ae2d6
cmpq 0x484d805(%rip), %rax # 0x58fbac0
jne 0x10ae39d
cmpq 0x484d7f8(%rip), %r11 # 0x58fbac0
movq $-0x34d, %rax # imm = 0xFCB3
movq $-0x163, %r13 # imm = 0xFE9D
cmoveq %rax, %r13
movl %r8d, %r15d
movq %rdx, %rbp
movl %r9d, 0xc(%rsp)
movq (%rcx), %rsi
movq %rsi, (%rsp)
testq %rsi, %rsi
je 0x10ae2fe
movq %rsp, %rdi
movl $0x1, %edx
callq 0x2a757d8
movq (%rsp), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0x10ae323
movq %rsp, %rbx
leaq 0x10(%rsp), %rdx
movq %rbx, %rdi
callq 0x2a759cc
movq $0x0, (%rbx)
xorps %xmm0, %xmm0
leaq 0x10(%rsp), %rbx
movups %xmm0, 0x8(%rbx)
shlq $0x5, %r13
addq 0x8(%r12), %r13
movq %r14, %rdi
movq %rbp, %rsi
movq %rbx, %rdx
movq %r13, %rcx
movl %r15d, %r8d
callq 0x90f593
leaq 0x28(%rsp), %rdi
movq %rax, (%rdi)
movq %rdx, 0x8(%rdi)
movzbl 0x70(%rsp), %edx
shll $0x3, %edx
movl 0xc(%rsp), %esi
xorl %ecx, %ecx
callq 0x93f5ac
movq (%rbx), %rsi
testq %rsi, %rsi
je 0x10ae37d
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movq (%rsp), %rsi
testq %rsi, %rsi
je 0x10ae38e
movq %rsp, %rdi
callq 0x2a758fc
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq $-0x345, %r13 # imm = 0xFCBB
cmpq 0x484dbbd(%rip), %rax # 0x58fbf68
je 0x10ae2da
cmpq 0x484d698(%rip), %rax # 0x58fba50
jne 0x10ae3d4
cmpq 0x484d68f(%rip), %r11 # 0x58fba50
movq $-0x2ed, %rax # imm = 0xFD13
movq $-0x162, %r13 # imm = 0xFE9E
jmp 0x10ae2d6
cmpq 0x484d765(%rip), %r11 # 0x58fbb40
movq $-0x2ef, %rax # imm = 0xFD11
movq $-0x164, %r13 # imm = 0xFE9C
jmp 0x10ae2d6
leaq 0x2a8f521(%rip), %rdi # 0x3b3d916
movl $0x1, %esi
callq 0x2b31b17
nop
|
/Target/NVPTX/NVPTXInstrInfo.cpp
|
(anonymous namespace)::NVPTXLowerArgs::handleByValParam(llvm::NVPTXTargetMachine const&, llvm::Argument*)
|
void NVPTXLowerArgs::handleByValParam(const NVPTXTargetMachine &TM,
Argument *Arg) {
bool IsGridConstant = isParamGridConstant(*Arg);
Function *Func = Arg->getParent();
BasicBlock::iterator FirstInst = Func->getEntryBlock().begin();
Type *StructType = Arg->getParamByValType();
assert(StructType && "Missing byval type");
auto AreSupportedUsers = [&](Value *Start) {
SmallVector<Value *, 16> ValuesToCheck = {Start};
auto IsSupportedUse = [IsGridConstant](Value *V) -> bool {
if (isa<GetElementPtrInst>(V) || isa<BitCastInst>(V) || isa<LoadInst>(V))
return true;
// ASC to param space are OK, too -- we'll just strip them.
if (auto *ASC = dyn_cast<AddrSpaceCastInst>(V)) {
if (ASC->getDestAddressSpace() == ADDRESS_SPACE_PARAM)
return true;
}
      // Simple calls and stores are supported for grid_constants;
      // writes to these pointers are undefined behaviour.
if (IsGridConstant &&
(isa<CallInst>(V) || isa<StoreInst>(V) || isa<PtrToIntInst>(V)))
return true;
return false;
};
while (!ValuesToCheck.empty()) {
Value *V = ValuesToCheck.pop_back_val();
if (!IsSupportedUse(V)) {
LLVM_DEBUG(dbgs() << "Need a "
<< (isParamGridConstant(*Arg) ? "cast " : "copy ")
<< "of " << *Arg << " because of " << *V << "\n");
(void)Arg;
return false;
}
if (!isa<LoadInst>(V) && !isa<CallInst>(V) && !isa<StoreInst>(V) &&
!isa<PtrToIntInst>(V))
llvm::append_range(ValuesToCheck, V->users());
}
return true;
};
if (llvm::all_of(Arg->users(), AreSupportedUsers)) {
// Convert all loads and intermediate operations to use parameter AS and
// skip creation of a local copy of the argument.
SmallVector<Use *, 16> UsesToUpdate;
for (Use &U : Arg->uses())
UsesToUpdate.push_back(&U);
Value *ArgInParamAS = new AddrSpaceCastInst(
Arg, PointerType::get(StructType, ADDRESS_SPACE_PARAM), Arg->getName(),
FirstInst);
for (Use *U : UsesToUpdate)
convertToParamAS(U, ArgInParamAS, IsGridConstant);
LLVM_DEBUG(dbgs() << "No need to copy or cast " << *Arg << "\n");
const auto *TLI =
cast<NVPTXTargetLowering>(TM.getSubtargetImpl()->getTargetLowering());
adjustByValArgAlignment(Arg, ArgInParamAS, TLI);
return;
}
const DataLayout &DL = Func->getDataLayout();
unsigned AS = DL.getAllocaAddrSpace();
if (isParamGridConstant(*Arg)) {
// Writes to a grid constant are undefined behaviour. We do not need a
// temporary copy. When a pointer might have escaped, conservatively replace
// all of its uses (which might include a device function call) with a cast
// to the generic address space.
IRBuilder<> IRB(&Func->getEntryBlock().front());
// Cast argument to param address space
auto *CastToParam = cast<AddrSpaceCastInst>(IRB.CreateAddrSpaceCast(
Arg, IRB.getPtrTy(ADDRESS_SPACE_PARAM), Arg->getName() + ".param"));
// Cast param address to generic address space. We do not use an
    // addrspacecast to generic here because LLVM considers `Arg` to be in the
// generic address space, and a `generic -> param` cast followed by a `param
// -> generic` cast will be folded away. The `param -> generic` intrinsic
// will be correctly lowered to `cvta.param`.
Value *CvtToGenCall = IRB.CreateIntrinsic(
IRB.getPtrTy(ADDRESS_SPACE_GENERIC), Intrinsic::nvvm_ptr_param_to_gen,
CastToParam, nullptr, CastToParam->getName() + ".gen");
Arg->replaceAllUsesWith(CvtToGenCall);
// Do not replace Arg in the cast to param space
CastToParam->setOperand(0, Arg);
} else {
// Otherwise we have to create a temporary copy.
AllocaInst *AllocA =
new AllocaInst(StructType, AS, Arg->getName(), FirstInst);
// Set the alignment to alignment of the byval parameter. This is because,
// later load/stores assume that alignment, and we are going to replace
// the use of the byval parameter with this alloca instruction.
AllocA->setAlignment(Func->getParamAlign(Arg->getArgNo())
.value_or(DL.getPrefTypeAlign(StructType)));
Arg->replaceAllUsesWith(AllocA);
Value *ArgInParam = new AddrSpaceCastInst(
Arg, PointerType::get(Arg->getContext(), ADDRESS_SPACE_PARAM),
Arg->getName(), FirstInst);
// Be sure to propagate alignment to this load; LLVM doesn't know that NVPTX
// addrspacecast preserves alignment. Since params are constant, this load
// is definitely not volatile.
LoadInst *LI =
new LoadInst(StructType, ArgInParam, Arg->getName(),
/*isVolatile=*/false, AllocA->getAlign(), FirstInst);
new StoreInst(LI, AllocA, FirstInst);
}
}
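
A standalone restatement of the IsSupportedUse predicate above, with the instruction kinds reduced to an enum: a use is supported if it can be rewritten into the param address space, or, for grid_constant arguments, if it is an escape through a call, store, or ptrtoint (where writes would be undefined behaviour anyway).

enum UseKind {
  GEP, BitCast, Load, ASCToParam, ASCOther, Call, Store, PtrToInt, Other
};

static bool isSupportedUse(UseKind K, bool IsGridConstant) {
  if (K == GEP || K == BitCast || K == Load)
    return true; // always rewritable into the param address space
  if (K == ASCToParam)
    return true; // casts to param space are simply stripped
  if (IsGridConstant && (K == Call || K == Store || K == PtrToInt))
    return true; // escaping a read-only grid_constant is tolerated
  return false;  // anything else forces the copy/cast fallback
}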
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x378, %rsp # imm = 0x378
movq %rsi, %r14
movq %rdi, 0xe8(%rsp)
movq %rsi, %rdi
callq 0x109ea7a
movl %eax, %ebx
movq 0x18(%r14), %rax
movq %rax, 0x38(%rsp)
movq 0x50(%rax), %rax
leaq -0x18(%rax), %rcx
testq %rax, %rax
cmoveq %rax, %rcx
movq 0x38(%rcx), %rax
movq %rax, 0x18(%rsp)
movq %r14, %rdi
callq 0x2a3af06
movq %rax, 0x30(%rsp)
leaq 0x10(%r14), %rax
movq %rax, 0x28(%rsp)
movq %r14, 0x20(%rsp)
movq 0x10(%r14), %r12
testq %r12, %r12
je 0x10af7e5
leaq 0x138(%rsp), %r13
movl %ebx, %ebp
xorb $0x1, %bpl
movl $0x1008003, %r15d # imm = 0x1008003
movq 0x18(%r12), %rax
movq %rax, 0x2e8(%rsp)
movq %r13, 0x128(%rsp)
movabsq $0x1000000000, %rax # imm = 0x1000000000
movq %rax, 0x130(%rsp)
leaq 0x128(%rsp), %rdi
leaq 0x2e8(%rsp), %rsi
leaq 0x2f0(%rsp), %rdx
callq 0x9320ea
movl 0x130(%rsp), %r14d
testq %r14, %r14
je 0x10af7b4
movq 0x128(%rsp), %rax
movq -0x8(%rax,%r14,8), %rdx
leal -0x1(%r14), %esi
movl %esi, 0x130(%rsp)
movzbl (%rdx), %edi
movb $0x1, %r13b
cmpl $0x4d, %edi
jg 0x10af717
cmpl $0x3d, %edi
je 0x10af779
cmpl $0x3f, %edi
je 0x10af779
jmp 0x10af75f
cmpl $0x4e, %edi
je 0x10af779
cmpl $0x4f, %edi
jne 0x10af75f
movq 0x8(%rdx), %rcx
movl 0x8(%rcx), %r8d
movl $0xfe, %r9d
andl %r9d, %r8d
cmpl $0x12, %r8d
jne 0x10af73f
movq 0x10(%rcx), %rcx
movq (%rcx), %rcx
movl 0x8(%rcx), %ecx
movl $0xffffff00, %r8d # imm = 0xFFFFFF00
andl %r8d, %ecx
cmpl $0x6500, %ecx # imm = 0x6500
sete %r13b
movl %r13d, %ecx
orb %bpl, %cl
je 0x10af763
jmp 0x10af779
testb %bl, %bl
je 0x10af776
leal -0x3e(%rdi), %ecx
cmpb $0x17, %cl
ja 0x10af776
movl $0x804001, %r13d # imm = 0x804001
shrl %cl, %r13d
jmp 0x10af779
xorl %r13d, %r13d
testb $0x1, %r13b
je 0x10af7b4
addl $-0x3d, %edi
cmpl $0x18, %edi
ja 0x10af799
btl %edi, %r15d
jae 0x10af799
testb $0x1, %r13b
jne 0x10af6d7
jmp 0x10af7b4
movq 0x10(%rdx), %rdx
movl %esi, %ecx
leaq (%rax,%rcx,8), %rsi
leaq 0x128(%rsp), %rdi
xorl %ecx, %ecx
callq 0x10b05c0
jmp 0x10af78d
movq 0x128(%rsp), %rdi
leaq 0x138(%rsp), %r13
cmpq %r13, %rdi
je 0x10af7ce
callq 0x780910
testq %r14, %r14
jne 0x10b0272
movq 0x8(%r12), %r12
testq %r12, %r12
jne 0x10af693
leaq 0x2f8(%rsp), %rax
movq %rax, -0x10(%rax)
movabsq $0x1000000000, %rcx # imm = 0x1000000000
movq %rcx, -0x8(%rax)
movq 0x28(%rsp), %rax
movq (%rax), %rbx
testq %rbx, %rbx
je 0x10af828
leaq 0x2e8(%rsp), %r15
movq %r15, %rdi
movq %rbx, %rsi
callq 0xa2b05a
movq 0x8(%rbx), %rbx
testq %rbx, %rbx
jne 0x10af814
movl $0x48, %edi
movl $0x1, %esi
callq 0x2a9ec74
movq %rax, %r15
movq 0x30(%rsp), %rdi
movl $0x65, %esi
callq 0x2a9b6ac
movq %rax, %rbx
movq 0x20(%rsp), %r14
movq %r14, %rdi
callq 0x2a9f76a
leaq 0x128(%rsp), %rcx
movw $0x105, 0x20(%rcx) # imm = 0x105
movq %rax, (%rcx)
movq %rdx, 0x8(%rcx)
movl $0x1, %r9d
movq %r15, 0x28(%rsp)
movq %r15, %rdi
movq %r14, %rsi
movq %rbx, %rdx
movq 0x18(%rsp), %r8
callq 0x2a5b442
movl 0x2f0(%rsp), %eax
testq %rax, %rax
je 0x10afe36
movq 0x2e8(%rsp), %rcx
leaq (%rcx,%rax,8), %rax
movq %rax, 0x38(%rsp)
leaq 0xa0(%rsp), %rbx
leaq 0x40(%rsp), %rbp
movq %rcx, 0x30(%rsp)
movq (%rcx), %rax
movq 0x18(%rax), %rcx
cmpb $0x1d, (%rcx)
movl $0x0, %edx
cmovbq %rdx, %rcx
leaq 0x138(%rsp), %rdx
movq %rdx, 0x128(%rsp)
movq %rax, 0x138(%rsp)
movq %rcx, 0x140(%rsp)
movq 0x28(%rsp), %rax
movq %rax, 0x148(%rsp)
movabsq $0x200000001, %rax # imm = 0x200000001
movq %rax, 0x130(%rsp)
movq %rbx, 0x90(%rsp)
movabsq $0x600000000, %rax # imm = 0x600000000
movq %rax, 0x98(%rsp)
movl $0x1, %eax
movq 0x128(%rsp), %rcx
movl %eax, %edx
leaq (%rdx,%rdx,2), %rdx
movq -0x18(%rcx,%rdx,8), %rdi
movq -0x10(%rcx,%rdx,8), %rbx
movq -0x8(%rcx,%rdx,8), %r12
decl %eax
movl %eax, 0x130(%rsp)
movb (%rbx), %r15b
movl $0x0, %r13d
cmpb $0x3d, %r15b
cmoveq %rbx, %r13
jne 0x10af9c3
cmpq $0x0, -0x20(%r13)
je 0x10af98c
movq -0x18(%r13), %rax
movq -0x10(%r13), %rcx
movq %rax, (%rcx)
movq -0x18(%r13), %rax
testq %rax, %rax
je 0x10af98c
movq -0x10(%r13), %rcx
movq %rcx, 0x10(%rax)
leaq -0x20(%r13), %rax
movq %r12, (%rax)
testq %r12, %r12
je 0x10afca6
movq 0x10(%r12), %rcx
movq %rcx, -0x18(%r13)
testq %rcx, %rcx
je 0x10af9b2
leaq -0x18(%r13), %rdx
movq %rdx, 0x10(%rcx)
addq $0x10, %r12
movq %r12, -0x10(%r13)
movq %rax, (%r12)
jmp 0x10afca6
cmpb $0x3f, %r15b
jne 0x10afaa6
movq %rdi, 0x18(%rsp)
movl 0x4(%rbx), %eax
shll $0x5, %eax
movq %rbx, %rsi
subq %rax, %rsi
addq $0x20, %rsi
leaq 0x50(%rsp), %rax
movq %rax, 0x40(%rsp)
movabsq $0x400000000, %rax # imm = 0x400000000
movq %rax, 0x48(%rsp)
movq %rbp, %rdi
movq %rbx, %rdx
callq 0xc1b3ca
movq 0x48(%rbx), %r14
movq 0x40(%rsp), %r13
movl 0x48(%rsp), %ebp
movq %rbx, %rdi
callq 0x2a9f76a
movw $0x105, 0x110(%rsp) # imm = 0x105
movq %rax, 0xf0(%rsp)
movq %rdx, 0xf8(%rsp)
leaq 0x18(%rbx), %rax
movq %rax, 0x80(%rsp)
movq $0x0, 0x88(%rsp)
movups 0x80(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %r14, %rdi
movq %r12, %rsi
movq %r13, %rdx
movq %rbp, %rcx
leaq 0xf0(%rsp), %r8
callq 0x933398
movq %rax, %r13
movq %rbx, %rdi
callq 0x2a57ac6
movzbl %al, %esi
movq %r13, %rdi
callq 0x2a57a9a
movq 0x40(%rsp), %rdi
leaq 0x50(%rsp), %rax
cmpq %rax, %rdi
je 0x10afa9c
callq 0x780910
leaq 0x40(%rsp), %rbp
movq 0x18(%rsp), %rdi
cmpb $0x3f, %r15b
je 0x10afca6
movb (%rbx), %r15b
cmpb $0x4e, %r15b
jne 0x10afb15
movq %rdi, %r13
movq %rbx, %rdi
callq 0x2a9f5fc
movq %rax, %rdi
movl $0x65, %esi
callq 0x2a9abe8
movq %rax, %r14
movzbl (%rbx), %ebp
addl $-0x1d, %ebp
movq %rbx, %rdi
callq 0x2a9f76a
movw $0x105, 0x60(%rsp) # imm = 0x105
movq %rax, 0x40(%rsp)
movq %rdx, 0x48(%rsp)
leaq 0x18(%rbx), %r8
movl %ebp, %edi
leaq 0x40(%rsp), %rbp
movq %r12, %rsi
movq %r14, %rdx
movq %rbp, %rcx
xorl %r9d, %r9d
callq 0x2a5a386
movq %r13, %rdi
movq %rax, %r13
cmpb $0x4e, %r15b
je 0x10afca6
movb (%rbx), %al
cmpb $0x4f, %al
cmoveq %r12, %r13
je 0x10afca6
cmpb $0x55, %al
movl $0x0, %r14d
cmoveq %rbx, %r14
testq %r14, %r14
je 0x10afb9f
movq %rdi, %r15
movq %r12, %rdi
movq %r14, %rsi
callq 0x10b07b4
movq %r15, %rdi
cmpq $0x0, (%r15)
je 0x10afb71
movq 0x8(%rdi), %rcx
movq 0x10(%rdi), %rdx
movq %rcx, (%rdx)
movq 0x8(%rdi), %rcx
testq %rcx, %rcx
je 0x10afb71
movq 0x10(%rdi), %rdx
movq %rdx, 0x10(%rcx)
movq %rax, (%rdi)
movq %r14, %r13
testq %rax, %rax
je 0x10afb9f
movq 0x10(%rax), %rcx
movq %rcx, 0x8(%rdi)
testq %rcx, %rcx
je 0x10afb91
leaq 0x8(%rdi), %rdx
movq %rdx, 0x10(%rcx)
addq $0x10, %rax
movq %rax, 0x10(%rdi)
movq %rdi, (%rax)
movq %r14, %r13
testq %r14, %r14
jne 0x10afca6
cmpb $0x3e, (%rbx)
movl $0x0, %r14d
cmoveq %rbx, %r14
testq %r14, %r14
je 0x10afc2c
movq -0x40(%r14), %rax
movq %r14, %r13
cmpq (%rdi), %rax
jne 0x10afc2c
movq %rdi, %r15
movq %r12, %rdi
movq %r14, %rsi
callq 0x10b07b4
cmpq $0x0, -0x40(%r14)
je 0x10afbf7
movq -0x38(%r14), %rcx
movq -0x30(%r14), %rdx
movq %rcx, (%rdx)
movq -0x38(%r14), %rcx
testq %rcx, %rcx
je 0x10afbf7
movq -0x30(%r14), %rdx
movq %rdx, 0x10(%rcx)
leaq -0x40(%r14), %rcx
movq %rax, (%rcx)
movq %r14, %r13
testq %rax, %rax
movq %r15, %rdi
je 0x10afc2c
movq 0x10(%rax), %rdx
movq %rdx, -0x38(%r14)
testq %rdx, %rdx
je 0x10afc1e
leaq -0x38(%r14), %rsi
movq %rsi, 0x10(%rdx)
addq $0x10, %rax
movq %rax, -0x30(%r14)
movq %rcx, (%rax)
movq %r14, %r13
testq %r14, %r14
jne 0x10afca6
cmpb $0x4c, (%rbx)
movl $0x0, %r14d
cmoveq %rbx, %r14
jne 0x10afca6
movq -0x20(%r14), %rax
cmpq (%rdi), %rax
jne 0x10afca3
movq %r12, %rdi
movq %r14, %rsi
callq 0x10b07b4
cmpq $0x0, -0x20(%r14)
je 0x10afc77
movq -0x18(%r14), %rcx
movq -0x10(%r14), %rdx
movq %rcx, (%rdx)
movq -0x18(%r14), %rcx
testq %rcx, %rcx
je 0x10afc77
movq -0x10(%r14), %rdx
movq %rdx, 0x10(%rcx)
leaq -0x20(%r14), %rcx
movq %rax, (%rcx)
testq %rax, %rax
je 0x10afca3
movq 0x10(%rax), %rdx
movq %rdx, -0x18(%r14)
testq %rdx, %rdx
je 0x10afc98
leaq -0x18(%r14), %rsi
movq %rsi, 0x10(%rdx)
addq $0x10, %rax
movq %rax, -0x10(%r14)
movq %rcx, (%rax)
movq %r14, %r13
testq %r13, %r13
je 0x10afdb7
cmpq %rbx, %r13
je 0x10afdb7
movq 0x10(%rbx), %r14
testq %r14, %r14
je 0x10afda7
movq %r14, 0x40(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x48(%rsp)
movq %r13, 0x50(%rsp)
movl 0x130(%rsp), %edx
movq %rbp, %rax
cmpl 0x134(%rsp), %edx
jae 0x10afd1b
movq 0x128(%rsp), %rcx
movl 0x130(%rsp), %edx
leaq (%rdx,%rdx,2), %rdx
movq 0x10(%rax), %rsi
movq %rsi, 0x10(%rcx,%rdx,8)
movups (%rax), %xmm0
movups %xmm0, (%rcx,%rdx,8)
incl 0x130(%rsp)
movq 0x8(%r14), %r14
jmp 0x10afcbc
movq 0x128(%rsp), %rax
leaq (%rdx,%rdx,2), %rcx
cmpq %rbp, %rax
setbe %sil
leaq (%rax,%rcx,8), %rcx
cmpq %rbp, %rcx
movq %rbp, %r12
seta %bpl
andb %sil, %bpl
movq $-0x1, %r15
cmpb $0x1, %bpl
je 0x10afd8d
incq %rdx
movl $0x18, %ecx
leaq 0x128(%rsp), %rdi
leaq 0x138(%rsp), %rsi
callq 0x2b4ed86
movq %r12, %rax
testb %bpl, %bpl
movq %r12, %rbp
je 0x10afceb
leaq (%r15,%r15,2), %rax
shlq $0x3, %rax
addq 0x128(%rsp), %rax
jmp 0x10afceb
movq %r12, %r15
subq %rax, %r15
sarq $0x3, %r15
movabsq $-0x5555555555555555, %rax # imm = 0xAAAAAAAAAAAAAAAB
imulq %rax, %r15
jmp 0x10afd4c
leaq 0x90(%rsp), %rdi
movq %rbx, %rsi
callq 0x931722
movl 0x130(%rsp), %eax
testl %eax, %eax
jne 0x10af930
movl 0x98(%rsp), %ebx
testq %rbx, %rbx
je 0x10afdee
movq 0x90(%rsp), %r14
shlq $0x3, %rbx
movq -0x8(%r14,%rbx), %rdi
callq 0x2a512f8
addq $-0x8, %rbx
jne 0x10afdde
movq 0x90(%rsp), %rdi
leaq 0xa0(%rsp), %rbx
cmpq %rbx, %rdi
je 0x10afe08
callq 0x780910
movq 0x128(%rsp), %rdi
leaq 0x138(%rsp), %rax
cmpq %rax, %rdi
je 0x10afe22
callq 0x780910
movq 0x30(%rsp), %rcx
addq $0x8, %rcx
cmpq 0x38(%rsp), %rcx
jne 0x10af8ba
movq 0xe8(%rsp), %rdi
movq 0x4a8(%rdi), %rax
addq $0x4a8, %rdi # imm = 0x4A8
callq *0x90(%rax)
movq %rax, %r14
movq 0x20(%rsp), %r13
movq 0x18(%r13), %rbx
movq %r13, %rdi
callq 0x2a3af06
movq %rax, %r15
movq 0x28(%rbx), %rsi
leaq 0x128(%rsp), %r12
movq %r12, %rdi
callq 0x2a00194
movq %r14, %rdi
movq %rbx, %rsi
movq %r15, %rdx
movq %r12, %rcx
callq 0x1083280
movl %eax, %ecx
movl $0x1, %r15d
shlq %cl, %r15
movq %r13, %rdi
movl $0x51, %esi
callq 0x2a3afce
leaq 0x90(%rsp), %rdi
movq %rax, (%rdi)
callq 0x29a8f76
cmpq %r15, %rax
jae 0x10b0242
movq %rbx, %rdi
callq 0x2a3b1a0
movq %rax, %rdi
movl $0x51, %esi
movq %r15, 0x18(%rsp)
movq %r15, %rdx
callq 0x29a6594
movq %rax, %rbx
movq %r13, %rdi
movl $0x51, %esi
callq 0x2a3b254
movq %r13, %rdi
movq %rbx, %rsi
callq 0x2a3b1ec
leaq 0x50(%rsp), %rcx
movq %rcx, -0x10(%rcx)
movabsq $0x300000000, %rax # imm = 0x300000000
movq %rax, -0x8(%rcx)
xorps %xmm0, %xmm0
leaq 0x90(%rsp), %r14
movaps %xmm0, (%r14)
movaps %xmm0, 0x40(%r14)
movaps %xmm0, 0x30(%r14)
movaps %xmm0, 0x20(%r14)
movaps %xmm0, 0x10(%r14)
movq $0x8, 0x8(%r14)
movl $0x40, %edi
callq 0x7808d0
movq %rax, %r15
movq %rax, (%r14)
leaq 0x18(%rax), %rbx
movl $0x200, %edi # imm = 0x200
callq 0x7808d0
movq %rax, 0x18(%r15)
movq %rbx, 0x28(%r14)
movq %rax, 0x18(%r14)
leaq 0x200(%rax), %rcx
movq %rcx, 0x20(%r14)
movq %rbx, 0x48(%r14)
movq %rax, 0x38(%r14)
movq %rcx, 0x40(%r14)
movq %rax, 0x10(%r14)
movq %rax, 0x30(%r14)
leaq 0xf0(%rsp), %rsi
movq 0x28(%rsp), %rax
movq %rax, (%rsi)
movq $0x0, 0x8(%rsi)
movq %r14, %rdi
callq 0x10b0898
movq %r13, %rdi
callq 0x109ea7a
movq 0x10(%r14), %rax
cmpq %rax, 0x30(%r14)
je 0x10b0164
leaq 0x90(%rsp), %r14
leaq 0xf0(%rsp), %r15
leaq 0x128(%rsp), %r12
movq (%rax), %r13
movq 0x8(%rax), %rbx
movq 0xb0(%rsp), %rcx
addq $-0x10, %rcx
cmpq %rcx, %rax
je 0x10affee
addq $0x10, %rax
jmp 0x10b002f
movq 0xa8(%rsp), %rdi
movl $0x200, %esi # imm = 0x200
callq 0x7800d0
movq 0xb8(%rsp), %rax
leaq 0x8(%rax), %rcx
movq %rcx, 0xb8(%rsp)
movq 0x8(%rax), %rax
movq %rax, 0xa8(%rsp)
leaq 0x200(%rax), %rcx
movq %rcx, 0xb0(%rsp)
movq %rax, 0xa0(%rsp)
movq 0x10(%r13), %r13
testq %r13, %r13
je 0x10b014e
movq 0x18(%r13), %rbp
movzbl (%rbp), %eax
cmpl $0x4e, %eax
je 0x10b010e
cmpl $0x3f, %eax
je 0x10b0090
cmpl $0x3d, %eax
jne 0x10b0129
movl 0x48(%rsp), %edx
cmpl 0x4c(%rsp), %edx
jae 0x10b0132
movq 0x40(%rsp), %rax
movl 0x48(%rsp), %ecx
shlq $0x4, %rcx
movq %rbp, (%rax,%rcx)
movq %rbx, 0x8(%rax,%rcx)
incl 0x48(%rsp)
jmp 0x10b0129
movq %r12, %rdi
movl $0x65, %esi
callq 0x2a00468
movl 0xc(%rax), %esi
movq %r15, %rdi
xorl %edx, %edx
xorl %ecx, %ecx
callq 0x91d2c6
movq %rbp, %rdi
movq %r12, %rsi
movq %r15, %rdx
callq 0x2a57ae2
testb %al, %al
je 0x10b00f0
movq %r15, %rdi
movq $-0x1, %rsi
callq 0x91db9c
movq %rbp, 0x80(%rsp)
addq %rbx, %rax
movq %rax, 0x88(%rsp)
movq %r14, %rdi
leaq 0x80(%rsp), %rsi
callq 0x10b0898
cmpl $0x41, 0xf8(%rsp)
jb 0x10b0129
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x10b0129
callq 0x7802b0
jmp 0x10b0129
movq %rbp, 0xf0(%rsp)
movq %rbx, 0xf8(%rsp)
movq %r14, %rdi
movq %r15, %rsi
callq 0x10b0898
movq 0x8(%r13), %r13
jmp 0x10b003b
incq %rdx
movl $0x10, %ecx
leaq 0x40(%rsp), %rdi
leaq 0x50(%rsp), %rsi
callq 0x2b4ed86
jmp 0x10b0071
movq 0xa0(%rsp), %rax
cmpq %rax, 0xc0(%rsp)
jne 0x10affd0
movl 0x48(%rsp), %ebx
testq %rbx, %rbx
movq 0x18(%rsp), %r12
je 0x10b01d9
movq 0x40(%rsp), %r14
shlq $0x4, %rbx
xorl %r15d, %r15d
movq 0x8(%r14,%r15), %rsi
movq %r12, %rdi
callq 0x10b0a86
testq %rax, %rax
je 0x10b019a
bsrq %rax, %rax
xorq $0x3f, %rax
jmp 0x10b019f
movl $0x40, %eax
movb $0x3f, %cl
subb %al, %cl
movq (%r14,%r15), %rax
movzwl 0x2(%rax), %edx
movl %edx, %esi
shrb %sil
andb $0x3f, %sil
movzbl %cl, %ecx
movzbl %sil, %esi
cmpb %sil, %cl
cmoval %ecx, %esi
andl $-0x7f, %edx
movzbl %sil, %ecx
addl %ecx, %ecx
orl %edx, %ecx
movw %cx, 0x2(%rax)
addq $0x10, %r15
cmpq %r15, %rbx
jne 0x10b017e
movq 0x90(%rsp), %r14
testq %r14, %r14
je 0x10b022e
movq 0xb8(%rsp), %rbx
movq 0xd8(%rsp), %r15
leaq 0x8(%r15), %rax
cmpq %rax, %rbx
jae 0x10b021a
addq $-0x8, %rbx
movq 0x8(%rbx), %rdi
addq $0x8, %rbx
movl $0x200, %esi # imm = 0x200
callq 0x7800d0
cmpq %r15, %rbx
jb 0x10b0203
movq 0x98(%rsp), %rsi
shlq $0x3, %rsi
movq %r14, %rdi
callq 0x7800d0
movq 0x40(%rsp), %rdi
leaq 0x50(%rsp), %rax
cmpq %rax, %rdi
je 0x10b0242
callq 0x780910
leaq 0x128(%rsp), %rdi
callq 0x2a00528
movq 0x2e8(%rsp), %rdi
leaq 0x2f8(%rsp), %rax
cmpq %rax, %rdi
je 0x10b05ad
callq 0x780910
jmp 0x10b05ad
movq 0x38(%rsp), %r15
movq %r15, %rdi
callq 0x2a3b2e4
movq %rax, %r13
movl 0x4(%rax), %ebp
movq 0x20(%rsp), %r14
movq %r14, %rdi
callq 0x109ea7a
testb %al, %al
je 0x10b0416
movq 0x50(%r15), %rax
leaq -0x18(%rax), %rcx
testq %rax, %rax
cmoveq %rax, %rcx
movq 0x38(%rcx), %rax
leaq -0x18(%rax), %rsi
testq %rax, %rax
cmoveq %rax, %rsi
leaq 0x128(%rsp), %rbp
movq %rbp, %rdi
xorl %edx, %edx
xorl %ecx, %ecx
xorl %r8d, %r8d
callq 0x92eb52
movq 0x48(%rbp), %rdi
movl $0x65, %esi
callq 0x2a9abe8
movq %rax, %rbx
movq %r14, %rdi
callq 0x2a9f76a
movw $0x305, %r14w # imm = 0x305
leaq 0x2e8(%rsp), %r15
movw %r14w, 0x20(%r15)
movq %rax, (%r15)
movq %rdx, 0x8(%r15)
leaq 0x3f57692(%rip), %rax # 0x500799a
movq %rax, 0x10(%r15)
movq %rbp, %rdi
movl $0x32, %esi
movq 0x20(%rsp), %rdx
movq %rbx, %rcx
movq %r15, %r8
callq 0x932298
movq %rax, %rbx
movq 0x48(%rbp), %rdi
xorl %esi, %esi
callq 0x2a9abe8
movq %rax, %r12
leaq 0x90(%rsp), %r13
movq %rbx, (%r13)
movq %rbx, %rdi
callq 0x2a9f76a
movw %r14w, 0x20(%r15)
movq 0x20(%rsp), %r14
movq %rax, (%r15)
movq %rdx, 0x8(%r15)
leaq 0x3f576c5(%rip), %rax # 0x5007a26
movq %rax, 0x10(%r15)
movq %r15, (%rsp)
movl $0x1, %r8d
movq %rbp, %rdi
movq %r12, %rsi
movl $0x208b, %edx # imm = 0x208B
movq %r13, %rcx
xorl %r9d, %r9d
callq 0x2a4c2d4
movq %r14, %rdi
movq %rax, %rsi
callq 0x2a9fd6e
cmpq $0x0, -0x20(%rbx)
je 0x10b03b3
movq -0x18(%rbx), %rax
movq -0x10(%rbx), %rcx
movq %rax, (%rcx)
movq -0x18(%rbx), %rax
testq %rax, %rax
je 0x10b03b3
movq -0x10(%rbx), %rcx
movq %rcx, 0x10(%rax)
leaq -0x20(%rbx), %rax
movq %r14, -0x20(%rbx)
movq 0x10(%r14), %rcx
movq %rcx, -0x18(%rbx)
testq %rcx, %rcx
leaq 0x138(%rsp), %r14
je 0x10b03db
movq %rbx, %rdx
addq $-0x18, %rdx
movq %rdx, 0x10(%rcx)
movq 0x28(%rsp), %rcx
movq %rcx, -0x10(%rbx)
movq %rax, (%rcx)
leaq 0x1b0(%rsp), %rdi
callq 0x2a4d93c
leaq 0x1a8(%rsp), %rbx
movq %rbx, %rdi
callq 0x2a4dd0c
movq -0x80(%rbx), %rdi
cmpq %r14, %rdi
jne 0x10b0268
jmp 0x10b05ad
movl $0x50, %edi
movl $0x1, %esi
callq 0x2a9ec74
movq %rax, %rbx
movq %r14, %rdi
callq 0x2a9f76a
movw $0x105, %cx # imm = 0x105
leaq 0x128(%rsp), %rsi
movw %cx, 0x20(%rsi)
movq %rax, (%rsi)
movq %rdx, 0x8(%rsi)
movq %rsi, %rcx
movl $0x1, %r9d
movq %rbx, %rdi
movq 0x30(%rsp), %r12
movq %r12, %rsi
movl %ebp, %edx
movq 0x18(%rsp), %r8
callq 0x2a56ef8
movl 0x20(%r14), %esi
addq $0x70, %r15
movq %r15, %rdi
callq 0x29acc94
movl %eax, %ebp
movq %r13, %rdi
movq %r12, %rsi
callq 0x2a009b6
btl $0x8, %ebp
movzbl %al, %eax
cmovbl %ebp, %eax
movzwl 0x2(%rbx), %ecx
andl $-0x40, %ecx
movzbl %al, %eax
orl %ecx, %eax
movw %ax, 0x2(%rbx)
movq %r14, %rdi
movq %rbx, %rsi
callq 0x2a9fd6e
movl $0x48, %edi
movl $0x1, %esi
callq 0x2a9ec74
movq %rax, %r15
movq %r14, %rdi
callq 0x2a9f5fc
movq %rax, %rdi
movl $0x65, %esi
callq 0x2a9abe8
movq %rax, %r13
movq %r14, %rdi
callq 0x2a9f76a
movw $0x105, %cx # imm = 0x105
leaq 0x128(%rsp), %rsi
movw %cx, 0x20(%rsi)
movq %rax, (%rsi)
movq %rdx, 0x8(%rsi)
leaq 0x128(%rsp), %rbp
movl $0x1, %r9d
movq %r15, %rdi
movq %r14, %rsi
movq %r13, %rdx
movq %rbp, %rcx
movq 0x18(%rsp), %r13
movq %r13, %r8
callq 0x2a5b442
movl $0x50, %edi
movl $0x1, %esi
callq 0x2a9ec74
movq %rax, %r12
movq %r14, %rdi
callq 0x2a9f76a
movw $0x105, %cx # imm = 0x105
movw %cx, 0x20(%rbp)
movq %rax, (%rbp)
movq %rdx, 0x8(%rbp)
movb 0x2(%rbx), %al
andb $0x3f, %al
movq %r13, 0x118(%rsp)
movq $0x1, 0x120(%rsp)
movups 0x118(%rsp), %xmm0
movups %xmm0, (%rsp)
movzbl %al, %r9d
leaq 0x128(%rsp), %rcx
movq %r12, %rdi
movq 0x30(%rsp), %rsi
movq %r15, %rdx
xorl %r8d, %r8d
callq 0x2a570a8
movl $0x50, %edi
movl $0x2, %esi
callq 0x2a9ec74
movl $0x1, %r8d
movq %rax, %rdi
movq %r12, %rsi
movq %rbx, %rdx
movq %r13, %rcx
callq 0x2a57182
addq $0x378, %rsp # imm = 0x378
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/NVPTX/NVPTXLowerArgs.cpp
|
llvm::Value** llvm::SmallVectorImpl<llvm::Value*>::insert<llvm::Value::user_iterator_impl<llvm::User>, void>(llvm::Value**, llvm::Value::user_iterator_impl<llvm::User>, llvm::Value::user_iterator_impl<llvm::User>)
|
iterator insert(iterator I, ItTy From, ItTy To) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->begin()+InsertElt;
}
assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
// Check that the reserve that follows doesn't invalidate the iterators.
this->assertSafeToAddRange(From, To);
size_t NumToInsert = std::distance(From, To);
// Ensure there is enough space.
reserve(this->size() + NumToInsert);
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(std::move_iterator<iterator>(this->end() - NumToInsert),
std::move_iterator<iterator>(this->end()));
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->set_size(this->size() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
*J = *From;
++J; ++From;
}
// Insert the non-overwritten middle part.
this->uninitialized_copy(From, To, OldEnd);
return I;
}
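
A usage sketch with plain ints in place of the Value*/user-iterator instantiation above; inserting three elements before the last one leaves fewer existing elements past the insertion point than are being inserted, so this exercises the second (overwrite-then-copy) path.

#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cassert>
#include <iterator>

int main() {
  llvm::SmallVector<int, 8> V = {1, 5};
  int Extra[] = {2, 3, 4};
  V.insert(V.begin() + 1, std::begin(Extra), std::end(Extra));
  int Expected[] = {1, 2, 3, 4, 5};
  assert(std::equal(V.begin(), V.end(), std::begin(Expected),
                    std::end(Expected)));
}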
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %rbx
movq %rdx, %r14
movq %rdi, %r13
movq (%rdi), %rax
movq %rsi, %rbp
subq %rax, %rbp
movl 0x8(%rdi), %edx
leaq (%rax,%rdx,8), %rax
cmpq %rsi, %rax
je 0x10b06bb
xorl %r15d, %r15d
cmpq %rbx, %r14
je 0x10b0604
movq %r14, %rax
movq 0x8(%rax), %rax
incq %r15
cmpq %rbx, %rax
jne 0x10b05f8
addq %r15, %rdx
movl 0xc(%r13), %eax
cmpq %rax, %rdx
jbe 0x10b0621
leaq 0x10(%r13), %rsi
movl $0x8, %ecx
movq %r13, %rdi
callq 0x2b4ed86
movq (%r13), %rax
leaq (%rax,%rbp), %r8
movl 0x8(%r13), %ecx
leaq (,%rcx,8), %rdi
leaq (%rax,%rcx,8), %r12
movq %rdi, %rdx
subq %rbp, %rdx
movq %rbp, %rsi
movq %rdx, %rbp
sarq $0x3, %rbp
cmpq %r15, %rbp
jae 0x10b06d2
addl %ecx, %r15d
movl %r15d, 0x8(%r13)
movq %rsi, (%rsp)
cmpq %rsi, %rdi
movq %r8, %r13
je 0x10b0685
movl %r15d, %ecx
movq %rdi, %r15
leaq (%rax,%rcx,8), %rdi
leaq (,%rbp,8), %rax
subq %rax, %rdi
movq %r13, %rsi
callq 0x780890
movq %r15, %rdi
cmpq (%rsp), %rdi
je 0x10b06b4
xorl %eax, %eax
movq 0x18(%r14), %rcx
movq %rcx, (%r13,%rax,8)
movq 0x8(%r14), %r14
incq %rax
cmpq %rax, %rbp
jne 0x10b068d
jmp 0x10b06b4
movq 0x18(%r14), %rax
movq %rax, (%r12)
addq $0x8, %r12
movq 0x8(%r14), %r14
cmpq %rbx, %r14
jne 0x10b06a4
jmp 0x10b071f
movq %r13, %rdi
movq %r14, %rsi
movq %rbx, %rdx
callq 0x10b0732
addq (%r13), %rbp
movq %rbp, %r13
jmp 0x10b071f
shlq $0x3, %r15
movq %r12, %rbp
subq %r15, %rbp
movq %r13, %rdi
movq %rbp, %rsi
movq %r12, %rdx
movq %r8, %r13
callq 0xe1eeb8
subq %r13, %rbp
je 0x10b0703
subq %rbp, %r12
movq %r12, %rdi
movq %r13, %rsi
movq %rbp, %rdx
callq 0x780120
cmpq %rbx, %r14
je 0x10b071f
movq %r13, %rax
movq 0x18(%r14), %rcx
movq %rcx, (%rax)
addq $0x8, %rax
movq 0x8(%r14), %r14
cmpq %rbx, %r14
jne 0x10b070b
movq %r13, %rax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/llvm/ADT/SmallVector.h
|
llvm::PPCInstrInfo::foldImmediate(llvm::MachineInstr&, llvm::MachineInstr&, llvm::Register, llvm::MachineRegisterInfo*) const
|
bool PPCInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
Register Reg, MachineRegisterInfo *MRI) const {
bool Changed = onlyFoldImmediate(UseMI, DefMI, Reg);
if (MRI->use_nodbg_empty(Reg))
DefMI.eraseFromParent();
return Changed;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %r8, %r14
movl %ecx, %r15d
movq %rdx, %rbx
callq 0x1108268
movl %eax, %ebp
movq %r14, %rdi
movl %r15d, %esi
callq 0x96036a
testq %rax, %rax
jne 0x11083a1
movq %rbx, %rdi
callq 0x1d3deba
movl %ebp, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/Target/PowerPC/PPCInstrInfo.cpp
|
(anonymous namespace)::PPCDAGToDAGISel::tryAsSingleRLDICR(llvm::SDNode*)
|
bool PPCDAGToDAGISel::tryAsSingleRLDICR(SDNode *N) {
assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected");
uint64_t Imm64;
if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64) ||
!isMask_64(~Imm64))
return false;
// If this is a negated 64-bit zero-extension mask,
  // i.e. the immediate is a sequence of ones from the most significant side
  // and all zeros for the remainder, we should use rldicr.
unsigned MB = 63 - llvm::countr_one(~Imm64);
unsigned SH = 0;
SDLoc dl(N);
SDValue Ops[] = {N->getOperand(0), getI32Imm(SH, dl), getI32Imm(MB, dl)};
CurDAG->SelectNodeTo(N, PPC::RLDICR, MVT::i64, Ops);
return true;
}
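
A worked example of the mask arithmetic above: for the negated zero-extension mask Imm64 = 0xFFFFFFFF00000000, ~Imm64 is a mask of 32 trailing ones, so countr_one gives 32 and MB = 63 - 32 = 31; with SH = 0, RLDICR then keeps the 32 most-significant bits, which is exactly the original mask.

#include <cassert>
#include <cstdint>

static int countrOne(uint64_t X) { // trailing one bits, as countr_one
  int N = 0;
  while (X & 1) {
    X >>= 1;
    ++N;
  }
  return N;
}

int main() {
  uint64_t Imm64 = 0xFFFFFFFF00000000ULL;
  uint64_t Inv = ~Imm64;
  assert(Inv != 0 && ((Inv + 1) & Inv) == 0); // isMask_64(~Imm64)
  unsigned MB = 63 - countrOne(Inv);
  assert(MB == 31);
}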
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq 0x28(%rsi), %rax
movq 0x28(%rax), %rax
cmpl $0xb, 0x18(%rax)
jne 0x111cc31
movq 0x30(%rax), %rcx
cmpb $0x8, (%rcx)
jne 0x111cc31
movq 0x58(%rax), %rax
cmpl $0x41, 0x20(%rax)
jb 0x111cc35
movq 0x18(%rax), %rax
jmp 0x111cc39
xorl %eax, %eax
jmp 0x111cc3e
addq $0x18, %rax
movq (%rax), %rcx
movb $0x1, %al
testb %al, %al
je 0x111cc6a
xorl %eax, %eax
cmpq $-0x1, %rcx
je 0x111cd54
leaq -0x1(%rcx), %rdx
orq %rcx, %rdx
cmpq $-0x1, %rdx
jne 0x111cd54
testq %rcx, %rcx
je 0x111cc71
bsfq %rcx, %rax
jmp 0x111cc76
xorl %eax, %eax
jmp 0x111cd54
movl $0x40, %eax
movl $0x3f, %r12d
subq %rax, %r12
movq 0x48(%rbx), %rsi
movq %rsi, 0x10(%rsp)
testq %rsi, %rsi
je 0x111cc9c
leaq 0x10(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl 0x44(%rbx), %eax
leaq 0x10(%rsp), %r15
movl %eax, 0x8(%r15)
movq 0x28(%rbx), %rax
movl 0x8(%rax), %ecx
leaq 0x30(%rsp), %r13
movl %ecx, 0x8(%r13)
movq (%rax), %rax
movq %rax, (%r13)
movq 0x38(%r14), %rdi
xorl %ebp, %ebp
movl %ebp, (%rsp)
xorl %esi, %esi
movq %r15, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x10(%r13)
movl %edx, 0x18(%r13)
movq 0x38(%r14), %rdi
movl %r12d, %esi
movl %ebp, (%rsp)
movq %r15, %rdx
movl $0x7, %ecx
xorl %r8d, %r8d
movl $0x1, %r9d
callq 0x17645fe
movq %rax, 0x20(%r13)
movl %edx, 0x28(%r13)
movq 0x38(%r14), %rdi
movq %r13, 0x20(%rsp)
movq $0x3, 0x28(%rsp)
movups 0x20(%rsp), %xmm0
movups %xmm0, (%rsp)
movq %rbx, %rsi
movl $0x719, %edx # imm = 0x719
movl $0x8, %ecx
xorl %r8d, %r8d
callq 0x178e962
movq (%r15), %rsi
testq %rsi, %rsi
je 0x111cd52
leaq 0x10(%rsp), %rdi
callq 0x2a758fc
movb $0x1, %al
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/Target/PowerPC/PPCISelDAGToDAG.cpp
|
llvm::APSInt::operator>=(long) const
|
bool operator>=(int64_t RHS) const {
return compareValues(*this, get(RHS)) >= 0;
}
|
pushq %r14
pushq %rbx
subq $0x18, %rsp
leaq 0x8(%rsp), %r14
movl $0x40, 0x8(%r14)
movq %rsi, (%r14)
movb $0x0, 0xc(%r14)
movq %r14, %rsi
callq 0x112a9f4
movl %eax, %ebx
cmpl $0x41, 0x8(%r14)
jb 0x112a9e6
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x112a9e6
callq 0x7802b0
testl %ebx, %ebx
setns %al
addq $0x18, %rsp
popq %rbx
popq %r14
retq
nop
|
/llvm/ADT/APSInt.h
|
llvm::PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(llvm::AtomicCmpXchgInst*) const
|
TargetLowering::AtomicExpansionKind
PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
unsigned Size = AI->getNewValOperand()->getType()->getPrimitiveSizeInBits();
if (shouldInlineQuadwordAtomics() && Size == 128)
return AtomicExpansionKind::MaskedIntrinsic;
return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
}
|
pushq %r14
pushq %rbx
subq $0x18, %rsp
movq %rdi, %rbx
movq -0x20(%rsi), %rax
movq 0x8(%rax), %rdi
callq 0x2a9a7de
leaq 0x8(%rsp), %rdi
movq %rax, (%rdi)
movb %dl, 0x8(%rdi)
callq 0x2b60e74
movq %rax, %r14
movq 0x4e0f8(%rbx), %rdi
callq 0x10d80c0
movl %eax, %ecx
xorl %eax, %eax
movl $0x0, %edx
testb %cl, %cl
je 0x116e7b2
movq 0x4e0f8(%rbx), %rcx
movb 0x1fc(%rcx), %dl
cmpl $0x80, %r14d
movzbl %dl, %ecx
leal (%rcx,%rcx,4), %ecx
cmovel %ecx, %eax
addq $0x18, %rsp
popq %rbx
popq %r14
retq
|
/Target/PowerPC/PPCISelLowering.cpp
|
llvm::SmallVectorTemplateBase<(anonymous namespace)::BucketElement, true>::push_back((anonymous namespace)::BucketElement)
|
void push_back(ValueParamT Elt) {
const T *EltPtr = reserveForParamAndGetAddress(Elt);
memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
this->set_size(this->size() + 1);
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r14
movl 0x8(%rdi), %edx
cmpl 0xc(%rdi), %edx
jae 0x117e854
movq (%r14), %rax
movl 0x8(%r14), %ecx
shlq $0x4, %rcx
movq %r15, (%rax,%rcx)
movq %rbx, 0x8(%rax,%rcx)
incl 0x8(%r14)
popq %rbx
popq %r14
popq %r15
retq
incq %rdx
leaq 0x10(%r14), %rsi
movl $0x10, %ecx
movq %r14, %rdi
callq 0x2b4ed86
jmp 0x117e836
|
/llvm/ADT/SmallVector.h
|
llvm::RISCVInstrInfo::mulImm(llvm::MachineFunction&, llvm::MachineBasicBlock&, llvm::MachineInstrBundleIterator<llvm::MachineInstr, false>, llvm::DebugLoc const&, llvm::Register, unsigned int, llvm::MachineInstr::MIFlag) const
|
void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator II, const DebugLoc &DL,
Register DestReg, uint32_t Amount,
MachineInstr::MIFlag Flag) const {
MachineRegisterInfo &MRI = MF.getRegInfo();
if (llvm::has_single_bit<uint32_t>(Amount)) {
uint32_t ShiftAmount = Log2_32(Amount);
if (ShiftAmount == 0)
return;
BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
.addReg(DestReg, RegState::Kill)
.addImm(ShiftAmount)
.setMIFlag(Flag);
} else if (STI.hasStdExtZba() &&
((Amount % 3 == 0 && isPowerOf2_64(Amount / 3)) ||
(Amount % 5 == 0 && isPowerOf2_64(Amount / 5)) ||
(Amount % 9 == 0 && isPowerOf2_64(Amount / 9)))) {
// We can use Zba SHXADD+SLLI instructions for multiply in some cases.
unsigned Opc;
uint32_t ShiftAmount;
if (Amount % 9 == 0) {
Opc = RISCV::SH3ADD;
ShiftAmount = Log2_64(Amount / 9);
} else if (Amount % 5 == 0) {
Opc = RISCV::SH2ADD;
ShiftAmount = Log2_64(Amount / 5);
} else if (Amount % 3 == 0) {
Opc = RISCV::SH1ADD;
ShiftAmount = Log2_64(Amount / 3);
} else {
llvm_unreachable("implied by if-clause");
}
if (ShiftAmount)
BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
.addReg(DestReg, RegState::Kill)
.addImm(ShiftAmount)
.setMIFlag(Flag);
BuildMI(MBB, II, DL, get(Opc), DestReg)
.addReg(DestReg, RegState::Kill)
.addReg(DestReg)
.setMIFlag(Flag);
} else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
uint32_t ShiftAmount = Log2_32(Amount - 1);
BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
.addReg(DestReg)
.addImm(ShiftAmount)
.setMIFlag(Flag);
BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
.addReg(ScaledRegister, RegState::Kill)
.addReg(DestReg, RegState::Kill)
.setMIFlag(Flag);
} else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
uint32_t ShiftAmount = Log2_32(Amount + 1);
BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
.addReg(DestReg)
.addImm(ShiftAmount)
.setMIFlag(Flag);
BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
.addReg(ScaledRegister, RegState::Kill)
.addReg(DestReg, RegState::Kill)
.setMIFlag(Flag);
} else if (STI.hasStdExtZmmul()) {
Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
movImm(MBB, II, DL, N, Amount, Flag);
BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
.addReg(DestReg, RegState::Kill)
.addReg(N, RegState::Kill)
.setMIFlag(Flag);
} else {
Register Acc;
uint32_t PrevShiftAmount = 0;
for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
if (Amount & (1U << ShiftAmount)) {
if (ShiftAmount)
BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
.addReg(DestReg, RegState::Kill)
.addImm(ShiftAmount - PrevShiftAmount)
.setMIFlag(Flag);
if (Amount >> (ShiftAmount + 1)) {
        // If we don't have an accumulator yet, create it and copy DestReg.
if (!Acc) {
Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
BuildMI(MBB, II, DL, get(TargetOpcode::COPY), Acc)
.addReg(DestReg)
.setMIFlag(Flag);
} else {
BuildMI(MBB, II, DL, get(RISCV::ADD), Acc)
.addReg(Acc, RegState::Kill)
.addReg(DestReg)
.setMIFlag(Flag);
}
}
PrevShiftAmount = ShiftAmount;
}
}
assert(Acc && "Expected valid accumulator");
BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
.addReg(DestReg, RegState::Kill)
.addReg(Acc, RegState::Kill)
.setMIFlag(Flag);
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xd8, %rsp
movl %r9d, 0x4(%rsp)
movq %r8, 0x28(%rsp)
movq %rcx, %r13
movq %rdx, %r12
movl 0x118(%rsp), %ebp
movl 0x110(%rsp), %r14d
leal -0x1(%r14), %r15d
movl %r14d, %eax
xorl %r15d, %eax
cmpl %r15d, %eax
movq %rdi, 0x30(%rsp)
jbe 0x1198afb
testl %r14d, %r14d
je 0x1198bea
bsrl %r14d, %ebx
xorl $0x1f, %ebx
jmp 0x1198bef
movq %rdi, %rbx
movq 0x28(%rsi), %rax
movq %rax, 0xd0(%rsp)
movq 0x50(%rdi), %rcx
cmpb $0x1, 0x169(%rcx)
movq %r13, 0x68(%rsp)
movq %r12, 0x58(%rsp)
jne 0x1198b62
movl %r14d, %eax
movl $0xaaaaaaab, %edx # imm = 0xAAAAAAAB
imull $0xaaaaaaab, %r14d, %esi # imm = 0xAAAAAAAB
cmpl $0x55555555, %esi # imm = 0x55555555
jbe 0x1198e99
imull $0xcccccccd, %r14d, %esi # imm = 0xCCCCCCCD
cmpl $0x33333333, %esi # imm = 0x33333333
jbe 0x1198ef1
imull $0x38e38e39, %r14d, %esi # imm = 0x38E38E39
cmpl $0x1c71c71c, %esi # imm = 0x1C71C71C
jbe 0x11991ab
leal -0x2(%r14), %eax
movl %r15d, %edx
xorl %eax, %edx
cmpl %eax, %edx
jbe 0x1198ba4
leaq 0x4623c0a(%rip), %rsi # 0x57bc780
leaq 0x2dc293b(%rip), %rdx # 0x3f5b4b8
movq 0xd0(%rsp), %rdi
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, %ebp
testl %r15d, %r15d
je 0x1198f4b
bsrl %r15d, %r14d
xorl $0x1f, %r14d
jmp 0x1198f51
leal 0x1(%r14), %ebx
movl %ebx, %eax
xorl %r14d, %eax
cmpl %r14d, %eax
jbe 0x1198d1f
leaq 0x4623bc3(%rip), %rsi # 0x57bc780
leaq 0x2dc28f4(%rip), %rdx # 0x3f5b4b8
movq 0xd0(%rsp), %rdi
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, %ebp
testl %ebx, %ebx
je 0x119963e
bsrl %ebx, %r14d
xorl $0x1f, %r14d
jmp 0x1199644
movl $0x20, %ebx
subl $0x1f, %ebx
je 0x1199c74
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0xc8(%rsp)
testq %rsi, %rsi
je 0x1198c1f
leaq 0xc8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
negl %ebx
movq 0xc8(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x1198c4f
leaq 0xc8(%rsp), %r14
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movq $-0x653e0, %rcx # imm = 0xFFF9AC20
movq 0x30(%rsp), %rax
addq 0x8(%rax), %rcx
movq %r12, %rdi
movq %r13, %rsi
movl 0x4(%rsp), %r14d
movl %r14d, %r8d
callq 0x90f593
movq %rax, %r12
movq %rdx, %r15
xorl %ebp, %ebp
leaq 0x38(%rsp), %r13
movq %rbp, 0x8(%r13)
movl $0x4000000, (%r13) # imm = 0x4000000
movl %r14d, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %rax, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movl %ebx, %eax
movl $0xfff00000, %ecx # imm = 0xFFF00000
andl (%r13), %ecx
incl %ecx
movl %ecx, (%r13)
movq %rbp, 0x8(%r13)
movq %rax, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movl 0x118(%rsp), %eax
andl $0xffffff, %eax # imm = 0xFFFFFF
orl %eax, 0x2c(%r15)
leaq 0x8(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x1198d01
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0xc8(%rsp), %rsi
testq %rsi, %rsi
je 0x1199c74
leaq 0xc8(%rsp), %rdi
jmp 0x1199c6f
cmpb $0x0, 0x19c(%rcx)
je 0x1199241
leaq 0x4623a4d(%rip), %rsi # 0x57bc780
leaq 0x2dc277e(%rip), %rdx # 0x3f5b4b8
xorl %ebx, %ebx
movq 0xd0(%rsp), %rdi
xorl %ecx, %ecx
callq 0x1d82fd6
movl %r14d, %r9d
subq $0x8, %rsp
movq 0x38(%rsp), %rdi
movq %r12, %rsi
movq %r13, %rdx
movq 0x30(%rsp), %r14
movq %r14, %rcx
movl %eax, 0x60(%rsp)
movl %eax, %r8d
pushq %rbx
pushq %rbx
pushq %rbp
callq 0x118fae0
addq $0x20, %rsp
movq (%r14), %rsi
movq %rsi, 0x90(%rsp)
testq %rsi, %rsi
je 0x1198d9a
leaq 0x90(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0x90(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x1198dc8
leaq 0x90(%rsp), %r14
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movq $-0x649a0, %rcx # imm = 0xFFF9B660
movq 0x30(%rsp), %rax
addq 0x8(%rax), %rcx
movq %r12, %rdi
movq %r13, %rsi
movl 0x4(%rsp), %r14d
movl %r14d, %r8d
callq 0x90f593
movq %rax, %r15
movq %rdx, %r12
xorl %ebx, %ebx
leaq 0x38(%rsp), %r13
movq %rbx, 0x8(%r13)
movl $0x4000000, %ebp # imm = 0x4000000
movl %ebp, (%r13)
movl %r14d, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %rax, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movq %rbx, 0x8(%r13)
movl %ebp, (%r13)
movl 0x58(%rsp), %eax
movl %eax, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r12, %rdi
movq %r15, %rsi
callq 0x1d3c22c
movl 0x118(%rsp), %eax
andl $0xffffff, %eax # imm = 0xFFFFFF
orl %eax, 0x2c(%r12)
leaq 0x8(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x1198e7b
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0x90(%rsp), %rsi
testq %rsi, %rsi
je 0x1199c74
leaq 0x90(%rsp), %rdi
jmp 0x1199c6f
cmpl $0x3, %r14d
jb 0x1198b3c
movq %rax, %rsi
imulq %rdx, %rsi
shrq $0x21, %rsi
movl %esi, %edi
shrl %edi
andl $0x15555555, %edi # imm = 0x15555555
subl %edi, %esi
movl $0x33333333, %edi # imm = 0x33333333
movl %esi, %r8d
andl %edi, %r8d
shrl $0x2, %esi
andl %edi, %esi
addl %r8d, %esi
movl %esi, %edi
shrl $0x4, %edi
addl %esi, %edi
andl $0xf0f0f0f, %edi # imm = 0xF0F0F0F
imull $0x1010101, %edi, %esi # imm = 0x1010101
shrl $0x18, %esi
cmpl $0x2, %esi
jb 0x11991fe
jmp 0x1198b3c
cmpl $0x5, %r14d
jb 0x1198b4f
movl $0xcccccccd, %esi # imm = 0xCCCCCCCD
imulq %rax, %rsi
shrq $0x22, %rsi
movl %esi, %edi
shrl %edi
andl $0x15555555, %edi # imm = 0x15555555
subl %edi, %esi
movl $0x33333333, %edi # imm = 0x33333333
movl %esi, %r8d
andl %edi, %r8d
shrl $0x2, %esi
andl %edi, %esi
addl %r8d, %esi
movl %esi, %edi
shrl $0x4, %edi
addl %esi, %edi
andl $0xf0f0f0f, %edi # imm = 0xF0F0F0F
imull $0x1010101, %edi, %esi # imm = 0x1010101
shrl $0x18, %esi
cmpl $0x2, %esi
jb 0x11991fe
jmp 0x1198b4f
movl $0x20, %r14d
movl $0x1f, %ebx
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0xb0(%rsp)
testq %rsi, %rsi
je 0x1198f7d
leaq 0xb0(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
subl %r14d, %ebx
movq 0xb0(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x1198fae
leaq 0xb0(%rsp), %r14
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %r14
movups %xmm0, 0x8(%r14)
movq $-0x653e0, %rcx # imm = 0xFFF9AC20
movq 0x30(%rsp), %rax
addq 0x8(%rax), %rcx
movq %r12, %rdi
movq %r13, %rsi
movq %r14, %rdx
movl %ebp, 0x24(%rsp)
movl %ebp, %r8d
callq 0x90f593
movq %rax, %r12
movq %rdx, %r15
xorl %ebp, %ebp
leaq 0x38(%rsp), %r13
movq %rbp, 0x8(%r13)
movl $0x0, (%r13)
movl 0x4(%rsp), %eax
movl %eax, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %r12, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movl %ebx, %eax
movl $0xfff00000, %ecx # imm = 0xFFF00000
andl (%r13), %ecx
incl %ecx
movl %ecx, (%r13)
movq %rbp, 0x8(%r13)
movq %rax, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movl 0x118(%rsp), %ebx
andl $0xffffff, %ebx # imm = 0xFFFFFF
orl %ebx, 0x2c(%r15)
movq (%r14), %rsi
testq %rsi, %rsi
je 0x1199063
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0xb0(%rsp), %rsi
testq %rsi, %rsi
je 0x119907d
leaq 0xb0(%rsp), %rdi
callq 0x2a758fc
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0xa8(%rsp)
testq %rsi, %rsi
movq 0x68(%rsp), %r15
movq 0x58(%rsp), %r12
je 0x11990ae
leaq 0xa8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq %rbx, 0x28(%rsp)
movq 0xa8(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x11990e1
leaq 0xa8(%rsp), %r14
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movq $-0x5d060, %rcx # imm = 0xFFFA2FA0
movq 0x30(%rsp), %rax
addq 0x8(%rax), %rcx
movq %r12, %rdi
movq %r15, %rsi
movl 0x4(%rsp), %r14d
movl %r14d, %r8d
callq 0x90f593
movq %rax, %r15
movq %rdx, %r12
xorl %ebx, %ebx
leaq 0x38(%rsp), %r13
movq %rbx, 0x8(%r13)
movl $0x4000000, %ebp # imm = 0x4000000
movl %ebp, (%r13)
movl 0x24(%rsp), %eax
movl %eax, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %r15, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movq %rbx, 0x8(%r13)
movl %ebp, (%r13)
movl %r14d, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r12, %rdi
movq %r15, %rsi
callq 0x1d3c22c
movq 0x28(%rsp), %rax
orl %eax, 0x2c(%r12)
leaq 0x8(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x119918d
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0xa8(%rsp), %rsi
testq %rsi, %rsi
je 0x1199c74
leaq 0xa8(%rsp), %rdi
jmp 0x1199c6f
cmpl $0x9, %r14d
jb 0x1198b62
imulq $0x38e38e39, %rax, %rsi # imm = 0x38E38E39
shrq $0x21, %rsi
movl %esi, %edi
shrl %edi
andl $0x5555555, %edi # imm = 0x5555555
subl %edi, %esi
movl $0x33333333, %edi # imm = 0x33333333
movl %esi, %r8d
andl %edi, %r8d
shrl $0x2, %esi
andl %edi, %esi
addl %r8d, %esi
movl %esi, %edi
shrl $0x4, %edi
addl %esi, %edi
andl $0xf0f0f0f, %edi # imm = 0xF0F0F0F
imull $0x1010101, %edi, %esi # imm = 0x1010101
shrl $0x18, %esi
cmpl $0x1, %esi
ja 0x1198b62
imull $0x38e38e39, %r14d, %ecx # imm = 0x38E38E39
cmpl $0x1c71c71c, %ecx # imm = 0x1C71C71C
jbe 0x119961e
imull $0xcccccccd, %r14d, %ecx # imm = 0xCCCCCCCD
cmpl $0x33333333, %ecx # imm = 0x33333333
jbe 0x119989e
imulq %rdx, %rax
shrq $0x21, %rax
testl %eax, %eax
je 0x11999ec
bsrl %eax, %r13d
xorl $0x1f, %r13d
jmp 0x11999f2
testl %r14d, %r14d
je 0x11998c0
movl %ebp, %eax
andl $0xffffff, %eax # imm = 0xFFFFFF
movl %eax, 0x64(%rsp)
xorl %ebx, %ebx
xorl %r15d, %r15d
xorl %ebp, %ebp
btl %r15d, %r14d
jae 0x1199606
testl %r15d, %r15d
movl %ebp, 0x24(%rsp)
je 0x11993b6
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0x80(%rsp)
testq %rsi, %rsi
je 0x119929a
movl $0x1, %edx
leaq 0x80(%rsp), %rdi
callq 0x2a757d8
movq 0x80(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x11992ca
leaq 0x80(%rsp), %rdi
leaq 0x8(%rsp), %rdx
callq 0x2a759cc
movq $0x0, 0x80(%rsp)
leaq 0x10(%rsp), %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movq 0x30(%rsp), %rax
movq 0x8(%rax), %rcx
movq $-0x653e0, %rax # imm = 0xFFF9AC20
addq %rax, %rcx
movq %r12, %rdi
movq %r13, %rsi
leaq 0x8(%rsp), %rdx
movl 0x4(%rsp), %ebp
movl %ebp, %r8d
callq 0x90f593
movq %rax, %r12
movq %rdx, %r13
xorl %r14d, %r14d
movq %r14, 0x40(%rsp)
movl $0x4000000, %ecx # imm = 0x4000000
movl %ecx, 0x38(%rsp)
movl %ebp, 0x3c(%rsp)
leaq 0x48(%rsp), %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movl %ecx, 0x38(%rsp)
movq %rdx, %rdi
movq %r12, %rsi
leaq 0x38(%rsp), %rbp
movq %rbp, %rdx
callq 0x1d3c22c
movl %r15d, %eax
subl %ebx, %eax
movl 0x38(%rsp), %ecx
movl $0xfff00000, %edx # imm = 0xFFF00000
andl %edx, %ecx
incl %ecx
movl %ecx, 0x38(%rsp)
movq %r14, 0x40(%rsp)
movq %rax, 0x48(%rsp)
movq %r13, %rdi
movq %r12, %rsi
movq %rbp, %rdx
callq 0x1d3c22c
movl 0x64(%rsp), %eax
orl %eax, 0x2c(%r13)
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x1199386
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0x80(%rsp), %rsi
testq %rsi, %rsi
movl 0x24(%rsp), %ebp
je 0x11993a4
leaq 0x80(%rsp), %rdi
callq 0x2a758fc
movq 0x68(%rsp), %r13
movq 0x58(%rsp), %r12
movl 0x110(%rsp), %r14d
leal 0x1(%r15), %ecx
movl %r14d, %eax
shrl %cl, %eax
testl %eax, %eax
je 0x11994f4
testl %ebp, %ebp
je 0x11994fc
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0x70(%rsp)
testq %rsi, %rsi
movq 0x30(%rsp), %rbx
je 0x11993f5
movl $0x1, %edx
leaq 0x70(%rsp), %rdi
callq 0x2a757d8
movq 0x70(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x119941c
leaq 0x70(%rsp), %rdi
leaq 0x8(%rsp), %rdx
callq 0x2a759cc
movq $0x0, 0x70(%rsp)
leaq 0x10(%rsp), %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movq 0x8(%rbx), %rcx
movq $-0x5d060, %rax # imm = 0xFFFA2FA0
addq %rax, %rcx
movq %r12, %rdi
movq %r13, %rsi
leaq 0x8(%rsp), %rdx
movl %ebp, %r8d
callq 0x90f593
movq %rax, %r12
movq %rdx, %r13
movl %ebp, %eax
xorl %ebp, %ebp
movq %rbp, 0x40(%rsp)
movl $0x4000000, %ecx # imm = 0x4000000
movl %ecx, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
leaq 0x48(%rsp), %rbx
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movl %ecx, 0x38(%rsp)
movq %rdx, %rdi
movq %r12, %rsi
leaq 0x38(%rsp), %rdx
callq 0x1d3c22c
movq %rbp, 0x40(%rsp)
movl %ebp, 0x38(%rsp)
movl 0x4(%rsp), %eax
movl %eax, 0x3c(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movl %ebp, 0x38(%rsp)
movq %r13, %rdi
movq %r12, %rsi
leaq 0x38(%rsp), %rdx
callq 0x1d3c22c
movl 0x64(%rsp), %eax
orl %eax, 0x2c(%r13)
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x11994ca
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0x70(%rsp), %rsi
testq %rsi, %rsi
je 0x11994de
leaq 0x70(%rsp), %rdi
callq 0x2a758fc
movl %r15d, %ebx
movq 0x68(%rsp), %r13
movq 0x58(%rsp), %r12
movl 0x24(%rsp), %ebp
jmp 0x1199606
movl %r15d, %ebx
jmp 0x1199606
movq 0xd0(%rsp), %rdi
leaq 0x4623275(%rip), %rsi # 0x57bc780
leaq 0x2dc1fa6(%rip), %rdx # 0x3f5b4b8
xorl %ecx, %ecx
callq 0x1d82fd6
movl %eax, %ebp
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0x78(%rsp)
testq %rsi, %rsi
movq 0x30(%rsp), %rbx
je 0x1199541
movl $0x1, %edx
leaq 0x78(%rsp), %rdi
callq 0x2a757d8
movq 0x78(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x1199568
leaq 0x78(%rsp), %rdi
leaq 0x8(%rsp), %rdx
callq 0x2a759cc
movq $0x0, 0x78(%rsp)
leaq 0x10(%rsp), %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movq 0x8(%rbx), %rcx
movq $-0x260, %rax # imm = 0xFDA0
addq %rax, %rcx
movq %r12, %rdi
movq %r13, %rsi
leaq 0x8(%rsp), %rdx
movl %ebp, %r8d
callq 0x90f593
movq %rdx, %r12
movq $0x0, 0x40(%rsp)
xorl %edx, %edx
movl %edx, 0x38(%rsp)
movl 0x4(%rsp), %ecx
movl %ecx, 0x3c(%rsp)
leaq 0x48(%rsp), %rcx
xorps %xmm0, %xmm0
movups %xmm0, (%rcx)
movl %edx, 0x38(%rsp)
movq %r12, %rdi
movq %rax, %rsi
leaq 0x38(%rsp), %rdx
callq 0x1d3c22c
movl 0x64(%rsp), %eax
orl %eax, 0x2c(%r12)
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0x11995ea
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0x78(%rsp), %rsi
testq %rsi, %rsi
je 0x11995fe
leaq 0x78(%rsp), %rdi
callq 0x2a758fc
movl %r15d, %ebx
movq 0x58(%rsp), %r12
incl %r15d
movl %r14d, %eax
movl %r15d, %ecx
shrl %cl, %eax
testl %eax, %eax
jne 0x119925c
jmp 0x11998c2
imulq $0x38e38e39, %rax, %rax # imm = 0x38E38E39
shrq $0x21, %rax
testl %eax, %eax
je 0x11999fb
bsrl %eax, %r13d
xorl $0x1f, %r13d
jmp 0x1199a01
movl $0x20, %r14d
movl $0x1f, %ebx
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0xa0(%rsp)
testq %rsi, %rsi
je 0x1199670
leaq 0xa0(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
subl %r14d, %ebx
movq 0xa0(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x11996a1
leaq 0xa0(%rsp), %r14
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %r14
movups %xmm0, 0x8(%r14)
movq $-0x653e0, %rcx # imm = 0xFFF9AC20
movq 0x30(%rsp), %rax
addq 0x8(%rax), %rcx
movq %r12, %rdi
movq %r13, %rsi
movq %r14, %rdx
movl %ebp, 0x24(%rsp)
movl %ebp, %r8d
callq 0x90f593
movq %rax, %r12
movq %rdx, %r15
xorl %ebp, %ebp
leaq 0x38(%rsp), %r13
movq %rbp, 0x8(%r13)
movl $0x0, (%r13)
movl 0x4(%rsp), %eax
movl %eax, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %r12, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movl %ebx, %eax
movl $0xfff00000, %ecx # imm = 0xFFF00000
andl (%r13), %ecx
incl %ecx
movl %ecx, (%r13)
movq %rbp, 0x8(%r13)
movq %rax, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r15, %rdi
movq %r12, %rsi
callq 0x1d3c22c
movl 0x118(%rsp), %ebx
andl $0xffffff, %ebx # imm = 0xFFFFFF
orl %ebx, 0x2c(%r15)
movq (%r14), %rsi
testq %rsi, %rsi
je 0x1199756
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0xa0(%rsp), %rsi
testq %rsi, %rsi
je 0x1199770
leaq 0xa0(%rsp), %rdi
callq 0x2a758fc
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0x98(%rsp)
testq %rsi, %rsi
movq 0x68(%rsp), %r15
movq 0x58(%rsp), %r12
je 0x11997a1
leaq 0x98(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq %rbx, 0x28(%rsp)
movq 0x98(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x11997d4
leaq 0x98(%rsp), %r14
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movq $-0x657e0, %rcx # imm = 0xFFF9A820
movq 0x30(%rsp), %rax
addq 0x8(%rax), %rcx
movq %r12, %rdi
movq %r15, %rsi
movl 0x4(%rsp), %r14d
movl %r14d, %r8d
callq 0x90f593
movq %rax, %r15
movq %rdx, %r12
xorl %ebx, %ebx
leaq 0x38(%rsp), %r13
movq %rbx, 0x8(%r13)
movl $0x4000000, %ebp # imm = 0x4000000
movl %ebp, (%r13)
movl 0x24(%rsp), %eax
movl %eax, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %r15, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movq %rbx, 0x8(%r13)
movl %ebp, (%r13)
movl %r14d, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r12, %rdi
movq %r15, %rsi
callq 0x1d3c22c
movq 0x28(%rsp), %rax
orl %eax, 0x2c(%r12)
leaq 0x8(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x1199880
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0x98(%rsp), %rsi
testq %rsi, %rsi
je 0x1199c74
leaq 0x98(%rsp), %rdi
jmp 0x1199c6f
movl $0xcccccccd, %ecx # imm = 0xCCCCCCCD
imulq %rax, %rcx
shrq $0x22, %rcx
testl %ecx, %ecx
je 0x1199a0a
bsrl %ecx, %r13d
xorl $0x1f, %r13d
jmp 0x1199a10
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0x88(%rsp)
testq %rsi, %rsi
je 0x11998e9
leaq 0x88(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movl %ebp, 0x24(%rsp)
movq 0x88(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
movq 0x30(%rsp), %rbx
je 0x1199920
leaq 0x88(%rsp), %r14
leaq 0x8(%rsp), %rdx
movq %r14, %rdi
callq 0x2a759cc
movq $0x0, (%r14)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movq $-0x5d060, %rcx # imm = 0xFFFA2FA0
addq 0x8(%rbx), %rcx
movq %r12, %rdi
movq %r13, %rsi
movl 0x4(%rsp), %ebx
movl %ebx, %r8d
callq 0x90f593
movq %rax, %r15
movq %rdx, %r12
xorl %ebp, %ebp
leaq 0x38(%rsp), %r13
movq %rbp, 0x8(%r13)
movl $0x4000000, %r14d # imm = 0x4000000
movl %r14d, (%r13)
movl %ebx, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %rax, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movq %rbp, 0x8(%r13)
movl %r14d, (%r13)
movl 0x24(%rsp), %eax
movl %eax, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r12, %rdi
movq %r15, %rsi
callq 0x1d3c22c
movl 0x118(%rsp), %eax
andl $0xffffff, %eax # imm = 0xFFFFFF
orl %eax, 0x2c(%r12)
leaq 0x8(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x11999ce
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0x88(%rsp), %rsi
testq %rsi, %rsi
je 0x1199c74
leaq 0x88(%rsp), %rdi
jmp 0x1199c6f
movl $0x20, %r13d
movq $-0x3287, %r14 # imm = 0xCD79
jmp 0x1199a17
movl $0x20, %r13d
movq $-0x328b, %r14 # imm = 0xCD75
jmp 0x1199a17
movl $0x20, %r13d
movq $-0x3289, %r14 # imm = 0xCD77
subl $0x1f, %r13d
je 0x1199b54
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0xc0(%rsp)
testq %rsi, %rsi
je 0x1199a48
leaq 0xc0(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
negl %r13d
movq 0xc0(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
je 0x1199a79
leaq 0xc0(%rsp), %r15
leaq 0x8(%rsp), %rdx
movq %r15, %rdi
callq 0x2a759cc
movq $0x0, (%r15)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movq $-0x653e0, %rcx # imm = 0xFFF9AC20
addq 0x8(%rbx), %rcx
movq %r12, %rdi
movq 0x68(%rsp), %rsi
movl 0x4(%rsp), %ebx
movl %ebx, %r8d
callq 0x90f593
movq %rax, %r15
movq %rdx, %rbp
xorl %eax, %eax
leaq 0x38(%rsp), %r12
movq %rax, 0x8(%r12)
movl $0x4000000, (%r12) # imm = 0x4000000
movl %ebx, 0x4(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r12)
movq %rdx, %rdi
movq %r15, %rsi
movq %r12, %rdx
callq 0x1d3c22c
movl %r13d, %eax
movl $0xfff00000, %ecx # imm = 0xFFF00000
andl (%r12), %ecx
incl %ecx
movl %ecx, (%r12)
xorl %ecx, %ecx
movq %rcx, 0x8(%r12)
movq %rax, 0x10(%r12)
leaq 0x38(%rsp), %rdx
movq %rbp, %rdi
movq %r15, %rsi
callq 0x1d3c22c
movl 0x118(%rsp), %eax
andl $0xffffff, %eax # imm = 0xFFFFFF
orl %eax, 0x2c(%rbp)
leaq 0x8(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x1199b2e
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0xc0(%rsp), %rsi
testq %rsi, %rsi
je 0x1199b48
leaq 0xc0(%rsp), %rdi
callq 0x2a758fc
movl 0x118(%rsp), %ebp
movq 0x58(%rsp), %r12
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq %rsi, 0xb8(%rsp)
testq %rsi, %rsi
je 0x1199b7b
leaq 0xb8(%rsp), %rdi
movl $0x1, %edx
callq 0x2a757d8
movq 0xb8(%rsp), %rsi
movq %rsi, 0x8(%rsp)
testq %rsi, %rsi
movq 0x68(%rsp), %rbx
je 0x1199bae
leaq 0xb8(%rsp), %r15
leaq 0x8(%rsp), %rdx
movq %r15, %rdi
callq 0x2a759cc
movq $0x0, (%r15)
xorps %xmm0, %xmm0
leaq 0x8(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
shlq $0x5, %r14
movq 0x30(%rsp), %rax
addq 0x8(%rax), %r14
movq %r12, %rdi
movq %rbx, %rsi
movq %r14, %rcx
movl 0x4(%rsp), %r15d
movl %r15d, %r8d
callq 0x90f593
movq %rax, %r14
movq %rdx, %r12
xorl %ebx, %ebx
leaq 0x38(%rsp), %r13
movq %rbx, 0x8(%r13)
movl $0x4000000, (%r13) # imm = 0x4000000
movl %r15d, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
movq %rdx, %rdi
movq %rax, %rsi
movq %r13, %rdx
callq 0x1d3c22c
movq %rbx, 0x8(%r13)
movl $0x0, (%r13)
movl %r15d, 0x4(%r13)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r13)
leaq 0x38(%rsp), %rdx
movq %r12, %rdi
movq %r14, %rsi
callq 0x1d3c22c
andl $0xffffff, %ebp # imm = 0xFFFFFF
orl %ebp, 0x2c(%r12)
leaq 0x8(%rsp), %rax
movq (%rax), %rsi
testq %rsi, %rsi
je 0x1199c5a
leaq 0x8(%rsp), %rdi
callq 0x2a758fc
movq 0xb8(%rsp), %rsi
testq %rsi, %rsi
je 0x1199c74
leaq 0xb8(%rsp), %rdi
callq 0x2a758fc
addq $0xd8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/Target/RISCV/RISCVInstrInfo.cpp
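Note: mulImm strength-reduces a multiply by a constant — a single shift for powers of two, a Zba SLLI+SHxADD pair when Amount factors as 2^k times 3, 5, or 9, shift-plus-ADD/SUB for 2^k±1, a real MUL when Zmmul is available, and a generic shift-add loop otherwise. The sketch below (requires C++20 <bit>) replays that case analysis on the host and prints the sequence each Amount maps to; the textual mnemonics mirror the BuildMI calls in the source but are printed for illustration only, not generated machine code.

#include <bit>
#include <cstdint>
#include <cstdio>

// Prints the SLLI (if nonzero) plus SHxADD pair used when Amount = 2^Sh * Div.
static void printZba(uint32_t Amount, uint32_t Div, unsigned X) {
  unsigned Sh = std::countr_zero(Amount / Div);
  if (Sh)
    std::printf("%u: SLLI x, %u; SH%uADD x, x, x\n", Amount, Sh, X);
  else
    std::printf("%u: SH%uADD x, x, x\n", Amount, X);
}

static void describeMul(uint32_t Amount, bool HasZba) {
  if (std::has_single_bit(Amount))
    std::printf("%u: SLLI x, %d\n", Amount, std::countr_zero(Amount));
  else if (HasZba && Amount % 9 == 0 && std::has_single_bit(Amount / 9))
    printZba(Amount, 9, 3);
  else if (HasZba && Amount % 5 == 0 && std::has_single_bit(Amount / 5))
    printZba(Amount, 5, 2);
  else if (HasZba && Amount % 3 == 0 && std::has_single_bit(Amount / 3))
    printZba(Amount, 3, 1);
  else if (std::has_single_bit(Amount - 1))
    std::printf("%u: SLLI t, %d; ADD x, t, x\n", Amount,
                std::countr_zero(Amount - 1));
  else if (std::has_single_bit(Amount + 1))
    std::printf("%u: SLLI t, %d; SUB x, t, x\n", Amount,
                std::countr_zero(Amount + 1));
  else
    std::printf("%u: MUL (Zmmul) or shift-add loop\n", Amount);
}

int main() {
  for (uint32_t A : {8u, 9u, 40u, 72u, 17u, 31u, 100u})
    describeMul(A, /*HasZba=*/true);
}

For example, 72 = 8 * 9 prints "SLLI x, 3; SH3ADD x, x, x", while 31 = 32 - 1 prints "SLLI t, 5; SUB x, t, x".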
|
llvm::RISCVELFTargetObjectFile::SelectSectionForGlobal(llvm::GlobalObject const*, llvm::SectionKind, llvm::TargetMachine const&) const
|
MCSection *RISCVELFTargetObjectFile::SelectSectionForGlobal(
const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
// Handle Small Section classification here.
if (Kind.isBSS() && isGlobalInSmallSection(GO, TM))
return SmallBSSSection;
if (Kind.isData() && isGlobalInSmallSection(GO, TM))
return SmallDataSection;
// Otherwise, we work the same as ELF.
return TargetLoweringObjectFileELF::SelectSectionForGlobal(GO, Kind, TM);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %rbx
movl %edx, %r14d
movq %rsi, %r12
movq %rdi, %r15
leal -0xf(%r14), %eax
cmpb $0x2, %al
ja 0x11a5640
movq %r15, %rdi
movq %r12, %rsi
callq 0x11a5510
testb %al, %al
je 0x11a5640
movq 0x498(%r15), %rax
jmp 0x11a565c
cmpb $0x13, %r14b
jne 0x11a5668
movq %r15, %rdi
movq %r12, %rsi
callq 0x11a5510
testb %al, %al
je 0x11a5668
movq 0x468(%r15), %rax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movq %r15, %rdi
movq %r12, %rsi
movl %r14d, %edx
movq %rbx, %rcx
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
jmp 0x1e727f4
|
/Target/RISCV/RISCVTargetObjectFile.cpp
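Note: the selector only special-cases small zero-initialized and small initialized globals and defers everything else to the generic ELF logic. A minimal sketch of the same dispatch follows; the Kind enum, the section names, and the 8-byte smallness threshold are placeholder assumptions for illustration — the real cutoff comes from isGlobalInSmallSection.

#include <cstdio>

enum class Kind { BSS, Data, Text };

struct Section { const char *Name; };
static Section SmallBSS{".sbss"}, SmallData{".sdata"},
               RegularBSS{".bss"}, RegularData{".data"}, TextSec{".text"};

// Placeholder for isGlobalInSmallSection: a size threshold, assumed 8 bytes.
static bool isSmall(unsigned SizeInBytes) { return SizeInBytes <= 8; }

static const Section &selectSection(Kind K, unsigned Size) {
  if (K == Kind::BSS && isSmall(Size))  return SmallBSS;  // zero-init, small
  if (K == Kind::Data && isSmall(Size)) return SmallData; // initialized, small
  switch (K) {                                            // generic ELF path
  case Kind::BSS:  return RegularBSS;
  case Kind::Data: return RegularData;
  default:         return TextSec;
  }
}

int main() {
  std::printf("%s %s %s\n", selectSection(Kind::BSS, 4).Name,
              selectSection(Kind::Data, 64).Name,
              selectSection(Kind::Text, 0).Name);
}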
|
llvm::APInt::isShiftedMask() const
|
bool isShiftedMask() const {
if (isSingleWord())
return isShiftedMask_64(U.VAL);
unsigned Ones = countPopulationSlowCase();
unsigned LeadZ = countLeadingZerosSlowCase();
return (Ones + LeadZ + countr_zero()) == BitWidth;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
movl 0x8(%rdi), %r15d
cmpl $0x40, %r15d
ja 0x11b6cee
movq (%rbx), %rax
testq %rax, %rax
je 0x11b6d22
leaq -0x1(%rax), %rcx
orq %rax, %rcx
leaq 0x1(%rcx), %rax
testq %rcx, %rax
jmp 0x11b6d14
movq %rbx, %rdi
callq 0x2b12a20
movl %eax, %ebp
movq %rbx, %rdi
callq 0x2b1284c
movl %eax, %r14d
addl %ebp, %r14d
movq %rbx, %rdi
callq 0x2b1292e
addl %r14d, %eax
cmpl %r15d, %eax
sete %al
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
xorl %eax, %eax
jmp 0x11b6d17
|
/llvm/ADT/APInt.h
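Note: for single-word values the check is branch-free bit-twiddling — ORing V with V-1 smears ones through everything below the lowest set bit, and the result has the form 2^k - 1 exactly when the original ones were contiguous. The multi-word path instead verifies that popcount plus leading plus trailing zeros covers the whole bit width. A standalone sketch of the single-word trick:

#include <cassert>
#include <cstdint>

// Returns true iff V contains exactly one contiguous run of ones.
static bool isShiftedMask64(uint64_t V) {
  if (V == 0)
    return false;                        // the all-zero value is not a mask
  uint64_t Smeared = (V - 1) | V;        // fill zeros below the lowest set bit
  return ((Smeared + 1) & Smeared) == 0; // true iff Smeared == 2^k - 1
}

int main() {
  assert(isShiftedMask64(0b0111000));    // contiguous run of ones
  assert(isShiftedMask64(1));            // a run of length one
  assert(!isShiftedMask64(0));
  assert(!isShiftedMask64(0b0101100));   // hole inside the run
}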
|
(anonymous namespace)::RISCVInstructionSelector::renderImm(llvm::MachineInstrBuilder&, llvm::MachineInstr const&, int) const
|
const MachineOperand& getOperand(unsigned i) const {
assert(i < getNumOperands() && "getOperand() out of range!");
return Operands[i];
}
|
movq 0x20(%rdx), %rax
movq 0x30(%rax), %rcx
movl 0x20(%rcx), %eax
cmpl $0x40, %eax
ja 0x11b7b0e
movq 0x18(%rcx), %rdx
movl %eax, %ecx
negb %cl
shlq %cl, %rdx
sarq %cl, %rdx
xorl %ecx, %ecx
testl %eax, %eax
cmovneq %rdx, %rcx
jmp 0x11b7b15
movq 0x18(%rcx), %rax
movq (%rax), %rcx
subq $0x28, %rsp
movq (%rsi), %rax
movq 0x8(%rsi), %rdi
movl $0xfff00000, %esi # imm = 0xFFF00000
leaq 0x8(%rsp), %rdx
andl (%rdx), %esi
incl %esi
movl %esi, (%rdx)
movq $0x0, 0x8(%rdx)
movq %rcx, 0x10(%rdx)
movq %rax, %rsi
callq 0x1d3c22c
addq $0x28, %rsp
retq
nop
|
/llvm/CodeGen/MachineInstr.h
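Note: the interesting part of this assembly is the immediate extraction. For an APInt of width B <= 64 it left-shifts the payload by 64 - B and arithmetic-shifts it back down, which sign-extends the B-bit value; the negb %cl works because x86 shift counts are masked to 6 bits, so -B & 63 == 64 - B. A small sketch of that idiom, assuming 1 <= B <= 64 (the width-0 case, which the cmov above handles by producing 0, is asserted away here):

#include <cassert>
#include <cstdint>

// Sign-extend the low B bits of X to a full 64-bit signed value.
static int64_t signExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && B <= 64);
  // Arithmetic right shift of a negative value is well-defined since C++20.
  return static_cast<int64_t>(X << (64 - B)) >> (64 - B);
}

int main() {
  assert(signExtend64(0xFFF, 12) == -1);    // all-ones 12-bit value
  assert(signExtend64(0x800, 12) == -2048); // most negative 12-bit value
  assert(signExtend64(0x7FF, 12) == 2047);  // most positive 12-bit value
}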
|