#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>
#define AlignSize(x, y) (((x) + (y) - 1) & ~((y) - 1))
#define TWO_MB (2 * 1024 * 1024)
#define SIXTY_FOUR_KB (64 * 1024)
#define FOUR_KB (4 * 1024)
#define CUDA_CHECK(status) \
do { \
cudaError_t cuda_check_err_ = (status); \
if (cuda_check_err_ != cudaSuccess) \
{ \
printf("%s:%d CudaError: %s\n", __FILE__, __LINE__, cudaGetErrorString(cuda_check_err_)); \
assert(0); \
} \
} while (0)
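// Quick compile-time sanity check of the helper macros above (an illustrative
// addition, not part of the original benchmark): AlignSize rounds a byte count
// up to the next multiple of a power-of-two page size and leaves already
// aligned sizes unchanged.
static_assert(AlignSize(5000, 4096) == 8192, "5000 B rounds up to two 4 KB pages");
static_assert(AlignSize(TWO_MB, TWO_MB) == TWO_MB, "aligned sizes are unchanged");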
enum KernelOp {
READ,
};
enum UVMBehavior {
PAGE_FAULT,
ZERO_COPY,
PREFETCH_ONCE_AND_HINTS,
STRIPE_GPU_CPU,
};
enum MemoryAccess {
STREAMING,
BLOCK_STREAMING,
RANDOM_WARP // random page per warp, coalesced within warp
};
template <typename T> __device__ T myrand(T i);
// glibc LCG constants - taken from public domain
// x_n+1 = (a*x_n + c) mod m
// a = 1103515245
// m = 2^31
// c = 12345
template<>
__device__
__forceinline__ uint64_t myrand(uint64_t x)
{
uint64_t a = 1103515245;
uint64_t m = (uint64_t)0x1 << 31;
uint64_t c = 12345;
return ((a * x + c) % m);
}
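// Worked example of a single step of the generator above:
// myrand(1) = (1103515245 * 1 + 12345) mod 2^31 = 1103527590.
// The random-access kernel below seeds the sequence with the global thread
// index and advances it once per loop iteration.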
template<typename data_type>
__global__ void read_thread(data_type *ptr, const size_t size)
{
size_t n = size / sizeof(data_type);
data_type accum = 0;
for(size_t tid = threadIdx.x + blockIdx.x * blockDim.x; tid < n;
tid += blockDim.x * gridDim.x)
accum += ptr[tid];
if (threadIdx.x == 0)
ptr[0] = accum;
}
// lock-step block-sync version - yields better performance
template<typename data_type>
__global__ void read_thread_blocksync(data_type *ptr, const size_t size)
{
size_t n = size / sizeof(data_type);
data_type accum = 0;
size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
while (1) {
if ((tid - threadIdx.x) > n) {
break;
}
if (tid < n)
accum += ptr[tid];
tid += (blockDim.x * gridDim.x);
__syncthreads();
}
if (threadIdx.x == 0)
ptr[0] = accum;
}
template<typename data_type>
__global__ void read_thread_blockCont(data_type *ptr, const size_t size)
{
size_t n = size / sizeof(data_type);
data_type accum = 0;
size_t elements_per_block = ((n + (gridDim.x - 1)) / gridDim.x) + 1;
size_t startIdx = elements_per_block * blockIdx.x;
for (size_t rid = threadIdx.x; rid < elements_per_block; rid += blockDim.x) {
if ((rid + startIdx) < n)
accum += ptr[rid + startIdx];
}
if (threadIdx.x == 0)
ptr[0] = accum;
}
// lock-step block-sync version - yields better performance
template<typename data_type>
__global__ void read_thread_blockCont_blocksync(data_type *ptr, const size_t size)
{
size_t n = size / sizeof(data_type);
data_type accum = 0;
size_t elements_per_block = ((n + (gridDim.x - 1)) / gridDim.x) + 1;
size_t startIdx = elements_per_block * blockIdx.x;
size_t rid = threadIdx.x + startIdx;
while (1) {
if ((rid - threadIdx.x - startIdx) > elements_per_block) {
break;
}
if (rid < n) {
accum += ptr[rid];
}
rid += blockDim.x;
__syncthreads();
}
if (threadIdx.x == 0)
ptr[0] = accum;
}
template<typename data_type>
__global__ void cta_random_warp_streaming_read(data_type *ptr, const size_t size, size_t num_pages,
size_t page_size)
{
size_t n = size / sizeof(data_type);
int loop_count = n / (blockDim.x * gridDim.x);
size_t dtype_per_page = page_size / sizeof(data_type);
size_t lane0_idx_mod = dtype_per_page - warpSize; // so that the warp doesn't overshoot the page boundary
int lane_id = threadIdx.x & 31;
uint64_t idx = threadIdx.x + blockIdx.x * blockDim.x;
data_type accum = 0;
uint64_t nRandom = myrand(idx); // seed
for (int i = 0; i < loop_count; i++) {
nRandom = myrand(nRandom);
uint64_t page_number = nRandom % num_pages;
// warp lane 0 broadcast page number to all other warp lanes
page_number = __shfl_sync(0xffffffff, page_number, 0);
// coalesced 128-byte access within the page - not aligned,
// so it may touch two cache lines instead of one
uint64_t page_idx = nRandom % lane0_idx_mod;
page_idx = __shfl_sync(0xffffffff, page_idx, 0);
page_idx += lane_id;
accum += ptr[page_number * dtype_per_page + page_idx];
idx += blockDim.x * gridDim.x;
}
if (threadIdx.x == 0)
ptr[0] = accum;
}
typedef struct {
std::string header_string = "";
size_t page_size;
float oversubscription_factor;
int loop_count;
int block_size;
KernelOp k_op;
UVMBehavior uvm_behavior;
MemoryAccess memory_access;
} cmdline_params;
cmdline_params parse_arguments(int argc, char *argv[]) {
KernelOp k_op = READ;
UVMBehavior uvm_behavior = PAGE_FAULT;
MemoryAccess memory_access = STREAMING;
float oversubscription_factor = 1.0f; // 1.0 - 100%
size_t page_size = TWO_MB;
int loop_count = 3;
int block_size = 128;
std::string header_string = "";
int cur_pos = 1;
while (cur_pos < argc) {
std::string flag = argv[cur_pos++];
if (flag == "-m") {
// uvm mode
// page_fault (default), prefetch_once_and_hints, stripe_gpu_cpu, zero_copy
std::string flag_val = argv[cur_pos++];
if (flag_val == "prefetch_once_and_hints")
uvm_behavior = PREFETCH_ONCE_AND_HINTS;
else if (flag_val == "stripe_gpu_cpu")
uvm_behavior = STRIPE_GPU_CPU;
else if (flag_val == "zero_copy")
uvm_behavior = ZERO_COPY;
else
uvm_behavior = PAGE_FAULT;
}
else if (flag == "-a") {
// memory access pattern
std::string flag_val = argv[cur_pos++];
if (flag_val == "streaming")
memory_access = STREAMING;
else if (flag_val == "block_streaming")
memory_access = BLOCK_STREAMING;
else if (flag_val == "random_warp")
memory_access = RANDOM_WARP;
}
else if (flag == "-o") {
std::string flag_val = argv[cur_pos++];
if (flag_val == "read")
k_op = READ;
}
else if (flag == "-p") {
oversubscription_factor = (float)std::atof(argv[cur_pos++]);
}
else if (flag == "-s") {
std::string flag_val = argv[cur_pos++];
if (flag_val == "2M")
page_size = TWO_MB;
else if (flag_val == "64K")
page_size = SIXTY_FOUR_KB;
else if (flag_val == "4K")
page_size = FOUR_KB;
else {
printf("Set valid page size: 2M/64K/4K\n");
exit(-1);
}
}
else if (flag == "-lc") {
loop_count = std::atoi(argv[cur_pos++]);
}
else if (flag == "-blocksize") {
block_size = std::atoi(argv[cur_pos++]);
}
}
// log string
header_string += "Read,";
std::string mode_str;
if (uvm_behavior == PAGE_FAULT)
mode_str = "Page_Fault,";
else if (uvm_behavior == ZERO_COPY)
mode_str = "Zero_copy,";
else if (uvm_behavior == STRIPE_GPU_CPU)
mode_str = "stripe_gpu_cpu,";
else if (uvm_behavior == PREFETCH_ONCE_AND_HINTS)
mode_str = "prefetch_once_and_hints,";
header_string += mode_str;
std::string access_str;
if (memory_access == STREAMING)
access_str = "streaming,";
else if (memory_access == BLOCK_STREAMING)
access_str = "block_streaming,";
else if (memory_access == RANDOM_WARP)
access_str = "random_warp,";
header_string += access_str;
header_string += std::to_string(oversubscription_factor);
header_string += ",";
if (page_size == TWO_MB)
header_string += "2MB,";
else if (page_size == SIXTY_FOUR_KB)
header_string += "64KB,";
else if (page_size == FOUR_KB)
header_string += "4KB,";
header_string += "blocksize=";
header_string += std::to_string(block_size);
header_string += ",";
header_string += "loop_count=";
header_string += std::to_string(loop_count);
cmdline_params args;
args.header_string = header_string;
args.page_size = page_size;
args.oversubscription_factor = oversubscription_factor;
args.loop_count = loop_count;
args.block_size = block_size;
args.k_op = k_op;
args.uvm_behavior = uvm_behavior;
args.memory_access = memory_access;
return args;
}
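// Example invocation (hypothetical binary name) combining the flags parsed
// above: -m UVM mode, -a access pattern, -o kernel operation, -p
// oversubscription factor, -s page size used for padding/striping, -lc number
// of timed iterations, -blocksize CUDA block size.
//   ./uvm_read_bench -m stripe_gpu_cpu -a block_streaming -o read -p 1.5 -s 2M -lc 5 -blocksize 256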
void setup_memory_allocation(cmdline_params params, size_t vidmem_size, bool is_P9, void** blockMemory, void **uvm_alloc_ptr, size_t& allocation_size) {
// determine cudaMallocManaged size
int current_device = 0;
CUDA_CHECK(cudaSetDevice(current_device));
allocation_size = (size_t)(params.oversubscription_factor * vidmem_size);
if (params.memory_access == RANDOM_WARP) {
// reduce test working memory
// cudaMalloc 2/3 GPU
size_t cudaMallocSize = AlignSize(size_t(vidmem_size * 0.67), params.page_size);
allocation_size = AlignSize(size_t(vidmem_size * 0.33 * params.oversubscription_factor),
params.page_size);
CUDA_CHECK(cudaMalloc(blockMemory, cudaMallocSize));
}
// pad allocation to page_size
allocation_size = AlignSize(allocation_size, params.page_size);
// For P9 we need to allocate and free inside the benchmark loop, because
// evicted memory keeps remote mappings and therefore doesn't trigger a page fault
if (!(is_P9 && params.uvm_behavior == PAGE_FAULT)) {
CUDA_CHECK(cudaMallocManaged(uvm_alloc_ptr, allocation_size));
// populate pages on GPU
CUDA_CHECK(cudaMemPrefetchAsync(*uvm_alloc_ptr, allocation_size, current_device));
}
}
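// Illustrative sizing (assumed numbers): with 16 GiB of vidmem, an
// oversubscription factor of 1.5 and 2 MiB pages, the managed allocation is
// 24 GiB. In RANDOM_WARP mode roughly 10.7 GiB (2/3 of vidmem) is reserved via
// cudaMalloc and the managed allocation shrinks to about 7.9 GiB
// (1/3 * vidmem * 1.5); both sizes are padded up to a multiple of the page size.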
dim3 get_grid_config(cmdline_params params, int multiProcessorCount) {
dim3 grid(1,1,1);
int num_blocks_per_sm = 1; // placeholder value
if (params.k_op == READ) {
if (params.memory_access == STREAMING) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm,
read_thread_blocksync<float>, params.block_size, 0);
grid.x = multiProcessorCount * num_blocks_per_sm;
}
else if (params.memory_access == BLOCK_STREAMING) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm,
read_thread_blockCont_blocksync<float>, params.block_size, 0);
grid.x = multiProcessorCount * num_blocks_per_sm;
}
else if (params.memory_access == RANDOM_WARP) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm,
cta_random_warp_streaming_read<float>, params.block_size, 0);
grid.x = multiProcessorCount * num_blocks_per_sm;
}
}
return grid;
}
int main(int argc, char *argv[]) {
cmdline_params args = parse_arguments(argc, argv);
int current_device = 0;
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceProperties(&prop, current_device));
bool is_P9 = (prop.pageableMemoryAccessUsesHostPageTables == 1);
void *blockMemory = nullptr;
void *uvm_alloc_ptr = nullptr;
size_t allocation_size = 0;
size_t avail_phy_vidmem = 0, total_phy_vidmem = 0;
CUDA_CHECK(cudaMemGetInfo(&avail_phy_vidmem, &total_phy_vidmem));
setup_memory_allocation(args, prop.totalGlobalMem, is_P9, &blockMemory, &uvm_alloc_ptr, allocation_size);
size_t num_pages = allocation_size / args.page_size;
if (args.memory_access == RANDOM_WARP)
CUDA_CHECK(cudaMemGetInfo(&avail_phy_vidmem, &total_phy_vidmem));
// P9 needs more state space in vidmem - size in MB
size_t state_space_size = (prop.pageableMemoryAccessUsesHostPageTables == 1) ? 320 : 128;
size_t permissible_phys_pages_count = avail_phy_vidmem / args.page_size;
permissible_phys_pages_count -= (state_space_size * 1024 * 1024 / args.page_size);
dim3 block(args.block_size,1,1);
dim3 grid = get_grid_config(args, prop.multiProcessorCount);
cudaStream_t task_stream;
CUDA_CHECK(cudaStreamCreate(&task_stream));
cudaEvent_t startE, stopE;
CUDA_CHECK(cudaEventCreate(&startE));
CUDA_CHECK(cudaEventCreate(&stopE));
float kernel_time = 0.0f;
float accum_kernel_time = 0.0f;
float accum_bw = 0.0f;
for (int itr = 0; itr < args.loop_count; itr++) {
// on P9, memory is allocated inside the loop for fault-based runs
// to avoid access-counter-initiated mappings
if (is_P9 && args.uvm_behavior == PAGE_FAULT) {
CUDA_CHECK(cudaMallocManaged(&uvm_alloc_ptr, allocation_size));
}
// prefetch to CPU as starting point
if (args.uvm_behavior != PREFETCH_ONCE_AND_HINTS)
CUDA_CHECK(cudaMemPrefetchAsync(uvm_alloc_ptr, allocation_size, cudaCpuDeviceId,
task_stream));
switch(args.uvm_behavior) {
case STRIPE_GPU_CPU:
{
// distribute pages across GPU0 and CPU
// get page-split ratios
float cpu_factor = args.oversubscription_factor - 1.0;
if (cpu_factor < 0.0f)
cpu_factor = 0.0f;
int mod_zero_devId = cudaCpuDeviceId;
int flip_devId = current_device;
int mod_scale = num_pages;
if (cpu_factor > 1.0) {
mod_zero_devId = current_device;
flip_devId = cudaCpuDeviceId;
mod_scale = int(std::round(args.oversubscription_factor));
}
else if (cpu_factor != 0.0f) {
mod_scale = int(std::round(args.oversubscription_factor / cpu_factor));
}
int gpu_page_count = 0, cpu_page_count = 0;
void *running_ptr = uvm_alloc_ptr;
for (int i = 0; i < num_pages; i++) {
int device = flip_devId;
if ((i % mod_scale) == 0 && i != 0)
device = mod_zero_devId;
if (gpu_page_count == permissible_phys_pages_count)
device = cudaCpuDeviceId;
CUDA_CHECK(cudaMemPrefetchAsync(running_ptr, args.page_size, device, task_stream));
if (device == cudaCpuDeviceId)
cpu_page_count++;
else
gpu_page_count++;
if (itr == 0) {
CUDA_CHECK(cudaMemAdvise(running_ptr, args.page_size, cudaMemAdviseSetPreferredLocation,
device));
if (device == cudaCpuDeviceId)
CUDA_CHECK(cudaMemAdvise(running_ptr, args.page_size, cudaMemAdviseSetAccessedBy,
current_device));
}
running_ptr = reinterpret_cast<void*>((size_t)running_ptr + args.page_size);
}
}
break;
case PREFETCH_ONCE_AND_HINTS:
{
// under oversubscription this will spill back to sysmem
if (itr == 0) {
CUDA_CHECK(cudaMemAdvise(uvm_alloc_ptr, allocation_size, cudaMemAdviseSetAccessedBy,
current_device));
CUDA_CHECK(cudaMemPrefetchAsync(uvm_alloc_ptr, allocation_size, current_device,
task_stream));
}
}
break;
case ZERO_COPY:
{
if (itr == 0) {
CUDA_CHECK(cudaMemAdvise(uvm_alloc_ptr, allocation_size,
cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));
CUDA_CHECK(cudaMemAdvise(uvm_alloc_ptr, allocation_size,
cudaMemAdviseSetAccessedBy, current_device));
}
}
break;
default:
break;
}
//CUDA_CHECK(cudaDeviceSynchronize());
// timer start
CUDA_CHECK(cudaEventRecord(startE, task_stream));
// run read/write kernel for streaming/random access
if (args.k_op == READ) {
if (args.memory_access == STREAMING) {
read_thread_blocksync<float><<<grid, block, 0, task_stream>>>((float*)uvm_alloc_ptr,
allocation_size);
}
else if (args.memory_access == BLOCK_STREAMING) {
read_thread_blockCont_blocksync<float><<<grid, block, 0, task_stream>>>((float*)uvm_alloc_ptr,
allocation_size);
}
else if (args.memory_access == RANDOM_WARP) {
cta_random_warp_streaming_read<float><<<grid, block, 0, task_stream>>>(
(float*)uvm_alloc_ptr, allocation_size,
num_pages, args.page_size);
}
}
// timer stop
CUDA_CHECK(cudaEventRecord(stopE, task_stream));
CUDA_CHECK(cudaEventSynchronize(stopE));
CUDA_CHECK(cudaEventElapsedTime(&kernel_time, startE, stopE));
accum_kernel_time += kernel_time;
float bw_meas = allocation_size / (1024.0f * 1024.0f * 1024.0f) / (kernel_time / 1000.0f );
accum_bw += bw_meas;
if (is_P9 && args.uvm_behavior == PAGE_FAULT) {
CUDA_CHECK(cudaFree(uvm_alloc_ptr));
}
}
CUDA_CHECK(cudaEventDestroy(startE));
CUDA_CHECK(cudaEventDestroy(stopE));
CUDA_CHECK(cudaStreamDestroy(task_stream));
// report averages over the timed iterations: kernel time and measured bandwidth
printf("%s, %f ms, %f GB/s\n", args.header_string.c_str(), accum_kernel_time / args.loop_count, accum_bw / args.loop_count);
if (!(is_P9 && args.uvm_behavior == PAGE_FAULT)) {
CUDA_CHECK(cudaFree(uvm_alloc_ptr));
}
if (blockMemory)
CUDA_CHECK(cudaFree(blockMemory));
}
// ============================ next source file ============================
#include "bondsKernelsGpu.h"
__device__ int monthLengthKernelGpu(int month, bool leapYear)
{
int MonthLength[12];
MonthLength[0]=31;
MonthLength[1]=28;
MonthLength[2]=31;
MonthLength[3]=30;
MonthLength[4]=31;
MonthLength[5]=30;
MonthLength[6]=31;
MonthLength[7]=31;
MonthLength[8]=30;
MonthLength[9]=31;
MonthLength[10]=30;
MonthLength[11]=31;
int MonthLeapLength[12];
MonthLeapLength[0]=31;
MonthLeapLength[1]=29;
MonthLeapLength[2]=31;
MonthLeapLength[3]=30;
MonthLeapLength[4]=31;
MonthLeapLength[5]=30;
MonthLeapLength[6]=31;
MonthLeapLength[7]=31;
MonthLeapLength[8]=30;
MonthLeapLength[9]=31;
MonthLeapLength[10]=30;
MonthLeapLength[11]=31;
return (leapYear? MonthLeapLength[month-1] : MonthLength[month-1]);
}
__device__ int monthOffsetKernelGpu(int m, bool leapYear)
{
int MonthOffset[13];
MonthOffset[0]=0;
MonthOffset[1]=31;
MonthOffset[2]=59;
MonthOffset[3]=90;
MonthOffset[4]=120;
MonthOffset[5]=151;
MonthOffset[6]=181;
MonthOffset[7]=212;
MonthOffset[8]=243;
MonthOffset[9]=273;
MonthOffset[10]=304;
MonthOffset[11]=334;
MonthOffset[12]=365;
int MonthLeapOffset[13];
MonthLeapOffset[0]=0;
MonthLeapOffset[1]=31;
MonthLeapOffset[2]=60;
MonthLeapOffset[3]=91;
MonthLeapOffset[4]=121;
MonthLeapOffset[5]=152;
MonthLeapOffset[6]=182;
MonthLeapOffset[7]=213;
MonthLeapOffset[8]=244;
MonthLeapOffset[9]=274;
MonthLeapOffset[10]=305;
MonthLeapOffset[11]=335;
MonthLeapOffset[12]=366;
return (leapYear? MonthLeapOffset[m-1] : MonthOffset[m-1]);
}
__device__ int yearOffsetKernelGpu(int y)
{
int YearOffset[121];
YearOffset[0] = 0;
YearOffset[1] = 366;
YearOffset[2] = 731;
YearOffset[3] = 1096;
YearOffset[4] = 1461;
YearOffset[5] = 1827;
YearOffset[6] = 2192;
YearOffset[7] = 2557;
YearOffset[8] = 2922;
YearOffset[9] = 3288;
YearOffset[10] = 3653;
YearOffset[11] = 4018;
YearOffset[12] = 4383;
YearOffset[13] = 4749;
YearOffset[14] = 5114;
YearOffset[15] = 5479;
YearOffset[16] = 5844;
YearOffset[17] = 6210;
YearOffset[18] = 6575;
YearOffset[19] = 6940;
YearOffset[20] = 7305;
YearOffset[21] = 7671;
YearOffset[22] = 8036;
YearOffset[23] = 8401;
YearOffset[24] = 8766;
YearOffset[25] = 9132;
YearOffset[26] = 9497;
YearOffset[27] = 9862;
YearOffset[28] = 10227;
YearOffset[29] = 10593;
YearOffset[30] = 10958;
YearOffset[31] = 11323;
YearOffset[32] = 11688;
YearOffset[33] = 12054;
YearOffset[34] = 12419;
YearOffset[35] = 12784;
YearOffset[36] = 13149;
YearOffset[37] = 13515;
YearOffset[38] = 13880;
YearOffset[39] = 14245;
YearOffset[40] = 14610;
YearOffset[41] = 14976;
YearOffset[42] = 15341;
YearOffset[43] = 15706;
YearOffset[44] = 16071;
YearOffset[45] = 16437;
YearOffset[46] = 16802;
YearOffset[47] = 17167;
YearOffset[48] = 17532;
YearOffset[49] = 17898;
YearOffset[50] = 18263;
YearOffset[51] = 18628;
YearOffset[52] = 18993;
YearOffset[53] = 19359;
YearOffset[54] = 19724;
YearOffset[55] = 20089;
YearOffset[56] = 20454;
YearOffset[57] = 20820;
YearOffset[58] = 21185;
YearOffset[59] = 21550;
YearOffset[60] = 21915;
YearOffset[61] = 22281;
YearOffset[62] = 22646;
YearOffset[63] = 23011;
YearOffset[64] = 23376;
YearOffset[65] = 23742;
YearOffset[66] = 24107;
YearOffset[67] = 24472;
YearOffset[68] = 24837;
YearOffset[69] = 25203;
YearOffset[70] = 25568;
YearOffset[71] = 25933;
YearOffset[72] = 26298;
YearOffset[73] = 26664;
YearOffset[74] = 27029;
YearOffset[75] = 27394;
YearOffset[76] = 27759;
YearOffset[77] = 28125;
YearOffset[78] = 28490;
YearOffset[79] = 28855;
YearOffset[80] = 29220;
YearOffset[81] = 29586;
YearOffset[82] = 29951;
YearOffset[83] = 30316;
YearOffset[84] = 30681;
YearOffset[85] = 31047;
YearOffset[86] = 31412;
YearOffset[87] = 31777;
YearOffset[88] = 32142;
YearOffset[89] = 32508;
YearOffset[90] = 32873;
YearOffset[91] = 33238;
YearOffset[92] = 33603;
YearOffset[93] = 33969;
YearOffset[94] = 34334;
YearOffset[95] = 34699;
YearOffset[96] = 35064;
YearOffset[97] = 35430;
YearOffset[98] = 35795;
YearOffset[99] = 36160;
YearOffset[100] = 36525;
YearOffset[101] = 36891;
YearOffset[102] = 37256;
YearOffset[103] = 37621;
YearOffset[104] = 37986;
YearOffset[105] = 38352;
YearOffset[106] = 38717;
YearOffset[107] = 39082;
YearOffset[108] = 39447;
YearOffset[109] = 39813;
YearOffset[110] = 40178;
YearOffset[111] = 40543;
YearOffset[112] = 40908;
YearOffset[113] = 41274;
YearOffset[114] = 41639;
YearOffset[115] = 42004;
YearOffset[116] = 42369;
YearOffset[117] = 42735;
YearOffset[118] = 43100;
YearOffset[119] = 43465;
YearOffset[120] = 43830;
return YearOffset[y-1900];
}
__device__ bool isLeapKernelGpu(int y)
{
bool YearIsLeap[121];
YearIsLeap[0] = 1;
YearIsLeap[1] = 0;
YearIsLeap[2] = 0;
YearIsLeap[3] = 0;//1096;
YearIsLeap[4] = 1;//1461;
YearIsLeap[5] = 0;//1827;
YearIsLeap[6] = 0;//2192;
YearIsLeap[7] = 0;//2557;
YearIsLeap[8] = 1;//2922;
YearIsLeap[9] = 0;//3288;
YearIsLeap[10] = 0;//3653;
YearIsLeap[11] = 0;//4018;
YearIsLeap[12] = 1;//4383;
YearIsLeap[13] = 0;//4749;
YearIsLeap[14] = 0;//5114;
YearIsLeap[15] = 0;//5479;
YearIsLeap[16] = 1;//5844;
YearIsLeap[17] = 0;//6210;
YearIsLeap[18] = 0;//6575;
YearIsLeap[19] = 0;//6940;
YearIsLeap[20] = 1;//7305;
YearIsLeap[21] = 0;//7671;
YearIsLeap[22] = 0;//8036;
YearIsLeap[23] = 0;//8401;
YearIsLeap[24] = 1;//8766;
YearIsLeap[25] = 0;//9132;
YearIsLeap[26] = 0;//9497;
YearIsLeap[27] = 0;//9862;
YearIsLeap[28] = 1;//10227;
YearIsLeap[29] = 0;//10593;
YearIsLeap[30] = 0;//10958;
YearIsLeap[31] = 0;//11323;
YearIsLeap[32] = 1;//11688;
YearIsLeap[33] = 0;//12054;
YearIsLeap[34] = 0;//12419;
YearIsLeap[35] = 0;//12784;
YearIsLeap[36] = 1;//13149;
YearIsLeap[37] = 0;//13515;
YearIsLeap[38] = 0;//13880;
YearIsLeap[39] = 0;//14245;
YearIsLeap[40] = 1;//14610;
YearIsLeap[41] = 0;//14976;
YearIsLeap[42] = 0;//15341;
YearIsLeap[43] = 0;//15706;
YearIsLeap[44] = 1;//16071;
YearIsLeap[45] = 0;//16437;
YearIsLeap[46] = 0;//16802;
YearIsLeap[47] = 0;//17167;
YearIsLeap[48] = 1;//17532;
YearIsLeap[49] = 0;//17898;
YearIsLeap[50] = 0;//18263;
YearIsLeap[51] = 0;//18628;
YearIsLeap[52] = 1;//18993;
YearIsLeap[53] = 0;//19359;
YearIsLeap[54] = 0;//19724;
YearIsLeap[55] = 0;//20089;
YearIsLeap[56] = 1;//20454;
YearIsLeap[57] = 0;//20820;
YearIsLeap[58] = 0;//21185;
YearIsLeap[59] = 0;//21550;
YearIsLeap[60] = 1;//21915;
YearIsLeap[61] = 0;//22281;
YearIsLeap[62] = 0;//22646;
YearIsLeap[63] = 0;//23011;
YearIsLeap[64] = 1;//23376;
YearIsLeap[65] = 0;//23742;
YearIsLeap[66] = 0;//24107;
YearIsLeap[67] = 0;//24472;
YearIsLeap[68] = 1;//24837;
YearIsLeap[69] = 0;//25203;
YearIsLeap[70] = 0;//25568;
YearIsLeap[71] = 0;//25933;
YearIsLeap[72] = 1;//26298;
YearIsLeap[73] = 0;//26664;
YearIsLeap[74] = 0;//27029;
YearIsLeap[75] = 0;//27394;
YearIsLeap[76] = 1;//27759;
YearIsLeap[77] = 0;//28125;
YearIsLeap[78] = 0;//28490;
YearIsLeap[79] = 0;//28855;
YearIsLeap[80] = 1;//29220;
YearIsLeap[81] = 0;//29586;
YearIsLeap[82] = 0;//29951;
YearIsLeap[83] = 0;//30316;
YearIsLeap[84] = 1;//30681;
YearIsLeap[85] = 0;//31047;
YearIsLeap[86] = 0;//31412;
YearIsLeap[87] = 0;//31777;
YearIsLeap[88] = 1;//32142;
YearIsLeap[89] = 0;//32508;
YearIsLeap[90] = 0;//32873;
YearIsLeap[91] = 0;//33238;
YearIsLeap[92] = 1;//33603;
YearIsLeap[93] = 0;//33969;
YearIsLeap[94] = 0;//34334;
YearIsLeap[95] = 0;//34699;
YearIsLeap[96] = 1;//35064;
YearIsLeap[97] = 0;//35430;
YearIsLeap[98] = 0;//35795;
YearIsLeap[99] = 0;//36160;
YearIsLeap[100] = 1;// 36525;
YearIsLeap[101] = 0;// 36891;
YearIsLeap[102] = 0;// 37256;
YearIsLeap[103] = 0;// 37621;
YearIsLeap[104] = 1;// 37986;
YearIsLeap[105] = 0;// 38352;
YearIsLeap[106] = 0;//38717;
YearIsLeap[107] = 0;//39082;
YearIsLeap[108] = 1;//39447;
YearIsLeap[109] = 0;//39813;
YearIsLeap[110] = 0;//40178;
YearIsLeap[111] = 0;//40543;
YearIsLeap[112] = 1;//40908;
YearIsLeap[113] = 0;//41274;
YearIsLeap[114] = 0;//41639;
YearIsLeap[115] = 0;//42004;
YearIsLeap[116] = 1;//42369;
YearIsLeap[117] = 0;//42735;
YearIsLeap[118] = 0;//43100;
YearIsLeap[119] = 0;//42735;
YearIsLeap[120] = 1;//43830;
return YearIsLeap[y-1900];
}
__device__ bondsDateStruct intializeDateKernelGpu(int d, int m, int y)
{
bondsDateStruct currDate;
currDate.day = d;
currDate.month = m;
currDate.year = y;
bool leap = isLeapKernelGpu(y);
int offset = monthOffsetKernelGpu(m,leap);
currDate.dateSerialNum = d + offset + yearOffsetKernelGpu(y);
return currDate;
}
__device__ dataType yearFractionGpu(bondsDateStruct d1,
bondsDateStruct d2, int dayCounter)
{
return dayCountGpu(d1, d2, dayCounter) / (dataType)360.0;
}
__device__ int dayCountGpu(bondsDateStruct d1, bondsDateStruct d2, int dayCounter)
{
if (dayCounter == USE_EXACT_DAY)
{
int dd1 = d1.day, dd2 = d2.day;
int mm1 = d1.month, mm2 = d2.month;
int yy1 = d1.year, yy2 = d2.year;
if (dd2 == 31 && dd1 < 30)
{
dd2 = 1; mm2++;
}
return 360*(yy2-yy1) + 30*(mm2-mm1-1) + MAX(0, 30-dd1) + MIN(30, dd2);
}
else
{
return (d2.dateSerialNum - d1.dateSerialNum);
}
}
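// The first branch above computes a 30/360-style day count (after the
// end-of-month adjustment of dd2/mm2):
//   days = 360*(y2 - y1) + 30*(m2 - m1 - 1) + max(0, 30 - d1) + min(30, d2),
// while the second branch simply differences the serial day numbers. Together
// with the division by 360 in yearFractionGpu this yields the year fraction
// used by the rate and discount helpers below.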
__device__ dataType couponNotionalGpu()
{
return (dataType)100.0;
}
__device__ dataType bondNotionalGpu()
{
return (dataType)100.0;
}
__device__ dataType fixedRateCouponNominalGpu()
{
return (dataType)100.0;
}
__device__ bool eventHasOccurredGpu(bondsDateStruct currDate, bondsDateStruct eventDate)
{
return eventDate.dateSerialNum > currDate.dateSerialNum;
}
__device__ bool cashFlowHasOccurredGpu(bondsDateStruct refDate, bondsDateStruct eventDate)
{
return eventHasOccurredGpu(refDate, eventDate);
}
__device__ bondsDateStruct advanceDateGpu(bondsDateStruct date, int numMonthsAdvance)
{
int d = date.day;
int m = date.month+numMonthsAdvance;
int y = date.year;
while (m > 12)
{
m -= 12;
y += 1;
}
while (m < 1)
{
m += 12;
y -= 1;
}
int length = monthLengthKernelGpu(m, isLeapKernelGpu(y));
if (d > length)
d = length;
bondsDateStruct newDate = intializeDateKernelGpu(d, m, y);
return newDate;
}
__device__ int getNumCashFlowsGpu(inArgsStruct inArgs, int bondNum)
{
int numCashFlows = 0;
//bondsDateStruct endDate = inArgs.bond[bondNum].maturityDate;
bondsDateStruct currCashflowDate = inArgs.bond[bondNum].maturityDate;
while (currCashflowDate.dateSerialNum > inArgs.bond[bondNum].startDate.dateSerialNum)
{
numCashFlows++;
currCashflowDate = advanceDateGpu(currCashflowDate, -6);
}
return numCashFlows+1;
}
__device__ dataType getDirtyPriceGpu(inArgsStruct inArgs, int bondNum, cashFlowsStruct cashFlows, int numLegs)
{
dataType currentNotional = bondNotionalGpu();
return discountingBondEngineCalculateSettlementValueGpu(inArgs, bondNum, cashFlows, numLegs) * (dataType)100.0 / currentNotional;
}
__device__ dataType getAccruedAmountGpu(inArgsStruct inArgs, bondsDateStruct date, int bondNum, cashFlowsStruct cashFlows, int numLegs)
{
return bondAccruedAmountGpu(inArgs, date, bondNum, cashFlows, numLegs);
}
__device__ dataType discountingBondEngineCalculateSettlementValueGpu(inArgsStruct inArgs, int bondNum, cashFlowsStruct cashFlows, int numLegs)
{
bondsDateStruct currDate = inArgs.currDate[bondNum];
if (currDate.dateSerialNum < inArgs.bond[bondNum].startDate.dateSerialNum)
{
currDate = inArgs.bond[bondNum].startDate;
}
return cashFlowsNpvGpu(cashFlows,
inArgs.discountCurve[bondNum],
false,
currDate,
currDate,
numLegs);
}
__device__ dataType bondAccruedAmountGpu(inArgsStruct inArgs, bondsDateStruct date, int bondNum, cashFlowsStruct cashFlows, int numLegs)
{
dataType currentNotional = bondNotionalGpu();
if (currentNotional == (dataType)0.0)
return (dataType)0.0;
return bondFunctionsAccruedAmountGpu(inArgs, date, bondNum, cashFlows, numLegs);
}
__device__ dataType bondFunctionsAccruedAmountGpu(inArgsStruct inArgs, bondsDateStruct date, int bondNum, cashFlowsStruct cashFlows, int numLegs)
{
return cashFlowsAccruedAmountGpu(cashFlows,
false, date, numLegs, inArgs, bondNum) * (dataType)100.0 / bondNotionalGpu();
}
__device__ dataType cashFlowsAccruedAmountGpu(cashFlowsStruct cashFlows,
bool includecurrDateFlows,
bondsDateStruct currDate,
int numLegs, inArgsStruct inArgs, int bondNum)
{
int legComputeNum = cashFlowsNextCashFlowNumGpu(cashFlows,
currDate, numLegs);
dataType result = 0.0;
int i;
for (i = legComputeNum; i < (numLegs); ++i)
{
result += fixedRateCouponAccruedAmountGpu(cashFlows, i, currDate, inArgs, bondNum);
}
return result;
}
__device__ dataType fixedRateCouponAccruedAmountGpu(cashFlowsStruct cashFlows, int numLeg, bondsDateStruct d, inArgsStruct inArgs, int bondNum)
{
if (d.dateSerialNum <= cashFlows.legs[numLeg].accrualStartDate.dateSerialNum || d.dateSerialNum > inArgs.maturityDate[bondNum].dateSerialNum)
{
return (dataType)0.0;
}
else
{
bondsDateStruct endDate = cashFlows.legs[numLeg].accrualEndDate;
if (d.dateSerialNum < cashFlows.legs[numLeg].accrualEndDate.dateSerialNum)
{
endDate = d;
}
return fixedRateCouponNominalGpu()*(interestRateCompoundFactorGpu(cashFlows.intRate,
cashFlows.legs[numLeg].accrualStartDate, endDate, cashFlows.dayCounter) - (dataType)1.0);
}
}
__device__ dataType cashFlowsNpvGpu(cashFlowsStruct cashFlows,
bondsYieldTermStruct discountCurve,
bool includecurrDateFlows,
bondsDateStruct currDate,
bondsDateStruct npvDate,
int numLegs)
{
npvDate = currDate;
dataType totalNPV = 0.0;
int i;
for (i=0; i<numLegs; ++i) {
if (!(cashFlowHasOccurredGpu(cashFlows.legs[i].paymentDate, currDate)))
totalNPV += fixedRateCouponAmountGpu(cashFlows, i) *
bondsYieldTermStructureDiscountGpu(discountCurve, cashFlows.legs[i].paymentDate);
}
return totalNPV/bondsYieldTermStructureDiscountGpu(discountCurve, npvDate);
}
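// The NPV computed above is sum_i( amount_i * D(paymentDate_i) ) / D(npvDate),
// where D is the discount factor from bondsYieldTermStructureDiscountGpu and
// only cash flows whose payment date is not before currDate contribute.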
__device__ dataType bondsYieldTermStructureDiscountGpu(bondsYieldTermStruct ytStruct, bondsDateStruct t)
{
ytStruct.intRate.rate = ytStruct.forward;
ytStruct.intRate.freq = ytStruct.frequency;
ytStruct.intRate.comp = ytStruct.compounding;
return flatForwardDiscountImplGpu(ytStruct.intRate, yearFractionGpu(ytStruct.refDate, t, ytStruct.dayCounter));
}
__device__ dataType flatForwardDiscountImplGpu(intRateStruct intRate, dataType t)
{
return interestRateDiscountFactorGpu(intRate, t);
}
__device__ dataType interestRateDiscountFactorGpu(intRateStruct intRate, dataType t)
{
return (dataType)1.0/interestRateCompoundFactorGpuTwoArgs(intRate, t);
}
__device__ dataType interestRateCompoundFactorGpuTwoArgs(intRateStruct intRate, dataType t)
{
if (intRate.comp == SIMPLE_INTEREST)
return (dataType)1.0 + intRate.rate*t;
else if (intRate.comp == COMPOUNDED_INTEREST)
return pow((dataType)1.0+intRate.rate/intRate.freq, intRate.freq*t);
else if (intRate.comp == CONTINUOUS_INTEREST)
return exp(intRate.rate*t);
return (dataType)0.0;
}
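// Compound factors returned above, for rate r, frequency f and time t in years:
//   simple:      1 + r*t
//   compounded:  (1 + r/f)^(f*t)
//   continuous:  e^(r*t)
// interestRateDiscountFactorGpu uses the reciprocal of this value as the
// discount factor.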
__device__ dataType fixedRateCouponAmountGpu(cashFlowsStruct cashFlows, int numLeg)
{
if (cashFlows.legs[numLeg].amount == COMPUTE_AMOUNT)
{
return fixedRateCouponNominalGpu()*(interestRateCompoundFactorGpu(cashFlows.intRate, cashFlows.legs[numLeg].accrualStartDate,
cashFlows.legs[numLeg].accrualEndDate, cashFlows.dayCounter) - (dataType)1.0);
}
else
{
return cashFlows.legs[numLeg].amount;
}
}
__device__ dataType interestRateCompoundFactorGpu(intRateStruct intRate, bondsDateStruct d1,
bondsDateStruct d2, int dayCounter)
{
dataType t = yearFractionGpu(d1, d2, dayCounter);
return interestRateCompoundFactorGpuTwoArgs(intRate, t);
}
__device__ dataType interestRateImpliedRateGpu(dataType compound,
int comp,
dataType freq,
dataType t)
{
dataType r = 0.0f;
if (compound==(dataType)1.0)
{
r = 0.0;
}
else
{
switch (comp)
{
case SIMPLE_INTEREST:
r = (compound - (dataType)1.0)/t;
break;
case COMPOUNDED_INTEREST:
r = (pow((dataType)compound, (dataType)1.0/((freq)*t))-(dataType)1.0)*(freq);
break;
}
}
return r;
}
__device__ couponStruct cashFlowsNextCashFlowGpu(cashFlowsStruct cashFlows,
bondsDateStruct currDate,
int numLegs)
{
int i;
for (i = 0; i < numLegs; ++i)
{
if ( ! (cashFlowHasOccurredGpu(cashFlows.legs[i].paymentDate, currDate) ))
return cashFlows.legs[i];
}
return cashFlows.legs[numLegs-1];
}
__device__ int cashFlowsNextCashFlowNumGpu(cashFlowsStruct cashFlows,
bondsDateStruct currDate,
int numLegs)
{
int i;
for (i = 0; i < numLegs; ++i)
{
if ( ! (cashFlowHasOccurredGpu(cashFlows.legs[i].paymentDate, currDate) ))
return i;
}
return (numLegs-1);
}
__device__ dataType getBondYieldGpu(dataType cleanPrice,
int dc,
int comp,
dataType freq,
bondsDateStruct settlement,
dataType accuracy,
int maxEvaluations,
inArgsStruct currInArgs, int bondNum, cashFlowsStruct cashFlows, int numLegs)
{
dataType currentNotional = bondNotionalGpu();
if (currentNotional == (dataType)0.0)
return (dataType)0.0;
if (currInArgs.bond[bondNum].startDate.dateSerialNum > settlement.dateSerialNum)
{
settlement = currInArgs.bond[bondNum].startDate;
}
return getBondFunctionsYieldGpu(cleanPrice, dc, comp, freq,
settlement, accuracy, maxEvaluations,
currInArgs, bondNum, cashFlows, numLegs);
}
__device__ dataType getBondFunctionsYieldGpu(dataType cleanPrice,
int dc,
int comp,
dataType freq,
bondsDateStruct settlement,
dataType accuracy,
int maxEvaluations,
inArgsStruct currInArgs, int bondNum, cashFlowsStruct cashFlows, int numLegs)
{
dataType dirtyPrice = cleanPrice + bondFunctionsAccruedAmountGpu(currInArgs, settlement, bondNum, cashFlows, numLegs);
dirtyPrice /= (dataType)100.0 / bondNotionalGpu();
return getCashFlowsYieldGpu(cashFlows, dirtyPrice,
dc, comp, freq,
false, settlement, settlement, numLegs,
accuracy, maxEvaluations, (dataType)0.05);
}
__device__ dataType getCashFlowsYieldGpu(cashFlowsStruct leg,
dataType npv,
int dayCounter,
int compounding,
dataType frequency,
bool includecurrDateFlows,
bondsDateStruct currDate,
bondsDateStruct npvDate,
int numLegs,
dataType accuracy,
int maxIterations,
dataType guess)
{
//Brent solver;
solverStruct solver;
solver.maxEvaluations_ = maxIterations;
irrFinderStruct objFunction;
objFunction.npv = npv;
objFunction.dayCounter = dayCounter;
objFunction.comp = compounding;
objFunction.freq = frequency;
objFunction.includecurrDateFlows = includecurrDateFlows;
objFunction.currDate = currDate;
objFunction.npvDate = npvDate;
return solverSolveGpu(solver, objFunction, accuracy, guess, guess/(dataType)10.0, leg, numLegs);
}
__device__ dataType solverSolveGpu(solverStruct solver,
irrFinderStruct f,
dataType accuracy,
dataType guess,
dataType step,
cashFlowsStruct cashFlows,
int numLegs)
{
// check whether we really want to use epsilon
accuracy = MAX(accuracy, QL_EPSILON_GPU);
dataType growthFactor = (dataType)1.6;
int flipflop = -1;
solver.root_ = guess;
solver.fxMax_ = fOpGpu(f, solver.root_, cashFlows, numLegs);
// monotonically increasing bias, as in optionValue(volatility)
if (closeGpu(solver.fxMax_,(dataType)0.0))
{
return solver.root_;
}
else if (solver.fxMax_ > (dataType)0.0)
{
solver.xMin_ = /*enforceBounds*/(solver.root_ - step);
solver.fxMin_ = fOpGpu(f, solver.xMin_, cashFlows, numLegs);
solver.xMax_ = solver.root_;
}
else
{
solver.xMin_ = solver.root_;
solver.fxMin_ = solver.fxMax_;
solver.xMax_ = /*enforceBounds*/(solver.root_+step);
solver.fxMax_ = fOpGpu(f, solver.xMax_, cashFlows, numLegs);
}
solver.evaluationNumber_ = 2;
while (solver.evaluationNumber_ <= solver.maxEvaluations_)
{
if (solver.fxMin_*solver.fxMax_ <= (dataType)0.0)
{
if (closeGpu(solver.fxMin_, (dataType)0.0))
return solver.xMin_;
if (closeGpu(solver.fxMax_, (dataType)0.0))
return solver.xMax_;
solver.root_ = (solver.xMax_+solver.xMin_)/(dataType)2.0;
return solveImplGpu(solver, f, accuracy, cashFlows, numLegs);
}
if (fabs(solver.fxMin_) < fabs(solver.fxMax_))
{
solver.xMin_ = /*enforceBounds*/(solver.xMin_+growthFactor*(solver.xMin_ - solver.xMax_));
solver.fxMin_= fOpGpu(f, solver.xMin_, cashFlows, numLegs);
}
else if (fabs(solver.fxMin_) > fabs(solver.fxMax_))
{
solver.xMax_ = /*enforceBounds*/(solver.xMax_+growthFactor*(solver.xMax_ - solver.xMin_));
solver.fxMax_= fOpGpu(f, solver.xMax_, cashFlows, numLegs);
}
else if (flipflop == -1)
{
solver.xMin_ = /*enforceBounds*/(solver.xMin_+growthFactor*(solver.xMin_ - solver.xMax_));
solver.fxMin_= fOpGpu(f, solver.xMin_, cashFlows, numLegs);
solver.evaluationNumber_++;
flipflop = 1;
}
else if (flipflop == 1)
{
solver.xMax_ = /*enforceBounds*/(solver.xMax_+growthFactor*(solver.xMax_ - solver.xMin_));
solver.fxMax_= fOpGpu(f, solver.xMax_, cashFlows, numLegs);
flipflop = -1;
}
solver.evaluationNumber_++;
}
return (dataType)0.0;
}
__device__ dataType cashFlowsNpvYieldGpu(cashFlowsStruct cashFlows,
intRateStruct y,
bool includecurrDateFlows,
bondsDateStruct currDate,
bondsDateStruct npvDate,
int numLegs)
{
dataType npv = 0.0;
dataType discount = 1.0;
bondsDateStruct lastDate;
bool first = true;
int i;
for (i=0; i<numLegs; ++i)
{
if (cashFlowHasOccurredGpu(cashFlows.legs[i].paymentDate, currDate))
continue;
bondsDateStruct couponDate = cashFlows.legs[i].paymentDate;
dataType amount = fixedRateCouponAmountGpu(cashFlows, i);
if (first)
{
first = false;
if (i > 0) {
lastDate = advanceDateGpu(cashFlows.legs[i].paymentDate, -1*6);
} else {
lastDate = cashFlows.legs[i].accrualStartDate;
}
discount *= interestRateDiscountFactorGpu(y, yearFractionGpu(npvDate, couponDate, y.dayCounter));
}
else
{
discount *= interestRateDiscountFactorGpu(y, yearFractionGpu(lastDate, couponDate, y.dayCounter));
}
lastDate = couponDate;
npv += amount * discount;
}
return npv;
}
__device__ dataType fOpGpu(irrFinderStruct f, dataType y, cashFlowsStruct cashFlows, int numLegs)
{
intRateStruct yield;
yield.rate = y;
yield.comp = f.comp;
yield.freq = f.freq;
yield.dayCounter = f.dayCounter;
dataType NPV = cashFlowsNpvYieldGpu(cashFlows,
yield,
false,
f.currDate,
f.npvDate, numLegs);
return (f.npv - NPV);
}
__device__ dataType fDerivativeGpu(irrFinderStruct f, dataType y, cashFlowsStruct cashFlows, int numLegs)
{
intRateStruct yield;
yield.rate = y;
yield.dayCounter = f.dayCounter;
yield.comp = f.comp;
yield.freq = f.freq;
return modifiedDurationGpu(cashFlows, yield,
f.includecurrDateFlows,
f.currDate, f.npvDate, numLegs);
}
__device__ bool closeGpu(dataType x, dataType y)
{
return closeGpuThreeArgs(x,y,42);
}
__device__ bool closeGpuThreeArgs(dataType x, dataType y, int n)
{
dataType diff = fabs(x-y);
dataType tolerance = n*QL_EPSILON_GPU;
return diff <= tolerance*fabs(x) &&
diff <= tolerance*fabs(y);
}
__device__ dataType enforceBoundsGpu(dataType x)
{
return x;
}
__device__ dataType solveImplGpu(solverStruct solver, irrFinderStruct f,
dataType xAccuracy, cashFlowsStruct cashFlows, int numLegs)
{
dataType froot, dfroot, dx, dxold;
dataType xh, xl;
// Orient the search so that f(xl) < 0
if (solver.fxMin_ < (dataType)0.0)
{
xl = solver.xMin_;
xh = solver.xMax_;
}
else
{
xh = solver.xMin_;
xl = solver.xMax_;
}
// the "stepsize before last"
dxold = solver.xMax_ - solver.xMin_;
// it was dxold=std::fabs(xMax_-xMin_); in Numerical Recipes
// here (xMax_-xMin_ > 0) is verified in the constructor
// and the last step
dx = dxold;
froot = fOpGpu(f, solver.root_, cashFlows, numLegs);
dfroot = fDerivativeGpu(f, solver.root_, cashFlows, numLegs);
++solver.evaluationNumber_;
while (solver.evaluationNumber_<=solver.maxEvaluations_)
{
// Bisect if (out of range || not decreasing fast enough)
if ((((solver.root_-xh)*dfroot-froot)*
((solver.root_-xl)*dfroot-froot) > (dataType)0.0)
|| (fabs((dataType)2.0*froot) > fabs(dxold*dfroot)))
{
dxold = dx;
dx = (xh-xl)/(dataType)2.0;
solver.root_=xl+dx;
}
else
{
dxold = dx;
dx = froot/dfroot;
solver.root_ -= dx;
}
// Convergence criterion
if (fabs(dx) < xAccuracy)
return solver.root_;
froot = fOpGpu(f, solver.root_, cashFlows, numLegs);
dfroot = fDerivativeGpu(f, solver.root_, cashFlows, numLegs);
++solver.evaluationNumber_;
if (froot < (dataType)0.0)
xl=solver.root_;
else
xh=solver.root_;
}
return solver.root_;
}
__device__ dataType modifiedDurationGpu(cashFlowsStruct cashFlows,
intRateStruct y,
bool includecurrDateFlows,
bondsDateStruct currDate,
bondsDateStruct npvDate,
int numLegs)
{
dataType P = 0.0;
dataType dPdy = 0.0;
dataType r = y.rate;
dataType N = y.freq;
int dc = y.dayCounter;
int i;
for (i=0; i<numLegs; ++i)
{
if (!cashFlowHasOccurredGpu(cashFlows.legs[i].paymentDate, currDate))
{
dataType t = yearFractionGpu(npvDate,
cashFlows.legs[i].paymentDate, dc);
dataType c = fixedRateCouponAmountGpu(cashFlows, i);
dataType B = interestRateDiscountFactorGpu(y, t);
P += c * B;
if (y.comp == SIMPLE_INTEREST)
dPdy -= c * B*B * t;
if (y.comp == COMPOUNDED_INTEREST)
dPdy -= c * t * B/(1+r/N);
if (y.comp == CONTINUOUS_INTEREST)
dPdy -= c * B * t;
if (y.comp == SIMPLE_THEN_COMPOUNDED_INTEREST)
{
if (t<=(dataType)1.0/N)
dPdy -= c * B*B * t;
else
dPdy -= c * t * B/((dataType)1+r/N);
}
}
}
if (P == (dataType)0.0) // no cashflows
{
return (dataType)0.0;
}
return (-1*dPdy)/P; // reverse derivative sign
}
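// modifiedDurationGpu evaluates the price P = sum_i c_i * B(t_i) and its
// yield derivative dP/dy with the per-compounding expressions above, and
// returns the modified duration -(dP/dy)/P (or 0 when no cash flows remain).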
__global__ void getBondsResultsGpu(inArgsStruct inArgs, resultsStruct results, int n)
{
int bondNum = blockIdx.x*blockDim.x + threadIdx.x;
if (bondNum < n)
{
int numLegs;
int numCashFlows = 0;
//bondsDateStruct endDate = inArgs.bond[bondNum].maturityDate;
bondsDateStruct currCashflowDate = inArgs.bond[bondNum].maturityDate;
while (currCashflowDate.dateSerialNum > inArgs.bond[bondNum].startDate.dateSerialNum)
{
numCashFlows++;
currCashflowDate = advanceDateGpu(currCashflowDate, -6);
}
numLegs = numCashFlows+1;
cashFlowsStruct cashFlows;
couponStruct cashLegs[9];
cashFlows.legs = cashLegs;
cashFlows.intRate.dayCounter = USE_EXACT_DAY;
cashFlows.intRate.rate = inArgs.bond[bondNum].rate;
cashFlows.intRate.freq = ANNUAL_FREQ;
cashFlows.intRate.comp = SIMPLE_INTEREST;
cashFlows.dayCounter = USE_EXACT_DAY;
cashFlows.nominal = (dataType)100.0;
//bondsDateStruct currPaymentDate;
bondsDateStruct currStartDate = advanceDateGpu(inArgs.bond[bondNum].maturityDate, (numLegs - 1)*-6);
bondsDateStruct currEndDate = advanceDateGpu(currStartDate, 6);
int cashFlowNum;
for (cashFlowNum = 0; cashFlowNum < numLegs-1; cashFlowNum++)
{
cashFlows.legs[cashFlowNum].paymentDate = currEndDate;
cashFlows.legs[cashFlowNum].accrualStartDate = currStartDate;
cashFlows.legs[cashFlowNum].accrualEndDate = currEndDate;
cashFlows.legs[cashFlowNum].amount = COMPUTE_AMOUNT;
currStartDate = currEndDate;
currEndDate = advanceDateGpu(currEndDate, 6);
}
cashFlows.legs[numLegs-1].paymentDate = inArgs.bond[bondNum].maturityDate;
cashFlows.legs[numLegs-1].accrualStartDate = inArgs.currDate[bondNum];
cashFlows.legs[numLegs-1].accrualEndDate = inArgs.currDate[bondNum];
cashFlows.legs[numLegs-1].amount = (dataType)100.0;
results.bondForwardVal[bondNum] = getBondYieldGpu(inArgs.bondCleanPrice[bondNum],
USE_EXACT_DAY,
COMPOUNDED_INTEREST,
(dataType)2.0,
inArgs.currDate[bondNum],
ACCURACY,
100,
inArgs, bondNum, cashFlows, numLegs);
inArgs.discountCurve[bondNum].forward = results.bondForwardVal[bondNum];
results.dirtyPrice[bondNum] = getDirtyPriceGpu(inArgs, bondNum, cashFlows, numLegs);
results.accruedAmountCurrDate[bondNum] = getAccruedAmountGpu(inArgs, inArgs.currDate[bondNum], bondNum, cashFlows, numLegs);
results.cleanPrice[bondNum] = results.dirtyPrice[bondNum] - results.accruedAmountCurrDate[bondNum];
}
}
// ============================ next source file ============================
namespace cgbn {
__device__ __forceinline__ uint32_t add_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("add.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t addc_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("addc.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t addc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("addc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t sub_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("sub.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t subc_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("subc.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t subc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("subc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
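// Illustrative composition of the carry-chain wrappers above (an added sketch,
// not part of the original library): adding two 3-limb little-endian numbers.
// The *_cc forms set the carry flag, the *c forms consume it, so the three
// calls must be issued back to back in this order.
__device__ __forceinline__ void add_3limb_example(uint32_t r[3], const uint32_t a[3], const uint32_t b[3]) {
r[0]=add_cc(a[0], b[0]);   // low limb: plain add, sets carry
r[1]=addc_cc(a[1], b[1]);  // middle limb: add with carry-in, sets carry
r[2]=addc(a[2], b[2]);     // high limb: add with carry-in, carry discarded
}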
__device__ __forceinline__ uint32_t madlo(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("mad.lo.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t madlo_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("mad.lo.cc.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t madloc_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t madloc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("madc.lo.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t madhi(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("mad.hi.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t madhi_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("mad.hi.cc.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t madhic_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t madhic(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("madc.hi.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint64_t mad_wide(uint32_t a, uint32_t b, uint64_t c) {
uint64_t r;
asm volatile ("mad.wide.u32 %0, %1, %2, %3;" : "=l"(r) : "r"(a), "r"(b), "l"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadll(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bl;\n\t"
"add.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadll_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bl;\n\t"
"add.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadllc_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bl;\n\t"
"addc.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadllc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bl;\n\t"
"addc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadlh(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bh;\n\t"
"add.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadlh_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bh;\n\t"
"add.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadlhc_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bh;\n\t"
"addc.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadlhc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %al, %bh;\n\t"
"addc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhl(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bl;\n\t"
"add.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhl_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bl;\n\t"
"add.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhlc_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bl;\n\t"
"addc.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhlc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bl;\n\t"
"addc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhh(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bh;\n\t"
"add.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhh_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bh;\n\t"
"add.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhhc_cc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bh;\n\t"
"addc.cc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t xmadhhc(uint32_t a, uint32_t b, uint32_t c) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u16 %al, %ah, %bl, %bh;\n\t"
"mov.b32 {%al,%ah},%1;\n\t"
"mov.b32 {%bl,%bh},%2;\n\t"
"mul.wide.u16 %0, %ah, %bh;\n\t"
"addc.u32 %0, %0, %3;\n\t"
"}" : "=r"(r) : "r"(a), "r"(b), "r"(c));
return r;
}
__device__ __forceinline__ uint32_t umin(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("min.u32 %0,%1,%2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t umax(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("max.u32 %0,%1,%2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t uleft_clamp(uint32_t lo, uint32_t hi, uint32_t amt) {
uint32_t r;
#if __CUDA_ARCH__>=320
asm volatile ("shf.l.clamp.b32 %0,%1,%2,%3;" : "=r"(r) : "r"(lo), "r"(hi), "r"(amt));
#else
amt=umin(amt, 32);
r=hi<<amt;
r=r | (lo>>32-amt);
#endif
return r;
}
__device__ __forceinline__ uint32_t uright_clamp(uint32_t lo, uint32_t hi, uint32_t amt) {
uint32_t r;
#if __CUDA_ARCH__>=320
asm volatile ("shf.r.clamp.b32 %0,%1,%2,%3;" : "=r"(r) : "r"(lo), "r"(hi), "r"(amt));
#else
amt=umin(amt, 32);
r=lo>>amt;
r=r | (hi<<32-amt);
#endif
return r;
}
__device__ __forceinline__ uint32_t uleft_wrap(uint32_t lo, uint32_t hi, uint32_t amt) {
uint32_t r;
#if __CUDA_ARCH__>=320
asm volatile ("shf.l.wrap.b32 %0,%1,%2,%3;" : "=r"(r) : "r"(lo), "r"(hi), "r"(amt));
#else
amt=amt & 0x1F;
r=hi<<amt;
r=r | (lo>>32-amt);
#endif
return r;
}
__device__ __forceinline__ uint32_t uright_wrap(uint32_t lo, uint32_t hi, uint32_t amt) {
uint32_t r;
#if __CUDA_ARCH__>=320
asm volatile ("shf.r.wrap.b32 %0,%1,%2,%3;" : "=r"(r) : "r"(lo), "r"(hi), "r"(amt));
#else
amt=amt & 0x1F;
r=lo>>amt;
r=r | (hi<<32-amt);
#endif
return r;
}
__device__ __forceinline__ uint32_t uabs(int32_t x) {
uint32_t r;
asm volatile ("abs.s32 %0,%1;" : "=r"(r) : "r"(x));
return r;
}
__device__ __forceinline__ uint32_t uhigh(uint64_t wide) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u32 %ignore;\n\t"
"mov.b64 {%ignore,%0},%1;\n\t"
"}" : "=r"(r) : "l"(wide));
return r;
}
__device__ __forceinline__ uint32_t ulow(uint64_t wide) {
uint32_t r;
asm volatile ("{\n\t"
".reg .u32 %ignore;\n\t"
"mov.b64 {%0,%ignore},%1;\n\t"
"}" : "=r"(r) : "l"(wide));
return r;
}
__device__ __forceinline__ uint64_t make_wide(uint32_t lo, uint32_t hi) {
uint64_t r;
asm volatile ("mov.b64 %0,{%1,%2};" : "=l"(r) : "r"(lo), "r"(hi));
return r;
}
} /* namespace cgbn */
// ============================ next source file ============================
#include "Thinning.h"
#include <iostream>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread-block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: DEF_PATTERN_SIZE
// Default size of one PATTERN table.
#define DEF_PATTERN_SIZE 512
// Macro: DEF_PATTERN_LEN
// Number of PATTERN tables.
#define DEF_PATTERN_LEN 4
// Macros: CST_IMG_WIDTH and CST_IMG_HEIGHT
// Size of the image that a coordinate set is converted to when the input
// argument is a coordinate set.
#define CST_IMG_WIDTH 1024
#define CST_IMG_HEIGHT 1024
// static variable: lutthin (the PATTERN tables)
// An unsigned char array of size DEF_PATTERN_SIZE * 4, holding the four
// PATTERN tables, i.e. the four decision groups of the PATTERN-table thinning
// algorithm. The four tables are merged into one array so that the data can be
// copied to the device in a single transfer.
unsigned char lutthin[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1,
1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0,
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0,
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1,
0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0,
1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,
0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1,
1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0,
0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1,
1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0,
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1,
1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0,
1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0
};
// Kernel function: _thinMatSubFirKer (first pass of the PATTERN-table deletion algorithm)
// Performs the first thinning pass using PATTERN tables 1, 2 and 3. The output
// image (into which the input image has already been fully copied) is traversed;
// if the 8-neighborhood of a target pixel satisfies the conditions of PATTERN
// tables 1, 2 and 3, the pixel is deleted by setting highpixel to lowpixel,
// otherwise it is kept. The result of this first pass is stored in the
// temporary image tempimg. (A worked example of the neighborhood index follows
// the declaration below.)
static __global__ void
_thinMatSubFirKer(
ImageCuda outimg, // output image
ImageCuda tempimg, // temporary image (holds the result of the
// first pass of the PATTERN-table deletion)
unsigned char *devlutthin1, // PATTERN table 1
unsigned char *devlutthin2, // PATTERN table 2
unsigned char *devlutthin3, // PATTERN table 3
unsigned char highpixel, // high pixel value
unsigned char lowpixel // low pixel value
);
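// Worked example (illustrative, not part of the algorithm description above):
// both kernels encode the 8-neighborhood (plus the centre) as a 9-bit PATTERN
// index using column-major weights
//
//     1    8   64
//     2   16  128
//     4   32  256
//
// so a neighborhood whose left-top, left-middle and centre pixels are highpixel
// and whose remaining pixels are lowpixel maps to index = 1 + 2 + 16 = 19, and
// devlutthin1[19], devlutthin2[19], devlutthin3[19] decide in the first pass
// whether that pixel may be deleted. Valid indices therefore range over 0..511.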
// Kernel function: _thinMatSubSecKer (second pass of the PATTERN-table deletion algorithm)
// Performs the second thinning pass on the temporary image using PATTERN tables
// 1, 2 and 4. The temporary image is traversed; if the 8-neighborhood of a
// target pixel satisfies the conditions of PATTERN tables 1, 2 and 4, the pixel
// is deleted by setting highpixel to lowpixel, otherwise it is kept. The result
// of this second pass is written back to the output image outimg, and the number
// of thinned pixels is accumulated in devchangecount. Outside the kernel,
// devchangecount is checked; when it is 0, i.e. no more pixels can be deleted,
// the iteration stops.
static __global__ void
_thinMatSubSecKer(
ImageCuda tempimg, // temporary image (holds the result of the
// first pass of the PATTERN-table deletion)
ImageCuda outimg, // output image
unsigned char *devlutthin1, // PATTERN table 1
unsigned char *devlutthin2, // PATTERN table 2
unsigned char *devlutthin4, // PATTERN table 4
int *devchangecount, // number of pixels thinned in this pass
// (used to decide whether to keep iterating)
unsigned char lowpixel // low pixel value
);
// Kernel function: _thinMatSubFirKer (first pass of the PATTERN-table deletion algorithm)
static __global__ void _thinMatSubFirKer(ImageCuda outimg,
ImageCuda tempimg,
unsigned char *devlutthin1,
unsigned char *devlutthin2,
unsigned char *devlutthin3,
unsigned char highpixel,
unsigned char lowpixel)
{
// dstc and dstr are the x and y components of the coordinate of the pixel
// handled by this thread (c stands for column, r stands for row).
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = blockIdx.y * blockDim.y + threadIdx.y;
// Check whether the pixel is out of bounds; if so, do nothing. This both saves
// computation and prevents crashes caused by out-of-bounds accesses.
if (dstc >= outimg.imgMeta.width - 1 ||
dstr >= outimg.imgMeta.height - 1 || dstc < 1 || dstr < 1)
return;
// Pointer to the target pixel.
unsigned char *outptr;
// Offset of the current pixel within the image.
int curpos = dstr * outimg.pitchBytes + dstc;
// Absolute address of the current pixel in the image.
outptr = outimg.imgMeta.imgData + curpos;
// If the target pixel has the low pixel value, skip the thinning step.
if (*outptr != lowpixel) {
// Index of the target pixel into the PATTERN tables, derived from its
// 8-neighborhood.
int index = 0;
// Since the image is stored linearly, compute the offsets of the three
// neighborhood rows up front to avoid recomputing them below.
int posColumn1 = (dstr - 1) * outimg.pitchBytes;
int posColumn2 = posColumn1 + outimg.pitchBytes;
int posColumn3 = posColumn2 + outimg.pitchBytes;
// As described in the algorithm, every pixel in the 8-neighborhood carries a
// weight. Summing the weights of the neighbors whose value is highpixel yields
// the index of this neighborhood configuration in the PATTERN tables, which in
// turn gives the table entries for the target pixel.
if (outimg.imgMeta.imgData[dstc - 1 + posColumn1] != lowpixel)
index += 1;
if (outimg.imgMeta.imgData[dstc - 1 + posColumn2] != lowpixel)
index += 2;
if (outimg.imgMeta.imgData[dstc - 1 + posColumn3] != lowpixel)
index += 4;
if (outimg.imgMeta.imgData[dstc + posColumn1] != lowpixel)
index += 8;
if (outimg.imgMeta.imgData[dstc + posColumn2] != lowpixel)
index += 16;
if (outimg.imgMeta.imgData[dstc + posColumn3] != lowpixel)
index += 32;
if (outimg.imgMeta.imgData[dstc + 1 + posColumn1] != lowpixel)
index += 64;
if (outimg.imgMeta.imgData[dstc + 1 + posColumn2] != lowpixel)
index += 128;
if (outimg.imgMeta.imgData[dstc + 1 + posColumn3] != lowpixel)
index += 256;
// Look up the values for this index in PATTERN tables 1, 2 and 3.
unsigned char replacedPix1 = devlutthin1[index];
unsigned char replacedPix2 = devlutthin2[index];
unsigned char replacedPix3 = devlutthin3[index];
// Derive the preliminary thinning result from these values and store it in the temporary image.
if (replacedPix1 == 1 && replacedPix2 == 1 && replacedPix3 == 1)
tempimg.imgMeta.imgData[curpos] = lowpixel;
else
tempimg.imgMeta.imgData[curpos] = highpixel;
} else {
// If the target pixel is lowpixel, write its value to the temporary image directly.
tempimg.imgMeta.imgData[curpos] = lowpixel;
}
}
// Kernel function: _thinMatSubSecKer (second pass of the PATTERN-table deletion algorithm)
static __global__ void _thinMatSubSecKer(ImageCuda tempimg,
ImageCuda outimg,
unsigned char *devlutthin1,
unsigned char *devlutthin2,
unsigned char *devlutthin4,
int *devchangecount,
unsigned char lowpixel)
{
// dstc and dstr are the x and y components of the coordinate of the pixel
// handled by this thread (c stands for column, r stands for row).
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = blockIdx.y * blockDim.y + threadIdx.y;
// Check whether the pixel is out of bounds; if so, do nothing. This both saves
// computation and prevents crashes caused by out-of-bounds accesses.
if (dstc >= tempimg.imgMeta.width - 1 ||
dstr >= tempimg.imgMeta.height - 1 || dstc < 1 || dstr < 1)
return;
// Pointer to the target pixel in the temporary image.
unsigned char *temptr;
// Offset of the current pixel within the temporary image.
int curpos = dstr * outimg.pitchBytes + dstc;
// Absolute address of the current pixel in the image.
temptr = tempimg.imgMeta.imgData + curpos;
// Flag marking whether the target pixel should be deleted; initialized to false.
bool niv = false;
// If the target pixel in the temporary image is lowpixel, skip the second
// thinning pass.
if (*temptr != lowpixel) {
// Index of the target pixel into the PATTERN tables, derived from its
// 8-neighborhood in the temporary image.
int index = 0;
// Since the image is stored linearly, compute the offsets of the three
// neighborhood rows up front to avoid recomputing them below.
int posColumn1 = (dstr - 1) * tempimg.pitchBytes;
int posColumn2 = posColumn1 + tempimg.pitchBytes;
int posColumn3 = posColumn2 + tempimg.pitchBytes;
// As described in the algorithm, every pixel in the 8-neighborhood carries a
// weight. Summing the weights of the neighbors whose value is highpixel yields
// the index of this neighborhood configuration in the PATTERN tables, which in
// turn gives the table entries for the target pixel.
if (tempimg.imgMeta.imgData[dstc - 1 + posColumn1] != lowpixel)
index += 1;
if (tempimg.imgMeta.imgData[dstc - 1 + posColumn2] != lowpixel)
index += 2;
if (tempimg.imgMeta.imgData[dstc - 1 + posColumn3] != lowpixel)
index += 4;
if (tempimg.imgMeta.imgData[dstc + posColumn1] != lowpixel)
index += 8;
if (tempimg.imgMeta.imgData[dstc + posColumn2] != lowpixel)
index += 16;
if (tempimg.imgMeta.imgData[dstc + posColumn3] != lowpixel)
index += 32;
if (tempimg.imgMeta.imgData[dstc + 1 + posColumn1] != lowpixel)
index += 64;
if (tempimg.imgMeta.imgData[dstc + 1 + posColumn2] != lowpixel)
index += 128;
if (tempimg.imgMeta.imgData[dstc + 1 + posColumn3] != lowpixel)
index += 256;
// Look up the values for this index in PATTERN tables 1, 2 and 4.
unsigned char replacedPix1 = devlutthin1[index];
unsigned char replacedPix2 = devlutthin2[index];
unsigned char replacedPix4 = devlutthin4[index];
// Decide from the PATTERN-table values whether the target pixel should be deleted.
niv = !(replacedPix1 == 1 && replacedPix2 == 1 && replacedPix4 == 1);
}
// After thinning, if the corresponding pixel of the output image is highpixel
// while niv is false, set the target pixel to lowpixel.
if (niv != (outimg.imgMeta.imgData[curpos] != lowpixel)) {
// Delete the target pixel.
outimg.imgMeta.imgData[curpos] = lowpixel;
// Increment devchangecount, which records the number of deleted pixels.
atomicAdd(devchangecount, 1);
}
}
// Macro: FAIL_THIN_IMAGE_FREE
// Frees the previously allocated memory when an error occurs.
#define FAIL_THIN_IMAGE_FREE do { \
if (devlutthin != NULL) \
cudaFree(devlutthin); \
if (tempimg != NULL) \
ImageBasicOp::deleteImage(tempimg); \
if (devchangecount != NULL) \
cudaFree(devchangecount); \
} while (0)
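// Note on the do { ... } while (0) wrapper (illustrative usage below): it makes
// the macro behave like a single statement, so it can be used safely inside an
// unbraced if/else, e.g.
//
//     if (cudaerrcode != cudaSuccess)
//         FAIL_THIN_IMAGE_FREE;
//     else
//         /* continue */;
//
// which would not parse correctly if the macro expanded to a bare brace block
// followed by a semicolon.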
// Member method: thinMatlabLike (boundary thinning using the PATTERN-table method)
__host__ int Thinning::thinMatlabLike(Image *inimg, Image *outimg)
{
// Local variables: error codes.
int errcode;
cudaError_t cudaerrcode;
// Check whether the input and output images are NULL.
if (inimg == NULL || outimg == NULL)
return NULL_POINTER;
// Declare all intermediate variables and initialize them to NULL.
unsigned char *devlutthin = NULL;
Image *tempimg = NULL;
int *devchangecount = NULL;
// Allocate device memory for the 4 PATTERN tables. Since the PATTERN tables are
// given in the provided code and each has size DEF_PATTERN_SIZE, all of the
// space (DEF_PATTERN_SIZE * 4) is allocated at once and the individual tables
// are addressed through offsets.
unsigned char *devlutthin1, *devlutthin2, *devlutthin3, *devlutthin4;
cudaerrcode = cudaMalloc((void **)&devlutthin,
DEF_PATTERN_SIZE * DEF_PATTERN_LEN *
sizeof (unsigned char));
if (cudaerrcode != cudaSuccess)
return CUDA_ERROR;
// Address the device memory through offsets: each PATTERN table occupies one
// stretch of DEF_PATTERN_SIZE entries.
devlutthin1 = devlutthin;
devlutthin2 = devlutthin1 + DEF_PATTERN_SIZE;
devlutthin3 = devlutthin2 + DEF_PATTERN_SIZE;
devlutthin4 = devlutthin3 + DEF_PATTERN_SIZE;
// Copy the PATTERN-table data from the host to the device.
cudaerrcode = cudaMemcpy(devlutthin, lutthin, DEF_PATTERN_SIZE *
DEF_PATTERN_LEN * sizeof (unsigned char),
cudaMemcpyHostToDevice);
if (cudaerrcode != cudaSuccess) {
FAIL_THIN_IMAGE_FREE;
return CUDA_ERROR;
}
// Variable recording the number of thinned pixels, on the host.
int changeCount;
// Variable recording the number of thinned pixels, on the device; allocate space for it.
cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int));
if (cudaerrcode != cudaSuccess) {
FAIL_THIN_IMAGE_FREE;
return CUDA_ERROR;
}
// Create the temporary image.
errcode = ImageBasicOp::newImage(&tempimg);
if (errcode != NO_ERROR)
return errcode;
errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width,
inimg->height);
if (errcode != NO_ERROR) {
FAIL_THIN_IMAGE_FREE;
return errcode;
}
// Copy the input image inimg entirely into the output image outimg and move
// outimg to the device.
errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg);
if (errcode != NO_ERROR) {
FAIL_THIN_IMAGE_FREE;
return errcode;
}
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR) {
FAIL_THIN_IMAGE_FREE;
return errcode;
}
// Extract the ROI sub-image of the temporary image.
ImageCuda tempsubimgCud;
errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud);
if (errcode != NO_ERROR) {
FAIL_THIN_IMAGE_FREE;
return errcode;
}
// Compute the thread-block size and the number of blocks for the kernel launch.
dim3 gridsize, blocksize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
// Set to 1 so that the first iteration starts.
changeCount = 1;
// Iterate until the image can no longer be thinned, i.e. until changeCount,
// which records the number of thinned pixels, becomes 0.
while (changeCount > 0) {
// Reset the host-side counter to 0 and copy it to devchangecount on the device.
changeCount = 0;
cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int),
cudaMemcpyHostToDevice);
if (cudaerrcode != cudaSuccess) {
FAIL_THIN_IMAGE_FREE;
return CUDA_ERROR;
}
// Launch the kernel for the first thinning pass.
_thinMatSubFirKer<<<gridsize, blocksize>>>(outsubimgCud, tempsubimgCud,
devlutthin1, devlutthin2,
devlutthin3, highPixel,
lowPixel);
if (cudaGetLastError() != cudaSuccess) {
// The kernel failed; stop iterating and free the allocated memory.
FAIL_THIN_IMAGE_FREE;
return CUDA_ERROR;
}
// Launch the kernel for the second thinning pass.
_thinMatSubSecKer<<<gridsize, blocksize>>>(tempsubimgCud, outsubimgCud,
devlutthin1, devlutthin2,
devlutthin4, devchangecount,
lowPixel);
if (cudaGetLastError() != cudaSuccess) {
// The kernel failed; stop iterating and free the allocated memory.
FAIL_THIN_IMAGE_FREE;
return CUDA_ERROR;
}
// Copy devchangecount from the device back to the host variable changeCount to
// decide whether to keep iterating.
cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int),
cudaMemcpyDeviceToHost);
if (cudaerrcode != cudaSuccess) {
FAIL_THIN_IMAGE_FREE;
return CUDA_ERROR;
}
}
// Free the allocated memory after thinning finishes.
cudaFree(devlutthin);
cudaFree(devchangecount);
ImageBasicOp::deleteImage(tempimg);
return NO_ERROR;
}
// Undo the earlier macro definition.
#undef FAIL_THIN_IMAGE_FREE
// Member method: thinMatlabLike (boundary thinning using the PATTERN-table method)
__host__ int Thinning::thinMatlabLike(CoordiSet *incst,
CoordiSet *outcst)
{
// Local variable: error code.
int errcode;
// Check whether the input and output coordinate sets are NULL.
if (incst == NULL || outcst == NULL)
return NULL_POINTER;
// Create the input image.
Image *inimg;
errcode = ImageBasicOp::newImage(&inimg);
if (errcode != NO_ERROR)
return errcode;
// The image size defaults to 1024 * 1024.
errcode = ImageBasicOp::makeAtHost(inimg, CST_IMG_WIDTH,
CST_IMG_HEIGHT);
if (errcode != NO_ERROR) {
// On error, release the previously allocated resources.
ImageBasicOp::deleteImage(inimg);
return errcode;
}
// Create the output image.
Image *outimg;
errcode = ImageBasicOp::newImage(&outimg);
if (errcode != NO_ERROR) {
// On error, release the previously allocated resources.
ImageBasicOp::deleteImage(inimg);
return errcode;
}
// The image size defaults to 1024 * 1024.
errcode = ImageBasicOp::makeAtHost(outimg, CST_IMG_WIDTH,
CST_IMG_HEIGHT);
if (errcode != NO_ERROR) {
// On error, release the previously allocated resources.
ImageBasicOp::deleteImage(inimg);
ImageBasicOp::deleteImage(outimg);
return errcode;
}
// Use the converter to turn the input coordinate set into the input image.
errcode = this->imgCon.cstConvertToImg(incst, inimg);
if (errcode != NO_ERROR) {
// On error, release the previously allocated resources.
ImageBasicOp::deleteImage(inimg);
ImageBasicOp::deleteImage(outimg);
return errcode;
}
// Thin the image.
errcode = this->thinMatlabLike(inimg, outimg);
if (errcode != NO_ERROR) {
// On error, release the previously allocated resources.
ImageBasicOp::deleteImage(inimg);
ImageBasicOp::deleteImage(outimg);
return errcode;
}
// Use the converter to turn the output image into the output coordinate set.
errcode = this->imgCon.imgConvertToCst(outimg, outcst);
if (errcode != NO_ERROR) {
// On error, release the previously allocated resources.
ImageBasicOp::deleteImage(inimg);
ImageBasicOp::deleteImage(outimg);
return errcode;
}
// Free the allocated resources after thinning finishes.
ImageBasicOp::deleteImage(inimg);
ImageBasicOp::deleteImage(outimg);
return NO_ERROR;
}
|
the_stack
|
#include <thrust/device_malloc.h>
#include <thrust/device_new.h>
#include <thrust/device_ptr.h>
#include "nvblox/gpu_hash/cuda/gpu_hash_interface.cuh"
#include "nvblox/gpu_hash/cuda/gpu_indexing.cuh"
#include "nvblox/utils/timing.h"
namespace nvblox {
__device__ inline bool isTsdfVoxelValid(const TsdfVoxel& voxel) {
constexpr float kMinWeight = 1e-4;
return voxel.weight > kMinWeight;
}
__device__ thrust::pair<float, bool> cast(
const Ray& ray, // NOLINT
Index3DDeviceHashMapType<TsdfBlock> block_hash, // NOLINT
float truncation_distance_m, // NOLINT
float block_size_m, // NOLINT
int maximum_steps, // NOLINT
float maximum_ray_length_m, // NOLINT
float surface_distance_epsilon_m) {
// Approach: step along the ray until we find the surface, or fail to find one.
bool last_distance_positive = false;
// t captures the parameter scaling along ray.direction. We assume the ray
// direction is normalized, so that t is in meters.
float t = 0.0f;
for (int i = 0; (i < maximum_steps) && (t < maximum_ray_length_m); i++) {
// Current point to sample
const Vector3f p_L = ray.origin + t * ray.direction;
// Evaluate the distance at this point
float step;
TsdfVoxel* voxel_ptr;
// Can't get distance, let's see what to do...
if (!getVoxelAtPosition(block_hash, p_L, block_size_m, &voxel_ptr) ||
!isTsdfVoxelValid(*voxel_ptr)) {
// 1) We weren't in observed space before this, so step through this
// (unobserved) region and hope to hit something allocated.
if (!last_distance_positive) {
// step forward by the truncation distance
step = truncation_distance_m;
last_distance_positive = false;
}
// 2) We were in observed space, now we've left it... let's kill this
// ray, it's risky to continue.
// Note(alexmillane): The "risk" here is that we've somehow passed
// through the truncation band. This occurs occasionally. The risk
// of continuing is that we can then see through an object. It's safer
// to stop here and hope for better luck in the next frame.
else {
return {t, false};
}
}
// We got a valid distance
else {
// Distance negative (or close to it)!
// We're going to terminate; let's determine how.
if (voxel_ptr->distance < surface_distance_epsilon_m) {
// 1) We found a zero crossing. Terminate successfully.
if (last_distance_positive) {
// We "refine" the distance by back stepping the (now negative)
// distance value
t += voxel_ptr->distance;
// Output - Success!
return {t, true};
}
// 2) We just went from unobserved to negative. We're observing
// something from behind, terminate.
else {
return {t, false};
}
}
// Distance positive!
else {
// Step by this amount
step = voxel_ptr->distance;
last_distance_positive = true;
}
}
// Step further along the ray
t += step;
}
// Ran out of number of steps or distance... Fail
return {t, false};
}
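// Illustrative sketch only: the loop in cast() above is standard sphere tracing
// against a signed distance field. The helper below shows the same stepping
// rule in isolation, with the TSDF block-hash lookup replaced by a
// caller-supplied distance functor; DistanceFn and sphereTraceSketch are
// hypothetical names and are not used elsewhere in nvblox.
template <typename DistanceFn>
thrust::pair<float, bool> sphereTraceSketch(const Vector3f& origin,
const Vector3f& direction,
DistanceFn sdf, int maximum_steps,
float maximum_ray_length_m,
float epsilon_m) {
float t = 0.0f;
for (int i = 0; (i < maximum_steps) && (t < maximum_ray_length_m); i++) {
// Distance from the current sample point to the nearest surface.
const float d = sdf(origin + t * direction);
if (d < epsilon_m) {
return {t, true};  // Close enough to the surface: report a hit at t.
}
t += d;  // Safe step: no surface can be closer than d along the ray.
}
return {t, false};  // Ran out of steps or exceeded the ray length.
}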
__global__ void sphereTracingKernel(
const Ray ray, // NOLINT
Index3DDeviceHashMapType<TsdfBlock> block_hash, // NOLINT
float* t, // NOLINT
bool* success_flag, // NOLINT
float truncation_distance_m, // NOLINT
float block_size_m, // NOLINT
int maximum_steps, // NOLINT
float maximum_ray_length_m, // NOLINT
float surface_distance_epsilon_m) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx != 0) return;
thrust::pair<float, bool> res =
cast(ray, block_hash, truncation_distance_m, block_size_m, maximum_steps,
maximum_ray_length_m, surface_distance_epsilon_m);
*t = res.first;
*success_flag = res.second;
}
__global__ void sphereTraceImageKernel(
const Camera camera, // NOLINT
const Transform T_S_C, // NOLINT
Index3DDeviceHashMapType<TsdfBlock> block_hash, // NOLINT
float* image, // NOLINT
float truncation_distance_m, // NOLINT
float block_size_m, // NOLINT
int maximum_steps, // NOLINT
float maximum_ray_length_m, // NOLINT
float surface_distance_epsilon_m, // NOLINT
int ray_subsampling_factor) {
const int ray_col_idx = threadIdx.x + blockIdx.x * blockDim.x;
const int ray_row_idx = threadIdx.y + blockIdx.y * blockDim.y;
// Note: we ensure that this division works cleanly before getting here.
const int ray_rows = camera.rows() / ray_subsampling_factor;
const int ray_cols = camera.cols() / ray_subsampling_factor;
if ((ray_row_idx >= ray_rows) || (ray_col_idx >= ray_cols)) {
return;
}
// Get the image-plane coordinates of where this ray should pass such that it
// is in the center of the patch it will represent.
constexpr float kHalf = 1.0f / 2.0f;
const Index2D ray_indices(ray_col_idx, ray_row_idx);
const Vector2f pixel_coords =
(ray_indices * ray_subsampling_factor).cast<float>() +
kHalf * static_cast<float>(ray_subsampling_factor) * Vector2f::Ones();
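// Worked example (hypothetical numbers): with ray_subsampling_factor = 2 and
// ray_indices = (3, 5), pixel_coords = (6, 10) + 1.0 * (1, 1) = (7, 11), i.e.
// the ray passes through the centre of the 2x2 pixel patch it represents.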
// Get the ray going through this pixel (in layer coordinate)
const Vector3f ray_direction_C =
camera.rayFromImagePlaneCoordinates(pixel_coords).normalized();
const Ray ray_L{T_S_C.linear() * ray_direction_C, T_S_C.translation()};
// Cast the ray into the layer
thrust::pair<float, bool> t_optional =
cast(ray_L, block_hash, truncation_distance_m, block_size_m,
maximum_steps, maximum_ray_length_m, surface_distance_epsilon_m);
// If success, write depth to image, otherwise write -1.
if (t_optional.second == true) {
const float depth = t_optional.first * ray_direction_C.z();
image::access(ray_row_idx, ray_col_idx, ray_cols, image) = depth;
} else {
image::access(ray_row_idx, ray_col_idx, ray_cols, image) = -1.0f;
}
}
SphereTracer::SphereTracer(Params params) : params_(std::move(params)) {
checkCudaErrors(cudaStreamCreate(&tracing_stream_));
cudaMalloc(&t_device_, sizeof(float));
cudaMalloc(&success_flag_device_, sizeof(bool));
}
SphereTracer::~SphereTracer() {
cudaStreamSynchronize(tracing_stream_);
cudaFree(t_device_);
cudaFree(success_flag_device_);
checkCudaErrors(cudaStreamDestroy(tracing_stream_));
}
bool SphereTracer::castOnGPU(const Ray& ray, const TsdfLayer& tsdf_layer,
const float truncation_distance_m,
float* t) const {
constexpr float eps = 1e-5;
CHECK_NEAR(ray.direction.norm(), 1.0, eps);
// Get the GPU hash
GPULayerView<TsdfBlock> gpu_layer_view = tsdf_layer.getGpuLayerView();
// Kernel
const float surface_distance_epsilon_m =
params_.surface_distance_epsilon_vox * tsdf_layer.voxel_size();
sphereTracingKernel<<<1, 1, 0, tracing_stream_>>>(
ray, // NOLINT
gpu_layer_view.getHash().impl_, // NOLINT
t_device_, // NOLINT
success_flag_device_, // NOLINT
truncation_distance_m, // NOLINT
gpu_layer_view.block_size(), // NOLINT
params_.maximum_steps, // NOLINT
params_.maximum_ray_length_m, // NOLINT
surface_distance_epsilon_m);
// GPU -> CPU
cudaMemcpyAsync(t, t_device_, sizeof(float), cudaMemcpyDeviceToHost,
tracing_stream_);
checkCudaErrors(cudaStreamSynchronize(tracing_stream_));
checkCudaErrors(cudaPeekAtLastError());
return true;
}
std::shared_ptr<const DepthImage> SphereTracer::renderImageOnGPU(
const Camera& camera, const Transform& T_S_C, const TsdfLayer& tsdf_layer,
const float truncation_distance_m,
const MemoryType output_image_memory_type,
const int ray_subsampling_factor) {
CHECK_EQ(camera.width() % ray_subsampling_factor, 0);
CHECK_EQ(camera.height() % ray_subsampling_factor, 0);
// Output space
const int image_height = camera.height() / ray_subsampling_factor;
const int image_width = camera.width() / ray_subsampling_factor;
// If we get a request for a different size image, reallocate.
if (!depth_image_ || depth_image_->width() != image_width ||
depth_image_->height() != image_height ||
depth_image_->memory_type() != output_image_memory_type) {
depth_image_ = std::make_shared<DepthImage>(image_height, image_width,
output_image_memory_type);
}
// Get the GPU hash
timing::Timer hash_transfer_timer(
"color/integrate/sphere_trace/hash_transfer");
GPULayerView<TsdfBlock> gpu_layer_view = tsdf_layer.getGpuLayerView();
hash_transfer_timer.Stop();
// Get metric surface distance epsilon
const float surface_distance_epsilon_m =
params_.surface_distance_epsilon_vox * tsdf_layer.voxel_size();
// Kernel
// Call params
// - 1 thread per pixel
// - 8 x 8 threads per thread block
// - N x M thread blocks get 1 thread per pixel
constexpr dim3 kThreadsPerThreadBlock(8, 8, 1);
const dim3 num_blocks(
depth_image_->cols() / kThreadsPerThreadBlock.y + 1, // NOLINT
depth_image_->rows() / kThreadsPerThreadBlock.x + 1, // NOLINT
1);
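// Note: the cols / threads + 1 pattern above always launches one extra block
// when the image dimensions divide evenly; the bounds check at the top of
// sphereTraceImageKernel makes this safe, and a ceil-division such as
// (cols + threads - 1) / threads would be the tight alternative.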
sphereTraceImageKernel<<<num_blocks, kThreadsPerThreadBlock, 0,
tracing_stream_>>>(
camera, // NOLINT
T_S_C, // NOLINT
gpu_layer_view.getHash().impl_, // NOLINT
depth_image_->dataPtr(), // NOLINT
truncation_distance_m, // NOLINT
gpu_layer_view.block_size(), // NOLINT
params_.maximum_steps, // NOLINT
params_.maximum_ray_length_m, // NOLINT
surface_distance_epsilon_m, // NOLINT
ray_subsampling_factor);
checkCudaErrors(cudaStreamSynchronize(tracing_stream_));
checkCudaErrors(cudaPeekAtLastError());
return depth_image_;
}
} // namespace nvblox
|
the_stack
|
// FeatureVecCalc.cu
// Computes the initial feature vectors.
#include "FeatureVecCalc.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread-block dimensions.
#define DEF_BLOCK_X 256
#define DEF_BLOCK_Y 1
// Struct FeatureVectorProcessorParam (feature-vector processing parameters)
// Holds the parameters used to further process the initial feature vectors:
// the lower bound, upper bound and mean of cv, sd and nc.
typedef struct ProcessorParam_st
{
float mincv, maxcv, avgcv, rangecv; // lower bound, upper bound, mean and range of cv.
float minsd, maxsd, avgsd, rangesd; // lower bound, upper bound, mean and range of sd.
float minnc, maxnc, avgnc, rangenc; // lower bound, upper bound, mean and range of nc.
} FeatureVectorProcessorParam;
// Kernel function _calcFeatureVectorKer (compute the initial feature vectors)
// Computes the initial feature vector of every PIXEL inside the image region
// specified by the given coordinate set. Using the formulas given in the
// requirements document, three feature values are computed for each pixel: the
// gray-level center value float CV, the gray-level standard deviation float SD
// and the maximum gray-level non-co-occurrence coefficient float NC.
static __global__ void // Kernel function, no return value
_calcFeatureVectorKer(
ImageCuda inimgcud, // input image
CoordiSet incoordiset, // input coordinate set
FeatureVecArray outfeaturevecarray, // output feature vectors
FeatureVecCalc featureveccalc, // smoothing-vector processing class
unsigned char *neighbortmp // scratch space holding the neighborhood of
// each pixel; two copies are stored per
// pixel, one before and one after sorting
);
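// Illustrative sketch (hypothetical helper, not called by the kernels): the CV
// feature computed below is essentially a trimmed mean of the sorted
// neighborhood. The exact behaviour is defined by FeatureVecCalc::calAvgPixel;
// this helper only mirrors the call calAvgPixel(sorted, length / 3,
// length * 2 / 3), assuming its last two arguments are the start and end
// indices of the averaged range.
static inline float trimmedMeanSketch(const unsigned char *sorted, int length)
{
int lo = length / 3;
int hi = length * 2 / 3;
float sum = 0.0f;
// Average the middle third of the sorted gray values.
for (int k = lo; k < hi; k++)
sum += sorted[k];
// Guard against degenerate (too small) neighborhoods.
return (hi > lo) ? sum / (float)(hi - lo) : 0.0f;
}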
// Kernel function _processFeatureVectorKer (post-process the initial feature vectors)
// Further processes the initial feature vectors computed in
// _calcFeatureVectorKer, using parameters such as the bounds and means of the
// initial feature values.
static __global__ void // Kernel function, no return value
_processFeatureVectorKer(
FeatureVecArray inoutfeaturevecarray, // input/output feature vectors
FeatureVecCalc featureveccalc, // smoothing-vector processing class
FeatureVectorProcessorParam param // processing parameters
);
// Kernel function: _calcFeatureVectorKer (compute the initial feature vectors)
static __global__ void _calcFeatureVectorKer(
ImageCuda inimgcud, CoordiSet incoordiset, FeatureVecArray
outfeaturevecarray, FeatureVecCalc featureveccalc,
unsigned char *neighbortmppointer)
{
// Compute the index of the coordinate-set point handled by this thread.
int index = blockIdx.x * blockDim.x + threadIdx.x;
// If the index exceeds the number of points in the coordinate set, return.
if (index >= incoordiset.count)
return;
// Compute the position of this point in the original image.
int xcrd = incoordiset.tplData[2 * index];
int ycrd = incoordiset.tplData[2 * index + 1];
// Write the X and Y coordinates into the output feature-vector array.
outfeaturevecarray.x[index] = xcrd;
outfeaturevecarray.y[index] = ycrd;
// Compute the side length of the square neighborhood and the length of its
// linear array.
int n = featureveccalc.getNeighborWidth();
int neighborwidth = n * 2 + 1;
int length = neighborwidth * neighborwidth;
// Compute the offset into the neighborhood scratch space used by this pixel.
int offset = index * length * 2;
unsigned char *neighbortmp = neighbortmppointer + offset;
// Copy the neighborhood pixels into the scratch space.
for (int i = 0; i < neighborwidth; i++) {
for (int j = 0; j < neighborwidth; j++) {
// Compute the corresponding coordinates in the image.
int xcrdi = xcrd - n + i;
int ycrdj = ycrd - n + j;
// If the coordinate falls outside the image, clamp it to the border.
if (xcrdi >= inimgcud.imgMeta.width)
xcrdi = inimgcud.imgMeta.width - 1;
if (xcrdi < 0)
xcrdi = 0;
if (ycrdj >= inimgcud.imgMeta.height)
ycrdj = inimgcud.imgMeta.height - 1;
if (ycrdj < 0)
ycrdj = 0;
// Copy the pixel value from the image into the neighborhood scratch space.
neighbortmp[i * neighborwidth + j] =
inimgcud.imgMeta.imgData[ycrdj * inimgcud.pitchBytes +
xcrdi];
}
}
// Histogram of the number of points of each gray value in the neighborhood.
int pixelcount[PIXELRANGE];
// Sorted copy of the neighborhood array.
unsigned char *neighbortmpsorted = neighbortmp + length;
// Sort neighbortmp.
featureveccalc.sortNeighbor(neighbortmp, neighbortmpsorted, pixelcount,
length);
// Compute the mean gray value.
float cv = featureveccalc.calAvgPixel(neighbortmpsorted, length * 1 / 3,
length * 2 / 3);
// Write the mean gray value into the output feature-vector array.
outfeaturevecarray.CV[index] = cv;
// Compute the gray-level standard deviation.
float sd = featureveccalc.calPixelSd(neighbortmpsorted, length, cv);
// Write the standard deviation into the output feature-vector array.
outfeaturevecarray.SD[index] = sd;
// Compute the maximum non-co-occurrence coefficient. First compute the mean
// gray value in each of the eight directions, indexed 0 to 7 counter-clockwise,
// starting from the right and ending at the lower right.
float eightdirectionavg[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
for (int j = 1; j <= n; j++) {
// Sum the gray values of the points to the right of the current pixel.
eightdirectionavg[0] += neighbortmp[n * neighborwidth + n + j];
// Sum the gray values of the points to the upper right of the current pixel.
eightdirectionavg[1] += neighbortmp[(n - j) * neighborwidth + n + j];
// Sum the gray values of the points above the current pixel.
eightdirectionavg[2] += neighbortmp[(n - j) * neighborwidth + n];
// Sum the gray values of the points to the upper left of the current pixel.
eightdirectionavg[3] += neighbortmp[(n - j) * neighborwidth + n - j];
// Sum the gray values of the points to the left of the current pixel.
eightdirectionavg[4] += neighbortmp[n * neighborwidth + n - j];
// Sum the gray values of the points to the lower left of the current pixel.
eightdirectionavg[5] += neighbortmp[(n + j) * neighborwidth + n - j];
// Sum the gray values of the points below the current pixel.
eightdirectionavg[6] += neighbortmp[(n + j) * neighborwidth + n];
// Sum the gray values of the points to the lower right of the current pixel.
eightdirectionavg[7] += neighbortmp[(n + j) * neighborwidth + n + j];
}
// Compute the mean gray value in each of the eight directions.
for (int i = 0; i < 8; i++) {
eightdirectionavg[i] /= n;
}
// Compute the co-occurrence coefficient in each of the eight directions using
// the given formula and find the maximum.
int t = (featureveccalc.getPitch() + 1) / 2;
int m = (n - 2 * t + 1) * powf((2 * n + 1), 4);
float eightncs[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; // co-occurrence coefficients of the eight directions
float nc = 0;
unsigned char pixeltmp1 = 0; // temporarily holds a pixel along the current direction
unsigned char pixeltmp2 = 0; // temporarily holds a pixel along the current direction
for (int j = t; j <= n - t; j++) {
if (featureveccalc.getPitch() % 2 == 0) {
// Accumulate the co-occurrence term for the points to the right of the current pixel.
pixeltmp1 = neighbortmp[n * neighborwidth + n + j - t];
pixeltmp2 = neighbortmp[n * neighborwidth + n + j + t - 1];
eightncs[0] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[0]) *
(pixeltmp2 - eightdirectionavg[0]));
// Accumulate the co-occurrence term for the points to the upper right of the current pixel.
pixeltmp1 = neighbortmp[(n + j - t) * neighborwidth + n + j - t];
pixeltmp2 =
neighbortmp[(n + j + t - 1) * neighborwidth +
n + j + t - 1];
eightncs[1] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[1]) *
(pixeltmp2 - eightdirectionavg[1]));
// Accumulate the co-occurrence term for the points above the current pixel.
pixeltmp1 = neighbortmp[(n + j - t) * neighborwidth + n];
pixeltmp2 = neighbortmp[(n + j + t - 1) * neighborwidth + n];
eightncs[2] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[2]) *
(pixeltmp2 - eightdirectionavg[2]));
// Accumulate the co-occurrence term for the points to the upper left of the current pixel.
pixeltmp1 = neighbortmp[(n + j - t) * neighborwidth + n - (j - t)];
pixeltmp2 =
neighbortmp[(n + j + t - 1) * neighborwidth +
n - (j + t - 1)];
eightncs[3] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[3]) *
(pixeltmp2 - eightdirectionavg[3]));
// Accumulate the co-occurrence term for the points to the left of the current pixel.
pixeltmp1 = neighbortmp[n * neighborwidth + n - (j - t)];
pixeltmp2 = neighbortmp[n * neighborwidth + n - (j + t - 1)];
eightncs[4] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[4]) *
(pixeltmp2 - eightdirectionavg[4]));
// Accumulate the co-occurrence term for the points to the lower left of the current pixel.
pixeltmp1 =
neighbortmp[(n - (j - t)) * neighborwidth + n - (j - t)];
pixeltmp2 =
neighbortmp[(n - (j + t - 1)) * neighborwidth +
n - (j + t - 1)];
eightncs[5] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[5]) *
(pixeltmp2 - eightdirectionavg[5]));
// Accumulate the co-occurrence term for the points below the current pixel.
pixeltmp1 = neighbortmp[(n - (j - t)) * neighborwidth + n];
pixeltmp2 = neighbortmp[(n - (j + t - 1)) * neighborwidth + n];
eightncs[6] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[6]) *
(pixeltmp2 - eightdirectionavg[6]));
// Accumulate the co-occurrence term for the points to the lower right of the current pixel.
pixeltmp1 = neighbortmp[(n - (j - t)) * neighborwidth + n + j - t];
pixeltmp2 =
neighbortmp[(n - (j + t - 1)) * neighborwidth +
n + j + t - 1];
eightncs[7] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[7]) *
(pixeltmp2 - eightdirectionavg[7]));
} else {
// Accumulate the co-occurrence term for the points to the right of the current pixel.
pixeltmp1 = neighbortmp[n * neighborwidth + n + j - t];
pixeltmp2 = neighbortmp[n * neighborwidth + n + j + t];
eightncs[0] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[0]) *
(pixeltmp2 - eightdirectionavg[0]));
// Accumulate the co-occurrence term for the points to the upper right of the current pixel.
pixeltmp1 = neighbortmp[(n + j - t) * neighborwidth + n + j - t];
pixeltmp2 =
neighbortmp[(n + j + t) * neighborwidth + n + j + t];
eightncs[1] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[1]) *
(pixeltmp2 - eightdirectionavg[1]));
// Accumulate the co-occurrence term for the points above the current pixel.
pixeltmp1 = neighbortmp[(n + j - t) * neighborwidth + n];
pixeltmp2 = neighbortmp[(n + j + t) * neighborwidth + n];
eightncs[2] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[2]) *
(pixeltmp2 - eightdirectionavg[2]));
// Accumulate the co-occurrence term for the points to the upper left of the current pixel.
pixeltmp1 = neighbortmp[(n + j - t) * neighborwidth + n - (j - t)];
pixeltmp2 =
neighbortmp[(n + j + t) * neighborwidth + n - (j + t)];
eightncs[3] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[3]) *
(pixeltmp2 - eightdirectionavg[3]));
// Accumulate the co-occurrence term for the points to the left of the current pixel.
pixeltmp1 = neighbortmp[n * neighborwidth + n - (j - t)];
pixeltmp2 = neighbortmp[n * neighborwidth + n - (j + t)];
eightncs[4] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[4]) *
(pixeltmp2 - eightdirectionavg[4]));
// Accumulate the co-occurrence term for the points to the lower left of the current pixel.
pixeltmp1 =
neighbortmp[(n - (j - t)) * neighborwidth + n - (j - t)];
pixeltmp2 =
neighbortmp[(n - (j + t)) * neighborwidth + n - (j + t)];
eightncs[5] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[5]) *
(pixeltmp2 - eightdirectionavg[5]));
// Accumulate the co-occurrence term for the points below the current pixel.
pixeltmp1 = neighbortmp[(n - (j - t)) * neighborwidth + n];
pixeltmp2 = neighbortmp[(n - (j + t)) * neighborwidth + n];
eightncs[6] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[6]) *
(pixeltmp2 - eightdirectionavg[6]));
// Accumulate the co-occurrence term for the points to the lower right of the current pixel.
pixeltmp1 = neighbortmp[(n - (j - t)) * neighborwidth + n + j - t];
pixeltmp2 =
neighbortmp[(n - (j + t)) * neighborwidth + n + j + t];
eightncs[7] += pixelcount[pixeltmp1] * pixelcount[pixeltmp2] *
abs((pixeltmp1 - eightdirectionavg[7]) *
(pixeltmp2 - eightdirectionavg[7]));
}
}
// Normalize the coefficients and find the maximum non-co-occurrence coefficient.
for (int i = 0; i < 8; i++) {
eightncs[i] /= m;
// Keep track of the maximum non-co-occurrence coefficient.
if (nc < eightncs[i])
nc = eightncs[i];
}
// Write the maximum non-co-occurrence coefficient into the output feature-vector array.
outfeaturevecarray.NC[index] = nc;
}
// Kernel function _processFeatureVectorKer (post-process the initial feature vectors)
static __global__ void
_processFeatureVectorKer(FeatureVecArray inoutfeaturevecarray,
FeatureVecCalc featureveccalc, FeatureVectorProcessorParam param)
{
// Compute the index of the coordinate-set point handled by this thread.
int index = blockIdx.x * blockDim.x + threadIdx.x;
// If the index exceeds the number of points to process, return without doing
// anything.
if (index >= inoutfeaturevecarray.count)
return;
// Fetch the feature values computed in the previous step.
float cv = inoutfeaturevecarray.CV[index];
float sd = inoutfeaturevecarray.SD[index];
float nc = inoutfeaturevecarray.NC[index];
// Clamp each feature value that falls below its lower bound or above its upper
// bound to that bound.
if (cv < param.mincv)
cv = param.mincv;
if (cv > param.maxcv)
cv = param.maxcv;
if (sd < param.minsd)
sd = param.minsd;
if (sd > param.maxsd)
sd = param.maxsd;
if (nc < param.minnc)
nc = param.minnc;
if (nc > param.maxnc)
nc = param.maxnc;
// Compute the outer scaling factor.
float a = 1 / (1 + featureveccalc.getAlpha() + featureveccalc.getBeta());
// Normalize the feature values.
if (param.rangecv != 0)
cv = a * (cv - param.avgcv) / param.rangecv;
if (param.rangesd != 0)
sd = a * featureveccalc.getAlpha() * (sd - param.avgsd) /
param.rangesd;
if (param.rangenc != 0)
nc = a * featureveccalc.getBeta() * (nc - param.avgnc) /
param.rangenc;
// Write the feature values back.
inoutfeaturevecarray.CV[index] = cv;
inoutfeaturevecarray.SD[index] = sd;
inoutfeaturevecarray.NC[index] = nc;
}
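// Illustrative worked example of the normalization above (hypothetical
// numbers): with alpha = beta = 1 the outer factor is a = 1 / (1 + 1 + 1) = 1/3.
// For a pixel with cv = 120, given mincv = 100, maxcv = 180 and avgcv = 140,
// clamping leaves cv unchanged and
// cv = a * (cv - avgcv) / rangecv = (1/3) * (120 - 140) / 80 = -0.083...,
// so each normalized feature lies in roughly [-a, a] (scaled further by alpha
// for sd and by beta for nc), and the three factors a, a * alpha, a * beta sum to 1.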
// Global function: cmp (comparison of two float values)
// Used as the comparison-function pointer for the quicksort over the feature values.
int cmp(const void * a, const void * b)
{
return((*(float *)a - *(float *)b > 0) ? 1 : -1);
}
// Macro DELETE_THREE_FEATURE_ARRAY_HOST (delete the three host-side feature arrays)
// Cleans up the three feature-value arrays on error or when the function finishes.
#define DELETE_THREE_FEATURE_ARRAY_HOST do { \
if ((cvshost) != NULL) \
delete [] (cvshost); \
if ((sdshost) != NULL) \
delete [] (sdshost); \
if ((ncshost) != NULL) \
delete [] (ncshost); \
} while(0)
// Host member method: calFeatureVector (compute the initial feature vectors)
__host__ int FeatureVecCalc::calFeatureVector(Image *inimg,
CoordiSet *incoordiset, FeatureVecArray *outfeaturevecarray)
{
// If any input parameter is NULL, report the error and return immediately.
if (inimg == NULL || incoordiset == NULL || outfeaturevecarray == NULL)
return NULL_POINTER;
int errcode; // local variable: error code
// Copy the input image into device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Copy the coordinate set into device memory.
errcode = CoordiSetBasicOp::copyToCurrentDevice(incoordiset);
if (errcode != NO_ERROR)
return errcode;
// Allocate the neighborhood scratch space.
unsigned char *neighbortmp = NULL;
int neighborsize = (neighborWidth * 2 + 1) * (neighborWidth * 2 + 1);
errcode = cudaMalloc(
(void **)(&neighbortmp),
2 * incoordiset->count * neighborsize * sizeof (unsigned char));
if (errcode != cudaSuccess) {
return CUDA_ERROR;
}
// Compute the thread-block size and the number of blocks for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = incoordiset->count / blocksize.x + 1;
gridsize.y = 1;
// Compute the initial feature vectors.
_calcFeatureVectorKer<<<gridsize,blocksize>>>(insubimgCud, *incoordiset,
*outfeaturevecarray, *this,
neighbortmp);
// Release neighbortmp as soon as it is no longer needed.
cudaFree(neighbortmp);
// If the CUDA call failed, return the error code.
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// Declarations of the three host-side feature-value arrays.
float *cvshost = NULL;
float *sdshost = NULL;
float *ncshost = NULL;
// Allocate the host-side array for the cv feature values.
cvshost = new float[incoordiset->count];
if (cvshost == NULL) {
DELETE_THREE_FEATURE_ARRAY_HOST;
return NULL_POINTER;
}
// Allocate the host-side array for the sd feature values.
sdshost = new float[incoordiset->count];
if (sdshost == NULL) {
DELETE_THREE_FEATURE_ARRAY_HOST;
return NULL_POINTER;
}
// Allocate the host-side array for the nc feature values.
ncshost = new float[incoordiset->count];
if (ncshost == NULL) {
DELETE_THREE_FEATURE_ARRAY_HOST;
return NULL_POINTER;
}
// Copy CV to the host.
errcode = cudaMemcpy(cvshost, outfeaturevecarray->CV,
incoordiset->count * sizeof (float),
cudaMemcpyDeviceToHost);
// If the copy fails, free the memory allocated above to avoid leaks, then
// report the error and return.
if (errcode != cudaSuccess) {
// Free the previously allocated memory.
DELETE_THREE_FEATURE_ARRAY_HOST;
return CUDA_ERROR;
}
// Copy SD to the host.
errcode = cudaMemcpy(sdshost, outfeaturevecarray->SD,
incoordiset->count * sizeof (float),
cudaMemcpyDeviceToHost);
// If the copy fails, free the memory allocated above to avoid leaks, then
// report the error and return.
if (errcode != cudaSuccess) {
// Free the previously allocated memory.
DELETE_THREE_FEATURE_ARRAY_HOST;
return CUDA_ERROR;
}
// Copy NC to the host.
errcode = cudaMemcpy(ncshost, outfeaturevecarray->NC,
incoordiset->count * sizeof (float),
cudaMemcpyDeviceToHost);
// If the copy fails, free the memory allocated above to avoid leaks, then
// report the error and return.
if (errcode != cudaSuccess) {
// Free the previously allocated memory.
DELETE_THREE_FEATURE_ARRAY_HOST;
return CUDA_ERROR;
}
// Sort the three sets of feature values with quicksort.
qsort(cvshost, incoordiset->count, sizeof(float), cmp);
qsort(sdshost, incoordiset->count, sizeof(float), cmp);
qsort(ncshost, incoordiset->count, sizeof(float), cmp);
// Processing parameters.
FeatureVectorProcessorParam param;
// Compute the indices of the lower and upper bounds.
int bordermin = (int)(0.05 * incoordiset->count);
int bordermax = (int)(0.95 * incoordiset->count);
// Fill in the processing parameters.
param.mincv = cvshost[bordermin];
param.maxcv = cvshost[bordermax];
param.avgcv = calAvgFeatureValue(cvshost, bordermin, bordermax);
param.rangecv = param.maxcv - param.mincv;
param.minsd = sdshost[bordermin];
param.maxsd = sdshost[bordermax];
param.avgsd = calAvgFeatureValue(sdshost, bordermin, bordermax);
param.rangesd = param.maxsd - param.minsd;
param.minnc = ncshost[bordermin];
param.maxnc = ncshost[bordermax];
param.avgnc = calAvgFeatureValue(ncshost, bordermin, bordermax);
param.rangenc = param.maxnc - param.minnc;
// Post-process the initial feature vectors.
_processFeatureVectorKer<<<gridsize,blocksize>>>(*outfeaturevecarray,
*this, param);
// If the CUDA call failed, return the error code.
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// Clean up the host-side memory.
DELETE_THREE_FEATURE_ARRAY_HOST;
return NO_ERROR;
}
|
the_stack
|
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
#include <thrust/logical.h>
/*
* Note:
* This implementation assumes that off-diagonal entries all have the opposite
* sign compared to the diagonal entry. This is true for most practical cases,
* and it still works if only a few off-diagonal entries violate the assumption.
* If many off-diagonal entries violate it, however, the interpolation based on
* this strength measure becomes inaccurate.
* This is explained in "Intro to Algebraic multigrid" by K. Stuben.
*/
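/*
* Illustrative worked example of the rule above (hypothetical numbers): for a
* row with diagonal a_ii = 4 and off-diagonal entries {-0.5, -1, -2}, the
* reference off-diagonal is the minimum, -2; with strength_threshold
* alpha = 0.25 the threshold is 0.25 * (-2) = -0.5, and the entries strictly
* below it (-1 and -2) are marked as strong connections, while -0.5 is not.
*/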
namespace amgx
{
template< typename T_Config >
Strength_BaseBase<T_Config>::Strength_BaseBase(AMG_Config &cfg,
const std::string &cfg_scope)
{
alpha = cfg.AMG_Config::getParameter<double>("strength_threshold", cfg_scope);
}
/*************************************************************************
* "random" hash function for both device and host
************************************************************************/
__host__ __device__ __forceinline__
static float ourHash(const int i)
{
unsigned int a = i;
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) + (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a ^ 0xd3a2646c) + (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) + (a >> 16);
return (a ^ 0x4a51e590) / (float)UINT_MAX;
}
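// For illustration: ourHash maps an integer to a roughly uniform float in
// [0, 1], which appears to be added to each row's weight purely as a
// tie-breaker, e.g.
//
//     float w = 3.0f + ourHash(42); // some value in [3, 4]
//
// so rows with the same integer connection count still receive distinct weights.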
/*************************************************************************
* Computes the strength matrix and the connection weights
* Described in \S 4.1 of:
* "Reducing complexity in parallel algebraic multigrid preconditioners"
*
************************************************************************/
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec,
AMGX_IndPrecision t_indPrec>
void
Strength_Base<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::
computeStrongConnectionsAndWeights_1x1(Matrix_h &A,
BVector &s_con,
FVector &weights,
const double max_row_sum)
{
bool compute_row_sum = (max_row_sum < 1.0);
VVector sums_ptr;
// get the (normalised) row sums
if (compute_row_sum)
{
sums_ptr.resize(A.get_num_rows());
weightedRowSum(A, sums_ptr);
cudaCheckError();
}
// get min/max off-diag, depending on sign of diagonal
for (int row = 0; row < A.get_num_rows(); row++)
{
ValueType diag(0), minVal(0), maxVal(0);
ValueType row_sum = compute_row_sum ? sums_ptr[row] : -1;
int rowEnd = A.row_offsets[row + 1];
for (int j = A.row_offsets[row]; j < rowEnd; j++)
{
int col = A.col_indices[j];
ValueType val = A.values[j];
if (col == row)
{
diag = val;
}
else
{
minVal = min(minVal, val);
maxVal = max(maxVal, val);
}
}
//set the threshold for being strongly connected
ValueType threshold = (diag < 0) ? maxVal : minVal;
threshold *= this->alpha;
//initialize the weight to a small random number
if (A.is_matrix_singleGPU())
{
weights[row] += ourHash(row);
}
else
{
weights[row] += ourHash((int)A.manager->base_index() + row);
}
// sum the column of S - # of points that strongly connect to me
for (int j = A.row_offsets[row]; j < rowEnd; j++)
{
bool is_strongly_connected = false;
if (compute_row_sum && row_sum > max_row_sum)
{
is_strongly_connected = false;
}
else
is_strongly_connected =
this->strongly_connected(A.values[j], threshold, diag);
int col = A.col_indices[j];
s_con[j] = (col != row) && is_strongly_connected;
weights[A.col_indices[j]] += s_con[j] ? 1. : 0.;
}
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec,
AMGX_IndPrecision t_indPrec>
void
Strength_Base<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::
computeWeights_1x1(Matrix_h &S,
FVector &weights)
{
for (int row = 0; row < S.get_num_rows(); row++)
{
//initialize the weight to a small random number
if (S.is_matrix_singleGPU())
{
weights[row] += ourHash(row);
}
else
{
weights[row] += ourHash(S.manager->base_index() + row);
}
int rowEnd = S.row_offsets[row + 1];
for (int j = S.row_offsets[row]; j < rowEnd; j++)
{
int col = S.col_indices[j];
if (col != row) { weights[col] += 1.; }
}
}
}
template <typename ValueType>
__device__
bool stronglyConnectedAHat(ValueType value, ValueType threshold, ValueType diag)
{
return (diag < ValueType(0)) ? value > threshold : value < threshold;
}
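// Illustrative sketch (hypothetical helper, not used elsewhere in this file):
// how stronglyConnectedAHat is meant to be driven once a row's diagonal and its
// extreme off-diagonal values are known.
template <typename ValueType>
__device__ bool isStrongConnectionSketch(ValueType value, ValueType diag,
ValueType min_offdiag, ValueType max_offdiag, ValueType alpha)
{
// The reference off-diagonal lies on the opposite side of zero from the
// diagonal; scaling it by alpha gives the strength threshold used above.
ValueType threshold = (diag < ValueType(0)) ? max_offdiag : min_offdiag;
threshold *= alpha;
return stronglyConnectedAHat(value, threshold, diag);
}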
/*************************************************************************
* Computes the strength matrix and the connection weights (device)
************************************************************************/
template< typename IndexType, typename ValueType, int kCtaSize, bool singleGPU >
__global__
void computeStrongConnectionsAndWeightsKernel( const IndexType *A_rows,
const IndexType *A_cols,
const ValueType *A_vals,
int A_num_rows,
bool *s_con,
float *weights,
ValueType alpha,
ValueType *row_sum,
const double max_row_sum,
int64_t base_index)
{
// One warp works on each row and hence one iteration handles
// num_warps*numBlock rows. This means atomicAdd() is inevitable.
const int num_warps = kCtaSize / 32;
const int num_rows_per_iter = num_warps * gridDim.x;
__shared__ volatile ValueType smem[kCtaSize];
__shared__ volatile ValueType s_diag[num_warps];
__shared__ volatile ValueType s_threshold[num_warps];
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
for ( int aRowId = blockIdx.x * num_warps + warpId ; aRowId < A_num_rows ;
aRowId += num_rows_per_iter )
{
ValueType minVal(0), maxVal(0);
if ( laneId == 0 ) // Reset the diagonal
{
s_diag[warpId] = ValueType(0);
}
// Row sum
ValueType rowSum = -1.0;
if (max_row_sum < 1.0) { rowSum = row_sum[aRowId]; }
// get diagonal, min/max off-diagonals
const int aRowBegin = A_rows[aRowId ];
const int aRowEnd = A_rows[aRowId + 1];
for ( IndexType aRowIt = aRowBegin + laneId ; utils::any( aRowIt < aRowEnd ) ;
aRowIt += 32 )
{
IndexType aColId = aRowIt < aRowEnd ? A_cols[aRowIt] : -1;
ValueType aValue = aRowIt < aRowEnd ? A_vals[aRowIt] : ValueType(0);
if ( aColId == aRowId ) // only one thread evaluates to true.
{
s_diag[warpId] = aValue;
}
bool is_off_diagonal = aRowIt < aRowEnd && aColId != aRowId;
if ( is_off_diagonal )
{
minVal = min( minVal, aValue );
maxVal = max( maxVal, aValue );
}
}
// init weights[] with a random number
if ( laneId == 0 )
{
if ( singleGPU )
{
atomicAdd( &weights[aRowId], ourHash(aRowId) );
}
else
{
atomicAdd( &weights[aRowId], ourHash( (int) base_index + aRowId) );
}
}
// Big assumption: diag and off-diag always have the opposite sign.
// If diag entry is negative, then all off-diag entries must be positive.
// This means max off-diag is to be used to compute the threshold.
// If the diag entry is positive, the min off-diag is used instead.
if ( s_diag[warpId] < ValueType(0) )
{
smem[threadIdx.x] = maxVal;
#pragma unroll
for ( int offset = 16 ; offset > 0 ; offset /= 2 )
if ( laneId < offset )
{
smem[threadIdx.x] = maxVal = max( maxVal, smem[threadIdx.x + offset] );
}
}
else
{
smem[threadIdx.x] = minVal;
#pragma unroll
for ( int offset = 16 ; offset > 0 ; offset /= 2 )
if ( laneId < offset )
{
smem[threadIdx.x] = minVal = min( minVal, smem[threadIdx.x + offset] );
}
}
if ( laneId == 0 )
{
// If laneId=0, then maxVal or minVal is in smem[threadIdx.x].
s_threshold[warpId] = smem[threadIdx.x] * alpha;
}
// sum of the column of S
for ( IndexType aRowIt = aRowBegin + laneId ; utils::any( aRowIt < aRowEnd ) ;
aRowIt += 32 )
{
IndexType aColId = aRowIt < aRowEnd ? A_cols[aRowIt] : -1;
ValueType aValue = aRowIt < aRowEnd ? A_vals[aRowIt] : ValueType(0);
bool is_strongly_connected = false;
if (max_row_sum < 1.0 && rowSum > max_row_sum)
{
is_strongly_connected = false;
}
else
{
bool is_off_diagonal = aRowIt < aRowEnd && aColId != aRowId;
is_strongly_connected = is_off_diagonal &&
stronglyConnectedAHat( aValue, s_threshold[warpId], s_diag[warpId] );
}
if ( is_strongly_connected && aRowIt < aRowEnd && aColId < A_num_rows)
{
atomicAdd( &weights[aColId], 1.0f );
}
if ( aRowIt < aRowEnd )
{
s_con[aRowIt] = is_strongly_connected;
}
}
}
}
template< typename IndexType, typename ValueType, int kCtaSize, bool singleGPU >
__global__
void computeWeightsKernel( const IndexType *A_rows,
const IndexType *A_cols,
int A_num_rows,
float *weights,
int64_t base_index)
{
const int num_warps = kCtaSize / 32;
const int num_rows_per_iter = num_warps * gridDim.x;
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
for ( int aRowId = blockIdx.x * num_warps + warpId ; aRowId < A_num_rows ;
aRowId += num_rows_per_iter )
{
if ( laneId == 0 )
{
if ( singleGPU )
{
atomicAdd( &weights[aRowId], ourHash(aRowId) );
}
else
{
atomicAdd( &weights[aRowId], ourHash( (int) base_index + aRowId) );
}
}
const int aRowBegin = A_rows[aRowId ];
const int aRowEnd = A_rows[aRowId + 1];
for ( IndexType aRowIt = aRowBegin + laneId ; utils::any( aRowIt < aRowEnd ) ;
aRowIt += 32 )
{
IndexType aColId = aRowIt < aRowEnd ? A_cols[aRowIt] : -1;
bool is_off_diagonal = aRowIt < aRowEnd && aColId != aRowId;
if (is_off_diagonal)
{
atomicAdd( &weights[aColId], 1.0f );
}
}
}
}
/*************************************************************************
* Computes the strength matrix and the connection weights (device)
************************************************************************/
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec,
AMGX_IndPrecision t_indPrec>
void Strength_Base<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::
computeStrongConnectionsAndWeights_1x1(Matrix_d &A,
BVector &s_con,
FVector &weights,
const double max_row_sum)
{
typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d;
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueType;
// get the raw pointers for everything I need
const IndexType *offsets_ptr = A.row_offsets.raw();
const IndexType *column_indices_ptr = A.col_indices.raw();
const ValueType *values_ptr = A.values.raw();
bool *s_con_ptr = s_con.raw();
float *weights_ptr = weights.raw();
bool compute_row_sum = (max_row_sum < 1.0);
if (A.get_num_rows() == 0) { compute_row_sum = false; }
VVector sums_ptr;
if (compute_row_sum)
{
sums_ptr.resize(A.get_num_rows());
weightedRowSum(A, sums_ptr);
cudaCheckError();
}
// choose a blocksize. Use 1 warp per row
const int blockSize = 256;
const int numWarps = blockSize / 32;
const int numBlocks = min( 4096, (int) (A.get_num_rows() + numWarps - 1) / numWarps );
if (A.get_num_rows() > 0)
{
if (A.is_matrix_singleGPU())
computeStrongConnectionsAndWeightsKernel<IndexType, ValueType, blockSize, true>
<<< numBlocks, blockSize>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
A.get_num_rows(),
s_con.raw(),
weights.raw(),
this->alpha,
compute_row_sum ? sums_ptr.raw() : NULL,
max_row_sum,
0);
else
computeStrongConnectionsAndWeightsKernel<IndexType, ValueType, blockSize, false>
<<< numBlocks, blockSize>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
A.get_num_rows(),
s_con.raw(),
weights.raw(),
this->alpha,
compute_row_sum ? sums_ptr.raw() : NULL,
max_row_sum,
A.manager->base_index());
}
if (!A.is_matrix_singleGPU() && A.currentView() == OWNED)
{
// Need to add neighbors contribution to my weights
weights.dirtybit = 1;
A.manager->add_from_halo(weights, weights.tag);
}
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec,
AMGX_IndPrecision t_indPrec>
void Strength_Base<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::
computeWeights_1x1(Matrix_d &S,
FVector &weights)
{
typedef TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> TConfig_d;
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueType;
// get the raw pointers for everything I need
const IndexType *offsets_ptr = S.row_offsets.raw();
const IndexType *column_indices_ptr = S.col_indices.raw();
float *weights_ptr = weights.raw();
// choose a blocksize. Use 1 warp per row
const int blockSize = 256;
const int numWarps = blockSize / 32;
const int numBlocks = min( 4096, (int) (S.get_num_rows() + numWarps - 1) / numWarps );
cudaDeviceSynchronize();
cudaCheckError();
// call the CUDA kernel
if (S.is_matrix_singleGPU())
computeWeightsKernel<IndexType, ValueType, blockSize, true>
<<< numBlocks, blockSize>>>(
S.row_offsets.raw(),
S.col_indices.raw(),
S.get_num_rows(),
weights.raw(),
0);
else
computeWeightsKernel<IndexType, ValueType, blockSize, false>
<<< numBlocks, blockSize>>>(
S.row_offsets.raw(),
S.col_indices.raw(),
S.get_num_rows(),
weights.raw(),
S.manager->base_index());
cudaCheckError();
if (!S.is_matrix_singleGPU() && S.currentView() == OWNED)
{
// Need to add neighbors contribution to my weights
weights.dirtybit = 1;
S.manager->add_from_halo(weights, weights.tag);
}
cudaCheckError();
}
template<class T_Config>
void Strength_BaseBase<T_Config>::
computeStrongConnectionsAndWeights(Matrix<T_Config> &A,
BVector &s_con,
FVector &weights,
const double max_row_sum
)
{
if (A.get_block_size() == 1)
{
computeStrongConnectionsAndWeights_1x1(A, s_con, weights, max_row_sum);
}
else
FatalError("Unsupported block size for strong connections",
AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
template<class T_Config>
void Strength_BaseBase<T_Config>::computeWeights(Matrix<T_Config> &S,
FVector &weights
)
{
if (S.get_block_size() == 1)
{
computeWeights_1x1(S, weights);
}
else
FatalError("Unsupported block size for strong connections",
AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
/****************************************
* Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class Strength_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Strength_BaseBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
the_stack
|
* \test Testing the BLAS level 1 routines in the ViennaCL BLAS-like shared library
**/
// include necessary system headers
#include <iostream>
#include <vector>
#include <cmath>
// Some helper functions for this tutorial:
#include "viennacl.hpp"
#include "viennacl/vector.hpp"
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, ScalarType const & s2)
{
if (s1 > s2 || s1 < s2)
return (s1 - s2) / std::max(static_cast<ScalarType>(std::fabs(static_cast<double>(s1))),
static_cast<ScalarType>(std::fabs(static_cast<double>(s2))));
return ScalarType(0);
}
template<typename ScalarType, typename ViennaCLVectorType>
ScalarType diff(std::vector<ScalarType> const & v1, ViennaCLVectorType const & vcl_vec)
{
std::vector<ScalarType> v2_cpu(vcl_vec.size());
viennacl::backend::finish();
viennacl::copy(vcl_vec, v2_cpu);
ScalarType inf_norm = 0;
for (unsigned int i=0;i<v1.size(); ++i)
{
if ( std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ) > 0 )
v2_cpu[i] = std::fabs(v2_cpu[i] - v1[i]) / std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) );
else
v2_cpu[i] = 0.0;
if (v2_cpu[i] > inf_norm)
inf_norm = v2_cpu[i];
}
return inf_norm;
}
template<typename T, typename U, typename EpsilonT>
void check(T const & t, U const & u, EpsilonT eps)
{
EpsilonT rel_error = std::fabs(static_cast<EpsilonT>(diff(t,u)));
if (rel_error > eps)
{
std::cerr << "Relative error: " << rel_error << std::endl;
std::cerr << "Aborting!" << std::endl;
exit(EXIT_FAILURE);
}
std::cout << "SUCCESS ";
}
int main()
{
std::size_t size = 10; // at least 7
float eps_float = 1e-5f;
double eps_double = 1e-12;
float ref_float_alpha;
double ref_double_alpha;
std::vector<float> ref_float_x(size, 1.0f);
std::vector<float> ref_float_y(size, 2.0f);
std::vector<double> ref_double_x(size, 1.0);
std::vector<double> ref_double_y(size, 2.0);
ViennaCLBackend my_backend;
ViennaCLBackendCreate(&my_backend);
// Host setup
float host_float_alpha = 0;
viennacl::vector<float> host_float_x = viennacl::scalar_vector<float>(size, 1.0f, viennacl::context(viennacl::MAIN_MEMORY));
viennacl::vector<float> host_float_y = viennacl::scalar_vector<float>(size, 2.0f, viennacl::context(viennacl::MAIN_MEMORY));
double host_double_alpha = 0;
viennacl::vector<double> host_double_x = viennacl::scalar_vector<double>(size, 1.0, viennacl::context(viennacl::MAIN_MEMORY));
viennacl::vector<double> host_double_y = viennacl::scalar_vector<double>(size, 2.0, viennacl::context(viennacl::MAIN_MEMORY));
// CUDA setup
#ifdef VIENNACL_WITH_CUDA
float cuda_float_alpha = 0;
viennacl::vector<float> cuda_float_x = viennacl::scalar_vector<float>(size, 1.0f, viennacl::context(viennacl::CUDA_MEMORY));
viennacl::vector<float> cuda_float_y = viennacl::scalar_vector<float>(size, 2.0f, viennacl::context(viennacl::CUDA_MEMORY));
double cuda_double_alpha = 0;
viennacl::vector<double> cuda_double_x = viennacl::scalar_vector<double>(size, 1.0, viennacl::context(viennacl::CUDA_MEMORY));
viennacl::vector<double> cuda_double_y = viennacl::scalar_vector<double>(size, 2.0, viennacl::context(viennacl::CUDA_MEMORY));
#endif
// OpenCL setup
#ifdef VIENNACL_WITH_OPENCL
ViennaCLInt context_id = 0;
float opencl_float_alpha = 0;
viennacl::vector<float> opencl_float_x = viennacl::scalar_vector<float>(size, 1.0f, viennacl::context(viennacl::ocl::get_context(context_id)));
viennacl::vector<float> opencl_float_y = viennacl::scalar_vector<float>(size, 2.0f, viennacl::context(viennacl::ocl::get_context(context_id)));
double opencl_double_alpha = 0;
viennacl::vector<double> *opencl_double_x = NULL;
viennacl::vector<double> *opencl_double_y = NULL;
if ( viennacl::ocl::current_device().double_support() )
{
opencl_double_x = new viennacl::vector<double>(viennacl::scalar_vector<double>(size, 1.0, viennacl::context(viennacl::ocl::get_context(context_id))));
opencl_double_y = new viennacl::vector<double>(viennacl::scalar_vector<double>(size, 2.0, viennacl::context(viennacl::ocl::get_context(context_id))));
}
ViennaCLBackendSetOpenCLContextID(my_backend, context_id);
#endif
// consistency checks:
check(ref_float_x, host_float_x, eps_float);
check(ref_float_y, host_float_y, eps_float);
check(ref_double_x, host_double_x, eps_double);
check(ref_double_y, host_double_y, eps_double);
#ifdef VIENNACL_WITH_CUDA
check(ref_float_x, cuda_float_x, eps_float);
check(ref_float_y, cuda_float_y, eps_float);
check(ref_double_x, cuda_double_x, eps_double);
check(ref_double_y, cuda_double_y, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
check(ref_float_x, opencl_float_x, eps_float);
check(ref_float_y, opencl_float_y, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
check(ref_double_x, *opencl_double_x, eps_double);
check(ref_double_y, *opencl_double_y, eps_double);
}
#endif
// ASUM
std::cout << std::endl << "-- Testing xASUM...";
ref_float_alpha = 0;
ref_double_alpha = 0;
for (std::size_t i=0; i<size/4; ++i)
{
ref_float_alpha += std::fabs(ref_float_x[2 + 3*i]);
ref_double_alpha += std::fabs(ref_double_x[2 + 3*i]);
}
std::cout << std::endl << "Host: ";
ViennaCLHostSasum(my_backend, ViennaCLInt(size/4),
&host_float_alpha,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 2, 3);
check(ref_float_alpha, host_float_alpha, eps_float);
ViennaCLHostDasum(my_backend, ViennaCLInt(size/4),
&host_double_alpha,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 2, 3);
check(ref_double_alpha, host_double_alpha, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDASasum(my_backend, ViennaCLInt(size/4),
&cuda_float_alpha,
viennacl::cuda_arg(cuda_float_x), 2, 3);
check(ref_float_alpha, cuda_float_alpha, eps_float);
ViennaCLCUDADasum(my_backend, ViennaCLInt(size/4),
&cuda_double_alpha,
viennacl::cuda_arg(cuda_double_x), 2, 3);
check(ref_double_alpha, cuda_double_alpha, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLSasum(my_backend, ViennaCLInt(size/4),
&opencl_float_alpha,
viennacl::traits::opencl_handle(opencl_float_x).get(), 2, 3);
check(ref_float_alpha, opencl_float_alpha, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDasum(my_backend, ViennaCLInt(size/4),
&opencl_double_alpha,
viennacl::traits::opencl_handle(*opencl_double_x).get(), 2, 3);
check(ref_double_alpha, opencl_double_alpha, eps_double);
}
#endif
// AXPY
std::cout << std::endl << "-- Testing xAXPY...";
for (std::size_t i=0; i<size/3; ++i)
{
ref_float_y[1 + 2*i] += 2.0f * ref_float_x[0 + 2*i];
ref_double_y[1 + 2*i] += 2.0 * ref_double_x[0 + 2*i];
}
std::cout << std::endl << "Host: ";
ViennaCLHostSaxpy(my_backend, ViennaCLInt(size/3),
2.0f,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 0, 2,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_y), 1, 2);
check(ref_float_x, host_float_x, eps_float);
check(ref_float_y, host_float_y, eps_float);
ViennaCLHostDaxpy(my_backend, ViennaCLInt(size/3),
2.0,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 0, 2,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_y), 1, 2);
check(ref_double_x, host_double_x, eps_double);
check(ref_double_y, host_double_y, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDASaxpy(my_backend, ViennaCLInt(size/3),
2.0f,
viennacl::cuda_arg(cuda_float_x), 0, 2,
viennacl::cuda_arg(cuda_float_y), 1, 2);
check(ref_float_x, cuda_float_x, eps_float);
check(ref_float_y, cuda_float_y, eps_float);
ViennaCLCUDADaxpy(my_backend, ViennaCLInt(size/3),
2.0,
viennacl::cuda_arg(cuda_double_x), 0, 2,
viennacl::cuda_arg(cuda_double_y), 1, 2);
check(ref_double_x, cuda_double_x, eps_double);
check(ref_double_y, cuda_double_y, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLSaxpy(my_backend, ViennaCLInt(size/3),
2.0f,
viennacl::traits::opencl_handle(opencl_float_x).get(), 0, 2,
viennacl::traits::opencl_handle(opencl_float_y).get(), 1, 2);
check(ref_float_x, opencl_float_x, eps_float);
check(ref_float_y, opencl_float_y, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDaxpy(my_backend, ViennaCLInt(size/3),
2.0,
viennacl::traits::opencl_handle(*opencl_double_x).get(), 0, 2,
viennacl::traits::opencl_handle(*opencl_double_y).get(), 1, 2);
check(ref_double_x, *opencl_double_x, eps_double);
check(ref_double_y, *opencl_double_y, eps_double);
}
#endif
// COPY
std::cout << std::endl << "-- Testing xCOPY...";
for (std::size_t i=0; i<size/3; ++i)
{
ref_float_y[0 + 2*i] = ref_float_x[1 + 2*i];
ref_double_y[0 + 2*i] = ref_double_x[1 + 2*i];
}
std::cout << std::endl << "Host: ";
ViennaCLHostScopy(my_backend, ViennaCLInt(size/3),
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 1, 2,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_y), 0, 2);
check(ref_float_x, host_float_x, eps_float);
check(ref_float_y, host_float_y, eps_float);
ViennaCLHostDcopy(my_backend, ViennaCLInt(size/3),
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 1, 2,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_y), 0, 2);
check(ref_double_x, host_double_x, eps_double);
check(ref_double_y, host_double_y, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDAScopy(my_backend, ViennaCLInt(size/3),
viennacl::cuda_arg(cuda_float_x), 1, 2,
viennacl::cuda_arg(cuda_float_y), 0, 2);
check(ref_float_x, cuda_float_x, eps_float);
check(ref_float_y, cuda_float_y, eps_float);
ViennaCLCUDADcopy(my_backend, ViennaCLInt(size/3),
viennacl::cuda_arg(cuda_double_x), 1, 2,
viennacl::cuda_arg(cuda_double_y), 0, 2);
check(ref_double_x, cuda_double_x, eps_double);
check(ref_double_y, cuda_double_y, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLScopy(my_backend, ViennaCLInt(size/3),
viennacl::traits::opencl_handle(opencl_float_x).get(), 1, 2,
viennacl::traits::opencl_handle(opencl_float_y).get(), 0, 2);
check(ref_float_x, opencl_float_x, eps_float);
check(ref_float_y, opencl_float_y, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDcopy(my_backend, ViennaCLInt(size/3),
viennacl::traits::opencl_handle(*opencl_double_x).get(), 1, 2,
viennacl::traits::opencl_handle(*opencl_double_y).get(), 0, 2);
check(ref_double_x, *opencl_double_x, eps_double);
check(ref_double_y, *opencl_double_y, eps_double);
}
#endif
// DOT
std::cout << std::endl << "-- Testing xDOT...";
ref_float_alpha = 0;
ref_double_alpha = 0;
for (std::size_t i=0; i<size/2; ++i)
{
ref_float_alpha += ref_float_y[3 + i] * ref_float_x[2 + i];
ref_double_alpha += ref_double_y[3 + i] * ref_double_x[2 + i];
}
std::cout << std::endl << "Host: ";
ViennaCLHostSdot(my_backend, ViennaCLInt(size/2),
&host_float_alpha,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 2, 1,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_y), 3, 1);
check(ref_float_alpha, host_float_alpha, eps_float);
ViennaCLHostDdot(my_backend, ViennaCLInt(size/2),
&host_double_alpha,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 2, 1,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_y), 3, 1);
check(ref_double_alpha, host_double_alpha, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDASdot(my_backend, ViennaCLInt(size/2),
&cuda_float_alpha,
viennacl::cuda_arg(cuda_float_x), 2, 1,
viennacl::cuda_arg(cuda_float_y), 3, 1);
check(ref_float_alpha, cuda_float_alpha, eps_float);
ViennaCLCUDADdot(my_backend, ViennaCLInt(size/2),
&cuda_double_alpha,
viennacl::cuda_arg(cuda_double_x), 2, 1,
viennacl::cuda_arg(cuda_double_y), 3, 1);
check(ref_double_alpha, cuda_double_alpha, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLSdot(my_backend, ViennaCLInt(size/2),
&opencl_float_alpha,
viennacl::traits::opencl_handle(opencl_float_x).get(), 2, 1,
viennacl::traits::opencl_handle(opencl_float_y).get(), 3, 1);
check(ref_float_alpha, opencl_float_alpha, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDdot(my_backend, ViennaCLInt(size/2),
&opencl_double_alpha,
viennacl::traits::opencl_handle(*opencl_double_x).get(), 2, 1,
viennacl::traits::opencl_handle(*opencl_double_y).get(), 3, 1);
check(ref_double_alpha, opencl_double_alpha, eps_double);
}
#endif
// NRM2
std::cout << std::endl << "-- Testing xNRM2...";
ref_float_alpha = 0;
ref_double_alpha = 0;
for (std::size_t i=0; i<size/3; ++i)
{
ref_float_alpha += ref_float_x[1 + 2*i] * ref_float_x[1 + 2*i];
ref_double_alpha += ref_double_x[1 + 2*i] * ref_double_x[1 + 2*i];
}
ref_float_alpha = std::sqrt(ref_float_alpha);
ref_double_alpha = std::sqrt(ref_double_alpha);
std::cout << std::endl << "Host: ";
ViennaCLHostSnrm2(my_backend, ViennaCLInt(size/3),
&host_float_alpha,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 1, 2);
check(ref_float_alpha, host_float_alpha, eps_float);
ViennaCLHostDnrm2(my_backend, ViennaCLInt(size/3),
&host_double_alpha,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 1, 2);
check(ref_double_alpha, host_double_alpha, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDASnrm2(my_backend, ViennaCLInt(size/3),
&cuda_float_alpha,
viennacl::cuda_arg(cuda_float_x), 1, 2);
check(ref_float_alpha, cuda_float_alpha, eps_float);
ViennaCLCUDADnrm2(my_backend, ViennaCLInt(size/3),
&cuda_double_alpha,
viennacl::cuda_arg(cuda_double_x), 1, 2);
check(ref_double_alpha, cuda_double_alpha, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLSnrm2(my_backend, ViennaCLInt(size/3),
&opencl_float_alpha,
viennacl::traits::opencl_handle(opencl_float_x).get(), 1, 2);
check(ref_float_alpha, opencl_float_alpha, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDnrm2(my_backend, ViennaCLInt(size/3),
&opencl_double_alpha,
viennacl::traits::opencl_handle(*opencl_double_x).get(), 1, 2);
check(ref_double_alpha, opencl_double_alpha, eps_double);
}
#endif
// ROT
std::cout << std::endl << "-- Testing xROT...";
for (std::size_t i=0; i<size/4; ++i)
{
float tmp = 0.6f * ref_float_x[2 + 3*i] + 0.8f * ref_float_y[1 + 2*i];
    ref_float_y[1 + 2*i] = -0.8f * ref_float_x[2 + 3*i] + 0.6f * ref_float_y[1 + 2*i];
ref_float_x[2 + 3*i] = tmp;
double tmp2 = 0.6 * ref_double_x[2 + 3*i] + 0.8 * ref_double_y[1 + 2*i];
    ref_double_y[1 + 2*i] = -0.8 * ref_double_x[2 + 3*i] + 0.6 * ref_double_y[1 + 2*i];
ref_double_x[2 + 3*i] = tmp2;
}
std::cout << std::endl << "Host: ";
ViennaCLHostSrot(my_backend, ViennaCLInt(size/4),
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 2, 3,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_y), 1, 2,
0.6f, 0.8f);
check(ref_float_x, host_float_x, eps_float);
check(ref_float_y, host_float_y, eps_float);
ViennaCLHostDrot(my_backend, ViennaCLInt(size/4),
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 2, 3,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_y), 1, 2,
0.6, 0.8);
check(ref_double_x, host_double_x, eps_double);
check(ref_double_y, host_double_y, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDASrot(my_backend, ViennaCLInt(size/4),
viennacl::cuda_arg(cuda_float_x), 2, 3,
viennacl::cuda_arg(cuda_float_y), 1, 2,
0.6f, 0.8f);
check(ref_float_x, cuda_float_x, eps_float);
check(ref_float_y, cuda_float_y, eps_float);
ViennaCLCUDADrot(my_backend, ViennaCLInt(size/4),
viennacl::cuda_arg(cuda_double_x), 2, 3,
viennacl::cuda_arg(cuda_double_y), 1, 2,
0.6, 0.8);
check(ref_double_x, cuda_double_x, eps_double);
check(ref_double_y, cuda_double_y, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLSrot(my_backend, ViennaCLInt(size/4),
viennacl::traits::opencl_handle(opencl_float_x).get(), 2, 3,
viennacl::traits::opencl_handle(opencl_float_y).get(), 1, 2,
0.6f, 0.8f);
check(ref_float_x, opencl_float_x, eps_float);
check(ref_float_y, opencl_float_y, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDrot(my_backend, ViennaCLInt(size/4),
viennacl::traits::opencl_handle(*opencl_double_x).get(), 2, 3,
viennacl::traits::opencl_handle(*opencl_double_y).get(), 1, 2,
0.6, 0.8);
check(ref_double_x, *opencl_double_x, eps_double);
check(ref_double_y, *opencl_double_y, eps_double);
}
#endif
// SCAL
std::cout << std::endl << "-- Testing xSCAL...";
for (std::size_t i=0; i<size/4; ++i)
{
ref_float_x[1 + 3*i] *= 2.0f;
ref_double_x[1 + 3*i] *= 2.0;
}
std::cout << std::endl << "Host: ";
ViennaCLHostSscal(my_backend, ViennaCLInt(size/4),
2.0f,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 1, 3);
check(ref_float_x, host_float_x, eps_float);
ViennaCLHostDscal(my_backend, ViennaCLInt(size/4),
2.0,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 1, 3);
check(ref_double_x, host_double_x, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDASscal(my_backend, ViennaCLInt(size/4),
2.0f,
viennacl::cuda_arg(cuda_float_x), 1, 3);
check(ref_float_x, cuda_float_x, eps_float);
ViennaCLCUDADscal(my_backend, ViennaCLInt(size/4),
2.0,
viennacl::cuda_arg(cuda_double_x), 1, 3);
check(ref_double_x, cuda_double_x, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLSscal(my_backend, ViennaCLInt(size/4),
2.0f,
viennacl::traits::opencl_handle(opencl_float_x).get(), 1, 3);
check(ref_float_x, opencl_float_x, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDscal(my_backend, ViennaCLInt(size/4),
2.0,
viennacl::traits::opencl_handle(*opencl_double_x).get(), 1, 3);
check(ref_double_x, *opencl_double_x, eps_double);
}
#endif
// SWAP
std::cout << std::endl << "-- Testing xSWAP...";
for (std::size_t i=0; i<size/3; ++i)
{
float tmp = ref_float_x[2 + 2*i];
ref_float_x[2 + 2*i] = ref_float_y[1 + 2*i];
ref_float_y[1 + 2*i] = tmp;
double tmp2 = ref_double_x[2 + 2*i];
ref_double_x[2 + 2*i] = ref_double_y[1 + 2*i];
ref_double_y[1 + 2*i] = tmp2;
}
std::cout << std::endl << "Host: ";
ViennaCLHostSswap(my_backend, ViennaCLInt(size/3),
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 2, 2,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_y), 1, 2);
check(ref_float_y, host_float_y, eps_float);
ViennaCLHostDswap(my_backend, ViennaCLInt(size/3),
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 2, 2,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_y), 1, 2);
check(ref_double_y, host_double_y, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
ViennaCLCUDASswap(my_backend, ViennaCLInt(size/3),
viennacl::cuda_arg(cuda_float_x), 2, 2,
viennacl::cuda_arg(cuda_float_y), 1, 2);
check(ref_float_y, cuda_float_y, eps_float);
ViennaCLCUDADswap(my_backend, ViennaCLInt(size/3),
viennacl::cuda_arg(cuda_double_x), 2, 2,
viennacl::cuda_arg(cuda_double_y), 1, 2);
check(ref_double_y, cuda_double_y, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
ViennaCLOpenCLSswap(my_backend, ViennaCLInt(size/3),
viennacl::traits::opencl_handle(opencl_float_x).get(), 2, 2,
viennacl::traits::opencl_handle(opencl_float_y).get(), 1, 2);
check(ref_float_y, opencl_float_y, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLDswap(my_backend, ViennaCLInt(size/3),
viennacl::traits::opencl_handle(*opencl_double_x).get(), 2, 2,
viennacl::traits::opencl_handle(*opencl_double_y).get(), 1, 2);
check(ref_double_y, *opencl_double_y, eps_double);
}
#endif
// IAMAX
std::cout << std::endl << "-- Testing IxASUM...";
ViennaCLInt ref_index = 0;
ref_float_alpha = 0;
for (std::size_t i=0; i<size/3; ++i)
{
    if (std::fabs(ref_float_x[0 + 2*i]) > ref_float_alpha)
{
ref_index = ViennaCLInt(i);
ref_float_alpha = std::fabs(ref_float_x[0 + 2*i]);
}
}
std::cout << std::endl << "Host: ";
ViennaCLInt idx = 0;
ViennaCLHostiSamax(my_backend, ViennaCLInt(size/3),
&idx,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 0, 2);
check(static_cast<float>(ref_index), static_cast<float>(idx), eps_float);
idx = 0;
ViennaCLHostiDamax(my_backend, ViennaCLInt(size/3),
&idx,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 0, 2);
check(ref_index, idx, eps_double);
#ifdef VIENNACL_WITH_CUDA
std::cout << std::endl << "CUDA: ";
idx = 0;
ViennaCLCUDAiSamax(my_backend, ViennaCLInt(size/3),
&idx,
viennacl::cuda_arg(cuda_float_x), 0, 2);
check(ref_float_x[2*ref_index], ref_float_x[2*idx], eps_float);
idx = 0;
ViennaCLCUDAiDamax(my_backend, ViennaCLInt(size/3),
&idx,
viennacl::cuda_arg(cuda_double_x), 0, 2);
check(ref_double_x[2*ref_index], ref_double_x[2*idx], eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
std::cout << std::endl << "OpenCL: ";
idx = 0;
ViennaCLOpenCLiSamax(my_backend, ViennaCLInt(size/3),
&idx,
viennacl::traits::opencl_handle(opencl_float_x).get(), 0, 2);
check(ref_float_x[2*static_cast<std::size_t>(ref_index)], ref_float_x[2*static_cast<std::size_t>(idx)], eps_float);
idx = 0;
if ( viennacl::ocl::current_device().double_support() )
{
ViennaCLOpenCLiDamax(my_backend, ViennaCLInt(size/3),
&idx,
viennacl::traits::opencl_handle(*opencl_double_x).get(), 0, 2);
check(ref_double_x[2*static_cast<std::size_t>(ref_index)], ref_double_x[2*static_cast<std::size_t>(idx)], eps_double);
}
#endif
#ifdef VIENNACL_WITH_OPENCL
//cleanup
if ( viennacl::ocl::current_device().double_support() )
{
delete opencl_double_x;
delete opencl_double_y;
}
#endif
ViennaCLBackendDestroy(&my_backend);
//
// That's it.
//
std::cout << std::endl << "!!!! TEST COMPLETED SUCCESSFULLY !!!!" << std::endl;
return EXIT_SUCCESS;
}
#pragma once
#include <gunrock/app/problem_base.cuh>
namespace gunrock {
namespace app {
namespace sage {
/**
 * @brief Specifying parameters for the SAGE problem
 * @param parameters The util::Parameters structure holding all parameter info
 * \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
// GUARD_CU(parameters.Use<bool>(
// "mark-pred",
// util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
// false,
// "Whether to mark predecessor info.",
// __FILE__, __LINE__));
return retval;
}
/**
 * @brief SAGE problem structure.
* @tparam _GraphT Type of the graph
* @tparam _LabelT Type of labels used in sage
 * @tparam _ValueT Type of feature and weight values
* @tparam _FLAG Problem flags
*/
template <typename _GraphT,
// typename _LabelT = typename _GraphT::VertexT,
typename _ValueT = typename _GraphT::ValueT,
ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
// typedef typename GraphT::CsrT CsrT;
typedef typename GraphT::GpT GpT;
typedef typename GraphT::VertexT LabelT;
typedef _ValueT ValueT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// Helper structures
/**
   * @brief Data structure containing SAGE-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice {
// sage-specific storage arrays
util::Array1D<SizeT, ValueT>
W_f_1_1D; // w_f_1 1D array. weight matrix for W^1 feature part
util::Array1D<SizeT, ValueT>
W_a_1_1D; // w_a_1 1D array. weight matrix for W^1 agg part
util::Array1D<SizeT, ValueT>
W_f_2_1D; // w_f_2 1D array. weight matrix for W^2 feature part
util::Array1D<SizeT, ValueT>
W_a_2_1D; // w_a_2 1D array. weight matrix for W^2 agg part
    util::Array1D<uint64_t, ValueT> features_1D; // feature matrix 1D
util::Array1D<SizeT, ValueT> children_temp; // 256 agg(h_B1^1)
util::Array1D<SizeT, ValueT> source_temp; // 256 h_B2^1
util::Array1D<SizeT, ValueT> source_result; // 256 h_B2^2
util::Array1D<SizeT, ValueT>
child_temp; // 256 h_B1^1, I feel like this one could be local
util::Array1D<SizeT, ValueT>
sums_child_feat; // 64 sum of children's features, I feel like this one
// could be local as well
util::Array1D<SizeT, ValueT> sums; // 64 per child
util::Array1D<uint64_t, ValueT, util::PINNED>
host_source_result; // results on HOST
util::Array1D<SizeT, curandState>
rand_states; // random states, one per child
util::Array1D<SizeT, VertexT> children; // children vertices
VertexT batch_size;
int feature_column;
int num_children_per_source;
int num_leafs_per_child;
int Wf1_dim0, Wf1_dim1;
int Wa1_dim0, Wa1_dim1;
int Wf2_dim0, Wf2_dim1;
int Wa2_dim0, Wa2_dim1;
int result_column;
bool custom_kernels;
bool debug;
cudaStream_t d2h_stream;
cudaEvent_t d2h_start, d2h_finish;
/*
* @brief Default constructor
*/
DataSlice() : BaseDataSlice() {
W_f_1_1D.SetName("W_f_1_1D");
W_a_1_1D.SetName("W_a_1_1D");
W_f_2_1D.SetName("W_f_2_1D");
W_a_2_1D.SetName("W_a_2_1D");
features_1D.SetName("features_1D");
children_temp.SetName("children_temp");
source_temp.SetName("source_temp");
source_result.SetName("source_result");
child_temp.SetName("child_temp");
sums_child_feat.SetName("sums_child_feat");
sums.SetName("sums");
host_source_result.SetName("host_source_result");
rand_states.SetName("rand_states");
children.SetName("children");
}
/*
* @brief Default destructor
*/
virtual ~DataSlice() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(W_f_1_1D.Release(target));
GUARD_CU(W_a_1_1D.Release(target));
GUARD_CU(W_f_2_1D.Release(target));
GUARD_CU(W_a_2_1D.Release(target));
GUARD_CU(features_1D.Release(target));
GUARD_CU(children_temp.Release(target));
GUARD_CU(source_temp.Release(target));
GUARD_CU(source_result.Release(target));
GUARD_CU(child_temp.Release(target));
GUARD_CU(sums_child_feat.Release(target));
GUARD_CU(sums.Release(target));
GUARD_CU(host_source_result.Release(util::HOST));
GUARD_CU(rand_states.Release(target));
GUARD_CU(children.Release(target));
      GUARD_CU2(cudaStreamDestroy(d2h_stream), "cudaStreamDestroy failed.");
      GUARD_CU2(cudaEventDestroy(d2h_start), "cudaEventDestroy failed.");
      GUARD_CU2(cudaEventDestroy(d2h_finish), "cudaEventDestroy failed.");
GUARD_CU(BaseDataSlice ::Release(target));
return retval;
}
/**
* @brief initializing sage-specific data on each gpu
* @param sub_graph Sub graph on the GPU.
* @param[in] num_gpus Number of GPUs
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
     * @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0,
util::Location target = util::DEVICE,
ProblemFlag flag = Problem_None) {
cudaError_t retval = cudaSuccess;
auto nodes = sub_graph.nodes;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
GUARD_CU(W_f_1_1D.Allocate( // 64 *128, target));
Wf1_dim0 * Wf1_dim1, target));
GUARD_CU(W_a_1_1D.Allocate( // 64 *128, target));
Wa1_dim0 * Wa1_dim1, target));
GUARD_CU(W_f_2_1D.Allocate( // 256*128, target));
Wf2_dim0 * Wf2_dim1, target));
GUARD_CU(W_a_2_1D.Allocate( // 256*128, target));
Wa2_dim0 * Wa2_dim1, target));
GUARD_CU(
features_1D.Allocate(((uint64_t)nodes) * feature_column, target));
auto num_children = num_children_per_source * batch_size;
if (!custom_kernels || Wa2_dim0 > 1024) {
GUARD_CU(child_temp.Allocate(num_children * Wf2_dim0, target));
}
GUARD_CU(children_temp.Allocate(batch_size * Wf2_dim0, target));
GUARD_CU(source_temp.Allocate(batch_size * Wf2_dim0, target));
GUARD_CU(sums_child_feat.Allocate(batch_size * result_column, target));
GUARD_CU(sums.Allocate(num_children * feature_column, target));
GUARD_CU(source_result.Allocate(batch_size * result_column, target));
GUARD_CU(rand_states.Allocate(
max(80 * 256, 2560 * min(feature_column, 512)), target));
GUARD_CU(children.Allocate(num_children, target));
GUARD_CU(host_source_result.Allocate(((uint64_t)nodes) * result_column,
util::HOST));
GUARD_CU2(cudaStreamCreateWithFlags(&d2h_stream, cudaStreamNonBlocking),
"cudaStreamCreateWithFlags failed.");
GUARD_CU2(cudaEventCreateWithFlags(&d2h_start, cudaEventDisableTiming),
"cudaEventCreateWithFlags failed.");
GUARD_CU2(cudaEventCreateWithFlags(&d2h_finish, cudaEventDisableTiming),
"cudaEventCreateWithFlags failed.");
GUARD_CU(sub_graph.Move(util::HOST, target, this->stream));
return retval;
} // Init
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
// Reset data
GUARD_CU(child_temp.ForEach(
[] __host__ __device__(ValueT & val) { val = 0; },
util::PreDefinedValues<SizeT>::InvalidValue, target, this->stream));
GUARD_CU(children_temp.ForEach(
[] __host__ __device__(ValueT & val) { val = 0; },
util::PreDefinedValues<SizeT>::InvalidValue, target, this->stream));
GUARD_CU(source_temp.ForEach(
[] __host__ __device__(ValueT & val) { val = 0; },
util::PreDefinedValues<SizeT>::InvalidValue, target, this->stream));
GUARD_CU(sums_child_feat.ForEach(
[] __host__ __device__(ValueT & val) { val = 0; },
util::PreDefinedValues<SizeT>::InvalidValue, target, this->stream));
GUARD_CU(sums.ForEach([] __host__ __device__(ValueT & val) { val = 0; },
util::PreDefinedValues<SizeT>::InvalidValue, target,
this->stream));
GUARD_CU(source_result.ForEach(
[] __host__ __device__(ValueT & val) { val = 0; },
util::PreDefinedValues<SizeT>::InvalidValue, target, this->stream));
return retval;
}
}; // DataSlice
// Members
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
// Methods
/**
   * @brief SAGE Problem default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
: BaseProblem(_parameters, _flag), data_slices(NULL) {}
/**
   * @brief SAGE Problem default destructor
*/
virtual ~Problem() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL) {
delete[] data_slices;
data_slices = NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
/**
   * @brief Copy results computed on GPUs back to host-side arrays.
   * @param[out] h_source_result Host array to store the computed source results.
* @param[in] target where the results are stored
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(ValueT *h_source_result,
// VertexT *h_preds = NULL,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT nodes = this->org_graph->nodes;
auto &data_slice = data_slices[0][0];
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
GUARD_CU(data_slice.host_source_result.ForEach(
h_source_result,
[] __host__ __device__(const ValueT &val, ValueT &h_val) {
h_val = val;
},
((uint64_t)nodes) * data_slice.result_column, util::HOST));
return retval;
}
template <typename ArrayT>
cudaError_t ReadMat(ArrayT &array, std::string filename, uint64_t dim0,
uint64_t dim1) {
cudaError_t retval = cudaSuccess;
auto temp_vals_2D = gunrock::app::sage::template ReadMatrix<ValueT, SizeT>(
filename, dim0, dim1);
GUARD_CU(array.Allocate(dim0 * dim1, util::HOST));
// for (auto pos = 0; pos < dim0 * dim1; pos++)
//{
// array[pos] = temp_vals_2D[pos / dim1][pos % dim1];
//}
GUARD_CU(array.ForAll(
[temp_vals_2D, dim1] __host__ __device__(ValueT * vals,
const uint64_t &pos) {
vals[pos] = temp_vals_2D[pos / dim1][pos % dim1];
},
dim0 * dim1, util::HOST));
    for (uint64_t x = 0; x < dim0; x++) {
delete[] temp_vals_2D[x];
temp_vals_2D[x] = NULL;
}
delete[] temp_vals_2D;
temp_vals_2D = NULL;
return retval;
}
/**
* @brief initialization function.
   * @param graph The graph that SAGE processes on
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
  auto &para = this->parameters;
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
// if (this -> parameters.template Get<bool>("mark-pred"))
// this -> flag = this -> flag | Mark_Predecessors;
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
data_slice.batch_size = para.template Get<int>("batch-size");
data_slice.feature_column = para.template Get<int>("feature-column");
data_slice.num_children_per_source =
para.template Get<int>("num-children-per-source");
data_slice.Wf1_dim0 = data_slice.feature_column;
data_slice.Wf1_dim1 = para.template Get<int>("Wf1-dim1");
data_slice.Wa1_dim0 = data_slice.feature_column;
data_slice.Wa1_dim1 = para.template Get<int>("Wa1-dim1");
data_slice.Wf2_dim0 = data_slice.Wf1_dim1 + data_slice.Wa1_dim1;
data_slice.Wf2_dim1 = para.template Get<int>("Wf2-dim1");
data_slice.Wa2_dim0 = data_slice.Wf1_dim1 + data_slice.Wa1_dim1;
data_slice.Wa2_dim1 = para.template Get<int>("Wa2-dim1");
data_slice.result_column = data_slice.Wa2_dim1 + data_slice.Wf2_dim1;
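      // Illustrative sizes, cf. the commented-out allocation sizes in DataSlice::Init
      // (64*128 and 256*128): with feature_column = 64 and Wf1_dim1 = Wa1_dim1 = 128,
      // Wf2_dim0 = Wa2_dim0 = 256 and result_column = Wf2_dim1 + Wa2_dim1.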
data_slice.num_leafs_per_child =
para.template Get<int>("num-leafs-per-child");
if (!util::isValid(data_slice.num_leafs_per_child))
data_slice.num_leafs_per_child = data_slice.num_children_per_source;
data_slice.custom_kernels = para.template Get<bool>("custom-kernels");
data_slice.debug = para.template Get<bool>("v");
GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus,
this->gpu_idx[gpu], target, this->flag));
std::string Wf1_filename = para.template Get<std::string>("Wf1");
if (Wf1_filename == "") util::PrintMsg("Using randomly generated Wf1");
GUARD_CU(ReadMat(data_slice.W_f_1_1D, Wf1_filename, data_slice.Wf1_dim0,
data_slice.Wf1_dim1));
std::string Wa1_filename = para.template Get<std::string>("Wa1");
if (Wa1_filename == "") util::PrintMsg("Using randomly generated Wa1");
GUARD_CU(ReadMat(data_slice.W_a_1_1D, Wa1_filename, data_slice.Wa1_dim0,
data_slice.Wa1_dim1));
std::string Wf2_filename = para.template Get<std::string>("Wf2");
if (Wf2_filename == "") util::PrintMsg("Using randomly generated Wf2");
GUARD_CU(ReadMat(data_slice.W_f_2_1D, Wf2_filename, data_slice.Wf2_dim0,
data_slice.Wf2_dim1));
std::string Wa2_filename = para.template Get<std::string>("Wa2");
if (Wa2_filename == "") util::PrintMsg("Using randomly generated Wa2");
GUARD_CU(ReadMat(data_slice.W_a_2_1D, Wa2_filename, data_slice.Wa2_dim0,
data_slice.Wa2_dim1));
std::string features_filename =
para.template Get<std::string>("features");
if (features_filename == "")
util::PrintMsg("Using randomly generated features");
GUARD_CU(ReadMat(data_slice.features_1D, features_filename, graph.nodes,
data_slice.feature_column));
GUARD_CU(data_slice.W_f_1_1D.Move(util::HOST, util::DEVICE));
GUARD_CU(data_slice.W_a_1_1D.Move(util::HOST, util::DEVICE));
GUARD_CU(data_slice.W_f_2_1D.Move(util::HOST, util::DEVICE));
GUARD_CU(data_slice.W_a_2_1D.Move(util::HOST, util::DEVICE));
GUARD_CU(data_slice.features_1D.Move(util::HOST, util::DEVICE));
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed.");
} // end for (gpu)
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(
// VertexT src,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
// Set device
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu]->Reset(target));
int rand_seed = this->parameters.template Get<int>("rand-seed");
if (!util::isValid(rand_seed)) rand_seed = time(NULL);
if (!this->parameters.template Get<bool>("quiet"))
util::PrintMsg("rand-seed = " + std::to_string(rand_seed));
GUARD_CU(data_slices[gpu]->rand_states.ForAll(
[rand_seed] __host__ __device__(curandState * states,
const SizeT &pos) {
curand_init(rand_seed, pos, 0, states + pos);
},
util::PreDefinedValues<SizeT>::InvalidValue, util::DEVICE,
data_slices[gpu]->stream));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
return retval;
}
/** @} */
};
} // namespace sage
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#ifdef GPU
#if ( MODEL == HYDRO )
#if ( FLU_SCHEME == RTVD )
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
const EoS_t EoS );
#elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
__global__
void CUFLU_FluidSolver_MHM(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS );
#elif ( FLU_SCHEME == CTU )
__global__
void CUFLU_FluidSolver_CTU(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS );
#endif // FLU_SCHEME
#elif ( MODEL == ELBDM )
__global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ],
real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ],
real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ],
const real dt, const real _dh, const real Eta, const bool StoreFlux,
const real Taylor3_Coeff, const bool XYZ, const real MinDens );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
#ifndef GRAVITY
static ExtAcc_t GPUExtAcc_Ptr = NULL;
#endif
// device pointers
extern real (*d_Flu_Array_F_In )[FLU_NIN ][ CUBE(FLU_NXT) ];
extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ CUBE(PS2) ];
extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ SQR(PS2) ];
extern double (*d_Corner_Array_F)[3];
#if ( MODEL == HYDRO )
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_F_Out)[ CUBE(PS2) ];
#else
static char (*d_DE_Array_F_Out)[ CUBE(PS2) ] = NULL;
#endif
#ifdef MHD
extern real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ];
extern real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ];
#else
static real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL;
static real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ] = NULL;
static real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ] = NULL;
#endif
#if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
extern real (*d_PriVar) [NCOMP_LR ][ CUBE(FLU_NXT) ];
extern real (*d_Slope_PPM)[3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ];
extern real (*d_FC_Var) [6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ];
extern real (*d_FC_Flux) [3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ];
#ifdef MHD
extern real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ];
#else
static real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL;
static real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ] = NULL;
#endif // MHD
#endif // FLU_SCHEME
#endif // #if ( MODEL == HYDRO )
#ifdef UNSPLIT_GRAVITY
extern real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ];
#else
static real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ] = NULL;
#endif
extern cudaStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Asyn_FluidSolver
// Description : 1. MODEL == HYDRO : use GPU to solve the Euler equations by different schemes
// --> invoke the kernel "CUFLU_FluidSolver_XXX"
// 2. MODEL == ELBDM : use GPU to solve the kinematic operator in the Schrodinger equation
// --> invoke the kernel "CUFLU_ELBDMSolver"
//
// ***********************************************************
// ** Asynchronous Function **
// ** **
// ** will return before the execution in GPU is complete **
// ***********************************************************
//
// Note : 1. Use streams for the asynchronous memory copy between device and host
// 2. Prefix "d" : for pointers pointing to the "Device" memory space
// Prefix "h" : for pointers pointing to the "Host" memory space
// 3. Use the input parameter "XYZ" to control the order of update for dimensional-splitting
// method (currently only RTVD)
// 4. Currently four hydro schemes are supported :
// 1. Relaxing TVD scheme (RTVD ) --> split
// 2. MUSCL-Hancock scheme (MHM ) --> unsplit
// 3. MUSCL-Hancock scheme with Riemann prediction (MHM_RP) --> unsplit
// 4. Corner-Transport-Upwind scheme (CTU ) --> unsplit
//
// Parameter : h_Flu_Array_In : Host array to store the input fluid variables
// h_Flu_Array_Out : Host array to store the output fluid variables
// h_Mag_Array_In : Host array storing the input B field (for MHD only)
// h_Mag_Array_Out : Host array to store the output B field (for MHD only)
// h_DE_Array_Out : Host array to store the dual-energy status
// h_Flux_Array : Host array to store the output fluxes
// h_Ele_Array : Host array to store the output electric field (for MHD only)
// h_Corner_Array : Host array storing the physical corner coordinates of each patch group
// h_Pot_Array_USG : Host array storing the input potential for UNSPLIT_GRAVITY
// NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// dt : Time interval to advance solution
// dh : Cell size
// StoreFlux : true --> store the coarse-fine fluxes
// StoreElectric : true --> store the coarse-fine electric field
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// ~ useless in directionally unsplit schemes
// LR_Limiter : Slope limiter for the data reconstruction in the MHM/MHM_RP/CTU schemes
// (0/1/2/3/4) = (vanLeer/generalized MinMod/vanAlbada/
// vanLeer + generalized MinMod/extrema-preserving) limiter
// MinMod_Coeff : Coefficient of the generalized MinMod limiter
// ELBDM_Eta : Particle mass / Planck constant
// ELBDM_Taylor3_Coeff : Coefficient in front of the third term in the Taylor expansion for ELBDM
// ELBDM_Taylor3_Auto : true --> Determine ELBDM_Taylor3_Coeff automatically by invoking the
// function "ELBDM_SetTaylor3Coeff"
// Time : Current physical time (for UNSPLIT_GRAVITY only)
// UsePot : Add self-gravity and/or external potential (for UNSPLIT_GRAVITY only)
// ExtAcc : Add external acceleration (for UNSPLIT_GRAVITY only)
// MinDens/Pres/Eint : Density, pressure, and internal energy floors
// DualEnergySwitch : Use the dual-energy formalism if E_int/E_kin < DualEnergySwitch
// NormPassive : true --> normalize passive scalars so that the sum of their mass density
// is equal to the gas mass density
// NNorm : Number of passive scalars to be normalized
// --> Should be set to the global variable "PassiveNorm_NVar"
// FracPassive : true --> convert passive scalars to mass fraction during data reconstruction
// NFrac : Number of passive scalars for the option "FracPassive"
// --> Should be set to the global variable "PassiveIntFrac_NVar"
// JeansMinPres : Apply minimum pressure estimated from the Jeans length
// JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi);
// GPU_NStream : Number of CUDA streams for the asynchronous memory copy
//-------------------------------------------------------------------------------------------------------
void CUAPI_Asyn_FluidSolver( real h_Flu_Array_In[][FLU_NIN ][ CUBE(FLU_NXT) ],
real h_Flu_Array_Out[][FLU_NOUT][ CUBE(PS2) ],
real h_Mag_Array_In[][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real h_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char h_DE_Array_Out[][ CUBE(PS2) ],
real h_Flux_Array[][9][NFLUX_TOTAL][ SQR(PS2) ],
real h_Ele_Array[][9][NCOMP_ELE][ PS2P1*PS2 ],
const double h_Corner_Array[][3],
real h_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
const int NPatchGroup, const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const bool XYZ, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const real ELBDM_Eta, real ELBDM_Taylor3_Coeff, const bool ELBDM_Taylor3_Auto,
const double Time, const bool UsePot, const OptExtAcc_t ExtAcc,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const int GPU_NStream )
{
// check
# ifdef GAMER_DEBUG
# if ( MODEL == HYDRO )
# ifdef UNSPLIT_GRAVITY
if ( UsePot )
{
if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" );
if ( d_Pot_Array_USG_F == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_F == NULL !!\n" );
}
if ( ExtAcc )
{
if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" );
if ( d_Corner_Array_F == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_F == NULL !!\n" );
}
# endif
# elif ( MODEL == ELBDM )
# else
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
if ( StoreFlux )
{
if ( d_Flux_Array == NULL ) Aux_Error( ERROR_INFO, "d_Flux_Array == NULL !!\n" );
if ( h_Flux_Array == NULL ) Aux_Error( ERROR_INFO, "h_Flux_Array == NULL !!\n" );
}
# ifdef MHD
if ( h_Mag_Array_In == NULL ) Aux_Error( ERROR_INFO, "h_Mag_Array_In == NULL !!\n" );
if ( d_Mag_Array_F_In == NULL ) Aux_Error( ERROR_INFO, "d_Mag_Array_F_In == NULL !!\n" );
if ( h_Mag_Array_Out == NULL ) Aux_Error( ERROR_INFO, "h_Mag_Array_Out == NULL !!\n" );
if ( d_Mag_Array_F_Out == NULL ) Aux_Error( ERROR_INFO, "d_Mag_Array_F_Out == NULL !!\n" );
if ( d_FC_Mag_Half == NULL ) Aux_Error( ERROR_INFO, "d_FC_Mag_Half == NULL !!\n" );
if ( d_EC_Ele == NULL ) Aux_Error( ERROR_INFO, "d_EC_Ele == NULL !!\n" );
if ( StoreElectric )
{
if ( d_Ele_Array == NULL ) Aux_Error( ERROR_INFO, "d_Ele_Array == NULL !!\n" );
if ( h_Ele_Array == NULL ) Aux_Error( ERROR_INFO, "h_Ele_Array == NULL !!\n" );
}
# endif
# endif // #ifdef GAMER_DEBUG
const dim3 BlockDim_FluidSolver ( FLU_BLOCK_SIZE_X, FLU_BLOCK_SIZE_Y, 1 ); // for the fluid solvers
// model-dependent operations
# if ( MODEL == HYDRO )
# elif ( MODEL == ELBDM )
// evaluate the optimized Taylor expansion coefficient
if ( ELBDM_Taylor3_Auto ) ELBDM_Taylor3_Coeff = ELBDM_SetTaylor3Coeff( dt, dh, ELBDM_Eta );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
int *NPatch_per_Stream = new int [GPU_NStream];
int *UsedPatch = new int [GPU_NStream];
int *Flu_MemSize_In = new int [GPU_NStream];
int *Flu_MemSize_Out = new int [GPU_NStream];
int *Flux_MemSize = new int [GPU_NStream];
# ifdef MHD
int *Mag_MemSize_In = new int [GPU_NStream];
int *Mag_MemSize_Out = new int [GPU_NStream];
int *Ele_MemSize = new int [GPU_NStream];
# endif
# ifdef UNSPLIT_GRAVITY
int *USG_MemSize = new int [GPU_NStream];
int *Corner_MemSize = new int [GPU_NStream];
# endif
# ifdef DUAL_ENERGY
int *DE_MemSize_Out = new int [GPU_NStream];
# endif
// set the number of patches of each stream
UsedPatch[0] = 0;
if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatchGroup;
else
{
for (int s=0; s<GPU_NStream-1; s++)
{
NPatch_per_Stream[s] = NPatchGroup / GPU_NStream;
UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s];
}
NPatch_per_Stream[GPU_NStream-1] = NPatchGroup - UsedPatch[GPU_NStream-1];
}
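   // e.g. NPatchGroup = 10 with GPU_NStream = 3 gives NPatch_per_Stream = {3, 3, 4}
   // and UsedPatch = {0, 3, 6}; the last stream absorbs the remainder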
// set the size of data to be transferred into GPU in each stream
for (int s=0; s<GPU_NStream; s++)
{
Flu_MemSize_In [s] = sizeof(real )*NPatch_per_Stream[s]*FLU_NIN *CUBE(FLU_NXT);
Flu_MemSize_Out[s] = sizeof(real )*NPatch_per_Stream[s]*FLU_NOUT*CUBE(PS2);
Flux_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*NFLUX_TOTAL*9*PS2*PS2;
# ifdef MHD
Mag_MemSize_In [s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_MAG*FLU_NXT_P1*SQR(FLU_NXT);
Mag_MemSize_Out[s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_MAG*PS2P1*SQR(PS2);
Ele_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*NCOMP_ELE*9*PS2P1*PS2;
# endif
# ifdef UNSPLIT_GRAVITY
USG_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*CUBE(USG_NXT_F);
Corner_MemSize [s] = sizeof(double)*NPatch_per_Stream[s]*3;
# endif
# ifdef DUAL_ENERGY
DE_MemSize_Out [s] = sizeof(char )*NPatch_per_Stream[s]*CUBE(PS2);
# endif
}
// a. copy data from host to device
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_F_In + UsedPatch[s], h_Flu_Array_In + UsedPatch[s],
Flu_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) );
# ifdef MHD
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Mag_Array_F_In + UsedPatch[s], h_Mag_Array_In + UsedPatch[s],
Mag_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) );
# endif
# ifdef UNSPLIT_GRAVITY
if ( UsePot )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_USG_F + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s],
USG_MemSize [s], cudaMemcpyHostToDevice, Stream[s] ) );
if ( ExtAcc )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Corner_Array_F + UsedPatch[s], h_Corner_Array + UsedPatch[s],
Corner_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
# endif
} // for (int s=0; s<GPU_NStream; s++)
// b. execute the kernel
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
# if ( MODEL == HYDRO )
# if ( FLU_SCHEME == RTVD )
CUFLU_FluidSolver_RTVD <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
dt, 1.0/dh, StoreFlux, XYZ, MinDens, MinPres, MinEint, EoS );
# elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP )
CUFLU_FluidSolver_MHM <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Mag_Array_F_In + UsedPatch[s],
d_Mag_Array_F_Out + UsedPatch[s],
d_DE_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Ele_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
d_PriVar + UsedPatch[s],
d_Slope_PPM + UsedPatch[s],
d_FC_Var + UsedPatch[s],
d_FC_Flux + UsedPatch[s],
d_FC_Mag_Half + UsedPatch[s],
d_EC_Ele + UsedPatch[s],
dt, dh, StoreFlux, StoreElectric, LR_Limiter, MinMod_Coeff,
Time, UsePot, ExtAcc, GPUExtAcc_Ptr, MinDens, MinPres, MinEint,
DualEnergySwitch, NormPassive, NNorm, FracPassive, NFrac,
JeansMinPres, JeansMinPres_Coeff, EoS );
# elif ( FLU_SCHEME == CTU )
CUFLU_FluidSolver_CTU <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Mag_Array_F_In + UsedPatch[s],
d_Mag_Array_F_Out + UsedPatch[s],
d_DE_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
d_Ele_Array + UsedPatch[s],
d_Corner_Array_F + UsedPatch[s],
d_Pot_Array_USG_F + UsedPatch[s],
d_PriVar + UsedPatch[s],
d_Slope_PPM + UsedPatch[s],
d_FC_Var + UsedPatch[s],
d_FC_Flux + UsedPatch[s],
d_FC_Mag_Half + UsedPatch[s],
d_EC_Ele + UsedPatch[s],
dt, dh, StoreFlux, StoreElectric, LR_Limiter, MinMod_Coeff,
Time, UsePot, ExtAcc, GPUExtAcc_Ptr, MinDens, MinPres, MinEint,
DualEnergySwitch, NormPassive, NNorm, FracPassive, NFrac,
JeansMinPres, JeansMinPres_Coeff, EoS );
# else
# error : unsupported GPU hydro scheme
# endif // FLU_SCHEME
# elif ( MODEL == ELBDM )
CUFLU_ELBDMSolver <<< NPatch_per_Stream[s], BlockDim_FluidSolver, 0, Stream[s] >>>
( d_Flu_Array_F_In + UsedPatch[s],
d_Flu_Array_F_Out + UsedPatch[s],
d_Flux_Array + UsedPatch[s],
dt, 1.0/dh, ELBDM_Eta, StoreFlux, ELBDM_Taylor3_Coeff, XYZ, MinDens );
# else
# error : unsupported MODEL !!
# endif // MODEL
CUDA_CHECK_ERROR( cudaGetLastError() );
} // for (int s=0; s<GPU_NStream; s++)
// c. copy data from device to host
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flu_Array_Out + UsedPatch[s], d_Flu_Array_F_Out + UsedPatch[s],
Flu_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
if ( StoreFlux )
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flux_Array + UsedPatch[s], d_Flux_Array + UsedPatch[s],
Flux_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# ifdef MHD
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Mag_Array_Out + UsedPatch[s], d_Mag_Array_F_Out + UsedPatch[s],
Mag_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
if ( StoreElectric )
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Ele_Array + UsedPatch[s], d_Ele_Array + UsedPatch[s],
Ele_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_DE_Array_Out + UsedPatch[s], d_DE_Array_F_Out + UsedPatch[s],
DE_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# endif
} // for (int s=0; s<GPU_NStream; s++)
delete [] NPatch_per_Stream;
delete [] UsedPatch;
delete [] Flu_MemSize_In;
delete [] Flu_MemSize_Out;
delete [] Flux_MemSize;
# ifdef MHD
delete [] Mag_MemSize_In;
delete [] Mag_MemSize_Out;
delete [] Ele_MemSize;
# endif
# ifdef UNSPLIT_GRAVITY
delete [] USG_MemSize;
delete [] Corner_MemSize;
# endif
# ifdef DUAL_ENERGY
delete [] DE_MemSize_Out;
# endif
} // FUNCTION : CUAPI_Asyn_FluidSolver
#endif // #ifdef GPU
#include <iostream>
#include <vector>
#include <memory>
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/cudalegacy/NPP_staging.hpp"
#include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"
typedef NCVVectorAlloc<Ncv32f> FloatVector;
/////////////////////////////////////////////////////////////////////////////////////////
// Implementation specific constants
/////////////////////////////////////////////////////////////////////////////////////////
__device__ const float eps2 = 1e-6f;
/////////////////////////////////////////////////////////////////////////////////////////
// Additional defines
/////////////////////////////////////////////////////////////////////////////////////////
// rounded up division
inline int iDivUp(int a, int b)
{
return (a + b - 1)/b;
}
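// e.g. iDivUp(1000, 256) == 4, so a 1000-element vector is covered by four 256-thread blocks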
/////////////////////////////////////////////////////////////////////////////////////////
// Texture references
/////////////////////////////////////////////////////////////////////////////////////////
texture<float, 2, cudaReadModeElementType> tex_coarse;
texture<float, 2, cudaReadModeElementType> tex_fine;
texture<float, 2, cudaReadModeElementType> tex_I1;
texture<float, 2, cudaReadModeElementType> tex_I0;
texture<float, 2, cudaReadModeElementType> tex_Ix;
texture<float, 2, cudaReadModeElementType> tex_Ixx;
texture<float, 2, cudaReadModeElementType> tex_Ix0;
texture<float, 2, cudaReadModeElementType> tex_Iy;
texture<float, 2, cudaReadModeElementType> tex_Iyy;
texture<float, 2, cudaReadModeElementType> tex_Iy0;
texture<float, 2, cudaReadModeElementType> tex_Ixy;
texture<float, 1, cudaReadModeElementType> tex_u;
texture<float, 1, cudaReadModeElementType> tex_v;
texture<float, 1, cudaReadModeElementType> tex_du;
texture<float, 1, cudaReadModeElementType> tex_dv;
texture<float, 1, cudaReadModeElementType> tex_numerator_dudv;
texture<float, 1, cudaReadModeElementType> tex_numerator_u;
texture<float, 1, cudaReadModeElementType> tex_numerator_v;
texture<float, 1, cudaReadModeElementType> tex_inv_denominator_u;
texture<float, 1, cudaReadModeElementType> tex_inv_denominator_v;
texture<float, 1, cudaReadModeElementType> tex_diffusivity_x;
texture<float, 1, cudaReadModeElementType> tex_diffusivity_y;
/////////////////////////////////////////////////////////////////////////////////////////
// SUPPLEMENTARY FUNCTIONS
/////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// \brief performs pointwise summation of two vectors stored in device memory
/// \param d_res - pointer to resulting vector (device memory)
/// \param d_op1 - term #1 (device memory)
/// \param d_op2 - term #2 (device memory)
/// \param len - vector size
///////////////////////////////////////////////////////////////////////////////
__global__ void pointwise_add(float *d_res, const float *d_op1, const float *d_op2, const int len)
{
const int pos = blockIdx.x*blockDim.x + threadIdx.x;
if(pos >= len) return;
d_res[pos] = d_op1[pos] + d_op2[pos];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief wrapper for summation kernel.
/// Computes \b op1 + \b op2 and stores result to \b res
/// \param res array, containing op1 + op2 (device memory)
/// \param op1 term #1 (device memory)
/// \param op2 term #2 (device memory)
/// \param count vector size
///////////////////////////////////////////////////////////////////////////////
static void add(float *res, const float *op1, const float *op2, const int count, cudaStream_t stream)
{
dim3 threads(256);
dim3 blocks(iDivUp(count, threads.x));
pointwise_add<<<blocks, threads, 0, stream>>>(res, op1, op2, count);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief wrapper for summation kernel.
/// Increments \b res by \b rhs
/// \param res initial vector, will be replaced with result (device memory)
/// \param rhs increment (device memory)
/// \param count vector size
///////////////////////////////////////////////////////////////////////////////
static void add(float *res, const float *rhs, const int count, cudaStream_t stream)
{
add(res, res, rhs, count, stream);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief kernel for scaling vector by scalar
/// \param d_res scaled vector (device memory)
/// \param d_src source vector (device memory)
/// \param scale scalar to scale by
/// \param len vector size (number of elements)
///////////////////////////////////////////////////////////////////////////////
__global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos >= len) return;
d_res[pos] = d_src[pos] * scale;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief scale vector by scalar
///
/// kernel wrapper
/// \param d_res scaled vector (device memory)
/// \param d_src source vector (device memory)
/// \param scale scalar to scale by
/// \param len vector size (number of elements)
/// \param stream CUDA stream
///////////////////////////////////////////////////////////////////////////////
static void ScaleVector(float *d_res, const float *d_src, float scale, const int len, cudaStream_t stream)
{
dim3 threads(256);
dim3 blocks(iDivUp(len, threads.x));
scaleVector<<<blocks, threads, 0, stream>>>(d_res, d_src, scale, len);
}
const int SOR_TILE_WIDTH = 32;
const int SOR_TILE_HEIGHT = 6;
const int PSOR_TILE_WIDTH = 32;
const int PSOR_TILE_HEIGHT = 6;
const int PSOR_PITCH = PSOR_TILE_WIDTH + 4;
const int PSOR_HEIGHT = PSOR_TILE_HEIGHT + 4;
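// PSOR_PITCH/PSOR_HEIGHT pad the tile with a 2-element halo on every side: load_array() below
// places the current pixel at (threadIdx.x + 2, threadIdx.y + 2) and fills the border cells
// from neighbouring pixels, mirror-reflected at the image boundary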
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Compute smooth term diffusivity along x axis
///\param s (out) pointer to memory location for result (diffusivity)
///\param pos (in) position within shared memory array containing \b u
///\param u (in) shared memory array containing \b u
///\param v (in) shared memory array containing \b v
///\param du (in) shared memory array containing \b du
///\param dv (in) shared memory array containing \b dv
///////////////////////////////////////////////////////////////////////////////
__forceinline__ __device__ void diffusivity_along_x(float *s, int pos, const float *u, const float *v, const float *du, const float *dv)
{
//x derivative between pixels (i,j) and (i-1,j)
const int left = pos-1;
float u_x = u[pos] + du[pos] - u[left] - du[left];
float v_x = v[pos] + dv[pos] - v[left] - dv[left];
const int up = pos + PSOR_PITCH;
const int down = pos - PSOR_PITCH;
const int up_left = up - 1;
const int down_left = down-1;
//y derivative between pixels (i,j) and (i-1,j)
float u_y = 0.25f*(u[up] + du[up] + u[up_left] + du[up_left] - u[down] - du[down] - u[down_left] - du[down_left]);
float v_y = 0.25f*(v[up] + dv[up] + v[up_left] + dv[up_left] - v[down] - dv[down] - v[down_left] - dv[down_left]);
*s = 0.5f / sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2);
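    // i.e. s = 0.5 / sqrt(|grad(u+du)|^2 + |grad(v+dv)|^2 + eps2), the smoothness-term
    // diffusivity Psi'(.) used by the Brox et al. optical-flow model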
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Compute the smoothness-term diffusivity along the y axis
///\param s (out) pointer to memory location for result (diffusivity)
///\param pos (in) position within shared memory array containing \b u
///\param u (in) shared memory array containing \b u
///\param v (in) shared memory array containing \b v
///\param du (in) shared memory array containing \b du
///\param dv (in) shared memory array containing \b dv
///////////////////////////////////////////////////////////////////////////////
__forceinline__ __device__ void diffusivity_along_y(float *s, int pos, const float *u, const float *v, const float *du, const float *dv)
{
//y derivative between pixels (i,j) and (i,j-1)
const int down = pos-PSOR_PITCH;
float u_y = u[pos] + du[pos] - u[down] - du[down];
float v_y = v[pos] + dv[pos] - v[down] - dv[down];
const int right = pos + 1;
const int left = pos - 1;
const int down_right = down + 1;
const int down_left = down - 1;
//x derivative between pixels (i,j) and (i,j-1);
float u_x = 0.25f*(u[right] + u[down_right] + du[right] + du[down_right] - u[left] - u[down_left] - du[left] - du[down_left]);
float v_x = 0.25f*(v[right] + v[down_right] + dv[right] + dv[down_right] - v[left] - v[down_left] - dv[left] - dv[down_left]);
*s = 0.5f/sqrtf(u_x*u_x + v_x*v_x + u_y*u_y + v_y*v_y + eps2);
}
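// Both helpers evaluate the same regularized diffusivity
//     s = 0.5f / sqrtf(u_x^2 + v_x^2 + u_y^2 + v_y^2 + eps2),
// i.e. the derivative psi'(x) = 1 / (2 * sqrtf(x + eps2)) of the robust
// penalizer psi(x) = sqrtf(x + eps2), evaluated at the squared gradient of the
// updated flow (u + du, v + dv). The derivative across the pixel pair is a
// one-sided difference, while the orthogonal derivative is averaged over the
// two pixels, which is where the 0.25f factor comes from.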
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Load element of 2D global memory to shared memory
///\param smem pointer to shared memory array
///\param is shared memory array column
///\param js shared memory array row
///\param i global memory array column
///\param j global memory array row
///\param w number of columns in global memory array
///\param h number of rows in global memory array
///\param p global memory array pitch in floats
///////////////////////////////////////////////////////////////////////////////
template<int tex_id>
__forceinline__ __device__ void load_array_element(float *smem, int is, int js, int i, int j, int w, int h, int p)
{
//position within shared memory array
const int ijs = js * PSOR_PITCH + is;
//mirror reflection across borders
i = max(i, -i-1);
i = min(i, w-i+w-1);
j = max(j, -j-1);
j = min(j, h-j+h-1);
const int pos = j * p + i;
switch(tex_id){
case 0:
smem[ijs] = tex1Dfetch(tex_u, pos);
break;
case 1:
smem[ijs] = tex1Dfetch(tex_v, pos);
break;
case 2:
smem[ijs] = tex1Dfetch(tex_du, pos);
break;
case 3:
smem[ijs] = tex1Dfetch(tex_dv, pos);
break;
}
}
///////////////////////////////////////////////////////////////////////////////
///\brief Utility function. Load part (tile) of 2D global memory to shared memory
///\param smem pointer to target shared memory array
///\param ig column number within source
///\param jg row number within source
///\param w number of columns in global memory array
///\param h number of rows in global memory array
///\param p global memory array pitch in floats
///////////////////////////////////////////////////////////////////////////////
template<int tex>
__forceinline__ __device__ void load_array(float *smem, int ig, int jg, int w, int h, int p)
{
const int i = threadIdx.x + 2;
const int j = threadIdx.y + 2;
load_array_element<tex>(smem, i, j, ig, jg, w, h, p);//load current pixel
__syncthreads();
if(threadIdx.y < 2)
{
//load bottom shadow elements
load_array_element<tex>(smem, i, j-2, ig, jg-2, w, h, p);
if(threadIdx.x < 2)
{
//load bottom right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j-2, ig+PSOR_TILE_WIDTH, jg-2, w, h, p);
//load middle right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
}
else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
{
//load bottom left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j-2, ig-PSOR_TILE_WIDTH, jg-2, w, h, p);
//load middle left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
}
}
else if(threadIdx.y >= PSOR_TILE_HEIGHT-2)
{
//load upper shadow elements
load_array_element<tex>(smem, i, j+2, ig, jg+2, w, h, p);
if(threadIdx.x < 2)
{
//load upper right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j+2, ig+PSOR_TILE_WIDTH, jg+2, w, h, p);
//load middle right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
}
else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
{
//load upper left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j+2, ig-PSOR_TILE_WIDTH, jg+2, w, h, p);
//load middle left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
}
}
else
{
//load middle shadow elements
if(threadIdx.x < 2)
{
//load middle right shadow elements
load_array_element<tex>(smem, i+PSOR_TILE_WIDTH, j, ig+PSOR_TILE_WIDTH, jg, w, h, p);
}
else if(threadIdx.x >= PSOR_TILE_WIDTH-2)
{
//load middle left shadow elements
load_array_element<tex>(smem, i-PSOR_TILE_WIDTH, j, ig-PSOR_TILE_WIDTH, jg, w, h, p);
}
}
__syncthreads();
}
///////////////////////////////////////////////////////////////////////////////
/// \brief computes the matrix of the linearised system for \c du, \c dv
/// Computed values reside in GPU memory. \n
/// Matrix computation is divided into two steps. This kernel performs the first step:\n
/// - compute the smoothness-term diffusivity between pixels - psi dash smooth
/// - compute the robustness factor in the data term - psi dash data
/// \param diffusivity_x (in/out) diffusivity between pixels along x axis in smoothness term
/// \param diffusivity_y (in/out) diffusivity between pixels along y axis in smoothness term
/// \param denominator_u (in/out) precomputed part of expression for new du value in SOR iteration
/// \param denominator_v (in/out) precomputed part of expression for new dv value in SOR iteration
/// \param numerator_dudv (in/out) precomputed part of expression for new du and dv value in SOR iteration
/// \param numerator_u (in/out) precomputed part of expression for new du value in SOR iteration
/// \param numerator_v (in/out) precomputed part of expression for new dv value in SOR iteration
/// \param w (in) frame width
/// \param h (in) frame height
/// \param pitch (in) pitch in floats
/// \param alpha (in) alpha in Brox model (flow smoothness)
/// \param gamma (in) gamma in Brox model (edge importance)
///////////////////////////////////////////////////////////////////////////////
__global__ void prepare_sor_stage_1_tex(float *diffusivity_x, float *diffusivity_y,
float *denominator_u, float *denominator_v,
float *numerator_dudv,
float *numerator_u, float *numerator_v,
int w, int h, int s,
float alpha, float gamma)
{
__shared__ float u[PSOR_PITCH * PSOR_HEIGHT];
__shared__ float v[PSOR_PITCH * PSOR_HEIGHT];
__shared__ float du[PSOR_PITCH * PSOR_HEIGHT];
__shared__ float dv[PSOR_PITCH * PSOR_HEIGHT];
//position within tile
const int i = threadIdx.x;
const int j = threadIdx.y;
//position within smem arrays
const int ijs = (j+2) * PSOR_PITCH + i + 2;
//position within global memory
const int ig = blockIdx.x * blockDim.x + threadIdx.x;
const int jg = blockIdx.y * blockDim.y + threadIdx.y;
const int ijg = jg * s + ig;
//position within texture
float x = (float)ig + 0.5f;
float y = (float)jg + 0.5f;
//load u and v to smem
load_array<0>(u, ig, jg, w, h, s);
load_array<1>(v, ig, jg, w, h, s);
load_array<2>(du, ig, jg, w, h, s);
load_array<3>(dv, ig, jg, w, h, s);
//warped position
float wx = (x + u[ijs])/(float)w;
float wy = (y + v[ijs])/(float)h;
x /= (float)w;
y /= (float)h;
//compute image derivatives
const float Iz = tex2D(tex_I1, wx, wy) - tex2D(tex_I0, x, y);
const float Ix = tex2D(tex_Ix, wx, wy);
const float Ixz = Ix - tex2D(tex_Ix0, x, y);
const float Ixy = tex2D(tex_Ixy, wx, wy);
const float Ixx = tex2D(tex_Ixx, wx, wy);
const float Iy = tex2D(tex_Iy, wx, wy);
const float Iyz = Iy - tex2D(tex_Iy0, x, y);
const float Iyy = tex2D(tex_Iyy, wx, wy);
//compute data term
float q0, q1, q2;
q0 = Iz + Ix * du[ijs] + Iy * dv[ijs];
q1 = Ixz + Ixx * du[ijs] + Ixy * dv[ijs];
q2 = Iyz + Ixy * du[ijs] + Iyy * dv[ijs];
float data_term = 0.5f * rsqrtf(q0*q0 + gamma*(q1*q1 + q2*q2) + eps2);
//scale data term by 1/alpha
data_term /= alpha;
//compute smoothness term (diffusivity)
float sx, sy;
if(ig >= w || jg >= h) return;
diffusivity_along_x(&sx, ijs, u, v, du, dv);
diffusivity_along_y(&sy, ijs, u, v, du, dv);
if(ig == 0) sx = 0.0f;
if(jg == 0) sy = 0.0f;
numerator_dudv[ijg] = data_term * (Ix*Iy + gamma * Ixy*(Ixx + Iyy));
numerator_u[ijg] = data_term * (Ix*Iz + gamma * (Ixx*Ixz + Ixy*Iyz));
numerator_v[ijg] = data_term * (Iy*Iz + gamma * (Iyy*Iyz + Ixy*Ixz));
denominator_u[ijg] = data_term * (Ix*Ix + gamma * (Ixy*Ixy + Ixx*Ixx));
denominator_v[ijg] = data_term * (Iy*Iy + gamma * (Ixy*Ixy + Iyy*Iyy));
diffusivity_x[ijg] = sx;
diffusivity_y[ijg] = sy;
}
///////////////////////////////////////////////////////////////////////////////
///\brief computes the second step of the matrix of the linearised system for \c du, \c dv:
/// adds the neighbouring smoothness diffusivities to each denominator and stores its reciprocal
///\param inv_denominator_u (in/out) denominator for du; holds its reciprocal on output
///\param inv_denominator_v (in/out) denominator for dv; holds its reciprocal on output
///\param w (in) frame width
///\param h (in) frame height
///\param s (in) pitch in floats
///////////////////////////////////////////////////////////////////////////////
__global__ void prepare_sor_stage_2(float *inv_denominator_u, float *inv_denominator_v,
int w, int h, int s)
{
__shared__ float sx[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)];
__shared__ float sy[(PSOR_TILE_WIDTH+1) * (PSOR_TILE_HEIGHT+1)];
//position within tile
const int i = threadIdx.x;
const int j = threadIdx.y;
//position within smem arrays
const int ijs = j*(PSOR_TILE_WIDTH+1) + i;
//position within global memory
const int ig = blockIdx.x * blockDim.x + threadIdx.x;
const int jg = blockIdx.y * blockDim.y + threadIdx.y;
const int ijg = jg*s + ig;
int inside = ig < w && jg < h;
float denom_u;
float denom_v;
if(inside)
{
denom_u = inv_denominator_u[ijg];
denom_v = inv_denominator_v[ijg];
}
if(inside)
{
sx[ijs] = tex1Dfetch(tex_diffusivity_x, ijg);
sy[ijs] = tex1Dfetch(tex_diffusivity_y, ijg);
}
else
{
sx[ijs] = 0.0f;
sy[ijs] = 0.0f;
}
int up = ijs+PSOR_TILE_WIDTH+1;
if(j == PSOR_TILE_HEIGHT-1)
{
if(jg < h-1 && inside)
{
sy[up] = tex1Dfetch(tex_diffusivity_y, ijg + s);
}
else
{
sy[up] = 0.0f;
}
}
int right = ijs + 1;
if(threadIdx.x == PSOR_TILE_WIDTH-1)
{
if(ig < w-1 && inside)
{
sx[right] = tex1Dfetch(tex_diffusivity_x, ijg + 1);
}
else
{
sx[right] = 0.0f;
}
}
__syncthreads();
float diffusivity_sum;
diffusivity_sum = sx[ijs] + sx[ijs+1] + sy[ijs] + sy[ijs+PSOR_TILE_WIDTH+1];
if(inside)
{
denom_u += diffusivity_sum;
denom_v += diffusivity_sum;
inv_denominator_u[ijg] = 1.0f/denom_u;
inv_denominator_v[ijg] = 1.0f/denom_v;
}
}
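// Taken together, the two kernels above assemble, per pixel, the coefficients
// of the linearised system solved by the SOR passes below: stage 1 computes the
// data-term weight 0.5 * rsqrtf(q0^2 + gamma*(q1^2 + q2^2) + eps2) / alpha, the
// corresponding numerators and raw denominators, and the smoothness
// diffusivities; stage 2 adds the four neighbouring diffusivities to each
// denominator and stores its reciprocal, so the solver only needs a multiply
// per update.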
/////////////////////////////////////////////////////////////////////////////////////////
// Red-Black SOR
/////////////////////////////////////////////////////////////////////////////////////////
template<int isBlack> __global__ void sor_pass(float *new_du,
float *new_dv,
const float *g_inv_denominator_u,
const float *g_inv_denominator_v,
const float *g_numerator_u,
const float *g_numerator_v,
const float *g_numerator_dudv,
float omega,
int width,
int height,
int stride)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i >= width || j >= height)
return;
const int pos = j * stride + i;
const int pos_r = i < width - 1 ? pos + 1 : pos;
const int pos_u = j < height - 1 ? pos + stride : pos;
const int pos_d = j > 0 ? pos - stride : pos;
const int pos_l = i > 0 ? pos - 1 : pos;
//load smooth term
float s_up, s_left, s_right, s_down;
s_left = tex1Dfetch(tex_diffusivity_x, pos);
s_down = tex1Dfetch(tex_diffusivity_y, pos);
if(i < width-1)
s_right = tex1Dfetch(tex_diffusivity_x, pos_r);
else
s_right = 0.0f; //Neumann BC
if(j < height-1)
s_up = tex1Dfetch(tex_diffusivity_y, pos_u);
else
s_up = 0.0f; //Neumann BC
//load u, v and du, dv
float u_up, u_left, u_right, u_down, u;
float v_up, v_left, v_right, v_down, v;
float du_up, du_left, du_right, du_down, du;
float dv_up, dv_left, dv_right, dv_down, dv;
u_left = tex1Dfetch(tex_u, pos_l);
u_right = tex1Dfetch(tex_u, pos_r);
u_down = tex1Dfetch(tex_u, pos_d);
u_up = tex1Dfetch(tex_u, pos_u);
u = tex1Dfetch(tex_u, pos);
v_left = tex1Dfetch(tex_v, pos_l);
v_right = tex1Dfetch(tex_v, pos_r);
v_down = tex1Dfetch(tex_v, pos_d);
v = tex1Dfetch(tex_v, pos);
v_up = tex1Dfetch(tex_v, pos_u);
du = tex1Dfetch(tex_du, pos);
du_left = tex1Dfetch(tex_du, pos_l);
du_right = tex1Dfetch(tex_du, pos_r);
du_down = tex1Dfetch(tex_du, pos_d);
du_up = tex1Dfetch(tex_du, pos_u);
dv = tex1Dfetch(tex_dv, pos);
dv_left = tex1Dfetch(tex_dv, pos_l);
dv_right = tex1Dfetch(tex_dv, pos_r);
dv_down = tex1Dfetch(tex_dv, pos_d);
dv_up = tex1Dfetch(tex_dv, pos_u);
float numerator_dudv = g_numerator_dudv[pos];
if((i+j)%2 == isBlack)
{
// update du
float numerator_u = (s_left*(u_left + du_left) + s_up*(u_up + du_up) + s_right*(u_right + du_right) + s_down*(u_down + du_down) -
u * (s_left + s_right + s_up + s_down) - g_numerator_u[pos] - numerator_dudv*dv);
du = (1.0f - omega) * du + omega * g_inv_denominator_u[pos] * numerator_u;
// update dv
float numerator_v = (s_left*(v_left + dv_left) + s_up*(v_up + dv_up) + s_right*(v_right + dv_right) + s_down*(v_down + dv_down) -
v * (s_left + s_right + s_up + s_down) - g_numerator_v[pos] - numerator_dudv*du);
dv = (1.0f - omega) * dv + omega * g_inv_denominator_v[pos] * numerator_v;
}
new_du[pos] = du;
new_dv[pos] = dv;
}
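// The <isBlack> template parameter selects one colour of the red-black
// checkerboard: pixels with (i + j) % 2 == isBlack are updated from their four
// neighbours, which all belong to the opposite colour, so a half-sweep has no
// intra-colour data dependencies and can run fully in parallel; pixels of the
// other colour are copied through unchanged. Alternating sor_pass<0> and
// sor_pass<1> therefore performs one complete red-black SOR iteration.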
///////////////////////////////////////////////////////////////////////////////
// utility functions
///////////////////////////////////////////////////////////////////////////////
void initTexture1D(texture<float, 1, cudaReadModeElementType> &tex)
{
tex.addressMode[0] = cudaAddressModeClamp;
tex.filterMode = cudaFilterModePoint;
tex.normalized = false;
}
void initTexture2D(texture<float, 2, cudaReadModeElementType> &tex)
{
tex.addressMode[0] = cudaAddressModeMirror;
tex.addressMode[1] = cudaAddressModeMirror;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = true;
}
void InitTextures()
{
initTexture2D(tex_I0);
initTexture2D(tex_I1);
initTexture2D(tex_fine); // for downsampling
initTexture2D(tex_coarse); // for prolongation
initTexture2D(tex_Ix);
initTexture2D(tex_Ixx);
initTexture2D(tex_Ix0);
initTexture2D(tex_Iy);
initTexture2D(tex_Iyy);
initTexture2D(tex_Iy0);
initTexture2D(tex_Ixy);
initTexture1D(tex_u);
initTexture1D(tex_v);
initTexture1D(tex_du);
initTexture1D(tex_dv);
initTexture1D(tex_diffusivity_x);
initTexture1D(tex_diffusivity_y);
initTexture1D(tex_inv_denominator_u);
initTexture1D(tex_inv_denominator_v);
initTexture1D(tex_numerator_dudv);
initTexture1D(tex_numerator_u);
initTexture1D(tex_numerator_v);
}
namespace
{
struct ImagePyramid
{
std::vector<FloatVector*> img0;
std::vector<FloatVector*> img1;
std::vector<Ncv32u> w;
std::vector<Ncv32u> h;
explicit ImagePyramid(int outer_iterations)
{
img0.reserve(outer_iterations);
img1.reserve(outer_iterations);
w.reserve(outer_iterations);
h.reserve(outer_iterations);
}
~ImagePyramid()
{
w.clear();
h.clear();
for (int i = static_cast<int>(img0.size()) - 1; i >= 0; --i)
{
delete img1[i];
delete img0[i];
}
img0.clear();
img1.clear();
}
};
}
/////////////////////////////////////////////////////////////////////////////////////////
// MAIN FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////
NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
INCVMemAllocator &gpu_mem_allocator,
const NCVMatrix<Ncv32f> &frame0,
const NCVMatrix<Ncv32f> &frame1,
NCVMatrix<Ncv32f> &uOut,
NCVMatrix<Ncv32f> &vOut,
cudaStream_t stream)
{
ncvAssertPrintReturn(desc.alpha > 0.0f , "Invalid alpha" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.gamma >= 0.0f , "Invalid gamma" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_inner_iterations > 0 , "Invalid number of inner iterations" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_outer_iterations > 0 , "Invalid number of outer iterations" , NCV_INCONSISTENT_INPUT);
ncvAssertPrintReturn(desc.number_of_solver_iterations > 0, "Invalid number of solver iterations", NCV_INCONSISTENT_INPUT);
const Ncv32u kSourceWidth = frame0.width();
const Ncv32u kSourceHeight = frame0.height();
ncvAssertPrintReturn(frame1.width() == kSourceWidth && frame1.height() == kSourceHeight, "Frame dims do not match", NCV_INCONSISTENT_INPUT);
ncvAssertReturn(uOut.width() == kSourceWidth && vOut.width() == kSourceWidth &&
uOut.height() == kSourceHeight && vOut.height() == kSourceHeight, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(gpu_mem_allocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
bool kSkipProcessing = gpu_mem_allocator.isCounting();
int cuda_device;
ncvAssertCUDAReturn(cudaGetDevice(&cuda_device), NCV_CUDA_ERROR);
cudaDeviceProp device_props;
ncvAssertCUDAReturn(cudaGetDeviceProperties(&device_props, cuda_device), NCV_CUDA_ERROR);
Ncv32u alignmentValue = gpu_mem_allocator.alignment ();
const Ncv32u kStrideAlignmentFloat = alignmentValue / sizeof(float);
const Ncv32u kSourcePitch = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float);
const Ncv32f scale_factor = desc.scale_factor;
const Ncv32f alpha = desc.alpha;
const Ncv32f gamma = desc.gamma;
const Ncv32u kSizeInPixelsAligned = alignUp(kSourceWidth, kStrideAlignmentFloat)*kSourceHeight;
#if defined SAFE_VECTOR_DECL
#undef SAFE_VECTOR_DECL
#endif
#define SAFE_VECTOR_DECL(name, allocator, size) \
FloatVector name((allocator), (size)); \
ncvAssertReturn(name.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
// matrix elements
SAFE_VECTOR_DECL(diffusivity_x, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(diffusivity_y, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(denom_u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(denom_v, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_dudv, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(num_v, gpu_mem_allocator, kSizeInPixelsAligned);
// flow components
SAFE_VECTOR_DECL(u, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(v, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(u_new, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(v_new, gpu_mem_allocator, kSizeInPixelsAligned);
// flow increments
SAFE_VECTOR_DECL(du, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(dv, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(du_new, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(dv_new, gpu_mem_allocator, kSizeInPixelsAligned);
// temporary storage
SAFE_VECTOR_DECL(device_buffer, gpu_mem_allocator,
alignUp(kSourceWidth, kStrideAlignmentFloat) * alignUp(kSourceHeight, kStrideAlignmentFloat));
// image derivatives
SAFE_VECTOR_DECL(Ix, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ixx, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ix0, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iy, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iyy, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Iy0, gpu_mem_allocator, kSizeInPixelsAligned);
SAFE_VECTOR_DECL(Ixy, gpu_mem_allocator, kSizeInPixelsAligned);
// spatial derivative filter size
const int kDFilterSize = 5;
SAFE_VECTOR_DECL(derivativeFilter, gpu_mem_allocator, kDFilterSize);
if (!kSkipProcessing)
{
const float derivativeFilterHost[kDFilterSize] = {1.0f, -8.0f, 0.0f, 8.0f, -1.0f};
ncvAssertCUDAReturn(cudaMemcpy(derivativeFilter.ptr(), derivativeFilterHost, sizeof(float) * kDFilterSize,
cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
InitTextures();
}
//prepare image pyramid
ImagePyramid pyr(desc.number_of_outer_iterations);
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
float scale = 1.0f;
//cuda arrays for frames
std::auto_ptr<FloatVector> pI0(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
ncvAssertReturn(pI0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
std::auto_ptr<FloatVector> pI1(new FloatVector(gpu_mem_allocator, kSizeInPixelsAligned));
ncvAssertReturn(pI1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
if (!kSkipProcessing)
{
//copy frame data to device
size_t dst_width_in_bytes = alignUp(kSourceWidth, kStrideAlignmentFloat) * sizeof(float);
size_t src_width_in_bytes = kSourceWidth * sizeof(float);
size_t src_pitch_in_bytes = frame0.pitch();
ncvAssertCUDAReturn( cudaMemcpy2DAsync(pI0->ptr(), dst_width_in_bytes, frame0.ptr(),
src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );
ncvAssertCUDAReturn( cudaMemcpy2DAsync(pI1->ptr(), dst_width_in_bytes, frame1.ptr(),
src_pitch_in_bytes, src_width_in_bytes, kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );
}
FloatVector* I0 = pI0.release();
FloatVector* I1 = pI1.release();
//prepare pyramid
pyr.img0.push_back(I0);
pyr.img1.push_back(I1);
pyr.w.push_back(kSourceWidth);
pyr.h.push_back(kSourceHeight);
scale *= scale_factor;
Ncv32u prev_level_width = kSourceWidth;
Ncv32u prev_level_height = kSourceHeight;
while((prev_level_width > 15) && (prev_level_height > 15) && (static_cast<Ncv32u>(pyr.img0.size()) < desc.number_of_outer_iterations))
{
//current resolution
Ncv32u level_width = static_cast<Ncv32u>(ceilf(kSourceWidth * scale));
Ncv32u level_height = static_cast<Ncv32u>(ceilf(kSourceHeight * scale));
Ncv32u level_width_aligned = alignUp(level_width, kStrideAlignmentFloat);
Ncv32u buffer_size = alignUp(level_width, kStrideAlignmentFloat) * level_height; // buffer size in floats
Ncv32u prev_level_pitch = alignUp(prev_level_width, kStrideAlignmentFloat) * sizeof(float);
std::auto_ptr<FloatVector> level_frame0(new FloatVector(gpu_mem_allocator, buffer_size));
ncvAssertReturn(level_frame0->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
std::auto_ptr<FloatVector> level_frame1(new FloatVector(gpu_mem_allocator, buffer_size));
ncvAssertReturn(level_frame1->isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
if (!kSkipProcessing)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);
NcvSize32u srcSize (prev_level_width, prev_level_height);
NcvSize32u dstSize (level_width, level_height);
NcvRect32u srcROI (0, 0, prev_level_width, prev_level_height);
NcvRect32u dstROI (0, 0, level_width, level_height);
// frame 0
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I0->ptr(), srcSize, prev_level_pitch, srcROI,
level_frame0->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) );
// frame 1
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (I1->ptr(), srcSize, prev_level_pitch, srcROI,
level_frame1->ptr(), dstSize, level_width_aligned * sizeof (float), dstROI, scale_factor, scale_factor, nppStSupersample) );
}
I0 = level_frame0.release();
I1 = level_frame1.release();
//store pointers
pyr.img0.push_back(I0);
pyr.img1.push_back(I1);
pyr.w.push_back(level_width);
pyr.h.push_back(level_height);
scale *= scale_factor;
prev_level_width = level_width;
prev_level_height = level_height;
}
if (!kSkipProcessing)
{
//initial flow values are 0
ncvAssertCUDAReturn(cudaMemsetAsync(u.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemsetAsync(v.ptr(), 0, kSizeInPixelsAligned * sizeof(float), stream), NCV_CUDA_ERROR);
//select images with lowest resolution
size_t pitch = alignUp(pyr.w.back(), kStrideAlignmentFloat) * sizeof(float);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, pyr.img0.back()->ptr(), channel_desc, pyr.w.back(), pyr.h.back(), pitch), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, pyr.img1.back()->ptr(), channel_desc, pyr.w.back(), pyr.h.back(), pitch), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);
FloatVector* ptrU = &u;
FloatVector* ptrV = &v;
FloatVector* ptrUNew = &u_new;
FloatVector* ptrVNew = &v_new;
std::vector<FloatVector*>::const_reverse_iterator img0Iter = pyr.img0.rbegin();
std::vector<FloatVector*>::const_reverse_iterator img1Iter = pyr.img1.rbegin();
//outer loop
//warping fixed point iteration
while(!pyr.w.empty())
{
//current grid dimensions
const Ncv32u kLevelWidth = pyr.w.back();
const Ncv32u kLevelHeight = pyr.h.back();
const Ncv32u kLevelStride = alignUp(kLevelWidth, kStrideAlignmentFloat);
//size of current image in bytes
const int kLevelSizeInBytes = kLevelStride * kLevelHeight * sizeof(float);
//number of points at current resolution
const int kLevelSizeInPixels = kLevelStride * kLevelHeight;
//initial guess for du and dv
ncvAssertCUDAReturn(cudaMemsetAsync(du.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemsetAsync(dv.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
//texture format descriptor
cudaChannelFormatDesc ch_desc = cudaCreateChannelDesc<float>();
I0 = *img0Iter;
I1 = *img1Iter;
++img0Iter;
++img1Iter;
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, I0->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, I1->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
//compute derivatives
dim3 dBlocks(iDivUp(kLevelWidth, 32), iDivUp(kLevelHeight, 6));
dim3 dThreads(32, 6);
const int kPitchTex = kLevelStride * sizeof(float);
NcvSize32u srcSize(kLevelWidth, kLevelHeight);
Ncv32u nSrcStep = kLevelStride * sizeof(float);
NcvRect32u oROI(0, 0, kLevelWidth, kLevelHeight);
// Ix0
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Ix0.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iy0
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I0->ptr(), srcSize, nSrcStep, Iy0.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ix
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Ix.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iy
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (I1->ptr(), srcSize, nSrcStep, Iy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ixx
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Ix.ptr(), srcSize, nSrcStep, Ixx.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Iyy
ncvAssertReturnNcvStat( nppiStFilterColumnBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Iyy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
// Ixy
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Ixy.ptr(), srcSize, nSrcStep, oROI,
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix, Ix.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixx, Ixx.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix0, Ix0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy, Iy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iyy, Iyy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy0, Iy0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixy, Ixy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
// flow
ncvAssertCUDAReturn(cudaBindTexture(0, tex_u, ptrU->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_v, ptrV->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
// flow increments
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
dim3 psor_blocks(iDivUp(kLevelWidth, PSOR_TILE_WIDTH), iDivUp(kLevelHeight, PSOR_TILE_HEIGHT));
dim3 psor_threads(PSOR_TILE_WIDTH, PSOR_TILE_HEIGHT);
dim3 sor_blocks(iDivUp(kLevelWidth, SOR_TILE_WIDTH), iDivUp(kLevelHeight, SOR_TILE_HEIGHT));
dim3 sor_threads(SOR_TILE_WIDTH, SOR_TILE_HEIGHT);
// inner loop
// lagged nonlinearity fixed point iteration
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);
for (Ncv32u current_inner_iteration = 0; current_inner_iteration < desc.number_of_inner_iterations; ++current_inner_iteration)
{
//compute coefficients
prepare_sor_stage_1_tex<<<psor_blocks, psor_threads, 0, stream>>>
(diffusivity_x.ptr(),
diffusivity_y.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_dudv.ptr(),
num_u.ptr(),
num_v.ptr(),
kLevelWidth,
kLevelHeight,
kLevelStride,
alpha,
gamma);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
prepare_sor_stage_2<<<psor_blocks, psor_threads, 0, stream>>>(denom_u.ptr(), denom_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
// linear system coefficients
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_u, denom_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_v, denom_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
//solve linear system
for (Ncv32u solver_iteration = 0; solver_iteration < desc.number_of_solver_iterations; ++solver_iteration)
{
float omega = 1.99f;
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
sor_pass<0><<<sor_blocks, sor_threads, 0, stream>>>
(du_new.ptr(),
dv_new.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_u.ptr(),
num_v.ptr(),
num_dudv.ptr(),
omega,
kLevelWidth,
kLevelHeight,
kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
sor_pass<1><<<sor_blocks, sor_threads, 0, stream>>>
(du.ptr(),
dv.ptr(),
denom_u.ptr(),
denom_v.ptr(),
num_u.ptr(),
num_v.ptr(),
num_dudv.ptr(),
omega,
kLevelWidth,
kLevelHeight,
kLevelStride);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
}//end of solver loop
}// end of inner loop
//update u and v
add(ptrU->ptr(), du.ptr(), kLevelSizeInPixels, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
add(ptrV->ptr(), dv.ptr(), kLevelSizeInPixels, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
//prolongate using texture
pyr.w.pop_back();
pyr.h.pop_back();
if (!pyr.w.empty())
{
//compute new image size
Ncv32u nw = pyr.w.back();
Ncv32u nh = pyr.h.back();
Ncv32u ns = alignUp(nw, kStrideAlignmentFloat);
dim3 p_blocks(iDivUp(nw, 32), iDivUp(nh, 8));
dim3 p_threads(32, 8);
NcvSize32u inner_srcSize (kLevelWidth, kLevelHeight);
NcvSize32u dstSize (nw, nh);
NcvRect32u srcROI (0, 0, kLevelWidth, kLevelHeight);
NcvRect32u dstROI (0, 0, nw, nh);
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrU->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI,
ptrUNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );
ScaleVector(ptrUNew->ptr(), ptrUNew->ptr(), 1.0f/scale_factor, ns * nh, stream);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrV->ptr(), inner_srcSize, kLevelStride * sizeof (float), srcROI,
ptrVNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );
ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream);
ncvAssertCUDALastErrorReturn((int)NCV_CUDA_ERROR);
cv::cuda::device::swap<FloatVector*>(ptrU, ptrUNew);
cv::cuda::device::swap<FloatVector*>(ptrV, ptrVNew);
}
scale /= scale_factor;
}
// end of warping iterations
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR);
ncvAssertCUDAReturn( cudaMemcpy2DAsync
(uOut.ptr(), uOut.pitch(), ptrU->ptr(),
kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );
ncvAssertCUDAReturn( cudaMemcpy2DAsync
(vOut.ptr(), vOut.pitch(), ptrV->ptr(),
kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );
ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR);
}
return NCV_SUCCESS;
}
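// Overall structure of NCVBroxOpticalFlow: a scale pyramid of both frames is
// built with supersampled resizes, and the levels are then processed from
// coarsest to finest. At each level the outer (warping) iteration linearises
// the Brox energy around the current flow (the warp happens through the tex_I1
// lookups in prepare_sor_stage_1_tex), the inner loop recomputes the lagged
// nonlinearities (diffusivities and the data-term weight), red-black SOR solves
// each linear system for du and dv, the increments are added to u and v, and
// the flow is finally prolongated to the next finer level with bicubic
// resampling and scaled by 1/scale_factor.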
#include <cfloat>
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
#define BLOCK_SIZE 128
#define BLOCK_SHIFT 7
#define MAX_BLOCKS 256
#define UCHAR_MIN 0
static __device__ uint block_count = 0;
__DEVICE__
void checkMinMax1(uchar value, int index, int element_x, int element_y,
uchar* min_vals, uchar* max_vals, int* min_loc_xs,
int* min_loc_ys, int* max_loc_xs, int* max_loc_ys) {
if (value < min_vals[index]) {
min_vals[index] = value;
min_loc_xs[index] = element_x;
min_loc_ys[index] = element_y;
}
if (value > max_vals[index]) {
max_vals[index] = value;
max_loc_xs[index] = element_x;
max_loc_ys[index] = element_y;
}
}
__DEVICE__
void checkMinMax1(float value, int index, int element_x, int element_y,
float* min_vals, float* max_vals, int* min_loc_xs,
int* min_loc_ys, int* max_loc_xs, int* max_loc_ys) {
if (value < min_vals[index]) {
min_vals[index] = value;
min_loc_xs[index] = element_x;
min_loc_ys[index] = element_y;
}
if (value > max_vals[index]) {
max_vals[index] = value;
max_loc_xs[index] = element_x;
max_loc_ys[index] = element_y;
}
}
__DEVICE__
void checkMinMax2(int index0, int index1, uchar* min_vals, uchar* max_vals,
int* min_loc_xs, int* min_loc_ys, int* max_loc_xs,
int* max_loc_ys) {
if (min_vals[index1] < min_vals[index0]) {
min_vals[index0] = min_vals[index1];
min_loc_xs[index0] = min_loc_xs[index1];
min_loc_ys[index0] = min_loc_ys[index1];
}
else if (min_vals[index1] == min_vals[index0]) {
if (min_loc_ys[index1] < min_loc_ys[index0]) {
min_loc_xs[index0] = min_loc_xs[index1];
min_loc_ys[index0] = min_loc_ys[index1];
}
else if (min_loc_ys[index1] == min_loc_ys[index0]) {
if (min_loc_xs[index1] < min_loc_xs[index0]) {
min_loc_xs[index0] = min_loc_xs[index1];
}
}
else {
}
}
else {
}
if (max_vals[index1] > max_vals[index0]) {
max_vals[index0] = max_vals[index1];
max_loc_xs[index0] = max_loc_xs[index1];
max_loc_ys[index0] = max_loc_ys[index1];
}
else if (max_vals[index1] == max_vals[index0]) {
if (max_loc_ys[index1] < max_loc_ys[index0]) {
max_loc_xs[index0] = max_loc_xs[index1];
max_loc_ys[index0] = max_loc_ys[index1];
}
else if (max_loc_ys[index1] == max_loc_ys[index0]) {
if (max_loc_xs[index1] < max_loc_xs[index0]) {
max_loc_xs[index0] = max_loc_xs[index1];
}
}
else {
}
}
else {
}
}
__DEVICE__
void checkMinMax2(int index0, int index1, float* min_vals, float* max_vals,
int* min_loc_xs, int* min_loc_ys, int* max_loc_xs,
int* max_loc_ys) {
if (min_vals[index1] < min_vals[index0]) {
min_vals[index0] = min_vals[index1];
min_loc_xs[index0] = min_loc_xs[index1];
min_loc_ys[index0] = min_loc_ys[index1];
}
else if (min_vals[index1] == min_vals[index0]) {
if (min_loc_ys[index1] < min_loc_ys[index0]) {
min_loc_xs[index0] = min_loc_xs[index1];
min_loc_ys[index0] = min_loc_ys[index1];
}
else if (min_loc_ys[index1] == min_loc_ys[index0]) {
if (min_loc_xs[index1] < min_loc_xs[index0]) {
min_loc_xs[index0] = min_loc_xs[index1];
}
}
else {
}
}
else {
}
if (max_vals[index1] > max_vals[index0]) {
max_vals[index0] = max_vals[index1];
max_loc_xs[index0] = max_loc_xs[index1];
max_loc_ys[index0] = max_loc_ys[index1];
}
else if (max_vals[index1] == max_vals[index0]) {
if (max_loc_ys[index1] < max_loc_ys[index0]) {
max_loc_xs[index0] = max_loc_xs[index1];
max_loc_ys[index0] = max_loc_ys[index1];
}
else if (max_loc_ys[index1] == max_loc_ys[index0]) {
if (max_loc_xs[index1] < max_loc_xs[index0]) {
max_loc_xs[index0] = max_loc_xs[index1];
}
}
else {
}
}
else {
}
}
__DEVICE__
void checkMinMax3(int g_index, int sh_index, uchar* g_min_vals,
uchar* g_max_vals, int* g_min_loc_xs, int* g_min_loc_ys,
int* g_max_loc_xs, int* g_max_loc_ys, uchar* min_vals,
uchar* max_vals, int* min_loc_xs, int* min_loc_ys,
int* max_loc_xs, int* max_loc_ys) {
if (g_min_vals[g_index] < min_vals[sh_index]) {
min_vals[sh_index] = g_min_vals[g_index];
min_loc_xs[sh_index] = g_min_loc_xs[g_index];
min_loc_ys[sh_index] = g_min_loc_ys[g_index];
}
if (g_max_vals[g_index] > max_vals[sh_index]) {
max_vals[sh_index] = g_max_vals[g_index];
max_loc_xs[sh_index] = g_max_loc_xs[g_index];
max_loc_ys[sh_index] = g_max_loc_ys[g_index];
}
}
__DEVICE__
void checkMinMax3(int g_index, int sh_index, float* g_min_vals,
float* g_max_vals, int* g_min_loc_xs, int* g_min_loc_ys,
int* g_max_loc_xs, int* g_max_loc_ys, float* min_vals,
float* max_vals, int* min_loc_xs, int* min_loc_ys,
int* max_loc_xs, int* max_loc_ys) {
if (g_min_vals[g_index] < min_vals[sh_index]) {
min_vals[sh_index] = g_min_vals[g_index];
min_loc_xs[sh_index] = g_min_loc_xs[g_index];
min_loc_ys[sh_index] = g_min_loc_ys[g_index];
}
if (g_max_vals[g_index] > max_vals[sh_index]) {
max_vals[sh_index] = g_max_vals[g_index];
max_loc_xs[sh_index] = g_max_loc_xs[g_index];
max_loc_ys[sh_index] = g_max_loc_ys[g_index];
}
}
__global__
void minMaxLocKernel(const uchar* src, int rows, int cols, int src_stride,
const uchar* mask, int mask_stride, uint blocks,
int* buffer) {
__shared__ uchar min_vals[BLOCK_SIZE];
__shared__ uchar max_vals[BLOCK_SIZE];
__shared__ int min_loc_xs[BLOCK_SIZE];
__shared__ int min_loc_ys[BLOCK_SIZE];
__shared__ int max_loc_xs[BLOCK_SIZE];
__shared__ int max_loc_ys[BLOCK_SIZE];
int threadIdx_x = threadIdx.x;
int element_x = ((blockIdx.x << BLOCK_SHIFT) + threadIdx_x) << 2;
int element_y = blockIdx.y;
min_vals[threadIdx_x] = UCHAR_MAX;
max_vals[threadIdx_x] = UCHAR_MIN;
min_loc_xs[threadIdx_x] = 0;
min_loc_ys[threadIdx_x] = 0;
max_loc_xs[threadIdx_x] = 0;
max_loc_ys[threadIdx_x] = 0;
uchar* input;
uchar* mask_row;
uchar value0, value1, value2, value3;
uchar mask_value0, mask_value1, mask_value2, mask_value3;
for (; element_y < rows; element_y += gridDim.y) {
if (element_x < cols) {
input = (uchar*)((uchar*)src + element_y * src_stride);
      value0 = input[element_x];
      // guard the +1..+3 loads so a partial quad at the end of the row does
      // not read past the last valid element
      value1 = (element_x + 1 < cols) ? input[element_x + 1] : value0;
      value2 = (element_x + 2 < cols) ? input[element_x + 2] : value0;
      value3 = (element_x + 3 < cols) ? input[element_x + 3] : value0;
if (mask == nullptr) {
checkMinMax1(value0, threadIdx_x, element_x, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
if (element_x < cols - 1) {
checkMinMax1(value1, threadIdx_x, element_x + 1, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (element_x < cols - 2) {
checkMinMax1(value2, threadIdx_x, element_x + 2, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (element_x < cols - 3) {
checkMinMax1(value3, threadIdx_x, element_x + 3, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
}
else {
mask_row = (uchar*)((uchar*)mask + element_y * mask_stride);
        mask_value0 = mask_row[element_x];
        mask_value1 = (element_x + 1 < cols) ? mask_row[element_x + 1] : 0;
        mask_value2 = (element_x + 2 < cols) ? mask_row[element_x + 2] : 0;
        mask_value3 = (element_x + 3 < cols) ? mask_row[element_x + 3] : 0;
if (mask_value0 > 0) {
checkMinMax1(value0, threadIdx_x, element_x, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (mask_value1 > 0 && element_x < cols - 1) {
checkMinMax1(value1, threadIdx_x, element_x + 1, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (mask_value2 > 0 && element_x < cols - 2) {
checkMinMax1(value2, threadIdx_x, element_x + 2, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (mask_value3 > 0 && element_x < cols - 3) {
checkMinMax1(value3, threadIdx_x, element_x + 3, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
}
}
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
checkMinMax2(threadIdx_x, threadIdx_x + 256, min_vals, max_vals, min_loc_xs,
min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
checkMinMax2(threadIdx_x, threadIdx_x + 128, min_vals, max_vals, min_loc_xs,
min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
checkMinMax2(threadIdx_x, threadIdx_x + 64, min_vals, max_vals, min_loc_xs,
min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
  if (threadIdx_x < 32) {
    checkMinMax2(threadIdx_x, threadIdx_x + 32, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();  // explicit warp sync: lock-step execution is not guaranteed
                   // on architectures with independent thread scheduling
    checkMinMax2(threadIdx_x, threadIdx_x + 16, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 8, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 4, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 2, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 1, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
  }
__shared__ bool is_last_block_done;
int block_size = blocks * sizeof(int);
uchar* g_min_vals = (uchar*)buffer;
uchar* g_max_vals = (uchar*)buffer + block_size;
int* g_min_loc_xs = (int*)((uchar*)buffer + 2 * block_size);
int* g_min_loc_ys = (int*)((uchar*)buffer + 3 * block_size);
int* g_max_loc_xs = (int*)((uchar*)buffer + 4 * block_size);
int* g_max_loc_ys = (int*)((uchar*)buffer + 5 * block_size);
if (threadIdx_x == 0) {
int offset = gridDim.x * blockIdx.y + blockIdx.x;
g_min_vals[offset] = min_vals[0];
g_max_vals[offset] = max_vals[0];
g_min_loc_xs[offset] = min_loc_xs[0];
g_min_loc_ys[offset] = min_loc_ys[0];
g_max_loc_xs[offset] = max_loc_xs[0];
g_max_loc_ys[offset] = max_loc_ys[0];
__threadfence();
uint local_count = atomicInc(&block_count, blocks);
is_last_block_done = (local_count == (blocks - 1));
}
__syncthreads();
if (is_last_block_done) {
min_vals[threadIdx_x] = UCHAR_MAX;
max_vals[threadIdx_x] = UCHAR_MIN;
min_loc_xs[threadIdx_x] = 0;
min_loc_ys[threadIdx_x] = 0;
max_loc_xs[threadIdx_x] = 0;
max_loc_ys[threadIdx_x] = 0;
for (element_x = threadIdx_x; element_x < blocks; element_x += BLOCK_SIZE) {
checkMinMax3(element_x, threadIdx_x, g_min_vals, g_max_vals, g_min_loc_xs,
g_min_loc_ys, g_max_loc_xs, g_max_loc_ys, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
checkMinMax2(threadIdx_x, threadIdx_x + 256, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
checkMinMax2(threadIdx_x, threadIdx_x + 128, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
checkMinMax2(threadIdx_x, threadIdx_x + 64, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
    if (threadIdx_x < 32) {
      checkMinMax2(threadIdx_x, threadIdx_x + 32, min_vals, max_vals,
                   min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 16, min_vals, max_vals,
                   min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 8, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 4, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 2, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 1, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
    }
if (threadIdx_x == 0) {
buffer[1] = (int)min_vals[0];
buffer[2] = (int)max_vals[0];
buffer[3] = min_loc_xs[0];
buffer[4] = min_loc_ys[0];
buffer[5] = max_loc_xs[0];
buffer[6] = max_loc_ys[0];
block_count = 0;
}
}
}
__global__
void minMaxLocKernel(const float* src, int rows, int cols, int src_stride,
const uchar* mask, int mask_stride, uint blocks,
float* buffer) {
__shared__ float min_vals[BLOCK_SIZE];
__shared__ float max_vals[BLOCK_SIZE];
__shared__ int min_loc_xs[BLOCK_SIZE];
__shared__ int min_loc_ys[BLOCK_SIZE];
__shared__ int max_loc_xs[BLOCK_SIZE];
__shared__ int max_loc_ys[BLOCK_SIZE];
int threadIdx_x = threadIdx.x;
int element_x = ((blockIdx.x << BLOCK_SHIFT) + threadIdx_x) << 2;
int element_y = blockIdx.y;
min_vals[threadIdx_x] = FLT_MAX;
  max_vals[threadIdx_x] = -FLT_MAX;  // FLT_MIN is the smallest positive float, not the lowest value
min_loc_xs[threadIdx_x] = 0;
min_loc_ys[threadIdx_x] = 0;
max_loc_xs[threadIdx_x] = 0;
max_loc_ys[threadIdx_x] = 0;
float* input;
uchar* mask_row;
float value0, value1, value2, value3;
uchar mask_value0, mask_value1, mask_value2, mask_value3;
for (; element_y < rows; element_y += gridDim.y) {
if (element_x < cols) {
input = (float*)((uchar*)src + element_y * src_stride);
      value0 = input[element_x];
      // guard the +1..+3 loads so a partial quad at the end of the row does
      // not read past the last valid element
      value1 = (element_x + 1 < cols) ? input[element_x + 1] : value0;
      value2 = (element_x + 2 < cols) ? input[element_x + 2] : value0;
      value3 = (element_x + 3 < cols) ? input[element_x + 3] : value0;
if (mask == nullptr) {
checkMinMax1(value0, threadIdx_x, element_x, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
if (element_x < cols - 1) {
checkMinMax1(value1, threadIdx_x, element_x + 1, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (element_x < cols - 2) {
checkMinMax1(value2, threadIdx_x, element_x + 2, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (element_x < cols - 3) {
checkMinMax1(value3, threadIdx_x, element_x + 3, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
}
else {
mask_row = (uchar*)((uchar*)mask + element_y * mask_stride);
        mask_value0 = mask_row[element_x];
        mask_value1 = (element_x + 1 < cols) ? mask_row[element_x + 1] : 0;
        mask_value2 = (element_x + 2 < cols) ? mask_row[element_x + 2] : 0;
        mask_value3 = (element_x + 3 < cols) ? mask_row[element_x + 3] : 0;
if (mask_value0 > 0) {
checkMinMax1(value0, threadIdx_x, element_x, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (mask_value1 > 0 && element_x < cols - 1) {
checkMinMax1(value1, threadIdx_x, element_x + 1, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (mask_value2 > 0 && element_x < cols - 2) {
checkMinMax1(value2, threadIdx_x, element_x + 2, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
if (mask_value3 > 0 && element_x < cols - 3) {
checkMinMax1(value3, threadIdx_x, element_x + 3, element_y, min_vals,
max_vals, min_loc_xs, min_loc_ys, max_loc_xs,
max_loc_ys);
}
}
}
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
checkMinMax2(threadIdx_x, threadIdx_x + 256, min_vals, max_vals, min_loc_xs,
min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
checkMinMax2(threadIdx_x, threadIdx_x + 128, min_vals, max_vals, min_loc_xs,
min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
checkMinMax2(threadIdx_x, threadIdx_x + 64, min_vals, max_vals, min_loc_xs,
min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
  if (threadIdx_x < 32) {
    checkMinMax2(threadIdx_x, threadIdx_x + 32, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();  // explicit warp sync: lock-step execution is not guaranteed
                   // on architectures with independent thread scheduling
    checkMinMax2(threadIdx_x, threadIdx_x + 16, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 8, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 4, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 2, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
    __syncwarp();
    checkMinMax2(threadIdx_x, threadIdx_x + 1, min_vals, max_vals, min_loc_xs,
                 min_loc_ys, max_loc_xs, max_loc_ys);
  }
__shared__ bool is_last_block_done;
int block_size = blocks * sizeof(float);
float* g_min_vals = (float*)buffer;
float* g_max_vals = (float*)((uchar*)buffer + block_size);
int* g_min_loc_xs = (int*)((uchar*)buffer + 2 * block_size);
int* g_min_loc_ys = (int*)((uchar*)buffer + 3 * block_size);
int* g_max_loc_xs = (int*)((uchar*)buffer + 4 * block_size);
int* g_max_loc_ys = (int*)((uchar*)buffer + 5 * block_size);
if (threadIdx_x == 0) {
int offset = gridDim.x * blockIdx.y + blockIdx.x;
g_min_vals[offset] = min_vals[0];
g_max_vals[offset] = max_vals[0];
g_min_loc_xs[offset] = min_loc_xs[0];
g_min_loc_ys[offset] = min_loc_ys[0];
g_max_loc_xs[offset] = max_loc_xs[0];
g_max_loc_ys[offset] = max_loc_ys[0];
__threadfence();
uint local_count = atomicInc(&block_count, blocks);
is_last_block_done = (local_count == (blocks - 1));
}
__syncthreads();
if (is_last_block_done) {
min_vals[threadIdx_x] = FLT_MAX;
    max_vals[threadIdx_x] = -FLT_MAX;  // lowest representable float
min_loc_xs[threadIdx_x] = 0;
min_loc_ys[threadIdx_x] = 0;
max_loc_xs[threadIdx_x] = 0;
max_loc_ys[threadIdx_x] = 0;
for (element_x = threadIdx_x; element_x < blocks; element_x += BLOCK_SIZE) {
checkMinMax3(element_x, threadIdx_x, g_min_vals, g_max_vals, g_min_loc_xs,
g_min_loc_ys, g_max_loc_xs, g_max_loc_ys, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
checkMinMax2(threadIdx_x, threadIdx_x + 256, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
checkMinMax2(threadIdx_x, threadIdx_x + 128, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
checkMinMax2(threadIdx_x, threadIdx_x + 64, min_vals, max_vals,
min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
}
__syncthreads();
#endif
    if (threadIdx_x < 32) {
      checkMinMax2(threadIdx_x, threadIdx_x + 32, min_vals, max_vals,
                   min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 16, min_vals, max_vals,
                   min_loc_xs, min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 8, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 4, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 2, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
      __syncwarp();
      checkMinMax2(threadIdx_x, threadIdx_x + 1, min_vals, max_vals, min_loc_xs,
                   min_loc_ys, max_loc_xs, max_loc_ys);
    }
if (threadIdx_x == 0) {
buffer[1] = min_vals[0];
buffer[2] = max_vals[0];
buffer[3] = (float)min_loc_xs[0];
buffer[4] = (float)min_loc_ys[0];
buffer[5] = (float)max_loc_xs[0];
buffer[6] = (float)max_loc_ys[0];
block_count = 0;
}
}
}
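// The kernels above use a single-pass "last block done" reduction: every block
// writes its partial min/max to global memory, publishes it with
// __threadfence(), and the block whose atomicInc() returns blocks - 1 reduces
// all partials. Below is a minimal, self-contained sketch of the same pattern
// for a plain sum (illustrative only: sum_last_block_demo, demo_block_count and
// the partials/out parameters are made-up names, and the launch is assumed to
// use blockDim.x == BLOCK_SIZE, a power of two, with gridDim.x partial slots).
static __device__ uint demo_block_count = 0;
__global__
void sum_last_block_demo(const float* in, int n, float* partials, float* out) {
  __shared__ float smem[BLOCK_SIZE];
  __shared__ bool is_last_block;
  float v = 0.f;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {  // grid-stride accumulation
    v += in[i];
  }
  smem[threadIdx.x] = v;
  __syncthreads();
  for (uint s = blockDim.x >> 1; s > 0; s >>= 1) {  // in-block tree reduction
    if (threadIdx.x < s) {
      smem[threadIdx.x] += smem[threadIdx.x + s];
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    partials[blockIdx.x] = smem[0];
    __threadfence();  // make the partial visible before signalling completion
    uint done = atomicInc(&demo_block_count, gridDim.x);
    is_last_block = (done == gridDim.x - 1);
  }
  __syncthreads();
  if (is_last_block) {  // only the final block folds the partial results
    float total = 0.f;
    for (uint b = threadIdx.x; b < gridDim.x; b += blockDim.x) {
      total += partials[b];
    }
    smem[threadIdx.x] = total;
    __syncthreads();
    for (uint s = blockDim.x >> 1; s > 0; s >>= 1) {
      if (threadIdx.x < s) {
        smem[threadIdx.x] += smem[threadIdx.x + s];
      }
      __syncthreads();
    }
    if (threadIdx.x == 0) {
      *out = smem[0];
      demo_block_count = 0;  // reset so the kernel can be launched again
    }
  }
}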
RetCode minMaxLoc(const uchar* src, int rows, int cols, int src_stride,
const uchar* mask, int mask_stride, uchar* min_val,
uchar* max_val, int* min_loc_x, int* min_loc_y,
int* max_loc_x, int* max_loc_y, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * (int)sizeof(uchar));
if (mask != nullptr) {
PPL_ASSERT(mask_stride >= cols * (int)sizeof(uchar));
}
PPL_ASSERT(min_val != nullptr);
PPL_ASSERT(max_val != nullptr);
PPL_ASSERT(min_loc_x != nullptr);
PPL_ASSERT(min_loc_y != nullptr);
PPL_ASSERT(max_loc_x != nullptr);
PPL_ASSERT(max_loc_y != nullptr);
dim3 block, grid;
block.x = BLOCK_SIZE;
block.y = 1;
grid.x = divideUp(divideUp(cols, 4, 2), BLOCK_SIZE, BLOCK_SHIFT);
int grid_y = MAX_BLOCKS / grid.x;
grid.y = (grid_y < rows) ? grid_y : rows;
int blocks = grid.x * grid.y;
  // 6 sections of 'blocks' int-sized entries, padded so that the 7 result
  // values written to buffer[0..6] always fit, even when blocks * 6 < 7
  int buffer_size = (blocks * 6 + 7) * sizeof(int);
int* buffer;
cudaMalloc(&buffer, buffer_size);
minMaxLocKernel<<<grid, block, 0, stream>>>(src, rows, cols, src_stride,
mask, mask_stride, blocks, buffer);
  int results[7];
  // the blocking cudaMemcpy below runs on the default stream; synchronize with
  // 'stream' explicitly so the kernel's output is complete before it is read
  cudaStreamSynchronize(stream);
  cudaMemcpy(results, buffer, 7 * sizeof(int), cudaMemcpyDeviceToHost);
*min_val = (uchar)results[1];
*max_val = (uchar)results[2];
*min_loc_x = results[3];
*min_loc_y = results[4];
*max_loc_x = results[5];
*max_loc_y = results[6];
cudaFree(buffer);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode minMaxLoc(const float* src, int rows, int cols, int src_stride,
const uchar* mask, int mask_stride, float* min_val,
float* max_val, int* min_loc_x, int* min_loc_y,
int* max_loc_x, int* max_loc_y, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * (int)sizeof(float));
if (mask != nullptr) {
PPL_ASSERT(mask_stride >= cols * (int)sizeof(uchar));
}
PPL_ASSERT(min_val != nullptr);
PPL_ASSERT(max_val != nullptr);
PPL_ASSERT(min_loc_x != nullptr);
PPL_ASSERT(min_loc_y != nullptr);
PPL_ASSERT(max_loc_x != nullptr);
PPL_ASSERT(max_loc_y != nullptr);
dim3 block, grid;
block.x = BLOCK_SIZE;
block.y = 1;
grid.x = divideUp(divideUp(cols, 4, 2), BLOCK_SIZE, BLOCK_SHIFT);
int grid_y = MAX_BLOCKS / grid.x;
grid.y = (grid_y < rows) ? grid_y : rows;
int blocks = grid.x * grid.y;
  // 6 sections of 'blocks' float-sized entries, padded so that the 7 result
  // values written to buffer[0..6] always fit, even when blocks * 6 < 7
  int buffer_size = (blocks * 6 + 7) * sizeof(float);
float* buffer;
cudaMalloc(&buffer, buffer_size);
minMaxLocKernel<<<grid, block, 0, stream>>>(src, rows, cols, src_stride,
mask, mask_stride, blocks, buffer);
  float results[7];
  // the blocking cudaMemcpy below runs on the default stream; synchronize with
  // 'stream' explicitly so the kernel's output is complete before it is read
  cudaStreamSynchronize(stream);
  cudaMemcpy(results, buffer, 7 * sizeof(float), cudaMemcpyDeviceToHost);
*min_val = results[1];
*max_val = results[2];
*min_loc_x = (int)results[3];
*min_loc_y = (int)results[4];
*max_loc_x = (int)results[5];
*max_loc_y = (int)results[6];
cudaFree(buffer);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode MinMaxLoc<uchar>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
uchar* minVal,
uchar* maxVal,
int* minIdxX,
int* minIdxY,
int* maxIdxX,
int* maxIdxY,
int maskWidthStride,
const uchar* mask) {
RetCode code = minMaxLoc(inData, height, width, inWidthStride, mask,
maskWidthStride, minVal, maxVal, minIdxX, minIdxY,
maxIdxX, maxIdxY, stream);
return code;
}
template <>
RetCode MinMaxLoc<float>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
float* minVal,
float* maxVal,
int* minIdxX,
int* minIdxY,
int* maxIdxX,
int* maxIdxY,
int maskWidthStride,
const uchar* mask) {
inWidthStride *= sizeof(float);
RetCode code = minMaxLoc(inData, height, width, inWidthStride, mask,
maskWidthStride, minVal, maxVal, minIdxX, minIdxY,
maxIdxX, maxIdxY, stream);
return code;
}
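// Minimal host-side usage sketch (illustrative only, not part of the library
// interface; the function name and the image contents are made up for the
// example). It runs MinMaxLoc<float> on a small device image without a mask.
static void minMaxLocFloatExample() {
  const int rows = 4;
  const int cols = 8;
  float host_image[rows * cols];
  for (int i = 0; i < rows * cols; ++i) {
    host_image[i] = (float)(i % 13) - 6.f;  // arbitrary values
  }
  float* dev_image = nullptr;
  cudaMalloc(&dev_image, rows * cols * sizeof(float));
  cudaMemcpy(dev_image, host_image, rows * cols * sizeof(float),
             cudaMemcpyHostToDevice);
  float min_value, max_value;
  int min_x, min_y, max_x, max_y;
  // inWidthStride is given in elements; the float specialization converts it
  // to bytes internally. Passing a null mask disables masking.
  MinMaxLoc<float>(0, rows, cols, cols, dev_image, &min_value, &max_value,
                   &min_x, &min_y, &max_x, &max_y, 0, nullptr);
  cudaFree(dev_image);
}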
} // cuda
} // cv
} // ppl
#define LBANN_ENTRYWISE_BATCH_NORMALIZATION_LAYER_INSTANTIATE
#include "lbann/comm_impl.hpp"
#include "lbann/layers/regularizers/entrywise_batch_normalization.hpp"
#include "lbann/weights/weights_helpers.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/**
* On input, sums and sqsums are assumed to be filled with zeros.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (height / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void row_sums_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ vals,
size_t vals_ldim,
TensorDataType* __restrict__ sums,
TensorDataType* __restrict__ sqsums) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t row = gid; row < height; row += nthreads) {
auto& sum = sums[row];
auto& sqsum = sqsums[row];
for (size_t col = 0; col < width; ++col) {
const auto& x = vals[row + col * vals_ldim];
sum += x;
sqsum += x * x;
}
}
}
/**
* On input, batch_mean and batch_var are assumed to contain sums and
* sums of squares, respectively.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (size / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void compute_statistics_kernel(size_t size,
unsigned long long statistics_count,
TensorDataType decay,
TensorDataType* __restrict__ batch_mean,
TensorDataType* __restrict__ batch_var,
TensorDataType* __restrict__ running_mean,
TensorDataType* __restrict__ running_var) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t i = gid; i < size; i += nthreads) {
auto& mean = batch_mean[i];
auto& var = batch_var[i];
auto& _running_mean = running_mean[i];
auto& _running_var = running_var[i];
const auto sum = batch_mean[i];
const auto sqsum = batch_var[i];
const TensorDataType statistics_count_dt = TensorDataType(statistics_count);
mean = sum / statistics_count_dt;
const auto sqmean = sqsum / statistics_count_dt;
var = (sqmean - mean * mean) * statistics_count_dt / TensorDataType(statistics_count - 1);
_running_mean = decay * _running_mean + (TensorDataType{1.f} - decay) * mean;
_running_var = decay * _running_var + (TensorDataType{1.f} - decay) * var;
}
}
/**
* mean = sum(x_i) / n
*
* var = ( sum(x_i^2)/n - mean^2 ) * n/(n-1)
*/
template <typename TensorDataType>
void compute_batch_statistics(lbann_comm& comm,
TensorDataType decay,
const El::AbstractDistMatrix<TensorDataType>& input,
El::AbstractDistMatrix<TensorDataType>& batch_statistics,
El::AbstractDistMatrix<TensorDataType>& running_mean,
El::AbstractDistMatrix<TensorDataType>& running_var) {
// Local matrices
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix());
auto& local_batch_statistics = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(batch_statistics.Matrix());
auto local_batch_mean = El::View(local_batch_statistics, El::ALL, El::IR(0));
auto local_batch_var = El::View(local_batch_statistics, El::ALL, El::IR(1));
auto& local_running_mean = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(running_mean.Matrix());
auto& local_running_var = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.Matrix());
// Dimensions
const size_t local_height = local_input.Height();
const size_t local_width = local_input.Width();
// Compute local sums
El::Zero(batch_statistics);
if (local_height > 0) {
auto multisync =
El::MakeMultiSync(gpu::get_sync_info(local_batch_statistics),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
row_sums_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height,
local_width,
local_input.LockedBuffer(),
local_input.LDim(),
local_batch_mean.Buffer(),
local_batch_var.Buffer());
}
// Accumulate sums between processes
/// @todo Local statistics
/// @todo Arbitrary group sizes
comm.allreduce(batch_statistics,
batch_statistics.RedundantComm(),
El::mpi::SUM);
const size_t statistics_count = input.Width();
// Compute mini-batch statistics from sums
if (statistics_count <= 1) {
// local_mean already has correct values
El::Fill(local_batch_var, El::TypeTraits<TensorDataType>::One());
} else {
if (local_height > 0) {
auto multisync =
El::MakeMultiSync(gpu::get_sync_info(local_batch_statistics),
gpu::get_sync_info(local_running_mean),
gpu::get_sync_info(local_running_var));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
compute_statistics_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height,
statistics_count,
decay,
local_batch_mean.Buffer(),
local_batch_var.Buffer(),
local_running_mean.Buffer(),
local_running_var.Buffer());
}
}
}
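// ---------------------------------------------------------------------------
// Host-side reference sketch (not part of LBANN) for the statistics computed
// above: per-row sums and sums of squares are converted to a mean and an
// unbiased variance, then folded into the running statistics with the decay
// factor. Assumes n >= 2 (the statistics kernel above is only launched in that
// case); function and parameter names are illustrative only.
// ---------------------------------------------------------------------------
inline void reference_batch_statistics(const float* sums, const float* sqsums,
                                       size_t height, size_t n, float decay,
                                       float* mean, float* var,
                                       float* running_mean, float* running_var) {
  for (size_t row = 0; row < height; ++row) {
    mean[row] = sums[row] / n;                                  // mean = sum / n
    const float sqmean = sqsums[row] / n;
    var[row] = (sqmean - mean[row] * mean[row]) * n / (n - 1);  // unbiased variance
    running_mean[row] = decay * running_mean[row] + (1.f - decay) * mean[row];
    running_var[row]  = decay * running_var[row]  + (1.f - decay) * var[row];
  }
}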
/**
* Block dimensions: bsizex x bsizey x 1
*
* Grid dimensions: (height / bsizex) x (width / bsizey) x 1
*/
template <typename TensorDataType>
__global__ void batchnorm_kernel(size_t height,
size_t width,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ mean,
const TensorDataType* __restrict__ var) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& _mean = mean[row];
const auto& _var = var[row];
const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon);
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& x = input[row + col*input_ldim];
auto& y = output[row + col*output_ldim];
y = (x - _mean) * inv_stdev;
}
}
}
/**
* y_i = (x_i - mean) / sqrt(var + epsilon)
*/
template <typename TensorDataType>
void apply_batchnorm(DataType epsilon,
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
El::Matrix<TensorDataType, El::Device::GPU>& local_output,
const El::Matrix<TensorDataType, El::Device::GPU>& local_mean,
const El::Matrix<TensorDataType, El::Device::GPU>& local_var) {
if (!local_input.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_input),
gpu::get_sync_info(local_mean),
gpu::get_sync_info(local_var));
const size_t local_height = local_input.Height();
const size_t local_width = local_input.Width();
constexpr size_t block_size_x = 256;
constexpr size_t block_size_y = 1;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
grid_dims.x = (local_height + block_size_x - 1) / block_size_x;
grid_dims.y = (local_width + block_size_y - 1) / block_size_y;
hydrogen::gpu::LaunchKernel(
batchnorm_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height,
local_width,
epsilon,
local_input.LockedBuffer(),
local_input.LDim(),
local_output.Buffer(),
local_output.LDim(),
local_mean.LockedBuffer(),
local_var.LockedBuffer());
}
}
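// Host-side reference (not part of LBANN) for the normalization applied above,
// matching the kernel: y = (x - mean) / sqrt(var + epsilon). Assumes <cmath>
// is reachable through the headers above; the function name is illustrative.
inline float reference_batchnorm(float x, float mean, float var, float epsilon) {
  return (x - mean) / std::sqrt(var + epsilon);
}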
template <typename TensorDataType>
void fp_impl(lbann_comm& comm,
TensorDataType decay,
TensorDataType epsilon,
bool is_training,
const El::AbstractDistMatrix<TensorDataType>& input,
El::AbstractDistMatrix<TensorDataType>& output,
El::AbstractDistMatrix<TensorDataType>& batch_statistics,
El::AbstractDistMatrix<TensorDataType>& running_mean,
El::AbstractDistMatrix<TensorDataType>& running_var) {
// Local matrices
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix());
auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(output.Matrix());
// Batchnorm has different behavior for training and inference
if (is_training) {
// For training, normalize with batch statistics
compute_batch_statistics<TensorDataType>(comm,
decay,
input,
batch_statistics,
running_mean,
running_var);
const auto& local_batch_statistics
= dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(batch_statistics.LockedMatrix());
const auto local_batch_mean = El::LockedView(local_batch_statistics,
El::ALL, El::IR(0));
const auto local_batch_var = El::LockedView(local_batch_statistics,
El::ALL, El::IR(1));
apply_batchnorm<TensorDataType>(epsilon,
local_input,
local_output,
local_batch_mean,
local_batch_var);
}
else {
// For inference, normalize with running statistics
const auto& local_running_mean = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_mean.LockedMatrix());
const auto& local_running_var = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.LockedMatrix());
apply_batchnorm<TensorDataType>(epsilon,
local_input,
local_output,
local_running_mean,
local_running_var);
}
}
/**
* On input, gradient_wrt_mean and gradient_wrt_var are assumed to be
* filled with zeros.
*
* dL/dmean = - sum(dL/dy_i) / sqrt(var+epsilon)
*
* dL/dvar = - sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (height / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void bp_training_stats_gradient_kernel(size_t height,
size_t width,
TensorDataType epsilon,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
const TensorDataType* __restrict__ mean,
const TensorDataType* __restrict__ var,
TensorDataType* __restrict__ gradient_wrt_mean,
TensorDataType* __restrict__ gradient_wrt_var) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t row = gid; row < height; row += nthreads) {
const auto& _mean = mean[row];
const auto& _var = var[row];
const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon);
auto& dmean = gradient_wrt_mean[row];
auto& dvar = gradient_wrt_var[row];
for (size_t col = 0; col < width; ++col) {
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
dmean += - dy * inv_stdev;
dvar += - dy * (x - _mean) * inv_stdev*inv_stdev*inv_stdev / TensorDataType(2);
}
}
}
/**
* dL/dx_i = ( dL/dy_i / sqrt(var+epsilon)
* + dL/dmean / n
* + dL/dvar * (x_i - mean) * 2/(n-1) )
*
* Block dimensions: bsizex x bsizey x 1
*
* Grid dimensions: (height / bsizex) x (width / bsizey) x 1
*/
template <typename TensorDataType>
__global__ void bp_training_error_signal_kernel(size_t height,
size_t width,
TensorDataType epsilon,
unsigned long long statistics_count,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim,
const TensorDataType* __restrict__ mean,
const TensorDataType* __restrict__ var,
const TensorDataType* __restrict__ gradient_wrt_mean,
const TensorDataType* __restrict__ gradient_wrt_var) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& _mean = mean[row];
const auto& _var = var[row];
const auto& dmean = gradient_wrt_mean[row];
const auto& dvar = gradient_wrt_var[row];
const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon);
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = (dy * inv_stdev
+ dmean / TensorDataType(statistics_count)
+ dvar * (x - _mean) * TensorDataType(2) / TensorDataType(statistics_count - 1));
}
}
}
/** @brief Backprop for training.
*
* Assumes forward prop uses mini-batch statistics. In other words,
* statistics are dependent on input.
*/
template <typename TensorDataType>
void bp_training_impl(lbann_comm& comm,
TensorDataType epsilon,
const El::AbstractDistMatrix<TensorDataType>& input,
const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output,
El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input,
const El::AbstractDistMatrix<TensorDataType>& statistics,
El::AbstractDistMatrix<TensorDataType>& gradient_wrt_statistics) {
// Local matrices
const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix());
const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_output.LockedMatrix());
auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_input.Matrix());
const auto& local_statistics = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(statistics.LockedMatrix());
const auto local_mean = El::LockedView(local_statistics, El::ALL, El::IR(0));
const auto local_var = El::LockedView(local_statistics, El::ALL, El::IR(1));
auto& local_gradient_wrt_statistics = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_statistics.Matrix());
auto local_gradient_wrt_mean = El::View(local_gradient_wrt_statistics, El::ALL, El::IR(0));
auto local_gradient_wrt_var = El::View(local_gradient_wrt_statistics, El::ALL, El::IR(1));
// Dimensions
const size_t local_height = local_gradient_wrt_input.Height();
const size_t local_width = local_gradient_wrt_input.Width();
// Count for statistics
// Note: Output is constant if statistics count is <=1, so error
// signal is zero.
/// @todo Local statistics
/// @todo Arbitrary group sizes
const size_t statistics_count = input.Width();
if (statistics_count <= 1) {
El::Zero(local_gradient_wrt_input);
return;
}
// Compute local gradient w.r.t. batch statistics
El::Zero(gradient_wrt_statistics);
if (local_height > 0) {
auto multisync =
El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_statistics),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_input));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
hydrogen::gpu::LaunchKernel(
bp_training_stats_gradient_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height,
local_width,
epsilon,
local_input.LockedBuffer(),
local_input.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_mean.LockedBuffer(),
local_var.LockedBuffer(),
local_gradient_wrt_mean.Buffer(),
local_gradient_wrt_var.Buffer());
}
// Accumulate gradient w.r.t. statistics across processes
/// @todo Local statistics
/// @todo Arbitrary group sizes
comm.allreduce(gradient_wrt_statistics,
gradient_wrt_statistics.RedundantComm(),
El::mpi::SUM);
// Compute gradient w.r.t. input
if (!local_input.IsEmpty()) {
auto multisync =
El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_gradient_wrt_statistics),
gpu::get_sync_info(local_statistics),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_input));
const size_t local_height = local_input.Height();
const size_t local_width = local_input.Width();
constexpr size_t block_size_x = 256;
constexpr size_t block_size_y = 1;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
grid_dims.x = (local_height + block_size_x - 1) / block_size_x;
grid_dims.y = (local_width + block_size_y - 1) / block_size_y;
hydrogen::gpu::LaunchKernel(
bp_training_error_signal_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height,
local_width,
epsilon,
statistics_count,
local_input.LockedBuffer(),
local_input.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim(),
local_mean.LockedBuffer(),
local_var.LockedBuffer(),
local_gradient_wrt_mean.LockedBuffer(),
local_gradient_wrt_var.LockedBuffer());
}
}
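// ---------------------------------------------------------------------------
// Host-side reference sketch (not part of LBANN) for the backprop formulas in
// the two kernels above, applied to a single row x[0..n-1] with upstream
// gradient dy[0..n-1]. Assumes <cmath> and n >= 2; names are illustrative only.
// ---------------------------------------------------------------------------
inline void reference_batchnorm_backprop(const float* x, const float* dy,
                                         size_t n, float mean, float var,
                                         float epsilon, float* dx) {
  const float inv_stdev = 1.f / std::sqrt(var + epsilon);
  // dL/dmean and dL/dvar, accumulated over the row
  float dmean = 0.f, dvar = 0.f;
  for (size_t i = 0; i < n; ++i) {
    dmean += -dy[i] * inv_stdev;
    dvar  += -dy[i] * (x[i] - mean) * inv_stdev * inv_stdev * inv_stdev / 2.f;
  }
  // dL/dx_i = dy_i / sqrt(var+eps) + dmean / n + dvar * (x_i - mean) * 2/(n-1)
  for (size_t i = 0; i < n; ++i) {
    dx[i] = dy[i] * inv_stdev
            + dmean / n
            + dvar * (x[i] - mean) * 2.f / (n - 1);
  }
}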
/**
* dL/dx_i = dL/dy_i / sqrt(var+epsilon)
*
* Block dimensions: bsizex x bsizey x 1
*
* Grid dimensions: (height / bsizex) x (width / bsizey) x 1
*/
template <typename TensorDataType>
__global__ void bp_inference_kernel(size_t height,
size_t width,
TensorDataType epsilon,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim,
const TensorDataType* __restrict__ running_var) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto& var = running_var[row];
const auto inv_stdev = gpu_lib::rsqrt(var + epsilon);
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = dy * inv_stdev;
}
}
}
/** @brief Backprop for inference.
*
* Assumes forward prop uses running statistics. In other words,
* statistics are independent of input.
*/
template <typename TensorDataType>
void bp_inference_impl(DataType epsilon,
const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output,
El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input,
const El::AbstractDistMatrix<TensorDataType>& running_var) {
// Local matrices
const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_output.LockedMatrix());
auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_input.Matrix());
const auto& local_running_var = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.LockedMatrix());
// Compute gradient w.r.t. input
if (!local_gradient_wrt_output.IsEmpty()) {
auto multisync =
El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_running_var));
const size_t local_height = local_gradient_wrt_output.Height();
const size_t local_width = local_gradient_wrt_output.Width();
constexpr size_t block_size_x = 256;
constexpr size_t block_size_y = 1;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
grid_dims.x = (local_height + block_size_x - 1) / block_size_x;
grid_dims.y = (local_width + block_size_y - 1) / block_size_y;
hydrogen::gpu::LaunchKernel(
bp_inference_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height,
local_width,
epsilon,
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_output.LDim(),
local_gradient_wrt_input.Buffer(),
local_gradient_wrt_input.LDim(),
local_running_var.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_impl(lbann_comm& comm,
TensorDataType epsilon,
bool is_training,
const El::AbstractDistMatrix<TensorDataType>& input,
const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output,
El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input,
const El::AbstractDistMatrix<TensorDataType>& batch_statistics,
El::AbstractDistMatrix<TensorDataType>& gradient_wrt_batch_statistics,
const El::AbstractDistMatrix<TensorDataType>& running_var) {
// Batchnorm has different behavior for training and inference
if (is_training) {
bp_training_impl<TensorDataType>(comm,
epsilon,
input,
gradient_wrt_output,
gradient_wrt_input,
batch_statistics,
gradient_wrt_batch_statistics);
}
else {
bp_inference_impl<TensorDataType>(epsilon,
gradient_wrt_output,
gradient_wrt_input,
running_var);
}
}
} // namespace
// Template instantiation
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void entrywise_batch_normalization_layer<TensorDataType, T_layout, Dev>::fp_compute() {
using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>;
const auto mode = this->get_model()->get_execution_context().get_execution_mode();
fp_impl(*this->get_comm(),
this->m_decay,
this->m_epsilon,
mode == execution_mode::training,
this->get_prev_activations(),
this->get_activations(),
*this->m_batch_statistics,
ValuesGetter::mutable_values(this->get_weights(0)),
ValuesGetter::mutable_values(this->get_weights(1)));
}
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void entrywise_batch_normalization_layer<TensorDataType, T_layout, Dev>::bp_compute() {
const auto mode = this->get_model()->get_execution_context().get_execution_mode();
bp_impl(*this->get_comm(),
this->m_epsilon,
mode == execution_mode::training,
this->get_prev_activations(),
this->get_prev_error_signals(),
this->get_error_signals(),
*this->m_batch_statistics,
*this->m_batch_statistics_gradient,
this->weights_values(1));
}
#define PROTO(T) \
template class entrywise_batch_normalization_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class entrywise_batch_normalization_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
the_stack
|
#include <ops/declarable/helpers/s_t_b.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void batchToSpaceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft) {
// input [bS, H * blockSize, W * blockSize, iC]
// output [bS, H * blockSize - cropBottom - cropTop, W * blockSize - cropLeft - cropRight, iC]
// if (cropTop = cropBottom = cropRight = cropLeft = 0) shapes are the same
// else:
// oH -> [cropBottom, iH - cropTop]
// oW -> [cropLeft, iW - cropRight]
// xLen >= zLen
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int rank, *sharedMem;
__shared__ Nd4jLong zLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
rank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= zLen)
return;
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
coords[1] += cropBottom;
coords[2] += cropLeft;
const auto xOffset = shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void batchToSpaceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft) {
batchToSpaceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, cropBottom, cropLeft);
}
BUILD_SINGLE_TEMPLATE(template void batchToSpaceCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint cropBottom, const uint cropLeft), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
void batchToSpace(sd::LaunchContext* context, const NDArray& input, NDArray& output, const uint cropBottom, const uint cropTop, const uint cropLeft, const uint cropRight, const uint blockSize) {
// [bS*blockSize*blockSize, H/blockSize, W/blockSize, iC] is rearranged/permuted to [bS, oH, oW, iC]
// oH = H - cropTop - cropBottom
// oW = W - cropLeft - cropRight
NDArray inputRearranged0 = input.reshape(input.ordering(), {blockSize, blockSize, output.sizeAt(0), input.sizeAt(1), input.sizeAt(2), input.sizeAt(3)});
inputRearranged0.permutei({2, 3,0, 4,1, 5});
if(input.lengthOf() == output.lengthOf()) {
output.assign(inputRearranged0);
}
else {
NDArray inputRearranged1 = inputRearranged0.reshape(input.ordering(), {output.sizeAt(0), input.sizeAt(1) * blockSize, input.sizeAt(2) * blockSize, input.sizeAt(3)});
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * output.rankOf() + 128;
PointersManager manager(context, "batchToSpace");
NDArray::prepareSpecialUse({&output}, {&inputRearranged1});
BUILD_SINGLE_SELECTOR(input.dataType(), batchToSpaceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), inputRearranged1.specialBuffer(), inputRearranged1.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), cropBottom, cropLeft), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&inputRearranged1});
manager.synchronize();
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ static void batchToSpaceNDCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const uint numOfSpatialDims) {
// 4D example, numOfSpatialDims = 2
// input [bS, H * blockShape[0], W * blockShape[1], iC]
// output [bS, H * blockShape[0] - cropBottom - cropTop, W * blockShape[1] - cropLeft - cropRight, iC]
// if (cropTop = cropBottom = cropRight = cropLeft = 0) shapes are the same
// else:
// oH -> [cropBottom, iH - cropTop]
// oW -> [cropLeft, iW - cropRight]
// xLen >= zLen
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ int rank, *sharedMem;
__shared__ Nd4jLong zLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
rank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
// evaluate spatial coordinates for x
for(uint j = 1; j <= numOfSpatialDims; ++j) {
const auto yOffset = (j - 1) * yShapeInfo[3]; // yRank = 2, calculate offset manually
coords[j] += y[yOffset]; // add crop left
}
const auto xOffset = shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
///////////////////////////////////////////////////////////////////
template<typename X,typename Y>
static void batchToSpaceNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) {
batchToSpaceNDCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, numOfSpatialDims);
}
BUILD_DOUBLE_TEMPLATE(template void batchToSpaceNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES);
//////////////////////////////////////////////////////////////////////////
void batchToSpaceND(sd::LaunchContext* context, const NDArray& input, const NDArray& blockShape, const NDArray& crop, NDArray& output) {
// 4D example, numOfSpatialDims = 2 - two spatial dimensions
// [bS*blockShape[0]*blockShape[1], iH, iW, iC] is rearranged/permuted to [bS, iH*blockShape[0] - cropTop - cropBottom, iW*blockShape[1] - cropLeft - cropRight, iC]
const uint rank = input.rankOf();
const uint numOfSpatialDims = blockShape.sizeAt(0);
//*** construct reshaping std::vector for first reshape of input array ***//
std::vector<Nd4jLong> temp(numOfSpatialDims + rank);
int i;
for(i = 0; i < numOfSpatialDims; ++i)
temp[i] = blockShape.e<Nd4jLong>(i);
temp[i++] = output.sizeAt(0);
for(int j = 1; j < rank; ++i, ++j)
temp[i] = input.sizeAt(j);
NDArray inputRearranged0 = input.reshape(input.ordering(), temp);
//*** construct permuting std::vector for permutation of input array ***//
temp[0] = numOfSpatialDims;
for(i = 1; i <= numOfSpatialDims; ++i) {
temp[2*i - 1] = numOfSpatialDims + i;
temp[2*i] = i - 1;
}
for(i = 2 * numOfSpatialDims + 1; i < temp.size(); ++i)
temp[i] = i;
inputRearranged0.permutei(temp);
if(input.lengthOf() == output.lengthOf()) {
output.assign(inputRearranged0);
}
else {
//*** construct reshaping std::vector for second reshape of input array ***//
temp.resize(rank);
temp[0] = output.sizeAt(0);
for(i = 1; i < rank; ++i)
temp[i] = (i <= numOfSpatialDims) ? input.sizeAt(i) * blockShape.e<Nd4jLong>(i - 1) : input.sizeAt(i);
NDArray inputRearranged1 = inputRearranged0.reshape(input.ordering(), temp);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * output.rankOf() + 128;
PointersManager manager(context, "batchToSpaceND");
NDArray::prepareSpecialUse({&output}, {&inputRearranged1, &crop});
BUILD_DOUBLE_SELECTOR(input.dataType(), crop.dataType(), batchToSpaceNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), inputRearranged1.specialBuffer(), inputRearranged1.specialShapeInfo(), crop.specialBuffer(), crop.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES);
NDArray::registerSpecialUse({&output}, {&inputRearranged1, &crop});
manager.synchronize();
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void spaceToBatchCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight) {
// input [bS, H * blockSize - padBottom - padTop, W * blockSize - padLeft - padRight, iC]
// output [bs, H * blockSize, W * blockSize, iC]
// if (padTop = padBottom = padRight = padLeft = 0) shapes are the same
// else:
// iH -> [padBottom, oH - padTop]
// iW -> [padLeft, oW - padRight]
// zLen > xLen
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int rank, *sharedMem;
__shared__ Nd4jLong zLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
rank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= zLen)
return;
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if(coords[1] >= padBottom && coords[1] < zShapeInfo[2] - padTop && coords[2] >= padLeft && coords[2] < zShapeInfo[3] - padRight) {
coords[1] -= padBottom;
coords[2] -= padLeft;
const auto xOffset = shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
else
z[zOffset] = 0.f;
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void spaceToBatchCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight) {
spaceToBatchCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, padBottom, padTop, padLeft, padRight);
}
BUILD_SINGLE_TEMPLATE(template void spaceToBatchCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
void spaceToBatch(sd::LaunchContext* context, const NDArray& input, NDArray& output, const uint padBottom, const uint padTop, const uint padLeft, const uint padRight, const uint blockSize) {
// [bS, iH, iW, iC] is rearranged/permuted to [bS*blockSize*blockSize, (iH + padBottom + padTop)/blockSize, (iW + padLeft + padRight)/blockSize, iC]
NDArray outputRearranged0 = output.reshape(output.ordering(), {blockSize, blockSize, input.sizeAt(0), output.sizeAt(1), output.sizeAt(2), input.sizeAt(3)}, false);
outputRearranged0.permutei({2, 3,0, 4,1, 5});
if(input.lengthOf() == output.lengthOf()) {
outputRearranged0.assign(input);
}
else {
NDArray outputRearranged1 = outputRearranged0.reshape(output.ordering(), {input.sizeAt(0), output.sizeAt(1) * blockSize, output.sizeAt(2) * blockSize, input.sizeAt(3)}, false);
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * output.rankOf() + 128;
PointersManager manager(context, "spaceToBatch");
NDArray::prepareSpecialUse({&outputRearranged1}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), spaceToBatchCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), outputRearranged1.specialBuffer(), outputRearranged1.specialShapeInfo(), padBottom, padTop, padLeft, padRight), LIBND4J_TYPES);
NDArray::registerSpecialUse({&outputRearranged1}, {&input});
manager.synchronize();
if(output.specialBuffer() != outputRearranged1.specialBuffer())
outputRearranged0.assign(outputRearranged1);
}
}
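// ---------------------------------------------------------------------------
// Host-side reference (not part of libnd4j) for the padding mapping used by
// spaceToBatchCuda above: an output element at (b, oh, ow, c) lies inside the
// pad border (and is written as zero) unless the predicate below holds, in
// which case it reads the input element at (b, oh - padBottom, ow - padLeft, c).
// Names are illustrative only.
// ---------------------------------------------------------------------------
static bool referenceSpaceToBatchSource(Nd4jLong oh, Nd4jLong ow,
                                        Nd4jLong outH, Nd4jLong outW,
                                        Nd4jLong padBottom, Nd4jLong padTop,
                                        Nd4jLong padLeft, Nd4jLong padRight,
                                        Nd4jLong* ih, Nd4jLong* iw) {
  const bool inside = oh >= padBottom && oh < outH - padTop &&
                      ow >= padLeft   && ow < outW - padRight;
  if (inside) {
    *ih = oh - padBottom;
    *iw = ow - padLeft;
  }
  return inside;
}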
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ static void spaceToBatchNDCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const uint numOfSpatialDims) {
// x - input, y - padding, z - output
// 4D example
// input [bS, H * blockShape[0] - padBottom - padTop, W * blockShape[1] - padLeft - padRight, iC]
// output [bS, H * blockShape[0], W * blockShape[1], iC]
// if (padTop = padBottom = padRight = padLeft = 0) shapes are the same
// else:
// iH -> [padBottom, oH - padTop]
// iW -> [padLeft, oW - padRight]
// zLen > xLen
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ int rank, *sharedMem; // xRank = zRank, yRank = 2;
__shared__ Nd4jLong zLen, totalThreads;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int*>(shmem);
rank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
bool within = true;
for(uint j = 1; j <= numOfSpatialDims; ++j) {
// yRank = 2, calculate offset manually
const auto yOffset = (j - 1) * yShapeInfo[3];
const auto padLeft = y[yOffset];
const auto padRight = y[yOffset + yShapeInfo[4]];
within &= (coords[j] >= padLeft && coords[j] < shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo))[j] - padRight);
if(!within)
break;
coords[j] -= padLeft; // get coordinates for x
}
if(within)
z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
else
z[zOffset] = 0.f;
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void spaceToBatchNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims) {
spaceToBatchNDCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, numOfSpatialDims);
}
BUILD_DOUBLE_TEMPLATE(template void spaceToBatchNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES);
//////////////////////////////////////////////////////////////////////////
void spaceToBatchND(sd::LaunchContext* context, const NDArray& input, const NDArray& blockShape, const NDArray& padding, NDArray& output ) {
// 4D example with two spatial dimensions
// [bS, iH, iW, iC] is rearranged/permuted to [bS*blockShape[0]*blockShape[1], (iH + padBottom + padTop)/blockShape[0], (iW + padLeft + padRight)/blockShape[1], iC]
const uint rank = input.rankOf();
const uint numOfSpatialDims = blockShape.sizeAt(0);
//*** construct reshaping std::vector for first reshape of output array ***//
std::vector<Nd4jLong> temp(numOfSpatialDims + rank);
int i;
for(i = 0; i < numOfSpatialDims; ++i)
temp[i] = blockShape.e<Nd4jLong>(i);
temp[i++] = input.sizeAt(0);
for(int j = 1; j < rank; ++i, ++j)
temp[i] = output.sizeAt(j);
NDArray outputRearranged0 = output.reshape(output.ordering(), temp, false);
//*** construct permuting std::vector for permutation of output array ***//
temp[0] = numOfSpatialDims;
for(i = 1; i <= numOfSpatialDims; ++i) {
temp[2*i - 1] = numOfSpatialDims + i;
temp[2*i] = i - 1;
}
for(i = 2 * numOfSpatialDims + 1; i < temp.size(); ++i)
temp[i] = i;
outputRearranged0.permutei(temp);
// ****** //
if(input.lengthOf() == output.lengthOf()) {
outputRearranged0.assign(input);
}
else {
//*** construct reshaping std::vector for second reshape of output array ***//
temp.resize(rank);
temp[0] = input.sizeAt(0);
for(i = 1; i < rank; ++i)
temp[i] = (i <= numOfSpatialDims) ? output.sizeAt(i) * blockShape.e<Nd4jLong>(i - 1) : output.sizeAt(i);
NDArray outputRearranged1 = outputRearranged0.reshape(output.ordering(), temp, false);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * output.rankOf() + 128;
PointersManager manager(context, "spaceToBatchND");
NDArray::prepareSpecialUse({&outputRearranged1}, {&input, &padding});
BUILD_DOUBLE_SELECTOR(input.dataType(), padding.dataType(), spaceToBatchNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), padding.specialBuffer(), padding.specialShapeInfo(), outputRearranged1.specialBuffer(), outputRearranged1.specialShapeInfo(), numOfSpatialDims), LIBND4J_TYPES, INTEGER_TYPES);
NDArray::registerSpecialUse({&outputRearranged1}, {&input, &padding});
manager.synchronize();
if(output.specialBuffer() != outputRearranged1.specialBuffer())
outputRearranged0.assign(outputRearranged1);
}
}
/*
template <int N, bool B2S>
struct SpaceToBatchHelper {
template <typename T>
static void run(T *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, T *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) {
for (int batch_pos = 0; batch_pos < batch_shape[0]; ++batch_pos) {
const int space_pos = batch_pos * block_shape[0] + block_offsets[0] - pad_start[0];
if (space_pos >= 0 && space_pos < space_shape[0]) {
SpaceToBatchHelper<N - 1, B2S>::run(ptrSpace + space_pos * space_strides[0], space_shape + 1, space_strides + 1, block_shape + 1, pad_start + 1, block_offsets + 1, ptrBatch, batch_shape + 1, batch_strides + 1);
} else {
if (!B2S)
for (int i = 0; i < batch_strides[0]; i++)
ptrBatch[i] = (T) 0.f;
}
ptrBatch += batch_strides[0];
}
}
};
template <bool B2S>
struct SpaceToBatchHelper<0, B2S> {
template <typename T>
static void run(T *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, T *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) {
int str = batch_strides[-1];
for (int i = 0; i < str; i++)
if (B2S)
ptrSpace[i] = ptrBatch[i];
else
ptrBatch[i] = ptrSpace[i];
}
};
template <typename T, int NUM_BLOCK_DIMS, bool B2S>
void _execute(sd::LaunchContext * context, void *vptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, void *vptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides) {
auto ptrSpace = reinterpret_cast<T *>(vptrSpace);
auto ptrBatch = reinterpret_cast<T *>(vptrBatch);
SpaceToBatchHelper<NUM_BLOCK_DIMS, B2S>::run(ptrSpace, space_shape, space_strides, block_shape, pad_start, block_offsets, ptrBatch, batch_shape, batch_strides);
};
Nd4jStatus _batchToSpace(sd::LaunchContext * context, int internal_block_dims, NDArray *input, NDArray *output, std::vector<Nd4jLong> &internal_input_shape, std::vector<Nd4jLong> &internal_output_shape, Nd4jLong *block_shape, Nd4jLong *crops) {
return Status::OK();
}
#define STB_DIM (0, 1),\
(1, 2),\
(2, 3),\
(3, 4)
#define STB_BOOL (0, false),\
(1, true)
BUILD_TRIPLE_TEMPLATE(template void _execute, (sd::LaunchContext * context, void *ptrSpace, const Nd4jLong *space_shape, const Nd4jLong *space_strides, const Nd4jLong *block_shape, const Nd4jLong *pad_start, const Nd4jLong *block_offsets, void *ptrBatch, const Nd4jLong *batch_shape, const Nd4jLong *batch_strides), LIBND4J_TYPES, STB_DIM, STB_BOOL);
#undef STB_BOOL
#undef STB_DIM
*/
}
}
}
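// ---------------------------------------------------------------------------
// Illustrative sketch (not part of libnd4j): the reshape/permute vectors built
// in batchToSpaceND()/spaceToBatchND() above, evaluated for the common 4D case
// with two spatial dimensions, blockShape = {2, 2}, output batch size 8 and
// input sizes {32, 4, 4, 3}. Calling demoBatchToSpaceNDLayout() prints
//   reshape: 2 2 8 4 4 3
//   permute: 2 3 0 4 1 5
// which matches the hard-coded permutei({2, 3,0, 4,1, 5}) in batchToSpace().
// Assumes <cstdio> and <vector>; all sizes and the function name are
// illustrative only.
// ---------------------------------------------------------------------------
static void demoBatchToSpaceNDLayout() {
  const int numOfSpatialDims = 2, rank = 4;
  const long blockShape[2] = {2, 2};
  const long outBatch = 8;
  const long inSizes[4] = {32, 4, 4, 3};
  std::vector<long> temp(numOfSpatialDims + rank);
  int i;
  // first reshape: {blockShape..., output batch, remaining input sizes...}
  for (i = 0; i < numOfSpatialDims; ++i) temp[i] = blockShape[i];
  temp[i++] = outBatch;
  for (int j = 1; j < rank; ++i, ++j) temp[i] = inSizes[j];
  std::printf("reshape:");
  for (size_t k = 0; k < temp.size(); ++k) std::printf(" %ld", temp[k]);
  // permutation: the batch axis (index numOfSpatialDims) first, then each
  // spatial axis interleaved with its block axis, then the remaining axes
  temp[0] = numOfSpatialDims;
  for (i = 1; i <= numOfSpatialDims; ++i) {
    temp[2 * i - 1] = numOfSpatialDims + i;
    temp[2 * i] = i - 1;
  }
  for (i = 2 * numOfSpatialDims + 1; i < (int)temp.size(); ++i) temp[i] = i;
  std::printf("\npermute:");
  for (size_t k = 0; k < temp.size(); ++k) std::printf(" %ld", temp[k]);
  std::printf("\n");
}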
|
the_stack
|
#include <iostream>
#include "viennacl.hpp"
#include "viennacl_private.hpp"
//include basic scalar and vector types of ViennaCL
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
//include the generic inner product functions of ViennaCL
#include "viennacl/linalg/inner_prod.hpp"
//include the generic norm functions of ViennaCL
#include "viennacl/linalg/norm_1.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/norm_inf.hpp"
// IxAMAX
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostiSamax(ViennaCLBackend /*backend*/, ViennaCLInt n,
ViennaCLInt *index,
float *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
*index = static_cast<ViennaCLInt>(viennacl::linalg::index_norm_inf(v1));
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostiDamax(ViennaCLBackend /*backend*/, ViennaCLInt n,
ViennaCLInt *index,
double *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
*index = static_cast<ViennaCLInt>(viennacl::linalg::index_norm_inf(v1));
return ViennaCLSuccess;
}
// xASUM
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSasum(ViennaCLBackend /*backend*/, ViennaCLInt n,
float *alpha,
float *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
*alpha = viennacl::linalg::norm_1(v1);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDasum(ViennaCLBackend /*backend*/, ViennaCLInt n,
double *alpha,
double *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
*alpha = viennacl::linalg::norm_1(v1);
return ViennaCLSuccess;
}
// xAXPY
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSaxpy(ViennaCLBackend /*backend*/, ViennaCLInt n,
float alpha,
float *x, ViennaCLInt offx, int incx,
float *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<float> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
v2 += alpha * v1;
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDaxpy(ViennaCLBackend /*backend*/, ViennaCLInt n,
double alpha,
double *x, ViennaCLInt offx, int incx,
double *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<double> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
v2 += alpha * v1;
return ViennaCLSuccess;
}
// xCOPY
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostScopy(ViennaCLBackend /*backend*/, ViennaCLInt n,
float *x, ViennaCLInt offx, int incx,
float *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<float> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
v2 = v1;
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDcopy(ViennaCLBackend /*backend*/, ViennaCLInt n,
double *x, ViennaCLInt offx, int incx,
double *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<double> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
v2 = v1;
return ViennaCLSuccess;
}
// xDOT
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSdot(ViennaCLBackend /*backend*/, ViennaCLInt n,
float *alpha,
float *x, ViennaCLInt offx, int incx,
float *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<float> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
*alpha = viennacl::linalg::inner_prod(v1, v2);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDdot(ViennaCLBackend /*backend*/, ViennaCLInt n,
double *alpha,
double *x, ViennaCLInt offx, int incx,
double *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<double> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
*alpha = viennacl::linalg::inner_prod(v1, v2);
return ViennaCLSuccess;
}
// xNRM2
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSnrm2(ViennaCLBackend /*backend*/, ViennaCLInt n,
float *alpha,
float *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
*alpha = viennacl::linalg::norm_2(v1);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDnrm2(ViennaCLBackend /*backend*/, ViennaCLInt n,
double *alpha,
double *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
*alpha = viennacl::linalg::norm_2(v1);
return ViennaCLSuccess;
}
// xROT
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSrot(ViennaCLBackend /*backend*/, ViennaCLInt n,
float *x, ViennaCLInt offx, int incx,
float *y, ViennaCLInt offy, int incy,
float c, float s)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<float> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
viennacl::linalg::plane_rotation(v1, v2, c, s);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDrot(ViennaCLBackend /*backend*/, ViennaCLInt n,
double *x, ViennaCLInt offx, int incx,
double *y, ViennaCLInt offy, int incy,
double c, double s)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<double> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
viennacl::linalg::plane_rotation(v1, v2, c, s);
return ViennaCLSuccess;
}
// xSCAL
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSscal(ViennaCLBackend /*backend*/, ViennaCLInt n,
float alpha,
float *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
v1 *= alpha;
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDscal(ViennaCLBackend /*backend*/, ViennaCLInt n,
double alpha,
double *x, ViennaCLInt offx, int incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
v1 *= alpha;
return ViennaCLSuccess;
}
// xSWAP
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSswap(ViennaCLBackend /*backend*/, ViennaCLInt n,
float *x, ViennaCLInt offx, int incx,
float *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<float> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
viennacl::swap(v1, v2);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDswap(ViennaCLBackend /*backend*/, ViennaCLInt n,
double *x, ViennaCLInt offx, int incx,
double *y, ViennaCLInt offy, int incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<double> v2(y, viennacl::MAIN_MEMORY, size_type(n), size_type(offy), difference_type(incy));
viennacl::swap(v1, v2);
return ViennaCLSuccess;
}
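// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). The backend
// argument is unused by these host code paths (its parameter name is commented
// out above), so the sketch passes NULL and assumes ViennaCLBackend is a
// pointer-like handle; the function name and values are illustrative only.
// ---------------------------------------------------------------------------
static void exampleHostSaxpy()
{
  float x[4] = {1.f, 2.f, 3.f, 4.f};
  float y[4] = {10.f, 10.f, 10.f, 10.f};
  // y <- 2*x + y on contiguous vectors: offset 0, stride 1
  ViennaCLHostSaxpy(NULL, 4, 2.0f, x, 0, 1, y, 0, 1);
  // y is now {12, 14, 16, 18}
}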
|
the_stack
|
#include "modules/perception/inference/tensorrt/plugins/kernels.h"
#include "modules/perception/inference/tensorrt/plugins/rpn_proposal_ssd_plugin.h"
namespace apollo {
namespace perception {
namespace inference {
// TODO(chenjiahao): add heat_map_b as anchor_offset
// output anchors dims: [H, W, num_anchor_per_point, 4]
__global__ void generate_anchors_kernel(const int height, const int width,
const float anchor_stride,
const int num_anchor_per_point,
const float *anchor_heights,
const float *anchor_widths,
float *anchors) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int num_anchor = height * width * num_anchor_per_point;
if (index >= num_anchor) {
return;
}
float anchor_offset = 0;
int pos_index = index / num_anchor_per_point;
int anchor_id = index % num_anchor_per_point;
int w_i = pos_index % width;
int h_i = pos_index / width;
// center coordinates
float x_ctr = w_i * anchor_stride + anchor_offset;
float y_ctr = h_i * anchor_stride + anchor_offset;
float x_min = x_ctr - 0.5 * (anchor_widths[anchor_id] - 1);
float y_min = y_ctr - 0.5 * (anchor_heights[anchor_id] - 1);
float x_max = x_ctr + 0.5 * (anchor_widths[anchor_id] - 1);
float y_max = y_ctr + 0.5 * (anchor_heights[anchor_id] - 1);
anchors[index * 4] = x_min;
anchors[index * 4 + 1] = y_min;
anchors[index * 4 + 2] = x_max;
anchors[index * 4 + 3] = y_max;
}
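// ---------------------------------------------------------------------------
// Host-side reference (not part of the original plugin) for the anchor layout
// produced above: with anchor_offset = 0, the box for feature-map cell
// (h_i, w_i) and anchor index anchor_id is centred at the cell position scaled
// by the anchor stride. Names are illustrative only.
// ---------------------------------------------------------------------------
static void reference_anchor(int h_i, int w_i, float anchor_stride,
                             float anchor_height, float anchor_width,
                             float box[4]) {
  const float x_ctr = w_i * anchor_stride;
  const float y_ctr = h_i * anchor_stride;
  box[0] = x_ctr - 0.5f * (anchor_width - 1);   // x_min
  box[1] = y_ctr - 0.5f * (anchor_height - 1);  // y_min
  box[2] = x_ctr + 0.5f * (anchor_width - 1);   // x_max
  box[3] = y_ctr + 0.5f * (anchor_height - 1);  // y_max
}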
// in_boxes dims: [N, num_box_per_point * 4, H, W],
// out_boxes dims: [N, H * W * num_box_per_point, 4]
template <typename Dtype>
__global__ void reshape_boxes_kernel(const int nthreads, const Dtype *in_boxes,
const int height, const int width,
const int num_box_per_point,
Dtype *out_boxes) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int num_point = height * width;
int batch_id = index / num_point / num_box_per_point / 4;
int feature_id = index % 4;
int box_id = (index / 4) % num_box_per_point;
int point_id = (index / num_box_per_point / 4) % num_point;
int in_index =
((batch_id * num_box_per_point + box_id) * 4 + feature_id) * num_point +
point_id;
out_boxes[index] = in_boxes[in_index];
}
}
// in_scores dims: [N, 2 * num_box_per_point, H, W],
// out_scores dims: [N, H * W * num_box_per_point, 2]
template <typename Dtype>
__global__ void reshape_scores_kernel(const int nthreads,
const Dtype *in_scores, const int height,
const int width,
const int num_box_per_point,
Dtype *out_scores) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int num_point = height * width;
int batch_id = index / num_point / num_box_per_point / 2;
int class_id = index % 2;
int box_id = (index / 2) % num_box_per_point;
int point_id = (index / num_box_per_point / 2) % num_point;
int in_index =
((batch_id * 2 + class_id) * num_box_per_point + box_id) * num_point +
point_id;
out_scores[index] = in_scores[in_index];
}
}
int RPNProposalSSDPlugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace,
cudaStream_t stream) {
// dimsNCHW: [N, 2 * num_anchor_per_point, H, W]
const float *rpn_cls_prob_reshape =
reinterpret_cast<const float *>(inputs[0]);
// dimsNCHW: [N, num_anchor_per_point * 4, H, W]
const float *rpn_bbox_pred = reinterpret_cast<const float *>(inputs[1]);
// dims: [N, 6, 1, 1]
const float *im_info = reinterpret_cast<const float *>(inputs[2]);
float *out_rois = reinterpret_cast<float *>(outputs[0]);
float *host_im_info = new float[batchSize * 6]();
BASE_CUDA_CHECK(cudaMemcpyAsync(host_im_info, im_info,
                                batchSize * 6 * sizeof(float),
                                cudaMemcpyDeviceToHost, stream));
// The copy above is asynchronous on `stream`; synchronize before reading
// host_im_info on the host so the values are valid.
BASE_CUDA_CHECK(cudaStreamSynchronize(stream));
const int origin_height = (int)(host_im_info[0]);
const int origin_width = (int)(host_im_info[1]);
int num_anchor = height_ * width_ * num_anchor_per_point_;
int rpn_bbox_pred_size = batchSize * num_anchor * 4;
int scores_size = batchSize * num_anchor * 2;
int anchors_size = num_anchor * 4;
int out_rois_size = batchSize * top_n_ * 5;
// Using thrust::fill might cause crash
float *init_out_rois = new float[out_rois_size]();
std::fill_n(init_out_rois, out_rois_size, -1.0f);
BASE_CUDA_CHECK(cudaMemcpyAsync(out_rois, init_out_rois,
out_rois_size * sizeof(float),
cudaMemcpyHostToDevice, stream));
int block_size, nthreads;
// reshape to [N, num_anchor, 4]
float *temp_rpn_bbox_pred;
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&temp_rpn_bbox_pred),
rpn_bbox_pred_size * sizeof(float)));
nthreads = rpn_bbox_pred_size;
block_size = (nthreads - 1) / thread_size_ + 1;
reshape_boxes_kernel<<<block_size, thread_size_, 0, stream>>>(
nthreads, rpn_bbox_pred, height_, width_, num_anchor_per_point_,
temp_rpn_bbox_pred);
// Normalization
float *dev_bbox_mean, *dev_bbox_std;
BASE_CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&dev_bbox_mean), 4 * sizeof(float)));
BASE_CUDA_CHECK(
cudaMalloc(reinterpret_cast<void **>(&dev_bbox_std), 4 * sizeof(float)));
BASE_CUDA_CHECK(cudaMemcpyAsync(dev_bbox_mean, bbox_mean_, 4 * sizeof(float),
cudaMemcpyHostToDevice, stream));
BASE_CUDA_CHECK(cudaMemcpyAsync(dev_bbox_std, bbox_std_, 4 * sizeof(float),
cudaMemcpyHostToDevice, stream));
repeatedly_mul_cuda(block_size, thread_size_, 0, stream, nthreads,
temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_std, 4);
repeatedly_add_cuda(block_size, thread_size_, 0, stream, nthreads,
temp_rpn_bbox_pred, temp_rpn_bbox_pred, dev_bbox_mean, 4);
// generate anchors
float *anchors, *dev_anchor_heights, *dev_anchor_widths;
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&anchors),
anchors_size * sizeof(float)));
BASE_CUDA_CHECK(
cudaMemsetAsync(anchors, 0, anchors_size * sizeof(float), stream));
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_anchor_heights),
num_anchor_per_point_ * sizeof(float)));
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_anchor_widths),
num_anchor_per_point_ * sizeof(float)));
BASE_CUDA_CHECK(cudaMemsetAsync(
dev_anchor_heights, 0, num_anchor_per_point_ * sizeof(float), stream));
BASE_CUDA_CHECK(cudaMemsetAsync(
dev_anchor_widths, 0, num_anchor_per_point_ * sizeof(float), stream));
BASE_CUDA_CHECK(cudaMemcpyAsync(dev_anchor_heights, anchor_heights_,
num_anchor_per_point_ * sizeof(float),
cudaMemcpyHostToDevice, stream));
BASE_CUDA_CHECK(cudaMemcpyAsync(dev_anchor_widths, anchor_widths_,
num_anchor_per_point_ * sizeof(float),
cudaMemcpyHostToDevice, stream));
block_size = (anchors_size - 1) / thread_size_ + 1;
generate_anchors_kernel<<<block_size, thread_size_, 0, stream>>>(
height_, width_, heat_map_a_, num_anchor_per_point_, dev_anchor_heights,
dev_anchor_widths, anchors);
// decode bbox
float *proposals;
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&proposals),
rpn_bbox_pred_size * sizeof(float)));
BASE_CUDA_CHECK(cudaMemsetAsync(proposals, 0,
rpn_bbox_pred_size * sizeof(float), stream));
nthreads = batchSize * num_anchor;
block_size = (nthreads - 1) / thread_size_ + 1;
bbox_transform_inv_cuda(block_size, thread_size_, 0, stream, nthreads,
anchors, temp_rpn_bbox_pred, num_anchor, 1,
proposals);
// clip boxes, i.e. refine proposals which are out of map
if (refine_out_of_map_bbox_) {
nthreads = rpn_bbox_pred_size;
block_size = (nthreads - 1) / thread_size_ + 1;
clip_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals,
(float)origin_height, (float)origin_width);
}
// reshape scores to [N, num_anchor, 2]
float *temp_scores;
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&temp_scores),
scores_size * sizeof(float)));
nthreads = scores_size;
block_size = (nthreads - 1) / thread_size_ + 1;
reshape_scores_kernel<<<block_size, thread_size_, 0, stream>>>(
nthreads, rpn_cls_prob_reshape, height_, width_, num_anchor_per_point_,
temp_scores);
// filter boxes according to min_size_mode and threshold_objectness
float *filtered_proposals, *filtered_scores;
int *filtered_count;
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_proposals),
rpn_bbox_pred_size * sizeof(float)));
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_scores),
batchSize * num_anchor * sizeof(float)));
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_count),
batchSize * sizeof(int)));
BASE_CUDA_CHECK(cudaMemsetAsync(filtered_proposals, 0,
rpn_bbox_pred_size * sizeof(float), stream));
BASE_CUDA_CHECK(cudaMemsetAsync(
filtered_scores, 0, batchSize * num_anchor * sizeof(float), stream));
BASE_CUDA_CHECK(
cudaMemsetAsync(filtered_count, 0, batchSize * sizeof(int), stream));
nthreads = batchSize * num_anchor;
block_size = (nthreads - 1) / thread_size_ + 1;
// TODO(chenjiahao): filter area
filter_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, proposals,
temp_scores, nullptr, num_anchor, 1, 2, 0, 0, 1,
min_size_mode_, min_size_h_, min_size_w_,
threshold_objectness_, filtered_proposals, filtered_scores,
nullptr, filtered_count);
int *host_filtered_count = new int[batchSize]();
  BASE_CUDA_CHECK(cudaMemcpyAsync(host_filtered_count, filtered_count,
                                  batchSize * sizeof(int),
                                  cudaMemcpyDeviceToHost, stream));
  // host_filtered_count is consumed on the host below, so wait for the copy.
  BASE_CUDA_CHECK(cudaStreamSynchronize(stream));
  // descending sort proposals by score
int *sorted_indexes;
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&sorted_indexes),
batchSize * num_anchor * sizeof(int)));
for (int i = 0; i < batchSize; ++i) {
thrust::sequence(thrust::device, sorted_indexes + i * num_anchor,
sorted_indexes + i * num_anchor + host_filtered_count[i]);
thrust::sort_by_key(
thrust::device, filtered_scores + size_t(i * num_anchor),
filtered_scores + size_t(i * num_anchor + host_filtered_count[i]),
sorted_indexes + i * num_anchor, thrust::greater<float>());
}
// keep max N candidates
float *pre_nms_proposals;
BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&pre_nms_proposals),
batchSize * max_candidate_n_ * 4 * sizeof(float)));
BASE_CUDA_CHECK(cudaMemsetAsync(
pre_nms_proposals, 0, batchSize * max_candidate_n_ * 4 * sizeof(float),
stream));
nthreads = batchSize * max_candidate_n_;
block_size = (nthreads - 1) / thread_size_ + 1;
keep_topN_boxes_cuda(block_size, thread_size_, 0, stream, nthreads,
filtered_proposals, nullptr, nullptr, sorted_indexes,
filtered_count, false, num_anchor, 0, max_candidate_n_,
pre_nms_proposals, nullptr, nullptr);
// Nms, keep top N proposals and output final proposals
// output dims: [num_roi, 5] (axis-1: batch_id, x_min, y_min, x_max, y_max)
int acc_box_num = 0;
for (int i = 0; i < batchSize; ++i) {
int cur_filter_count = std::min(host_filtered_count[i], max_candidate_n_);
NmsForward(
false, cur_filter_count, 4, overlap_ratio_, max_candidate_n_, top_n_, i,
0, pre_nms_proposals + size_t(i * max_candidate_n_ * 4), nullptr,
nullptr, out_rois + size_t(acc_box_num * 5), &acc_box_num, stream);
}
out_rois_num_ = acc_box_num;
// Free cuda memory
BASE_CUDA_CHECK(cudaFree(temp_rpn_bbox_pred));
BASE_CUDA_CHECK(cudaFree(dev_bbox_mean));
BASE_CUDA_CHECK(cudaFree(dev_bbox_std));
BASE_CUDA_CHECK(cudaFree(anchors));
BASE_CUDA_CHECK(cudaFree(dev_anchor_heights));
BASE_CUDA_CHECK(cudaFree(dev_anchor_widths));
BASE_CUDA_CHECK(cudaFree(proposals));
BASE_CUDA_CHECK(cudaFree(temp_scores));
BASE_CUDA_CHECK(cudaFree(filtered_proposals));
BASE_CUDA_CHECK(cudaFree(filtered_scores));
BASE_CUDA_CHECK(cudaFree(filtered_count));
BASE_CUDA_CHECK(cudaFree(sorted_indexes));
BASE_CUDA_CHECK(cudaFree(pre_nms_proposals));
// Free host memory
delete[] host_im_info;
delete[] host_filtered_count;
delete[] init_out_rois;
return 0;
}
} // namespace inference
} // namespace perception
} // namespace apollo
//
// This project supports comparisons against multi-core SSE-enabled CPUs using
// conditional compilation of the SSW library:
//
// https://github.com/mengyao/Complete-Striped-Smith-Waterman-Library
//
// In order to perform these additional tests, the user must download ssw.h and ssw.c
// from the above repository, copy them in the sw-benchmark directory, and run cmake with
// the option -DSSWLIB=ON.
//
#if defined(SSWLIB)
#include "ssw.h"
#include <omp.h>
#endif
#include <nvbio/basic/timer.h>
#include <nvbio/basic/console.h>
#include <nvbio/basic/cuda/ldg.h>
#include <nvbio/basic/packedstream.h>
#include <nvbio/basic/packedstream_loader.h>
#include <nvbio/basic/vector_view.h>
#include <nvbio/basic/shared_pointer.h>
#include <nvbio/io/sequence/sequence.h>
#include <nvbio/fasta/fasta.h>
#include <nvbio/basic/dna.h>
#include <nvbio/alignment/alignment.h>
#include <nvbio/alignment/batched.h>
#include <nvbio/alignment/sink.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
enum { MAX_READ_LENGTH = 1024 };
using namespace nvbio;
enum { CACHE_SIZE = 64 };
typedef nvbio::lmem_cache_tag<CACHE_SIZE> lmem_cache_tag_type;
typedef nvbio::uncached_tag uncached_tag_type;
enum { REF_BITS = 2 };
enum { REF_BIG_ENDIAN = false };
//
// An alignment stream class to be used in conjunction with the BatchAlignmentScore class
//
template <typename t_aligner_type, typename cache_type = lmem_cache_tag_type>
struct AlignmentStream
{
typedef t_aligner_type aligner_type;
typedef nvbio::cuda::ldg_pointer<uint32> storage_iterator;
typedef nvbio::PackedStringLoader<
storage_iterator,
io::SequenceDataTraits<DNA_N>::SEQUENCE_BITS,
io::SequenceDataTraits<DNA_N>::SEQUENCE_BIG_ENDIAN,cache_type> pattern_loader_type;
typedef typename pattern_loader_type::input_iterator uncached_pattern_iterator;
typedef typename pattern_loader_type::iterator pattern_iterator;
typedef nvbio::vector_view<pattern_iterator> pattern_string;
typedef nvbio::PackedStringLoader<
storage_iterator,
REF_BITS,
REF_BIG_ENDIAN,uncached_tag_type> text_loader_type;
typedef typename text_loader_type::input_iterator uncached_text_iterator;
typedef typename text_loader_type::iterator text_iterator;
typedef nvbio::vector_view<text_iterator> text_string;
// an alignment context
struct context_type
{
int32 min_score;
aln::BestSink<int32> sink;
};
// a container for the strings to be aligned
struct strings_type
{
pattern_loader_type pattern_loader;
text_loader_type text_loader;
pattern_string pattern;
aln::trivial_quality_string quals;
text_string text;
};
// constructor
AlignmentStream(
aligner_type _aligner,
const uint32 _count,
const uint32* _offsets,
const uint32* _patterns,
const uint32 _max_pattern_len,
const uint32 _total_pattern_len,
const uint32* _text,
const uint32 _text_len,
int16* _scores) :
m_aligner ( _aligner ),
m_count (_count),
m_max_pattern_len (_max_pattern_len),
m_total_pattern_len (_total_pattern_len),
m_text_len (_text_len),
m_offsets (_offsets),
m_patterns (storage_iterator(_patterns)),
m_text (storage_iterator(_text)),
m_scores (_scores) {}
// get the aligner
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
const aligner_type& aligner() const { return m_aligner; };
// return the maximum pattern length
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 max_pattern_length() const { return m_max_pattern_len; }
// return the maximum text length
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 max_text_length() const { return m_text_len; }
// return the stream size
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 size() const { return m_count; }
// return the i-th pattern's length
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 pattern_length(const uint32 i, context_type* context) const { return m_offsets[i+1] - m_offsets[i]; }
// return the i-th text's length
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 text_length(const uint32 i, context_type* context) const { return m_text_len; }
// return the total number of cells
uint64 cells() const { return uint64( m_total_pattern_len ) * uint64( m_text_len ); }
// initialize the i-th context
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool init_context(
const uint32 i,
context_type* context) const
{
context->min_score = Field_traits<int32>::min();
return true;
}
    // load the i-th pattern and a window of the text
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void load_strings(
const uint32 i,
const uint32 window_begin,
const uint32 window_end,
const context_type* context,
strings_type* strings) const
{
const uint32 offset = m_offsets[i];
const uint32 length = m_offsets[i+1] - offset;
strings->text = text_string( m_text_len,
strings->text_loader.load(
m_text,
m_text_len,
make_uint2( window_begin, window_end ),
false ) );
strings->pattern = pattern_string( length,
strings->pattern_loader.load( m_patterns + offset, length ) );
}
// handle the output
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void output(
const uint32 i,
const context_type* context) const
{
// copy the output score
m_scores[i] = context->sink.score;
}
aligner_type m_aligner;
uint32 m_count;
uint32 m_max_pattern_len;
uint32 m_total_pattern_len;
uint32 m_text_len;
const uint32* m_offsets;
uncached_pattern_iterator m_patterns;
uncached_text_iterator m_text;
int16* m_scores;
};
// A simple kernel to test the speed of alignment without the possible overheads of the BatchAlignmentScore interface
//
template <uint32 BLOCKDIM, uint32 MAX_PATTERN_LEN, typename aligner_type, typename score_type>
__global__ void alignment_test_kernel(
const aligner_type aligner,
const uint32 N_probs,
const uint32* offsets,
const uint32* pattern_ptr,
const uint32 text_len,
const uint32* text_ptr,
score_type* score)
{
const uint32 tid = blockIdx.x * BLOCKDIM + threadIdx.x;
typedef lmem_cache_tag_type lmem_cache_type;
typedef nvbio::cuda::ldg_pointer<uint32> storage_iterator;
typedef nvbio::PackedStringLoader<
storage_iterator,
io::SequenceDataTraits<DNA_N>::SEQUENCE_BITS,
io::SequenceDataTraits<DNA_N>::SEQUENCE_BIG_ENDIAN,lmem_cache_type> pattern_loader_type;
typedef typename pattern_loader_type::input_iterator uncached_pattern_iterator;
typedef typename pattern_loader_type::iterator pattern_iterator;
typedef nvbio::vector_view<pattern_iterator> pattern_string;
typedef nvbio::PackedStringLoader<
storage_iterator,
REF_BITS,
REF_BIG_ENDIAN,uncached_tag_type> text_loader_type;
typedef typename text_loader_type::input_iterator uncached_text_iterator;
typedef typename text_loader_type::iterator text_iterator;
typedef nvbio::vector_view<text_iterator> text_string;
if (tid >= N_probs)
return;
const uint32 pattern_off = offsets[tid];
const uint32 pattern_len = offsets[tid+1] - pattern_off;
pattern_loader_type pattern_loader;
pattern_string pattern = pattern_string( pattern_len, pattern_loader.load( uncached_pattern_iterator( pattern_ptr ) + pattern_off, pattern_len ) );
text_loader_type text_loader;
text_string text = text_string( text_len, text_loader.load( uncached_text_iterator( text_ptr ), text_len ) );
aln::BestSink<int32> sink;
aln::alignment_score<MAX_PATTERN_LEN>(
aligner,
pattern,
aln::trivial_quality_string(),
text,
Field_traits<int32>::min(),
sink );
score[tid] = sink.score;
}
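// ASCII -> 2-bit nucleotide code used to pack the reference:
// 'A'/'a' -> 0, 'C'/'c' -> 1, 'G'/'g' -> 2, 'T'/'t' -> 3, '-' -> 5,
// anything else -> 4 (unknown; ReferenceCoder below maps codes >= 4 to 0).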
unsigned char nst_nt4_table[256] = {
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5 /*'-'*/, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
};
struct ReferenceCounter
{
ReferenceCounter() : m_size(0) {}
void begin_read() {}
void end_read() {}
void id(const uint8 c) {}
void read(const uint8 c) { ++m_size; }
uint32 m_size;
};
struct ReferenceCoder
{
typedef PackedStream<uint32*,uint8,2,false> stream_type;
ReferenceCoder(uint32* storage) :
m_size(0), m_stream( storage )
{}
void begin_read() {}
void end_read() {}
void id(const uint8 c) {}
void read(const uint8 s)
{
const uint8 c = nst_nt4_table[s];
m_stream[ m_size++ ] = c < 4 ? c : 0;
}
uint32 m_size;
stream_type m_stream;
};
// execute a given batch alignment type on a given stream
//
// \tparam batch_type a \ref BatchAlignment "Batch Alignment"
// \tparam stream_type a stream compatible with the given batch_type
//
// \return elapsed time in seconds
//
template <typename batch_type, typename stream_type>
float enact_batch(
batch_type& batch,
const stream_type& stream)
{
    // query the amount of temporary storage needed by the batch
const uint64 temp_size = batch_type::max_temp_storage(
stream.max_pattern_length(),
stream.max_text_length(),
stream.size() );
Timer timer;
timer.start();
// enact the batch
batch.enact( stream, temp_size, NULL );
cudaDeviceSynchronize();
timer.stop();
return timer.seconds();
}
// execute and time a batch of full DP alignments using BatchAlignmentScore
//
template <typename scheduler_type, typename stream_type>
void batch_score_profile(
const stream_type stream)
{
typedef aln::BatchedAlignmentScore<stream_type, scheduler_type> batch_type; // our batch type
// setup a batch
batch_type batch;
const float time = enact_batch(
batch,
stream );
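    // print throughput in GCUPS (1e9 DP cell updates per second)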
fprintf(stderr," %5.1f", 1.0e-9f * float(stream.cells())/time );
}
// execute and time the batch_score<scheduler> algorithm for all possible schedulers
//
template <typename aligner_type>
void batch_score_profile_all(
const aligner_type aligner,
const uint32 n_tasks,
const uint32* offsets_dvec,
const uint32* pattern_dvec,
const uint32 max_pattern_len,
const uint32 total_pattern_len,
const uint32* text_dvec,
const uint32 text_len,
int16* score_dvec)
{
{
typedef AlignmentStream<aligner_type> stream_type;
// create a stream
stream_type stream(
aligner,
n_tasks,
offsets_dvec,
pattern_dvec,
max_pattern_len,
total_pattern_len,
text_dvec,
text_len,
score_dvec );
// test the DeviceThreadScheduler
batch_score_profile<aln::DeviceThreadScheduler>( stream );
// test the DeviceStagedThreadScheduler
//batch_score_profile<aln::DeviceStagedThreadScheduler>( stream );
}
{
const uint32 BLOCKDIM = 128;
const uint32 N_BLOCKS = (n_tasks + BLOCKDIM-1) / BLOCKDIM;
Timer timer;
timer.start();
// enact the batch
alignment_test_kernel<BLOCKDIM,MAX_READ_LENGTH> <<<N_BLOCKS,BLOCKDIM>>>(
aligner,
n_tasks,
offsets_dvec,
pattern_dvec,
text_len,
text_dvec,
score_dvec );
cudaDeviceSynchronize();
timer.stop();
const float time = timer.seconds();
fprintf(stderr," %5.1f", 1.0e-9f * float(uint64(total_pattern_len)*uint64(text_len))/time );
}
fprintf(stderr, " GCUPS\n");
}
enum AlignmentTest
{
ALL = 0xFFFFFFFFu,
ED = 1u,
SW = 2u,
GOTOH = 4u,
ED_BANDED = 8u,
SW_BANDED = 16u,
GOTOH_BANDED = 32u,
SSW = 64u
};
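// Command-line usage (inferred from the argument parsing below):
//
//   sw-benchmark [-tests ed:sw:gotoh:ssw] [-threads N] <reads-file> <reference-fasta>
//
// The last two arguments are the read file and the reference FASTA; -tests
// takes a colon-separated list of aligners to benchmark (default: all), and
// -threads sets the number of CPU threads used for the SSW comparison.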
int main(int argc, char* argv[])
{
uint32 TEST_MASK = 0xFFFFFFFFu;
const char* reads_name = argv[argc-2];
const char* ref_name = argv[argc-1];
#if defined(SSWLIB)
    uint32 threads = omp_get_num_procs();
#else
    uint32 threads = 1u; // OpenMP is only pulled in with -DSSWLIB=ON
#endif
io::QualityEncoding qencoding = io::Phred33;
for (int i = 0; i < argc-2; ++i)
{
if (strcmp( argv[i], "-tests" ) == 0)
{
const std::string tests_string( argv[++i] );
char temp[256];
const char* begin = tests_string.c_str();
const char* end = begin;
TEST_MASK = 0u;
while (1)
{
while (*end != ':' && *end != '\0')
{
temp[end - begin] = *end;
end++;
}
temp[end - begin] = '\0';
if (strcmp( temp, "ed" ) == 0)
TEST_MASK |= ED;
else if (strcmp( temp, "sw" ) == 0)
TEST_MASK |= SW;
else if (strcmp( temp, "gotoh" ) == 0)
TEST_MASK |= GOTOH;
else if (strcmp( temp, "ssw" ) == 0)
TEST_MASK |= SSW;
if (*end == '\0')
break;
++end; begin = end;
}
}
else if (strcmp( argv[i], "-threads" ) == 0)
threads = atoi( argv[++i] );
}
fprintf(stderr,"sw-benchmark... started\n");
log_visible(stderr, "opening read file \"%s\"\n", reads_name);
SharedPointer<nvbio::io::SequenceDataStream> read_data_file(
nvbio::io::open_sequence_file(reads_name,
qencoding)
);
log_visible(stderr, "reading reference file \"%s\"... started\n", ref_name);
// read the reference
thrust::host_vector<uint32> h_ref_storage;
uint32 ref_length;
uint32 ref_words;
{
ReferenceCounter counter;
FASTA_inc_reader fasta( ref_name );
if (fasta.valid() == false)
{
fprintf(stderr, " error: unable to open reference file \"%s\"\n", ref_name);
exit(1);
}
while (fasta.read( 1024, counter ) == 1024);
ref_length = counter.m_size;
ref_words = (ref_length + 15)/16; // # of words at 2 bits per symbol
}
{
h_ref_storage.resize( ref_words );
ReferenceCoder coder( &h_ref_storage[0] );
FASTA_inc_reader fasta( ref_name );
if (fasta.valid() == false)
{
fprintf(stderr, " error: unable to open reference file \"%s\"\n", ref_name);
exit(1);
}
while (fasta.read( 1024, coder ) == 1024);
}
log_visible(stderr, "reading reference file \"%s\"... done (%u bps)\n", ref_name, ref_length);
typedef PackedStream<uint32*,uint8,REF_BITS,REF_BIG_ENDIAN> ref_stream_type;
thrust::device_vector<uint32> d_ref_storage( h_ref_storage );
ref_stream_type d_ref_stream( nvbio::raw_pointer( d_ref_storage ) );
const uint32 batch_size = 256*1024;
thrust::device_vector<int16> score_dvec( batch_size, 0 );
#if defined(SSWLIB)
std::vector<int8_t> unpacked_ref( ref_length );
{
ref_stream_type h_ref_stream( nvbio::raw_pointer( h_ref_storage ) );
for (uint32 i = 0; i < ref_length; ++i)
unpacked_ref[i] = h_ref_stream[i];
}
// Now set the number of threads
omp_set_num_threads( threads );
#pragma omp parallel
{
fprintf(stderr, " running on multiple threads\n");
}
#endif
io::SequenceDataHost h_read_data;
while (io::next( DNA_N, &h_read_data, read_data_file.get(), batch_size ))
{
// build the device side representation
const io::SequenceDataDevice d_read_data( h_read_data );
const uint32 n_read_symbols = h_read_data.bps();
fprintf(stderr," %u reads, avg: %u bps, max: %u bps\n",
h_read_data.size(),
h_read_data.avg_sequence_len(),
h_read_data.max_sequence_len());
if (TEST_MASK & GOTOH)
{
aln::SimpleGotohScheme scoring;
scoring.m_match = 2;
scoring.m_mismatch = -1;
scoring.m_gap_open = -2;
scoring.m_gap_ext = -1;
fprintf(stderr," testing Gotoh scoring speed...\n");
fprintf(stderr," %15s : ", "global");
{
batch_score_profile_all(
aln::make_gotoh_aligner<aln::GLOBAL,aln::TextBlockingTag>( scoring ),
d_read_data.size(),
nvbio::plain_view( d_read_data ).sequence_index(),
nvbio::plain_view( d_read_data ).sequence_storage(),
d_read_data.max_sequence_len(),
n_read_symbols,
nvbio::raw_pointer( d_ref_storage ),
ref_length,
nvbio::raw_pointer( score_dvec ) );
}
fprintf(stderr," %15s : ", "semi-global");
{
batch_score_profile_all(
aln::make_gotoh_aligner<aln::SEMI_GLOBAL,aln::TextBlockingTag>( scoring ),
d_read_data.size(),
nvbio::plain_view( d_read_data ).sequence_index(),
nvbio::plain_view( d_read_data ).sequence_storage(),
d_read_data.max_sequence_len(),
n_read_symbols,
nvbio::raw_pointer( d_ref_storage ),
ref_length,
nvbio::raw_pointer( score_dvec ) );
}
fprintf(stderr," %15s : ", "local");
{
batch_score_profile_all(
aln::make_gotoh_aligner<aln::LOCAL,aln::TextBlockingTag>( scoring ),
d_read_data.size(),
nvbio::plain_view( d_read_data ).sequence_index(),
nvbio::plain_view( d_read_data ).sequence_storage(),
d_read_data.max_sequence_len(),
n_read_symbols,
nvbio::raw_pointer( d_ref_storage ),
ref_length,
nvbio::raw_pointer( score_dvec ) );
}
}
if (TEST_MASK & ED)
{
fprintf(stderr," testing Edit Distance scoring speed...\n");
fprintf(stderr," %15s : ", "semi-global");
{
batch_score_profile_all(
aln::make_edit_distance_aligner<aln::SEMI_GLOBAL,aln::TextBlockingTag>(),
d_read_data.size(),
nvbio::plain_view( d_read_data ).sequence_index(),
nvbio::plain_view( d_read_data ).sequence_storage(),
d_read_data.max_sequence_len(),
n_read_symbols,
nvbio::raw_pointer( d_ref_storage ),
ref_length,
nvbio::raw_pointer( score_dvec ) );
}
}
#if defined(SSWLIB)
if (TEST_MASK & SSW)
{
fprintf(stderr," testing SSW scoring speed...\n");
fprintf(stderr," %15s : ", "local");
const int8_t mat[4*4] = {2, -1, -1, -1, -1, 2, -1, -1, -1, -1, 2, -1, -1, -1, -1, 2};
std::vector<int8_t> unpacked_reads( n_read_symbols );
typedef io::SequenceDataAccess<DNA_N> read_access_type;
typedef read_access_type::sequence_stream_type read_stream_type;
const read_access_type reads_access( h_read_data );
const read_stream_type packed_reads( reads_access.sequence_stream() );
#pragma omp parallel for
for (int i = 0; i < int( n_read_symbols ); ++i)
unpacked_reads[i] = packed_reads[i];
Timer timer;
timer.start();
#pragma omp parallel for
for (int i = 0; i < int( h_read_data.size() ); ++i)
{
const uint32 read_off = reads_access.sequence_index()[i];
const uint32 read_len = reads_access.sequence_index()[i+1] - read_off;
s_profile* prof = ssw_init( &unpacked_reads[read_off], read_len, mat, 4, 2 );
s_align* align = ssw_align(
prof,
&unpacked_ref[0],
ref_length,
2,
2,
0u,
0u,
0,
15 );
align_destroy( align );
init_destroy( prof );
}
timer.stop();
const float time = timer.seconds();
fprintf(stderr," %5.1f", 1.0e-9f * float(uint64(n_read_symbols)*uint64(ref_length))/time );
fprintf(stderr, " GCUPS\n");
}
#endif
}
fprintf(stderr,"sw-benchmark... done\n");
return 0;
}
#include "sha3.h"
#include "sha3_cu.h"
#include <cuda.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
static const char *_cudaErrorToString(cudaError_t error) {
switch (error) {
case cudaSuccess:
return "cudaSuccess";
case cudaErrorMissingConfiguration:
return "cudaErrorMissingConfiguration";
case cudaErrorMemoryAllocation:
return "cudaErrorMemoryAllocation";
case cudaErrorInitializationError:
return "cudaErrorInitializationError";
case cudaErrorLaunchFailure:
return "cudaErrorLaunchFailure";
case cudaErrorPriorLaunchFailure:
return "cudaErrorPriorLaunchFailure";
case cudaErrorLaunchTimeout:
return "cudaErrorLaunchTimeout";
case cudaErrorLaunchOutOfResources:
return "cudaErrorLaunchOutOfResources";
case cudaErrorInvalidDeviceFunction:
return "cudaErrorInvalidDeviceFunction";
case cudaErrorInvalidConfiguration:
return "cudaErrorInvalidConfiguration";
case cudaErrorInvalidDevice:
return "cudaErrorInvalidDevice";
case cudaErrorInvalidValue:
return "cudaErrorInvalidValue";
case cudaErrorInvalidPitchValue:
return "cudaErrorInvalidPitchValue";
case cudaErrorInvalidSymbol:
return "cudaErrorInvalidSymbol";
case cudaErrorMapBufferObjectFailed:
return "cudaErrorMapBufferObjectFailed";
case cudaErrorUnmapBufferObjectFailed:
return "cudaErrorUnmapBufferObjectFailed";
case cudaErrorInvalidHostPointer:
return "cudaErrorInvalidHostPointer";
case cudaErrorInvalidDevicePointer:
return "cudaErrorInvalidDevicePointer";
case cudaErrorInvalidTexture:
return "cudaErrorInvalidTexture";
case cudaErrorInvalidTextureBinding:
return "cudaErrorInvalidTextureBinding";
case cudaErrorInvalidChannelDescriptor:
return "cudaErrorInvalidChannelDescriptor";
case cudaErrorInvalidMemcpyDirection:
return "cudaErrorInvalidMemcpyDirection";
case cudaErrorAddressOfConstant:
return "cudaErrorAddressOfConstant";
case cudaErrorTextureFetchFailed:
return "cudaErrorTextureFetchFailed";
case cudaErrorTextureNotBound:
return "cudaErrorTextureNotBound";
case cudaErrorSynchronizationError:
return "cudaErrorSynchronizationError";
case cudaErrorInvalidFilterSetting:
return "cudaErrorInvalidFilterSetting";
case cudaErrorInvalidNormSetting:
return "cudaErrorInvalidNormSetting";
case cudaErrorMixedDeviceExecution:
return "cudaErrorMixedDeviceExecution";
case cudaErrorCudartUnloading:
return "cudaErrorCudartUnloading";
case cudaErrorUnknown:
return "cudaErrorUnknown";
case cudaErrorNotYetImplemented:
return "cudaErrorNotYetImplemented";
case cudaErrorMemoryValueTooLarge:
return "cudaErrorMemoryValueTooLarge";
case cudaErrorInvalidResourceHandle:
return "cudaErrorInvalidResourceHandle";
case cudaErrorNotReady:
return "cudaErrorNotReady";
case cudaErrorInsufficientDriver:
return "cudaErrorInsufficientDriver";
case cudaErrorSetOnActiveProcess:
return "cudaErrorSetOnActiveProcess";
case cudaErrorInvalidSurface:
return "cudaErrorInvalidSurface";
case cudaErrorNoDevice:
return "cudaErrorNoDevice";
case cudaErrorECCUncorrectable:
return "cudaErrorECCUncorrectable";
case cudaErrorSharedObjectSymbolNotFound:
return "cudaErrorSharedObjectSymbolNotFound";
case cudaErrorSharedObjectInitFailed:
return "cudaErrorSharedObjectInitFailed";
case cudaErrorUnsupportedLimit:
return "cudaErrorUnsupportedLimit";
case cudaErrorDuplicateVariableName:
return "cudaErrorDuplicateVariableName";
case cudaErrorDuplicateTextureName:
return "cudaErrorDuplicateTextureName";
case cudaErrorDuplicateSurfaceName:
return "cudaErrorDuplicateSurfaceName";
case cudaErrorDevicesUnavailable:
return "cudaErrorDevicesUnavailable";
case cudaErrorInvalidKernelImage:
return "cudaErrorInvalidKernelImage";
case cudaErrorNoKernelImageForDevice:
return "cudaErrorNoKernelImageForDevice";
case cudaErrorIncompatibleDriverContext:
return "cudaErrorIncompatibleDriverContext";
case cudaErrorPeerAccessAlreadyEnabled:
return "cudaErrorPeerAccessAlreadyEnabled";
case cudaErrorPeerAccessNotEnabled:
return "cudaErrorPeerAccessNotEnabled";
case cudaErrorDeviceAlreadyInUse:
return "cudaErrorDeviceAlreadyInUse";
case cudaErrorProfilerDisabled:
return "cudaErrorProfilerDisabled";
case cudaErrorProfilerNotInitialized:
return "cudaErrorProfilerNotInitialized";
case cudaErrorProfilerAlreadyStarted:
return "cudaErrorProfilerAlreadyStarted";
case cudaErrorProfilerAlreadyStopped:
return "cudaErrorProfilerAlreadyStopped";
case cudaErrorAssert:
return "cudaErrorAssert";
case cudaErrorTooManyPeers:
return "cudaErrorTooManyPeers";
case cudaErrorHostMemoryAlreadyRegistered:
return "cudaErrorHostMemoryAlreadyRegistered";
case cudaErrorHostMemoryNotRegistered:
return "cudaErrorHostMemoryNotRegistered";
case cudaErrorOperatingSystem:
return "cudaErrorOperatingSystem";
case cudaErrorPeerAccessUnsupported:
return "cudaErrorPeerAccessUnsupported";
case cudaErrorLaunchMaxDepthExceeded:
return "cudaErrorLaunchMaxDepthExceeded";
case cudaErrorLaunchFileScopedTex:
return "cudaErrorLaunchFileScopedTex";
case cudaErrorLaunchFileScopedSurf:
return "cudaErrorLaunchFileScopedSurf";
case cudaErrorSyncDepthExceeded:
return "cudaErrorSyncDepthExceeded";
case cudaErrorLaunchPendingCountExceeded:
return "cudaErrorLaunchPendingCountExceeded";
case cudaErrorNotPermitted:
return "cudaErrorNotPermitted";
case cudaErrorNotSupported:
return "cudaErrorNotSupported";
case cudaErrorHardwareStackError:
return "cudaErrorHardwareStackError";
case cudaErrorIllegalInstruction:
return "cudaErrorIllegalInstruction";
case cudaErrorMisalignedAddress:
return "cudaErrorMisalignedAddress";
case cudaErrorInvalidAddressSpace:
return "cudaErrorInvalidAddressSpace";
case cudaErrorInvalidPc:
return "cudaErrorInvalidPc";
case cudaErrorIllegalAddress:
return "cudaErrorIllegalAddress";
case cudaErrorInvalidPtx:
return "cudaErrorInvalidPtx";
case cudaErrorInvalidGraphicsContext:
return "cudaErrorInvalidGraphicsContext";
case cudaErrorStartupFailure:
return "cudaErrorStartupFailure";
case cudaErrorApiFailureBase:
return "cudaErrorApiFailureBase";
case cudaErrorNvlinkUncorrectable:
return "cudaErrorNvlinkUncorrectable";
case cudaErrorJitCompilerNotFound:
return "cudaErrorJitCompilerNotFound";
case cudaErrorCooperativeLaunchTooLarge:
return "cudaErrorCooperativeLaunchTooLarge";
}
return "<unknown>";
}
__device__ int memcmp_cu(const void *p1, const void *p2, size_t len) {
for (size_t i = 0; i < len; i++) {
uint8_t b1 = ((uint8_t *)p1)[i];
uint8_t b2 = ((uint8_t *)p2)[i];
if (b1 < b2) {
return -1;
}
if (b1 > b2) {
return 1;
}
}
return 0;
}
__device__ int strlen_cu(char *s) {
int i;
for (i = 0; s[i] != '\0';) {
i++;
}
return i;
}
__device__ char *reverse(char *str) {
char tmp, *src, *dst;
size_t len;
if (str != NULL) {
len = strlen_cu(str);
if (len > 1) {
src = str;
dst = src + len - 1;
while (src < dst) {
tmp = *src;
*src++ = *dst;
*dst-- = tmp;
}
}
}
return str;
}
__device__ int itoa(int64_t n, char s[]) {
int i;
int64_t sign;
if ((sign = n) < 0) /* record sign */
n = -n; /* make n positive */
i = 0;
do { /* generate digits in reverse order */
s[i++] = n % 10 + '0'; /* get next digit */
} while ((n /= 10) > 0); /* delete it */
if (sign < 0)
s[i++] = '-';
s[i] = '\0';
reverse(s);
return i;
}
__device__ void debug_print_buf(const void *buf, size_t len) {
for (int i = 0; i < len; i++) {
printf("%c", ((char *)buf)[i]);
}
printf("\n");
}
__device__ void debug_print_hash(const void *hash) {
for (int i = 0; i < 32; i++) {
printf("%02x", ((char *)hash)[i] & 0xFF);
}
printf("\n");
}
// called by each device thread
__global__ void try_solve(int64_t start_nonce, const sha3_ctx_t *prev_sha3,
const void *last, size_t last_len, const void *target,
int64_t *good_nonce) {
uint8_t hash[32];
uint8_t nonce_s[20];
int index = blockDim.x * blockIdx.x + threadIdx.x;
int64_t nonce = start_nonce + (int64_t)index;
size_t n = (size_t)itoa(nonce, (char *)nonce_s);
sha3_ctx_t sha3;
memcpy(&sha3, prev_sha3, sizeof(sha3_ctx_t));
sha3_update_cu(&sha3, nonce_s, n);
sha3_update_cu(&sha3, last, last_len);
sha3_final_cu(hash, &sha3);
if (memcmp_cu(hash, target, 32) <= 0) {
// found a solution. not thread-safe but a race is very unlikely
*good_nonce = nonce;
}
}
// device-local state
struct miner_state {
int num_blocks, block_size, max_threads;
sha3_ctx_t *prev_sha3_cu;
void *last_cu, *target_cu;
size_t last_len;
int64_t *nonce_cu;
};
static struct miner_state *states = 0;
extern "C" {
// called on startup
int cuda_init() {
int device_count = -1;
cudaError_t error = cudaGetDeviceCount(&device_count);
if (error != cudaSuccess) {
printf("cudaGetDeviceCount: %s\n", _cudaErrorToString(error));
return -1;
}
if (device_count <= 0) {
return -1;
}
states = new struct miner_state[device_count];
for (int i = 0; i < device_count; i++) {
cudaDeviceProp props;
error = cudaGetDeviceProperties(&props, i);
if (error != cudaSuccess) {
printf("cudaGetDeviceProperties: %s\n", _cudaErrorToString(error));
return -1;
}
states[i].max_threads =
props.maxThreadsPerMultiProcessor * props.multiProcessorCount;
states[i].block_size = props.warpSize;
states[i].num_blocks = states[i].max_threads / states[i].block_size;
error = cudaSetDevice(i);
if (error != cudaSuccess) {
printf("cudaSetDevice: %s\n", _cudaErrorToString(error));
return -1;
}
error = cudaDeviceReset();
if (error != cudaSuccess) {
printf("cudaDeviceReset: %s\n", _cudaErrorToString(error));
return -1;
}
#if 0
// I tried this but it noticeably impacted performance
error = cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
if (error != cudaSuccess) {
printf("cudaSetDeviceFlags: %s\n", _cudaErrorToString(error));
return -1;
}
#endif
// allocate memory used on device written to by the host
cudaMalloc(&states[i].prev_sha3_cu, sizeof(sha3_ctx_t));
cudaMalloc(&states[i].last_cu, 512);
cudaMalloc(&states[i].target_cu, 32);
cudaMalloc(&states[i].nonce_cu, sizeof(int64_t));
}
return device_count;
}
// called after updating the block header
int miner_update(int miner_num, const void *first, size_t first_len,
const void *last, size_t last_len, const void *target) {
cudaSetDevice(miner_num);
// hash the first (largest) part of the header once and copy the state
sha3_ctx_t sha3;
sha3_init(&sha3, 32);
sha3_update(&sha3, first, first_len);
cudaMemcpy(states[miner_num].prev_sha3_cu, &sha3, sizeof(sha3_ctx_t),
cudaMemcpyHostToDevice);
// copy the end part of the header
states[miner_num].last_len = last_len;
cudaMemcpy(states[miner_num].last_cu, last, last_len, cudaMemcpyHostToDevice);
// copy the target
cudaMemcpy(states[miner_num].target_cu, target, 32, cudaMemcpyHostToDevice);
// set the nonce to "not found"
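  // (all 8 bytes become 0x7F, then the low 7 bytes become 0xFF; on a
  //  little-endian device that is 0x7FFFFFFFFFFFFFFF)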
cudaMemset(states[miner_num].nonce_cu, 0x7F, sizeof(int64_t));
cudaMemset(states[miner_num].nonce_cu, 0xFF, sizeof(int64_t) - 1);
return states[miner_num].num_blocks * states[miner_num].block_size;
}
// called in a loop until solved
// returns a solving nonce if found; otherwise 0x7FFFFFFFFFFFFFFF
int64_t miner_mine(int miner_num, int64_t start_nonce) {
cudaSetDevice(miner_num);
int64_t nonce;
int num_blocks = states[miner_num].num_blocks;
int block_size = states[miner_num].block_size;
try_solve<<<num_blocks, block_size>>>(
start_nonce, states[miner_num].prev_sha3_cu, states[miner_num].last_cu,
states[miner_num].last_len, states[miner_num].target_cu,
states[miner_num].nonce_cu);
cudaDeviceSynchronize();
cudaMemcpy(&nonce, states[miner_num].nonce_cu, sizeof(int64_t),
cudaMemcpyDeviceToHost);
return nonce;
}
}
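// Illustrative host driving loop (not part of this file; device index 0 and
// the surrounding buffers are assumptions):
//
//   int devices = cuda_init();
//   int per_launch = miner_update(0, first, first_len, last, last_len, target);
//   int64_t start = 0, nonce;
//   while ((nonce = miner_mine(0, start)) == 0x7FFFFFFFFFFFFFFFLL)
//     start += per_launch;
//   // nonce now hashes to a value <= target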
#include "core/providers/cuda/cu_inc/common.cuh"
#include "tile_impl.h"
namespace onnxruntime {
namespace cuda {
#ifdef USE_ROCM
constexpr int num_elements_per_thread = 2;
constexpr int num_threads_per_block = 512;
#else
constexpr int num_elements_per_thread = GridDim::maxElementsPerThread;
constexpr int num_threads_per_block = GridDim::maxThreadsPerBlock;
#endif
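// ONNX Tile repeats the input along every axis: an input of shape [2, 3]
// tiled with repeats [2, 1] yields an output of shape [4, 3], where
// output[i0, i1] = input[i0 % 2, i1 % 3]. The kernels below implement this
// with fast_divmod index arithmetic plus specialized memcpy-style paths.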
template <typename T>
__global__ void _UnRolledTileKernel(const size_t shape_rank, const TArray<fast_divmod> fdm_input_shape,
const TArray<int64_t> input_strides, const T* input_data,
const TArray<fast_divmod> fdm_output_strides, T* output_data, const CUDA_LONG N) {
CUDA_LONG start = num_elements_per_thread * num_threads_per_block * blockIdx.x + threadIdx.x;
T value[num_elements_per_thread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < num_elements_per_thread; ++i) {
if (id < N) {
CUDA_LONG input_index = 0;
CUDA_LONG offset = id;
#pragma unroll
for (auto dim = 0; dim < fdm_output_strides.Capacity(); ++dim) {
if (dim >= shape_rank) {
break;
}
int out_coord, r;
fdm_output_strides[dim].divmod(offset, out_coord, r);
int in_coord = fdm_input_shape[dim].mod(out_coord);
input_index += input_strides[dim] * in_coord;
offset = r;
}
value[i] = input_data[input_index];
id += num_threads_per_block;
}
}
id = start;
#pragma unroll
for (int i = 0; i < num_elements_per_thread; ++i) {
if (id < N) {
output_data[id] = value[i];
id += num_threads_per_block;
}
}
}
template <typename T>
void TileImpl(cudaStream_t stream, const size_t shape_rank, const TArray<fast_divmod>& fdm_input_shape,
const TArray<int64_t>& input_stride, const T* input_data, const TArray<fast_divmod>& fdm_output_strides,
T* output_data, const size_t N) {
int blocksPerGrid = static_cast<int>(CeilDiv(N, num_threads_per_block * num_elements_per_thread));
_UnRolledTileKernel<T><<<blocksPerGrid, num_threads_per_block, 0, stream>>>(shape_rank, fdm_input_shape, input_stride,
input_data, fdm_output_strides,
output_data, static_cast<CUDA_LONG>(N));
}
template <typename T>
__global__ void _TileMemcpyKernelFromOutput(const T* input_data, T* output_data,
const fast_divmod divmod_num_input_elements, const CUDA_LONG N) {
CUDA_LONG start = num_elements_per_thread * num_threads_per_block * blockIdx.x + threadIdx.x;
T value[num_elements_per_thread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < num_elements_per_thread; ++i) {
if (id < N) {
value[i] = input_data[divmod_num_input_elements.mod(id)];
id += num_threads_per_block;
}
}
id = start;
#pragma unroll
for (int i = 0; i < num_elements_per_thread; ++i) {
if (id < N) {
output_data[id] = value[i];
id += num_threads_per_block;
}
}
}
template <typename T>
__global__ void _TileMemcpyKernelFromInput(const T* input_data, T* output_data, const CUDA_LONG N,
const size_t repeats) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T input_val = input_data[id];
for (size_t i = 0; i < repeats; ++i) {
output_data[id] = input_val;
id += N;
}
}
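// Chooses a vector width for the memcpy-style kernels: 4 or 2 elements per
// access when the per-batch element count is divisible by it and both the
// input and output pointers are aligned to the corresponding aligned_vector
// type; otherwise scalar accesses are used. N and blocksPerGrid are returned
// for the chosen width.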
template <typename T>
size_t GetVectorizedSize(size_t num_input_elements, size_t num_elements_per_batch, uint64_t address_input,
uint64_t address_output, CUDA_LONG& N, int& blocksPerGrid) {
constexpr int vec4_alignment = std::alignment_of<aligned_vector<T, 4>>::value;
constexpr int vec2_alignment = std::alignment_of<aligned_vector<T, 2>>::value;
N = static_cast<CUDA_LONG>(num_input_elements);
size_t vectorized_size = 1;
if (num_elements_per_batch % 4 == 0 && address_input % vec4_alignment == 0 && address_output % vec4_alignment == 0) {
N /= 4;
vectorized_size = 4;
} else if (num_elements_per_batch % 2 == 0 && address_input % vec2_alignment == 0 &&
address_output % vec2_alignment == 0) {
N /= 2;
vectorized_size = 2;
}
blocksPerGrid = CeilDiv(N, num_threads_per_block);
return vectorized_size;
}
template <typename T>
void TileMemcpyImpl(cudaStream_t stream, const T* input_data, T* output_data, const size_t num_input_elements,
const size_t repeats) {
  // If the number of blocks derived from the input size is too small to fill all streaming
  // multiprocessors, launching one thread per input element gives no perf gain, so fall back
  // to the output-based kernel.
CUDA_LONG N;
int blocksPerGrid;
size_t vectorized_size =
GetVectorizedSize<T>(num_input_elements, num_input_elements, reinterpret_cast<uint64_t>(input_data),
reinterpret_cast<uint64_t>(output_data), N, blocksPerGrid);
if (blocksPerGrid < 128) {
N = static_cast<CUDA_LONG>(num_input_elements * repeats);
blocksPerGrid = CeilDiv(N, num_threads_per_block * num_elements_per_thread);
_TileMemcpyKernelFromOutput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(
input_data, output_data, fast_divmod(static_cast<int>(num_input_elements)), N);
return;
}
if (vectorized_size == 4) {
using Vec4T = aligned_vector<T, 4>;
_TileMemcpyKernelFromInput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(
reinterpret_cast<const Vec4T*>(input_data), reinterpret_cast<Vec4T*>(output_data), N, repeats);
return;
} else if (vectorized_size == 2) {
using Vec2T = aligned_vector<T, 2>;
_TileMemcpyKernelFromInput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(
reinterpret_cast<const Vec2T*>(input_data), reinterpret_cast<Vec2T*>(output_data), N, repeats);
return;
}
_TileMemcpyKernelFromInput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(input_data, output_data, N, repeats);
}
template <typename T>
__global__ void _TileBatchedMemcpyKernelFromOutput(const T* input_data, T* output_data,
const fast_divmod divmod_size_output_row,
const size_t size_input_row, const fast_divmod divmod_batch,
const fast_divmod divmod_size_input_row, const CUDA_LONG N) {
CUDA_LONG start = num_elements_per_thread * num_threads_per_block * blockIdx.x + threadIdx.x;
T value[num_elements_per_thread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < num_elements_per_thread; ++i) {
if (id < N) {
int batch_idx, element_idx;
divmod_size_output_row.divmod(id, batch_idx, element_idx);
value[i] = input_data[divmod_batch.mod(batch_idx) * size_input_row + divmod_size_input_row.mod(element_idx)];
id += num_threads_per_block;
}
}
id = start;
#pragma unroll
for (int i = 0; i < num_elements_per_thread; ++i) {
if (id < N) {
output_data[id] = value[i];
id += num_threads_per_block;
}
}
}
// Input size is [batch, data], output size is [batch * batch_repeats, data * repeats_per_batch].
// Here size_input_row = data, size_output_row = data * repeats_per_batch,
// size_output_batch = batch * data * repeats_per_batch
template <typename T>
__global__ void _TileBatchedMemcpyKernelFromInput(const T* input_data, T* output_data,
const fast_divmod divmod_size_input_row,
const CUDA_LONG size_input_row, const CUDA_LONG size_output_row,
const CUDA_LONG size_output_batch, const size_t batch_repeats,
const size_t repeats_per_batch, const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
T input_val = input_data[id];
CUDA_LONG q, r;
divmod_size_input_row.divmod(id, q, r);
CUDA_LONG batch_offset = q * size_output_row + r;
for (size_t i = 0; i < batch_repeats; ++i) {
CUDA_LONG offset = batch_offset;
for (size_t j = 0; j < repeats_per_batch; ++j) {
output_data[offset] = input_val;
offset += size_input_row;
}
batch_offset += size_output_batch;
}
}
// Input size is [batch, data], output size is [batch * batch_repeats, data * repeats_per_batch].
// Here size_input_row = data, num_input_elements = batch * data
template <typename T>
void TileBatchedMemcpyImpl(cudaStream_t stream, const T* input_data, T* output_data, const size_t size_input_row,
const size_t num_input_elements, const size_t batch_repeats,
const size_t repeats_per_batch) {
  // If the number of blocks derived from the input size is too small to fill all streaming
  // multiprocessors, launching one thread per input element gives no perf gain, so fall back
  // to the output-based kernel.
CUDA_LONG N;
int blocksPerGrid;
size_t vectorized_size =
GetVectorizedSize<T>(num_input_elements, size_input_row, reinterpret_cast<uint64_t>(input_data),
reinterpret_cast<uint64_t>(output_data), N, blocksPerGrid);
if (blocksPerGrid < 128) {
N = static_cast<CUDA_LONG>(num_input_elements * batch_repeats * repeats_per_batch);
blocksPerGrid = CeilDiv(N, num_threads_per_block * num_elements_per_thread);
_TileBatchedMemcpyKernelFromOutput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(
input_data, output_data, fast_divmod(static_cast<int>(size_input_row * repeats_per_batch)), size_input_row,
fast_divmod(static_cast<int>(num_input_elements / size_input_row)),
fast_divmod(static_cast<int>(size_input_row)), N);
return;
}
CUDA_LONG size_input_row_vec = static_cast<CUDA_LONG>(size_input_row);
if (vectorized_size == 4) {
using Vec4T = aligned_vector<T, 4>;
size_input_row_vec /= 4;
_TileBatchedMemcpyKernelFromInput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(
reinterpret_cast<const Vec4T*>(input_data), reinterpret_cast<Vec4T*>(output_data),
fast_divmod(size_input_row_vec), size_input_row_vec,
size_input_row_vec * static_cast<CUDA_LONG>(repeats_per_batch), N * static_cast<CUDA_LONG>(repeats_per_batch),
batch_repeats, repeats_per_batch, N);
return;
} else if (vectorized_size == 2) {
using Vec2T = aligned_vector<T, 2>;
size_input_row_vec /= 2;
_TileBatchedMemcpyKernelFromInput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(
reinterpret_cast<const Vec2T*>(input_data), reinterpret_cast<Vec2T*>(output_data),
fast_divmod(size_input_row_vec), size_input_row_vec,
size_input_row_vec * static_cast<CUDA_LONG>(repeats_per_batch), N * static_cast<CUDA_LONG>(repeats_per_batch),
batch_repeats, repeats_per_batch, N);
return;
}
_TileBatchedMemcpyKernelFromInput<<<blocksPerGrid, num_threads_per_block, 0, stream>>>(
input_data, output_data, fast_divmod(size_input_row_vec), size_input_row_vec,
size_input_row_vec * static_cast<CUDA_LONG>(repeats_per_batch), N * static_cast<CUDA_LONG>(repeats_per_batch),
batch_repeats, repeats_per_batch, N);
}
#define SPECIALIZED_IMPL(T) \
template void TileImpl<T>(cudaStream_t stream, const size_t shape_rank, const TArray<fast_divmod>& fdm_input_shape, \
const TArray<int64_t>& input_stride, const T* input_data, \
const TArray<fast_divmod>& fdm_output_strides, T* output_data, const size_t N); \
template void TileMemcpyImpl<T>(cudaStream_t stream, const T* input_data, T* output_data, \
const size_t num_input_elements, const size_t repeats); \
template void TileBatchedMemcpyImpl<T>(cudaStream_t stream, const T* input_data, T* output_data, \
const size_t size_input_row, const size_t num_input_elements, \
const size_t batch_repeats, const size_t repeats_per_batch);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)
} // namespace cuda
} // namespace onnxruntime
#define STR1(X) #X
#define STR(X) STR1(X)
#define STRINGIFY(X,Y) X ## Y
#define CON(X,Y) STRINGIFY(X,Y)
#define KDir kernels
#include "includes/ourmacros.h"
extern __shared__ type tile[];
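// 32x32 transpose tile in shared memory; rows are stored with a stride of 33
// elements so that column accesses avoid shared-memory bank conflicts.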
__device__ void fvinomgeneral_main(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int ilimit, const int olimit, type alpha, type beta)
{
const int TPR = tb_size/32;
int in_s_colId = threadIdx.x % 32;
int in_s_rowId = threadIdx.x / 32;
#ifdef printd
if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
{
printf("ilimit = %d, olimit = %d, TPR = %d\n", ilimit, olimit, TPR);
printf("\n%d %d\n\n\n", aexpr[10], bexpr[10]);
printf("\nAtmp = %p, Btmp = %p\n\n\n", Atmp, Btmp);
}
#endif
for(int rowBatchId=0; rowBatchId < (olimit+31)/32; rowBatchId++)
{
int in_g_rowId = rowBatchId*32 + threadIdx.x / 32;
int out_g_colId = rowBatchId * 32 + threadIdx.x % 32;
for(int colBatchId=0; colBatchId < (ilimit+31)/32; colBatchId++)
{
int in_g_colId = colBatchId * 32 + threadIdx.x % 32;
if(in_g_colId < ilimit)
{
for(int cur_row = in_g_rowId, local_r=0; local_r < 32 && cur_row < olimit && cur_row < (rowBatchId+1)* 32; local_r+=TPR, cur_row+=TPR)
//for(int cur_row = in_g_rowId, local_r=0; local_r < 32 && cur_row < olimit; local_r+=TPR, cur_row+=TPR)
{
/* if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
{
printf("%d %d\n",cur_row, aexpr[cur_row] + in_g_colId);
}*/
tile[ (in_s_rowId + local_r) * 33 + in_s_colId] = Atmp[aexpr[cur_row] + in_g_colId];
}
}
__syncthreads();
int out_s_colId = in_s_rowId;
int out_g_rowId = colBatchId*32 + threadIdx.x / 32;
int out_s_rowId = in_s_colId;
if(out_g_colId < olimit)
{
for(int cur_row = out_g_rowId, local_c=0; local_c < 32 && cur_row <ilimit && cur_row < (colBatchId+1)*32; local_c+=TPR, cur_row += TPR)
//for(int cur_row = out_g_rowId, local_c=0; local_c < 32 && cur_row <ilimit; local_c+=TPR, cur_row += TPR)
{
// if(blockIdx.x == 1 && blockIdx.y == 1 && blockIdx.z == 1 && threadIdx.x == 1)
{
// printf("%p %p %d %d\n",Atmp, Btmp, cur_row, bexpr[cur_row] + out_g_colId);
}
Btmp[bexpr[cur_row] + out_g_colId ] = alpha* tile[out_s_rowId * 33 + out_s_colId+ local_c] + beta* Btmp[bexpr[cur_row] + out_g_colId ];
}
}
__syncthreads();
// break;
}
}
}
#define FNAME fvinomatchgeneral.h
#include "includes/macro.h"
#undef FNAME
void fvinomatchgeneral_kernel_CallerWrapper(int ndim, type * A, type * B, const int ilimit, const int olimit, const int blockAI, const int blockBI, const int numblocks, int numthreads, int shm
, const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s
, const int remainder1, const int remainder2, const int * idx_ss,
const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int ilimitr, const int olimitr,
const int inputrem, const int outputrem,
type alpha, type beta
)
{
/*int second, third;
if(ndim > 2)
{
second = idx_ss[1]; third = numblocks/(idx_ss[0]*idx_ss[1]);
}
else if(ndim > 1)
{
second = idx_ss[1]; third = 1;
}
else
{
second = third = 1;
}
dim3 thread_blocks(idx_ss[0], second, third);*/
dim3 thread_blocks(numblocks, 1, 1);
switch(ndim)
{
EXPANDDIMS(fvinomgeneral_kernel_, thread_blocks, numthreads, shm, (A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, remainder1,remainder2, aexpr, bexpr, ilimitr, olimitr, inputrem, outputrem, alpha, beta))
default: {}
// fvinomgeneralolap_kernel<<<thread_blocks, numthreads, shm>>>(ndim, A, B, ilimit, olimit, i_blkindex, b_blkindex, numthreads, lda_s,ldb_s, idx_s, remainder1,remainder2, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem);
}
}
void swap(int array[], int ind1, int ind2)
{
if(ind1 == ind2) return;
int tmp = array[ind1];
array[ind1] = array[ind2];
array[ind2] = tmp;
}
extern "C"
void fvinomatchgeneral_transpose_kernel(int ndim, type *A, type *B, const int *lda, const int *ldb, const int* params, const int * perm, const int* rperm, type alpha, type beta)
{
// int numBlocks = computeNumBlocksCode ;
#ifdef printd
printf("\nA Dims: %d \t %d \t %d\t %d\t %d\n", lda[0], lda[1], lda[2], lda[3], lda[4]);
printf("\nAll diff Params: %d \t %d \t %d\t %d\t %d\t %d\t %d\t %d\t %d \t%d\t %d\t %d\n", params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8], params[9], params[10], params[11]);
printf("\nB Dims: %d \t %d \t %d\t %d\t %d\n", ldb[0], ldb[1], ldb[2], ldb[3], ldb[4]);
printf("\nR perm: %d \t %d \t %d\t %d\t %d\n", rperm[0], rperm[1], rperm[2], rperm[3], rperm[4]);
#endif
int alimit = params[3];
int blimit = params[4];
int blockA=params[0];
int blockB = params[11];
int ilimit = params[7];
int olimit = params[8];
#ifdef SLICE
printf("\t%d\t%d\t", ilimit, olimit);
#endif
int i = 0;
//printf("blockA = %d, blockB = %d\n",blockA, blockB);
//for(int y = 0; y < j; y++)
//printf("bo[%d] = %d ",y, bo[y]);
//exit(0);
int numBlocks = params[6];//((size[1] + 8 -1)/8) * size[2] * ((size[3] + 8 -1)/8) * size[4] ;
int *d_lda_s, *d_ldb_s, *d_idx_s;
const int remainder1 = lda[params[3]] % blockA;
const int remainder2 = lda[perm[params[4]]] % blockB;
const int ilimitr = ilimit * remainder1 / blockA;
const int olimitr = olimit * remainder2 / blockB;
#ifdef MODEL
printf("\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t", ilimit, olimit, remainder1, remainder2,ilimitr, olimitr, lda[params[3]] / blockA, lda[perm[params[4]]] / blockB);
#endif
#ifdef printd
printf("\nrem1 = %d, rem2 = %d\n", remainder1, remainder2);
printf("\nilimit = %d, olimit = %d", ilimit, olimit);
#endif
int *input_base, *output_base;
int *aexpr, *bexpr;
int lda_s[20], ldb_s[20], idx_s[20], temp[20];
lda_s[0] = 1;
ldb_s[0] = 1;
idx_s[0] = 1;
for(i = 1; i < alimit; i++)
{
idx_s[i] = 1;
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
}
if(blockA == 1)
{
idx_s[alimit] = 1;//(lda[i] + blockA - 1) / blockA;
}
else
{
idx_s[alimit] = (lda[alimit] + blockA - 1) / blockA;
}
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
for(i = alimit+1; i < ndim; i++)
{
lda_s[i] = lda_s[i-1] * lda[i-1];
ldb_s[i] = ldb_s[i-1] * ldb[i-1];
if(rperm[i] < blimit)
{
idx_s[i] = 1;// (lda[i] + blockA - 1) / blockA;
}
else if(rperm[i] == blimit)
{
if(blockB == 1)
{
idx_s[i] = 1;
}
else
{
idx_s[i] = (lda[i] + blockB - 1) / blockB;
}
}
else
{
idx_s[i] = lda[i];
}
}
for(i = 0; i < ndim; i++)
{
temp[i] = ldb_s[rperm[i]];
// printf("temp[%d] = %d\n", i, temp[i]);
#ifdef printd
printf("idx[%d] = %d\n", i, idx_s[i]);
#endif
}
int irem, orem;
if(remainder1 == 0) irem = lda[alimit];
else irem = (lda[alimit] - remainder1)/blockA;
if(remainder2 == 0) orem = ldb[blimit];
else orem = (ldb[blimit] - remainder2)/blockB;
aexpr = (int*)malloc(olimit * sizeof(int));
bexpr = (int*)malloc(ilimit * sizeof(int));
SAFECUDAMALLOC(&input_base, olimit*sizeof(int));
SAFECUDAMALLOC(&output_base, ilimit*sizeof(int));
const int TPR = params[2]/32;
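	// Precompute on the host the base offset of every tile row: aexpr[r] is the
	// offset of row r in A and bexpr[r] the offset of row r in B; the kernel
	// only adds the in-tile column offset at runtime.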
for(int i = 0; i < params[2]; i++)
{
for(int rowBatchId=0; rowBatchId < (olimit+31)/32; rowBatchId++)
{
for(int colBatchId=0; colBatchId < (ilimit + 31)/32; colBatchId++)
{
int in_g_colId = colBatchId * 32 + i % 32;
int in_g_rowId = rowBatchId*32 + i / 32;
int cur_row = in_g_rowId;
if(in_g_colId < ilimit)
{
for(int local_r=0; local_r < 32 && cur_row < olimit &&cur_row < in_g_rowId + 32; local_r++, cur_row+=TPR)
{
int tmp = cur_row;
int ii[20];int aoff=0,j;
for(j = 0; j < blimit; j++)
{
ii[j] = tmp%ldb[j];
tmp /= ldb[j];//tmp/bo[j];
aoff += ii[j]* lda_s[perm[j]];
//printf(" j = %d\t aoff = %d\n", j, aoff);
}
aoff += (tmp)* lda_s[perm[j]];
aexpr[cur_row] = aoff;// i2*lda0 + i1 * lda1;
}
}
//exit(0);
int out_g_colId = rowBatchId * 32 + i % 32;
int out_g_rowId = colBatchId*32 + i / 32;
if(out_g_colId < olimit)
{
for(int cur_row = out_g_rowId, local_c=0; local_c < 32 && cur_row < ilimit && cur_row < out_g_rowId + 32; local_c++, cur_row +=TPR)
{
int tmp = cur_row;
int ii[20];int boff=0,j;
for(j = 0; j < alimit; j++)
{
ii[j] = tmp%lda[j];
tmp = tmp/lda[j];
boff += ii[j]* ldb_s[rperm[j]];
//printf(" j = %d\t boff = %d\n", j, boff);
}
boff += tmp* ldb_s[rperm[j]];
bexpr[cur_row] = boff;
// printf("currow = %d, b = %d\n", cur_row, boff);
}
}
}
}
}
#ifdef printd
printf("\nA..\n");
for(int i = 0; i < olimit; i++)
{
printf("%d ", aexpr[i]);
}
printf("\n");
printf("\nB..\n");
for(int i = 0; i < ilimit; i++)
{
printf("%d ", bexpr[i]);
}
printf("\n");
#endif
lda_s[params[3]] *= params[0];
temp[params[3]] *= params[0];
lda_s[perm[params[4]]] *= params[11];
temp[perm[params[4]]] *= params[11];
//Lets remove unwanted dimensions for thread block indexing
int c = 0, d = 0;
c = alimit + 1;//c = No. of dimensions to be removed from input for thread blocking
if(blockA > 1) c--;
int ablockI, bblockI;
ablockI = alimit-c;
bblockI = perm[blimit]-c;
int tempbblockI = bblockI;
for(int i = c; i < ndim; i++)
{
if(((rperm[i] < blimit) || ((rperm[i] == blimit) && (blockB ==1))))
{
idx_s[i] = 1;// idx_s[j];
/*for(int j = i+1; j < ndim-d; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
}*/
d++;
// printf("\n i = %d\n", i);
if((i < bblockI + c) || (i == bblockI + c) && (blockB == 1))
{
tempbblockI--;
}
}
}
bblockI = tempbblockI;
#ifdef printd
printf("\nd = %d, bblockI_changed = %d\n", d, bblockI);
#endif
int cnt = 0;
for(int i = c; i < ndim; i++)
{
if(idx_s[i] == 1)
{
for(int j = i+1; j < ndim; j ++)
{
idx_s[j-1] = idx_s[j];
lda_s[j-1] = lda_s[j];
temp[j-1] = temp[j];
}
cnt++;
i--;
}
if(cnt > ndim) break;
}
const int newndim = ndim - (c + d);
#ifdef printd
for(i = c; i < ndim-d; i++)
{
printf("idx[%d] = %d\n", i, idx_s[i]);
}
printf("ndim = %d, c = %d, d = %d, newndim = %d, ablockI = %d, bblockI = %d\n", ndim, c, d, newndim, ablockI, bblockI);
#endif
//Find the largest dimension and make it the first as only Dimx can have > 65k size
/*int max = 0;
for(int i = 1; i < newndim; i++)
{
if(idx_s[c+i] > idx_s[c+max]) max = i;
}
//printf("\nmax: %d ", max);
swap(idx_s, c, max+c);
swap(lda_s, c, max+c);
swap(temp, c, max+c);
if(max == ablockI) ablockI = 0;
else if(ablockI == 0) ablockI = max;
if(max == bblockI) bblockI = 0;
else if(bblockI ==0) bblockI = max;
*/
if(blockB == 1) bblockI = -1;
	if(ablockI > 0)//move it to first (shouldn't happen)
{
swap(idx_s, ablockI+c, c);
swap(lda_s, ablockI+c, c);
swap(temp, ablockI+c, c);
}
if(bblockI >= 0)//move it to second
{
if(bblockI != 0 || ablockI < 0)
{
swap(idx_s, bblockI+c, c+ 1);
swap(lda_s, bblockI+c, c+ 1);
swap(temp, bblockI+c, c+1);
}
else
{
swap(idx_s, ablockI+c, c+ 1);
swap(lda_s, ablockI+c, c+ 1);
swap(temp, ablockI+c, c+ 1);
}
}
if(bblockI >= 0) {
// if(ablockI < 0) bblockI = 0;
bblockI = 1;
}
if(ablockI >=0) ablockI = 0;
#ifdef printd
printf("\nIDx: ");
for(int i = 0; i < newndim; i++)
{
printf("%d ",idx_s[i+c]);
}
printf("ndim = %d, c = %d, d = %d, newndim = %d, ablockI = %d, bblockI = %d\n", ndim, c, d, newndim, ablockI, bblockI);
#endif
SAFECUDAMALLOC(&d_lda_s,newndim*sizeof(int));
SAFECUDAMALLOC(&d_ldb_s,newndim*sizeof(int));
SAFECUDAMALLOC(&d_idx_s,newndim*sizeof(int));
SAFECUDAMEMCPY(d_idx_s, idx_s+c,newndim*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(d_lda_s, lda_s+c,newndim*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(d_ldb_s, temp+c,newndim*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(input_base, aexpr, olimit*sizeof(int), cudaMemcpyHostToDevice);
SAFECUDAMEMCPY(output_base, bexpr, ilimit*sizeof(int), cudaMemcpyHostToDevice);
#ifdef MODEL
printf("\tilimit=%d\tolimit=%d\t", ilimit, olimit);
printf("\t%d\t%d\t%d\t%d\t", ilimit/32, ilimit%32, olimit/32,olimit%32 );
double f1, f2, f3, f4, f;
printf("\tf1=%lf\t", f1 = ((ilimit/32) * (olimit/32) + (double)(ilimit/32) * (olimit%32) /32+ (double)(ilimit%32) * (olimit/32) /32 + (double)(ilimit%32) * (olimit%32) /(32*32) )/ (int)(((ilimit+31)/32) * ((olimit+31)/32)));
printf("\tf2=%lf\t", f2 = ((ilimitr/32) * (olimit/32) + (double)(ilimitr/32) * (olimit%32) /32+ (double)(ilimitr%32) * (olimit/32) /32 + (double)(ilimitr%32) * (olimit%32) /(32*32) )/ max(1,(int)(((ilimitr+31)/32) * ((olimit+31)/32))));
printf("\tf3=%lf\t", f3 = ((ilimit/32) * (olimitr/32) + (double)(ilimit/32) * (olimitr%32) /32+ (double)(ilimit%32) * (olimitr/32) /32 + (double)(ilimit%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((ilimit+31)/32) * ((olimitr+31)/32))));
printf("\tf4=%lf\t", f4 = ((ilimitr/32) * (olimitr/32) + (double)(ilimitr/32) * (olimitr%32) /32+ (double)(ilimitr%32) * (olimitr/32) /32 + (double)(ilimitr%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((ilimitr+31)/32) * ((olimitr+31)/32))));
printf("\t%d\t%d\t%d\t%d\t", lda[alimit], ldb[blimit],blockA,blockB);
int asize = lda[alimit];
int bsize = ldb[blimit];
printf("\t%d\t%d\t%d\t%d\t", asize/blockA, asize%blockA, bsize/blockB,bsize%blockB );
//int amax = min(blockA, 32);
//int bmax = min(blockB, 32);
int amax = blockA;
int bmax = blockB;
printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)((asize/amax) * (bsize%bmax > 0) *f3)+ (double)((asize%amax > 0) * (bsize/bmax)*f2) + (double)((asize%amax>0) * (bsize%bmax > 0) *f4) )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax)));
//printf("\tg=%lf\t%lf\t%lf\t%lf, den=%d\t", (asize/amax) * (bsize/bmax) *f1 , (double)((asize/amax) * (bsize%bmax) *f3)/bmax, (double)((asize%amax) * (bsize/bmax)*f2)/amax , (double)((asize%amax) * (bsize%bmax) *f4)/(amax*bmax), (((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax)));
printf("\t%lf\t", f);
#endif
#ifdef NOHTIME
#include "includes/nohtimestart.h"
#endif
//fvinomgeneral_kernel_CallerWrapper(newndim, A, B,ilimit,olimit, ablockI, bblockI
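// Launch the fvinomatchgeneral kernel over the collapsed set of dimensions.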
fvinomatchgeneral_kernel_CallerWrapper(newndim, A, B,ilimit,olimit, ablockI, bblockI
,numBlocks, params[2],params[10]* params[5]*sizeof(type)
, d_lda_s,d_ldb_s,d_idx_s
,remainder1,remainder2,idx_s+c, input_base, output_base, ilimitr, olimitr, irem, orem, alpha, beta);
#ifdef NOHTIME
#include "includes/nohtimestop.h"
#endif
{cudaError_t err = cudaGetLastError();
if(err != cudaSuccess){
printf("\nKernel ERROR in fvi_nomatch_general: %s (line: %d)\n", cudaGetErrorString(err), __LINE__);
//exit(-1);
}}
free(aexpr);
free(bexpr);
cudaFree(d_lda_s);
cudaFree(d_ldb_s);
cudaFree(d_idx_s);
cudaFree(input_base);
cudaFree(output_base);
//cudaFree(d_ablock);
//cudaFree(d_bblock);
//#endif
}
/**
 * \test Tests vector operations (BLAS level 1) for unsigned integer arithmetic.
 **/
//
// *** System
//
#include <iostream>
#include <iomanip>
#include <vector>
//
// *** ViennaCL
//
#include "viennacl/vector.hpp"
#include "viennacl/vector_proxy.hpp"
#include "viennacl/linalg/inner_prod.hpp"
#include "viennacl/linalg/norm_1.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/norm_inf.hpp"
#include "viennacl/linalg/maxmin.hpp"
#include "viennacl/linalg/sum.hpp"
//
// -------------------------------------------------------------
//
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, ScalarType const & s2)
{
viennacl::backend::finish();
return s1 - s2;
}
//
// -------------------------------------------------------------
//
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, viennacl::scalar<ScalarType> const & s2)
{
viennacl::backend::finish();
return s1 - s2;
}
//
// -------------------------------------------------------------
//
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, viennacl::entry_proxy<ScalarType> const & s2)
{
viennacl::backend::finish();
return s1 - s2;
}
//
// -------------------------------------------------------------
//
template<typename ScalarType, typename VCLVectorType>
ScalarType diff(std::vector<ScalarType> const & v1, VCLVectorType const & v2)
{
std::vector<ScalarType> v2_cpu(v2.size());
viennacl::backend::finish(); //workaround for a bug in APP SDK 2.7 on Trinity APUs (with Catalyst 12.8)
viennacl::copy(v2.begin(), v2.end(), v2_cpu.begin());
for (unsigned int i=0;i<v1.size(); ++i)
{
if (v2_cpu[i] != v1[i])
return 1;
}
return 0;
}
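// Compares a reference (host) result against its ViennaCL counterpart via diff() and reports
// the discrepancy; returns EXIT_FAILURE on any mismatch so callers can early-return.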
template<typename T1, typename T2>
int check(T1 const & t1, T2 const & t2)
{
int retval = EXIT_SUCCESS;
if (diff(t1, t2) != 0)
{
std::cout << "# Error! Difference: " << diff(t1, t2) << std::endl;
retval = EXIT_FAILURE;
}
return retval;
}
//
// -------------------------------------------------------------
//
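// Core BLAS-1 test driver: runs initializers, norms, inner products, scaling, add/multiply-add/
// division-add variants and elementwise operations on a host/device vector pair, checking each
// result against the STL reference computation.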
template< typename NumericT, typename STLVectorType, typename ViennaCLVectorType1, typename ViennaCLVectorType2 >
int test(STLVectorType & std_v1, STLVectorType & std_v2,
ViennaCLVectorType1 & vcl_v1, ViennaCLVectorType2 & vcl_v2)
{
int retval = EXIT_SUCCESS;
NumericT cpu_result = 42;
viennacl::scalar<NumericT> gpu_result = 43;
//
// Initializer:
//
std::cout << "Checking for zero_vector initializer..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = 0;
vcl_v1 = viennacl::zero_vector<NumericT>(vcl_v1.size());
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Checking for scalar_vector initializer..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = cpu_result;
vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), cpu_result);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = cpu_result + 1;
vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), gpu_result);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Checking for unit_vector initializer..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = (i == 5) ? 1 : 0;
vcl_v1 = viennacl::unit_vector<NumericT>(vcl_v1.size(), 5);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
for (std::size_t i=0; i<std_v1.size(); ++i)
{
std_v1[i] = NumericT(i);
std_v2[i] = NumericT(i+42);
}
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin()); //resync
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
std::cout << "Checking for successful copy..." << std::endl;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(std_v2, vcl_v2) != EXIT_SUCCESS)
return EXIT_FAILURE;
//
// Part 1: Norms and inner product
//
// --------------------------------------------------------------------------
std::cout << "Testing inner_prod..." << std::endl;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result += std_v1[i] * std_v2[i];
NumericT cpu_result2 = viennacl::linalg::inner_prod(vcl_v1, vcl_v2);
gpu_result = viennacl::linalg::inner_prod(vcl_v1, vcl_v2);
if (check(cpu_result, cpu_result2) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result += (std_v1[i] + std_v2[i]) * (2*std_v2[i]);
NumericT cpu_result3 = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, 2*vcl_v2);
gpu_result = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, 2*vcl_v2);
if (check(cpu_result, cpu_result3) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
std::cout << "Testing norm_1..." << std::endl;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i) //note: norm_1 broken for unsigned ints on MacOS
cpu_result += std_v1[i];
gpu_result = viennacl::linalg::norm_1(vcl_v1);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result2 = 0; //reset
for (std::size_t i=0; i<std_v1.size(); ++i) //note: norm_1 broken for unsigned ints on MacOS
cpu_result2 += std_v1[i];
cpu_result = viennacl::linalg::norm_1(vcl_v1);
if (check(cpu_result, cpu_result2) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result2 = 0;
for (std::size_t i=0; i<std_v1.size(); ++i) //note: norm_1 broken for unsigned ints on MacOS
cpu_result2 += std_v1[i] + std_v2[i];
cpu_result = viennacl::linalg::norm_1(vcl_v1 + vcl_v2);
if (check(cpu_result, cpu_result2) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
std::cout << "Testing norm_inf..." << std::endl;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
if (std_v1[i] > cpu_result)
cpu_result = std_v1[i];
gpu_result = viennacl::linalg::norm_inf(vcl_v1);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result2 = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
if (std_v1[i] > cpu_result2)
cpu_result2 = std_v1[i];
cpu_result = viennacl::linalg::norm_inf(vcl_v1);
if (check(cpu_result, cpu_result2) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result2 = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
if (std_v1[i] + std_v2[i] > cpu_result2)
cpu_result2 = std_v1[i] + std_v2[i];
cpu_result = viennacl::linalg::norm_inf(vcl_v1 + vcl_v2);
if (check(cpu_result, cpu_result2) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
std::cout << "Testing index_norm_inf..." << std::endl;
std::size_t cpu_index = 0;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
if (std_v1[i] > cpu_result)
{
cpu_result = std_v1[i];
cpu_index = i;
}
std::size_t gpu_index = viennacl::linalg::index_norm_inf(vcl_v1);
if (check(static_cast<NumericT>(cpu_index), static_cast<NumericT>(gpu_index)) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1)];
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_index = 0;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
if (std_v1[i] + std_v2[i] > cpu_result)
{
cpu_result = std_v1[i] + std_v2[i];
cpu_index = i;
}
cpu_result = std_v1[cpu_index]; // compare against the v1 entry at the arg-max position
gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1 + vcl_v2)];
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
std::cout << "Testing max..." << std::endl;
cpu_result = std_v1[0];
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result = std::max<NumericT>(cpu_result, std_v1[i]);
gpu_result = viennacl::linalg::max(vcl_v1);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result = std_v1[0];
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result = std::max<NumericT>(cpu_result, std_v1[i]);
gpu_result = cpu_result;
cpu_result *= 2; //reset
cpu_result = viennacl::linalg::max(vcl_v1);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result = std_v1[0] + std_v2[0];
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result = std::max<NumericT>(cpu_result, std_v1[i] + std_v2[i]);
gpu_result = cpu_result;
cpu_result *= 2; //reset
cpu_result = viennacl::linalg::max(vcl_v1 + vcl_v2);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
std::cout << "Testing min..." << std::endl;
cpu_result = std_v1[0];
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result = std::min<NumericT>(cpu_result, std_v1[i]);
gpu_result = viennacl::linalg::min(vcl_v1);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result = std_v1[0];
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result = std::min<NumericT>(cpu_result, std_v1[i]);
gpu_result = cpu_result;
cpu_result *= 2; //reset
cpu_result = viennacl::linalg::min(vcl_v1);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result = std_v1[0] + std_v2[0];
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result = std::min<NumericT>(cpu_result, std_v1[i] + std_v2[i]);
gpu_result = cpu_result;
cpu_result *= 2; //reset
cpu_result = viennacl::linalg::min(vcl_v1 + vcl_v2);
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
std::cout << "Testing sum..." << std::endl;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result += std_v1[i];
cpu_result2 = viennacl::linalg::sum(vcl_v1);
gpu_result = viennacl::linalg::sum(vcl_v1);
if (check(cpu_result, cpu_result2) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
cpu_result = 0;
for (std::size_t i=0; i<std_v1.size(); ++i)
cpu_result += std_v1[i] + std_v2[i];
cpu_result3 = viennacl::linalg::sum(vcl_v1 + vcl_v2);
gpu_result = viennacl::linalg::sum(vcl_v1 + vcl_v2);
if (check(cpu_result, cpu_result3) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(cpu_result, gpu_result) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
std::cout << "Testing assignments..." << std::endl;
NumericT val = static_cast<NumericT>(1);
for (size_t i=0; i < std_v1.size(); ++i)
std_v1[i] = val;
for (size_t i=0; i < vcl_v1.size(); ++i)
vcl_v1(i) = val;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
//
// multiplication and division of vectors by scalars
//
std::cout << "Testing scaling with CPU scalar..." << std::endl;
NumericT alpha = static_cast<NumericT>(3);
viennacl::scalar<NumericT> gpu_alpha = alpha;
for (size_t i=0; i<std_v1.size(); ++i)
std_v1[i] *= alpha;
vcl_v1 *= alpha;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing scaling with GPU scalar..." << std::endl;
for (size_t i=0; i<std_v1.size(); ++i)
std_v1[i] *= alpha;
vcl_v1 *= gpu_alpha;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
NumericT beta = static_cast<NumericT>(2);
viennacl::scalar<NumericT> gpu_beta = beta;
std::cout << "Testing shrinking with CPU scalar..." << std::endl;
for (size_t i=0; i<std_v1.size(); ++i)
std_v1[i] /= beta;
vcl_v1 /= beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing shrinking with GPU scalar..." << std::endl;
for (size_t i=0; i<std_v1.size(); ++i)
std_v1[i] /= beta;
vcl_v1 /= gpu_beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
//
// add and inplace_add of vectors
//
for (size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = NumericT(i);
for (size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin()); //resync
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
std::cout << "Testing add on vector..." << std::endl;
std::cout << "Checking for successful copy..." << std::endl;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(std_v2, vcl_v2) != EXIT_SUCCESS)
return EXIT_FAILURE;
for (size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] + std_v2[i];
vcl_v1 = vcl_v1 + vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace-add on vector..." << std::endl;
for (size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v2[i];
vcl_v1 += vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
//
// multiply-add
//
std::cout << "Testing multiply-add on vector with CPU scalar (right)..." << std::endl;
for (size_t i=0; i < std_v1.size(); ++i)
std_v1[i] = NumericT(i);
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] + alpha * std_v2[i];
vcl_v1 = vcl_v1 + alpha * vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing multiply-add on vector with CPU scalar (left)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = alpha * std_v1[i] + std_v2[i];
vcl_v1 = alpha * vcl_v1 + vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing multiply-add on vector with CPU scalar (both)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = alpha * std_v1[i] + beta * std_v2[i];
vcl_v1 = alpha * vcl_v1 + beta * vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace multiply-add on vector with CPU scalar..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += alpha * std_v2[i];
vcl_v1 += alpha * vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing multiply-add on vector with GPU scalar (right)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] + alpha * std_v2[i];
vcl_v1 = vcl_v1 + gpu_alpha * vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing multiply-add on vector with GPU scalar (left)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = alpha * std_v1[i] + std_v2[i];
vcl_v1 = gpu_alpha * vcl_v1 + vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing multiply-add on vector with GPU scalar (both)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = alpha * std_v1[i] + beta * std_v2[i];
vcl_v1 = gpu_alpha * vcl_v1 + gpu_beta * vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace multiply-add on vector with GPU scalar (both, adding)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += alpha * std_v1[i] + beta * std_v2[i];
vcl_v1 += gpu_alpha * vcl_v1 + gpu_beta * vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace multiply-add on vector with GPU scalar..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += alpha * std_v2[i];
vcl_v1 += gpu_alpha * vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
//
// division-add
//
std::cout << "Testing division-add on vector with CPU scalar (right)..." << std::endl;
for (size_t i=0; i < std_v1.size(); ++i)
std_v1[i] = NumericT(i);
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] + std_v2[i] / alpha;
vcl_v1 = vcl_v1 + vcl_v2 / alpha;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing division-add on vector with CPU scalar (left)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] / alpha + std_v2[i];
vcl_v1 = vcl_v1 / alpha + vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing division-add on vector with CPU scalar (both)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] / alpha + std_v2[i] / beta;
vcl_v1 = vcl_v1 / alpha + vcl_v2 / beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing division-multiply-add on vector with CPU scalar..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] / alpha + std_v2[i] * beta;
vcl_v1 = vcl_v1 / alpha + vcl_v2 * beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing multiply-division-add on vector with CPU scalar..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] * alpha + std_v2[i] / beta;
vcl_v1 = vcl_v1 * alpha + vcl_v2 / beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace division-add on vector with CPU scalar..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v2[i] / alpha;
vcl_v1 += vcl_v2 / alpha;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing division-add on vector with GPU scalar (right)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] + std_v2[i] / alpha;
vcl_v1 = vcl_v1 + vcl_v2 / gpu_alpha;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing division-add on vector with GPU scalar (left)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] / alpha + std_v2[i];
vcl_v1 = vcl_v1 / gpu_alpha + vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing division-add on vector with GPU scalar (both)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] / alpha + std_v2[i] / beta;
vcl_v1 = vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace division-add on vector with GPU scalar (both, adding)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v1[i] / alpha + std_v2[i] / beta;
vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace division-multiply-add on vector with GPU scalar (adding)..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v1[i] / alpha + std_v2[i] * beta;
vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 * gpu_beta;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing inplace division-add on vector with GPU scalar..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v2[i] / alpha;
vcl_v1 += vcl_v2 / gpu_alpha;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
//
// More complicated expressions (for ensuring the operator overloads work correctly)
//
for (size_t i=0; i < std_v1.size(); ++i)
std_v1[i] = NumericT(i);
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
std::cout << "Testing three vector additions..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v2[i] + std_v1[i] + std_v2[i];
vcl_v1 = vcl_v2 + vcl_v1 + vcl_v2;
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v2[i] = 3 * std_v1[i];
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
std::cout << "Testing swap..." << std::endl;
swap(std_v1, std_v2);
swap(vcl_v1, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing elementwise multiplication..." << std::endl;
std::cout << " v1 = element_prod(v1, v2);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] * std_v2[i];
vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " v1 += element_prod(v1, v2);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v1[i] * std_v2[i];
vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " v1 = element_prod(v1 + v2, v2);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = (std_v1[i] + std_v2[i]) * std_v2[i];
vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " v1 += element_prod(v1 + v2, v2);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += (std_v1[i] + std_v2[i]) * std_v2[i];
vcl_v1 += viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " v1 = element_prod(v1, v2 + v1);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] * (std_v2[i] + std_v1[i]);
vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " v1 += element_prod(v1, v2 + v1);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v1[i] * (std_v2[i] + std_v1[i]);
vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " v1 = element_prod(v1 + v2, v2 + v1);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = (std_v1[i] + std_v2[i]) * (std_v2[i] + std_v1[i]);
vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " v1 += element_prod(v1 + v2, v2 + v1);" << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += (std_v1[i] + std_v2[i]) * (std_v2[i] + std_v1[i]);
vcl_v1 += viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << "Testing elementwise division..." << std::endl;
for (std::size_t i=0; i<std_v1.size(); ++i)
{
std_v1[i] = NumericT(1 + i);
std_v2[i] = NumericT(5 + i);
}
viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] / std_v2[i];
vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v1[i] / std_v2[i];
vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = (std_v1[i] + std_v2[i]) / std_v2[i];
vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += (std_v1[i] + std_v2[i]) / std_v2[i];
vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = std_v1[i] / (std_v2[i] + std_v1[i]);
vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += std_v1[i] / (std_v2[i] + std_v1[i]);
vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] = (std_v1[i] + std_v2[i]) / (std_v2[i] + std_v1[i]);
vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
for (std::size_t i=0; i<std_v1.size(); ++i)
std_v1[i] += (std_v1[i] + std_v2[i]) / (std_v2[i] + std_v1[i]);
vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1);
if (check(std_v1, vcl_v1) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
return retval;
}
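// Sets up full vectors plus range and slice views of them, then runs the test driver for every
// combination of vector/range/slice operands.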
template< typename NumericT >
int test()
{
int retval = EXIT_SUCCESS;
std::size_t size = 12345;
std::cout << "Running tests for vector of size " << size << std::endl;
//
// Set up STL objects
//
std::vector<NumericT> std_full_vec(size);
std::vector<NumericT> std_full_vec2(std_full_vec.size());
for (std::size_t i=0; i<std_full_vec.size(); ++i)
{
std_full_vec[i] = NumericT(1.0) + NumericT(i);
std_full_vec2[i] = NumericT(2.0) + NumericT(i) / NumericT(2);
}
std::vector<NumericT> std_range_vec (2 * std_full_vec.size() / 4 - std_full_vec.size() / 4);
std::vector<NumericT> std_range_vec2(2 * std_full_vec.size() / 4 - std_full_vec.size() / 4);
for (std::size_t i=0; i<std_range_vec.size(); ++i)
std_range_vec[i] = std_full_vec[i + std_full_vec.size() / 4];
for (std::size_t i=0; i<std_range_vec2.size(); ++i)
std_range_vec2[i] = std_full_vec2[i + 2 * std_full_vec2.size() / 4];
std::vector<NumericT> std_slice_vec (std_full_vec.size() / 4);
std::vector<NumericT> std_slice_vec2(std_full_vec.size() / 4);
for (std::size_t i=0; i<std_slice_vec.size(); ++i)
std_slice_vec[i] = std_full_vec[3*i + std_full_vec.size() / 4];
for (std::size_t i=0; i<std_slice_vec2.size(); ++i)
std_slice_vec2[i] = std_full_vec2[2*i + 2 * std_full_vec2.size() / 4];
//
// Set up ViennaCL objects
//
viennacl::vector<NumericT> vcl_full_vec(std_full_vec.size());
viennacl::vector<NumericT> vcl_full_vec2(std_full_vec2.size());
viennacl::fast_copy(std_full_vec.begin(), std_full_vec.end(), vcl_full_vec.begin());
viennacl::copy(std_full_vec2.begin(), std_full_vec2.end(), vcl_full_vec2.begin());
viennacl::range vcl_r1( vcl_full_vec.size() / 4, 2 * vcl_full_vec.size() / 4);
viennacl::range vcl_r2(2 * vcl_full_vec2.size() / 4, 3 * vcl_full_vec2.size() / 4);
viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec(vcl_full_vec, vcl_r1);
viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec2(vcl_full_vec2, vcl_r2);
{
viennacl::vector<NumericT> vcl_short_vec(vcl_range_vec);
viennacl::vector<NumericT> vcl_short_vec2 = vcl_range_vec2;
std::vector<NumericT> std_short_vec(std_range_vec);
std::vector<NumericT> std_short_vec2(std_range_vec2);
std::cout << "Testing creation of vectors from range..." << std::endl;
if (check(std_short_vec, vcl_short_vec) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(std_short_vec2, vcl_short_vec2) != EXIT_SUCCESS)
return EXIT_FAILURE;
}
viennacl::slice vcl_s1( vcl_full_vec.size() / 4, 3, vcl_full_vec.size() / 4);
viennacl::slice vcl_s2(2 * vcl_full_vec2.size() / 4, 2, vcl_full_vec2.size() / 4);
viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec(vcl_full_vec, vcl_s1);
viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec2(vcl_full_vec2, vcl_s2);
viennacl::vector<NumericT> vcl_short_vec(vcl_slice_vec);
viennacl::vector<NumericT> vcl_short_vec2 = vcl_slice_vec2;
std::vector<NumericT> std_short_vec(std_slice_vec);
std::vector<NumericT> std_short_vec2(std_slice_vec2);
std::cout << "Testing creation of vectors from slice..." << std::endl;
if (check(std_short_vec, vcl_short_vec) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (check(std_short_vec2, vcl_short_vec2) != EXIT_SUCCESS)
return EXIT_FAILURE;
//
// Now start running tests for vectors, ranges and slices:
//
std::cout << " ** vcl_v1 = vector, vcl_v2 = vector **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_short_vec, vcl_short_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_v1 = vector, vcl_v2 = range **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_short_vec, vcl_range_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_v1 = vector, vcl_v2 = slice **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_short_vec, vcl_slice_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " ** vcl_v1 = range, vcl_v2 = vector **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_range_vec, vcl_short_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_v1 = range, vcl_v2 = range **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_range_vec, vcl_range_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_v1 = range, vcl_v2 = slice **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_range_vec, vcl_slice_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " ** vcl_v1 = slice, vcl_v2 = vector **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_slice_vec, vcl_short_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_v1 = slice, vcl_v2 = range **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_slice_vec, vcl_range_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_v1 = slice, vcl_v2 = slice **" << std::endl;
retval = test<NumericT>(std_short_vec, std_short_vec2,
vcl_slice_vec, vcl_slice_vec2);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
//
// -------------------------------------------------------------
//
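// Entry point: runs the full integer test suite for unsigned int and unsigned long.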
int main()
{
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "## Test :: Vector with Integer types" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
int retval = EXIT_SUCCESS;
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
{
std::cout << "# Testing setup:" << std::endl;
std::cout << " numeric: unsigned int" << std::endl;
retval = test<unsigned int>();
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
}
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
{
std::cout << "# Testing setup:" << std::endl;
std::cout << " numeric: long" << std::endl;
retval = test<unsigned long>();
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
}
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
std::cout << std::endl;
std::cout << "------- Test completed --------" << std::endl;
std::cout << std::endl;
return retval;
}
#include <cuml/manifold/umapparams.h>
#include <cuml/common/logger.hpp>
#include <cuml/manifold/common.hpp>
#include "optimize.cuh"
#include "supervised.cuh"
#include "fuzzy_simpl_set/runner.cuh"
#include "init_embed/runner.cuh"
#include "knn_graph/runner.cuh"
#include "simpl_set_embed/runner.cuh"
#include <memory>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/system/cuda/execution_policy.h>
#include <raft/sparse/convert/csr.hpp>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/norm.hpp>
#include <raft/sparse/op/filter.hpp>
#include <raft/sparse/op/sort.hpp>
#include <raft/cuda_utils.cuh>
#include <cuda_runtime.h>
#include <raft/common/nvtx.hpp>
namespace UMAPAlgo {
// Swap this as impls change for now.
namespace FuzzySimplSetImpl = FuzzySimplSet::Naive;
namespace SimplSetEmbedImpl = SimplSetEmbed::Algo;
using namespace ML;
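// Initializes the embedding of each query point as the weighted average of its neighbors'
// embedding coordinates (one thread per row of the kNN result).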
template <int TPB_X, typename T>
__global__ void init_transform(int* indices,
T* weights,
int n,
const T* embeddings,
int embeddings_n,
int n_components,
T* result,
int n_neighbors)
{
// row-based matrix 1 thread per row
int row = (blockIdx.x * TPB_X) + threadIdx.x;
int i = row * n_neighbors; // each thread processes one row of the dist matrix
if (row < n) {
for (int j = 0; j < n_neighbors; j++) {
for (int d = 0; d < n_components; d++) {
result[row * n_components + d] +=
weights[i + j] * embeddings[indices[i + j] * n_components + d];
}
}
}
}
/**
* Fit exponential decay curve to find the parameters
* a and b, which are based on min_dist and spread
* parameters.
*/
void find_ab(UMAPParams* params, cudaStream_t stream) { Optimize::find_params_ab(params, stream); }
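// Usage note (a sketch, not part of this file): callers are expected to set min_dist/spread on
// the UMAPParams instance before invoking find_ab(params, stream), so that the fitted a/b
// coefficients are available to the embedding optimizer.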
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _fit(const raft::handle_t& handle,
const umap_inputs& inputs,
UMAPParams* params,
value_t* embeddings)
{
raft::common::nvtx::range fun_scope("umap::unsupervised::fit");
cudaStream_t stream = handle.get_stream();
int k = params->n_neighbors;
ML::Logger::get().setLevel(params->verbosity);
CUML_LOG_DEBUG("n_neighbors=%d", params->n_neighbors);
raft::common::nvtx::push_range("umap::knnGraph");
std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b = nullptr;
knn_graph<value_idx, value_t> knn_graph(inputs.n, k);
/**
* If not given precomputed knn graph, compute it
*/
if (inputs.alloc_knn_graph()) {
/**
* Allocate workspace for kNN graph
*/
knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
knn_dists_b = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);
knn_graph.knn_indices = knn_indices_b->data();
knn_graph.knn_dists = knn_dists_b->data();
}
CUML_LOG_DEBUG("Calling knn graph run");
kNNGraph::run<value_idx, value_t, umap_inputs>(
handle, inputs, inputs, knn_graph, k, params, stream);
raft::common::nvtx::pop_range();
CUML_LOG_DEBUG("Done. Calling fuzzy simplicial set");
raft::common::nvtx::push_range("umap::simplicial_set");
raft::sparse::COO<value_t> rgraph_coo(stream);
FuzzySimplSet::run<TPB_X, value_idx, value_t>(
inputs.n, knn_graph.knn_indices, knn_graph.knn_dists, k, &rgraph_coo, params, stream);
CUML_LOG_DEBUG("Done. Calling remove zeros");
/**
* Remove zeros from simplicial set
*/
raft::sparse::COO<value_t> cgraph_coo(stream);
raft::sparse::op::coo_remove_zeros<value_t>(&rgraph_coo, &cgraph_coo, stream);
raft::common::nvtx::pop_range();
/**
* Run initialization method
*/
raft::common::nvtx::push_range("umap::embedding");
InitEmbed::run(handle, inputs.n, inputs.d, &cgraph_coo, params, embeddings, stream, params->init);
if (params->callback) {
params->callback->setup<value_t>(inputs.n, params->n_components);
params->callback->on_preprocess_end(embeddings);
}
/**
* Run simplicial set embedding to approximate low-dimensional representation
*/
SimplSetEmbed::run<TPB_X, value_t>(inputs.n, inputs.d, &cgraph_coo, params, embeddings, stream);
raft::common::nvtx::pop_range();
if (params->callback) params->callback->on_train_end(embeddings);
}
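// Builds the (unsupervised) fuzzy simplicial set as a COO graph without running the embedding
// step: kNN graph -> fuzzy simplicial set -> zero removal into cgraph_coo.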
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _get_graph(const raft::handle_t& handle,
const umap_inputs& inputs,
UMAPParams* params,
raft::sparse::COO<value_t>* cgraph_coo // assumes single-precision int as the
// second template argument for COO
)
{
raft::common::nvtx::range fun_scope("umap::supervised::_get_graph");
cudaStream_t stream = handle.get_stream();
int k = params->n_neighbors;
ML::Logger::get().setLevel(params->verbosity);
CUML_LOG_DEBUG("n_neighbors=%d", params->n_neighbors);
raft::common::nvtx::push_range("umap::knnGraph");
std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b = nullptr;
knn_graph<value_idx, value_t> knn_graph(inputs.n, k);
/**
* If not given precomputed knn graph, compute it
*/
if (inputs.alloc_knn_graph()) {
/**
* Allocate workspace for kNN graph
*/
knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
knn_dists_b = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);
knn_graph.knn_indices = knn_indices_b->data();
knn_graph.knn_dists = knn_dists_b->data();
}
CUML_LOG_DEBUG("Calling knn graph run");
kNNGraph::run<value_idx, value_t, umap_inputs>(
handle, inputs, inputs, knn_graph, k, params, stream);
raft::common::nvtx::pop_range();
CUML_LOG_DEBUG("Done. Calling fuzzy simplicial set");
raft::common::nvtx::push_range("umap::simplicial_set");
raft::sparse::COO<value_t> rgraph_coo(stream);
FuzzySimplSet::run<TPB_X, value_idx, value_t>(
inputs.n, knn_graph.knn_indices, knn_graph.knn_dists, k, &rgraph_coo, params, stream);
CUML_LOG_DEBUG("Done. Calling remove zeros");
/**
* Remove zeros from simplicial set
*/
raft::sparse::op::coo_remove_zeros<value_t>(&rgraph_coo, cgraph_coo, stream);
raft::common::nvtx::pop_range();
}
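// Supervised variant of _get_graph: additionally intersects the fuzzy simplicial set with the
// label information (categorical or general intersection) before returning the graph.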
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _get_graph_supervised(
const raft::handle_t& handle,
const umap_inputs& inputs,
UMAPParams* params,
raft::sparse::COO<value_t>* cgraph_coo // assumes single-precision int as the
// second template argument for COO
)
{
raft::common::nvtx::range fun_scope("umap::supervised::_get_graph_supervised");
cudaStream_t stream = handle.get_stream();
int k = params->n_neighbors;
ML::Logger::get().setLevel(params->verbosity);
if (params->target_n_neighbors == -1) params->target_n_neighbors = params->n_neighbors;
raft::common::nvtx::push_range("umap::knnGraph");
std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b = nullptr;
knn_graph<value_idx, value_t> knn_graph(inputs.n, k);
/**
* If not given precomputed knn graph, compute it
*/
if (inputs.alloc_knn_graph()) {
/**
* Allocate workspace for kNN graph
*/
knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
knn_dists_b = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);
knn_graph.knn_indices = knn_indices_b->data();
knn_graph.knn_dists = knn_dists_b->data();
}
kNNGraph::run<value_idx, value_t, umap_inputs>(
handle, inputs, inputs, knn_graph, k, params, stream);
raft::common::nvtx::pop_range();
/**
* Allocate workspace for fuzzy simplicial set.
*/
raft::common::nvtx::push_range("umap::simplicial_set");
raft::sparse::COO<value_t> rgraph_coo(stream);
raft::sparse::COO<value_t> tmp_coo(stream);
/**
* Run Fuzzy simplicial set
*/
// int nnz = n*k*2;
FuzzySimplSet::run<TPB_X, value_idx, value_t>(inputs.n,
knn_graph.knn_indices,
knn_graph.knn_dists,
params->n_neighbors,
&tmp_coo,
params,
stream);
CUDA_CHECK(cudaPeekAtLastError());
raft::sparse::op::coo_remove_zeros<value_t>(&tmp_coo, &rgraph_coo, stream);
/**
* If target metric is 'categorical', perform
* categorical simplicial set intersection.
*/
if (params->target_metric == ML::UMAPParams::MetricType::CATEGORICAL) {
CUML_LOG_DEBUG("Performing categorical intersection");
Supervised::perform_categorical_intersection<TPB_X, value_t>(
inputs.y, &rgraph_coo, cgraph_coo, params, stream);
/**
* Otherwise, perform general simplicial set intersection
*/
} else {
CUML_LOG_DEBUG("Performing general intersection");
Supervised::perform_general_intersection<TPB_X, value_idx, value_t>(
handle, inputs.y, &rgraph_coo, cgraph_coo, params, stream);
}
/**
* Remove zeros
*/
raft::sparse::op::coo_sort<value_t>(cgraph_coo, stream);
raft::sparse::COO<value_t> ocoo(stream);
raft::sparse::op::coo_remove_zeros<value_t>(cgraph_coo, &ocoo, stream);
raft::common::nvtx::pop_range();
}
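// Refines an existing embedding by re-running the simplicial set embedding optimizer on the
// supplied graph.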
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _refine(const raft::handle_t& handle,
const umap_inputs& inputs,
UMAPParams* params,
raft::sparse::COO<value_t>* cgraph_coo,
value_t* embeddings)
{
cudaStream_t stream = handle.get_stream();
/**
* Run simplicial set embedding to approximate low-dimensional representation
*/
SimplSetEmbed::run<TPB_X, value_t>(inputs.n, inputs.d, cgraph_coo, params, embeddings, stream);
}
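// Supervised fit: builds the kNN graph, intersects the fuzzy simplicial set with the labels,
// initializes the embedding and runs the simplicial set embedding optimizer.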
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _fit_supervised(const raft::handle_t& handle,
const umap_inputs& inputs,
UMAPParams* params,
value_t* embeddings)
{
raft::common::nvtx::range fun_scope("umap::supervised::fit");
cudaStream_t stream = handle.get_stream();
int k = params->n_neighbors;
ML::Logger::get().setLevel(params->verbosity);
if (params->target_n_neighbors == -1) params->target_n_neighbors = params->n_neighbors;
raft::common::nvtx::push_range("umap::knnGraph");
std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b = nullptr;
knn_graph<value_idx, value_t> knn_graph(inputs.n, k);
/**
* If not given precomputed knn graph, compute it
*/
if (inputs.alloc_knn_graph()) {
/**
* Allocate workspace for kNN graph
*/
knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
knn_dists_b = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);
knn_graph.knn_indices = knn_indices_b->data();
knn_graph.knn_dists = knn_dists_b->data();
}
kNNGraph::run<value_idx, value_t, umap_inputs>(
handle, inputs, inputs, knn_graph, k, params, stream);
raft::common::nvtx::pop_range();
/**
* Allocate workspace for fuzzy simplicial set.
*/
raft::common::nvtx::push_range("umap::simplicial_set");
raft::sparse::COO<value_t> rgraph_coo(stream);
raft::sparse::COO<value_t> tmp_coo(stream);
/**
* Run Fuzzy simplicial set
*/
// int nnz = n*k*2;
FuzzySimplSet::run<TPB_X, value_idx, value_t>(inputs.n,
knn_graph.knn_indices,
knn_graph.knn_dists,
params->n_neighbors,
&tmp_coo,
params,
stream);
CUDA_CHECK(cudaPeekAtLastError());
raft::sparse::op::coo_remove_zeros<value_t>(&tmp_coo, &rgraph_coo, stream);
raft::sparse::COO<value_t> final_coo(stream);
/**
* If target metric is 'categorical', perform
* categorical simplicial set intersection.
*/
if (params->target_metric == ML::UMAPParams::MetricType::CATEGORICAL) {
CUML_LOG_DEBUG("Performing categorical intersection");
Supervised::perform_categorical_intersection<TPB_X, value_t>(
inputs.y, &rgraph_coo, &final_coo, params, stream);
/**
* Otherwise, perform general simplicial set intersection
*/
} else {
CUML_LOG_DEBUG("Performing general intersection");
Supervised::perform_general_intersection<TPB_X, value_idx, value_t>(
handle, inputs.y, &rgraph_coo, &final_coo, params, stream);
}
/**
* Remove zeros
*/
raft::sparse::op::coo_sort<value_t>(&final_coo, stream);
raft::sparse::COO<value_t> ocoo(stream);
raft::sparse::op::coo_remove_zeros<value_t>(&final_coo, &ocoo, stream);
raft::common::nvtx::pop_range();
/**
* Initialize embeddings
*/
raft::common::nvtx::push_range("umap::supervised::fit");
InitEmbed::run(handle, inputs.n, inputs.d, &ocoo, params, embeddings, stream, params->init);
if (params->callback) {
params->callback->setup<value_t>(inputs.n, params->n_components);
params->callback->on_preprocess_end(embeddings);
}
/**
* Run simplicial set embedding to approximate low-dimensional representation
*/
SimplSetEmbed::run<TPB_X, value_t>(inputs.n, inputs.d, &ocoo, params, embeddings, stream);
raft::common::nvtx::pop_range();
if (params->callback) params->callback->on_train_end(embeddings);
CUDA_CHECK(cudaPeekAtLastError());
}
/**
 * Transforms new (query) points into an existing embedding: builds a kNN graph against the
 * original training inputs, smooths the distances, computes membership strengths, initializes
 * the transformed coordinates and optimizes the layout.
 */
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _transform(const raft::handle_t& handle,
const umap_inputs& inputs,
umap_inputs& orig_x_inputs,
value_t* embedding,
int embedding_n,
UMAPParams* params,
value_t* transformed)
{
raft::common::nvtx::range fun_scope("umap::transform");
cudaStream_t stream = handle.get_stream();
ML::Logger::get().setLevel(params->verbosity);
CUML_LOG_DEBUG("Running transform");
CUML_LOG_DEBUG("Building KNN Graph");
raft::common::nvtx::push_range("umap::knnGraph");
std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b = nullptr;
int k = params->n_neighbors;
knn_graph<value_idx, value_t> knn_graph(inputs.n, k);
/**
* If not given precomputed knn graph, compute it
*/
if (inputs.alloc_knn_graph()) {
/**
* Allocate workspace for kNN graph
*/
knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
knn_dists_b = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);
knn_graph.knn_indices = knn_indices_b->data();
knn_graph.knn_dists = knn_dists_b->data();
}
kNNGraph::run<value_idx, value_t, umap_inputs>(
handle, orig_x_inputs, inputs, knn_graph, k, params, stream);
raft::common::nvtx::pop_range();
raft::common::nvtx::push_range("umap::smooth_knn");
float adjusted_local_connectivity = max(0.0, params->local_connectivity - 1.0);
CUML_LOG_DEBUG("Smoothing KNN distances");
/**
* Perform smooth_knn_dist
*/
rmm::device_uvector<value_t> sigmas(inputs.n, stream);
rmm::device_uvector<value_t> rhos(inputs.n, stream);
CUDA_CHECK(cudaMemsetAsync(sigmas.data(), 0, inputs.n * sizeof(value_t), stream));
CUDA_CHECK(cudaMemsetAsync(rhos.data(), 0, inputs.n * sizeof(value_t), stream));
dim3 grid_n(raft::ceildiv(inputs.n, TPB_X), 1, 1);
dim3 blk(TPB_X, 1, 1);
FuzzySimplSetImpl::smooth_knn_dist<TPB_X, value_idx, value_t>(inputs.n,
knn_graph.knn_indices,
knn_graph.knn_dists,
rhos.data(),
sigmas.data(),
params,
params->n_neighbors,
adjusted_local_connectivity,
stream);
raft::common::nvtx::pop_range();
/**
* Compute graph of membership strengths
*/
int nnz = inputs.n * params->n_neighbors;
dim3 grid_nnz(raft::ceildiv(nnz, TPB_X), 1, 1);
CUML_LOG_DEBUG("Executing fuzzy simplicial set");
/**
* Allocate workspace for fuzzy simplicial set.
*/
raft::sparse::COO<value_t> graph_coo(stream, nnz, inputs.n, inputs.n);
FuzzySimplSetImpl::compute_membership_strength_kernel<TPB_X>
<<<grid_nnz, blk, 0, stream>>>(knn_graph.knn_indices,
knn_graph.knn_dists,
sigmas.data(),
rhos.data(),
graph_coo.vals(),
graph_coo.rows(),
graph_coo.cols(),
graph_coo.n_rows,
params->n_neighbors);
CUDA_CHECK(cudaPeekAtLastError());
rmm::device_uvector<int> row_ind(inputs.n, stream);
rmm::device_uvector<int> ia(inputs.n, stream);
raft::sparse::convert::sorted_coo_to_csr(&graph_coo, row_ind.data(), stream);
raft::sparse::linalg::coo_degree(&graph_coo, ia.data(), stream);
rmm::device_uvector<value_t> vals_normed(graph_coo.nnz, stream);
CUDA_CHECK(cudaMemsetAsync(vals_normed.data(), 0, graph_coo.nnz * sizeof(value_t), stream));
CUML_LOG_DEBUG("Performing L1 normalization");
raft::sparse::linalg::csr_row_normalize_l1<value_t>(
row_ind.data(), graph_coo.vals(), graph_coo.nnz, graph_coo.n_rows, vals_normed.data(), stream);
init_transform<TPB_X, value_t><<<grid_n, blk, 0, stream>>>(graph_coo.cols(),
vals_normed.data(),
graph_coo.n_rows,
embedding,
embedding_n,
params->n_components,
transformed,
params->n_neighbors);
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaMemsetAsync(ia.data(), 0, ia.size() * sizeof(int), stream));
CUDA_CHECK(cudaPeekAtLastError());
/**
* Go through raft::sparse::COO values and set everything that's less than
* vals.max() / params->n_epochs to 0.0
*/
thrust::device_ptr<value_t> d_ptr = thrust::device_pointer_cast(graph_coo.vals());
value_t max = *(thrust::max_element(thrust::cuda::par.on(stream), d_ptr, d_ptr + nnz));
int n_epochs = params->n_epochs;
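// Heuristic: if the caller did not request a specific epoch count, use 100 epochs for small
// inputs and 30 for larger ones; otherwise run a third of the requested epochs for transform.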
if (n_epochs <= 0) {
if (inputs.n <= 10000)
n_epochs = 100;
else
n_epochs = 30;
} else {
n_epochs /= 3;
}
CUML_LOG_DEBUG("n_epochs=%d", n_epochs);
raft::linalg::unaryOp<value_t>(
graph_coo.vals(),
graph_coo.vals(),
graph_coo.nnz,
[=] __device__(value_t input) {
if (input < (max / float(n_epochs)))
return 0.0f;
else
return input;
},
stream);
CUDA_CHECK(cudaPeekAtLastError());
/**
* Remove zeros
*/
raft::sparse::COO<value_t> comp_coo(stream);
raft::sparse::op::coo_remove_zeros<value_t>(&graph_coo, &comp_coo, stream);
raft::common::nvtx::push_range("umap::optimization");
CUML_LOG_DEBUG("Computing # of epochs for training each sample");
rmm::device_uvector<value_t> epochs_per_sample(nnz, stream);
SimplSetEmbedImpl::make_epochs_per_sample(
comp_coo.vals(), comp_coo.nnz, n_epochs, epochs_per_sample.data(), stream);
CUML_LOG_DEBUG("Performing optimization");
if (params->callback) {
params->callback->setup<value_t>(inputs.n, params->n_components);
params->callback->on_preprocess_end(transformed);
}
auto initial_alpha = params->initial_alpha / 4.0;
SimplSetEmbedImpl::optimize_layout<TPB_X, value_t>(transformed,
inputs.n,
embedding,
embedding_n,
comp_coo.rows(),
comp_coo.cols(),
comp_coo.nnz,
epochs_per_sample.data(),
params->repulsion_strength,
params,
n_epochs,
stream);
raft::common::nvtx::pop_range();
if (params->callback) params->callback->on_train_end(transformed);
}
} // namespace UMAPAlgo
#include <math.h>
#ifdef WIN32
#include <float.h>
# define isnan(x) _isnan(x)
# define isinf(x) (! _finite(x))
#endif
#define notanum(x) (isnan(x) || isinf(x))
/* SFILE_BEGIN */
#include "essential_matrix_5pt_dcl.h"
#include "essential_matrix_5pt.h"
typedef double Matches[][3];
/* SFILE_END */
// Actual expected degree of the polynomial
const int PolynomialDegree = 10;
//=============================================================================
// Various operators on the polynomial classes
//=============================================================================
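// The poly4_k classes hold degree-k polynomials in four variables, with coefficients indexed by
// non-decreasing variable tuples (upper-triangular storage); products therefore accumulate into
// the entry addressed by the merged, sorted index tuple of the two operands.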
__host__ __device__ poly4_2 poly4_1::operator * (poly4_1 p2)
{
poly4_1 &p1 = *this;
poly4_2 prod;
prod(0,0) = p1(0)*p2(0);
prod(0,1) = p1(0)*p2(1);
prod(0,2) = p1(0)*p2(2);
prod(0,3) = p1(0)*p2(3);
prod(0,1) += p1(1)*p2(0);
prod(1,1) = p1(1)*p2(1);
prod(1,2) = p1(1)*p2(2);
prod(1,3) = p1(1)*p2(3);
prod(0,2) += p1(2)*p2(0);
prod(1,2) += p1(2)*p2(1);
prod(2,2) = p1(2)*p2(2);
prod(2,3) = p1(2)*p2(3);
prod(0,3) += p1(3)*p2(0);
prod(1,3) += p1(3)*p2(1);
prod(2,3) += p1(3)*p2(2);
prod(3,3) = p1(3)*p2(3);
return prod;
}
__host__ __device__ poly4_3 poly4_2::operator * (poly4_1 p2)
{
poly4_2 &p1 = *this;
poly4_3 prod;
prod(0,0,0) = p1(0,0)*p2(0);
prod(0,0,1) = p1(0,0)*p2(1);
prod(0,0,2) = p1(0,0)*p2(2);
prod(0,0,3) = p1(0,0)*p2(3);
prod(0,0,1) += p1(0,1)*p2(0);
prod(0,1,1) = p1(0,1)*p2(1);
prod(0,1,2) = p1(0,1)*p2(2);
prod(0,1,3) = p1(0,1)*p2(3);
prod(0,0,2) += p1(0,2)*p2(0);
prod(0,1,2) += p1(0,2)*p2(1);
prod(0,2,2) = p1(0,2)*p2(2);
prod(0,2,3) = p1(0,2)*p2(3);
prod(0,0,3) += p1(0,3)*p2(0);
prod(0,1,3) += p1(0,3)*p2(1);
prod(0,2,3) += p1(0,3)*p2(2);
prod(0,3,3) = p1(0,3)*p2(3);
prod(0,1,1) += p1(1,1)*p2(0);
prod(1,1,1) = p1(1,1)*p2(1);
prod(1,1,2) = p1(1,1)*p2(2);
prod(1,1,3) = p1(1,1)*p2(3);
prod(0,1,2) += p1(1,2)*p2(0);
prod(1,1,2) += p1(1,2)*p2(1);
prod(1,2,2) = p1(1,2)*p2(2);
prod(1,2,3) = p1(1,2)*p2(3);
prod(0,1,3) += p1(1,3)*p2(0);
prod(1,1,3) += p1(1,3)*p2(1);
prod(1,2,3) += p1(1,3)*p2(2);
prod(1,3,3) = p1(1,3)*p2(3);
prod(0,2,2) += p1(2,2)*p2(0);
prod(1,2,2) += p1(2,2)*p2(1);
prod(2,2,2) = p1(2,2)*p2(2);
prod(2,2,3) = p1(2,2)*p2(3);
prod(0,2,3) += p1(2,3)*p2(0);
prod(1,2,3) += p1(2,3)*p2(1);
prod(2,2,3) += p1(2,3)*p2(2);
prod(2,3,3) = p1(2,3)*p2(3);
prod(0,3,3) += p1(3,3)*p2(0);
prod(1,3,3) += p1(3,3)*p2(1);
prod(2,3,3) += p1(3,3)*p2(2);
prod(3,3,3) = p1(3,3)*p2(3);
#ifdef RH_DEBUG
printf ("In poly4_2 * poly4_1\n");
printf ("poly4_2 = \n");
p1.print();
printf ("poly4_1 = \n");
p2.print();
printf ("poly4_2 * poly4_2 = \n");
prod.print();
#endif
return prod;
}
__host__ __device__ poly4_3 poly4_3::operator * (double k)
{
poly4_3 &p1 = *this;
poly4_3 prod;
prod(0,0,0) = p1(0,0,0) * k;
prod(0,0,1) = p1(0,0,1) * k;
prod(0,0,2) = p1(0,0,2) * k;
prod(0,0,3) = p1(0,0,3) * k;
prod(0,1,1) = p1(0,1,1) * k;
prod(0,1,2) = p1(0,1,2) * k;
prod(0,1,3) = p1(0,1,3) * k;
prod(0,2,2) = p1(0,2,2) * k;
prod(0,2,3) = p1(0,2,3) * k;
prod(0,3,3) = p1(0,3,3) * k;
prod(1,1,1) = p1(1,1,1) * k;
prod(1,1,2) = p1(1,1,2) * k;
prod(1,1,3) = p1(1,1,3) * k;
prod(1,2,2) = p1(1,2,2) * k;
prod(1,2,3) = p1(1,2,3) * k;
prod(1,3,3) = p1(1,3,3) * k;
prod(2,2,2) = p1(2,2,2) * k;
prod(2,2,3) = p1(2,2,3) * k;
prod(2,3,3) = p1(2,3,3) * k;
prod(3,3,3) = p1(3,3,3) * k;
return prod;
}
__host__ __device__ poly4_3 poly4_3::operator + (poly4_3 p2)
{
poly4_3 &p1 = *this;
poly4_3 sum;
sum(0,0,0) = p1(0,0,0) + p2(0,0,0);
sum(0,0,1) = p1(0,0,1) + p2(0,0,1);
sum(0,0,2) = p1(0,0,2) + p2(0,0,2);
sum(0,0,3) = p1(0,0,3) + p2(0,0,3);
sum(0,1,1) = p1(0,1,1) + p2(0,1,1);
sum(0,1,2) = p1(0,1,2) + p2(0,1,2);
sum(0,1,3) = p1(0,1,3) + p2(0,1,3);
sum(0,2,2) = p1(0,2,2) + p2(0,2,2);
sum(0,2,3) = p1(0,2,3) + p2(0,2,3);
sum(0,3,3) = p1(0,3,3) + p2(0,3,3);
sum(1,1,1) = p1(1,1,1) + p2(1,1,1);
sum(1,1,2) = p1(1,1,2) + p2(1,1,2);
sum(1,1,3) = p1(1,1,3) + p2(1,1,3);
sum(1,2,2) = p1(1,2,2) + p2(1,2,2);
sum(1,2,3) = p1(1,2,3) + p2(1,2,3);
sum(1,3,3) = p1(1,3,3) + p2(1,3,3);
sum(2,2,2) = p1(2,2,2) + p2(2,2,2);
sum(2,2,3) = p1(2,2,3) + p2(2,2,3);
sum(2,3,3) = p1(2,3,3) + p2(2,3,3);
sum(3,3,3) = p1(3,3,3) + p2(3,3,3);
return sum;
}
__host__ __device__ void poly4_3::operator += (poly4_3 p2)
{
poly4_3 &p1 = *this;
p1(0,0,0) += p2(0,0,0);
p1(0,0,1) += p2(0,0,1);
p1(0,0,2) += p2(0,0,2);
p1(0,0,3) += p2(0,0,3);
p1(0,1,1) += p2(0,1,1);
p1(0,1,2) += p2(0,1,2);
p1(0,1,3) += p2(0,1,3);
p1(0,2,2) += p2(0,2,2);
p1(0,2,3) += p2(0,2,3);
p1(0,3,3) += p2(0,3,3);
p1(1,1,1) += p2(1,1,1);
p1(1,1,2) += p2(1,1,2);
p1(1,1,3) += p2(1,1,3);
p1(1,2,2) += p2(1,2,2);
p1(1,2,3) += p2(1,2,3);
p1(1,3,3) += p2(1,3,3);
p1(2,2,2) += p2(2,2,2);
p1(2,2,3) += p2(2,2,3);
p1(2,3,3) += p2(2,3,3);
p1(3,3,3) += p2(3,3,3);
}
__host__ __device__ poly4_3 poly4_3::operator - (poly4_3 p2)
{
poly4_3 &p1 = *this;
poly4_3 dif;
dif(0,0,0) = p1(0,0,0) - p2(0,0,0);
dif(0,0,1) = p1(0,0,1) - p2(0,0,1);
dif(0,0,2) = p1(0,0,2) - p2(0,0,2);
dif(0,0,3) = p1(0,0,3) - p2(0,0,3);
dif(0,1,1) = p1(0,1,1) - p2(0,1,1);
dif(0,1,2) = p1(0,1,2) - p2(0,1,2);
dif(0,1,3) = p1(0,1,3) - p2(0,1,3);
dif(0,2,2) = p1(0,2,2) - p2(0,2,2);
dif(0,2,3) = p1(0,2,3) - p2(0,2,3);
dif(0,3,3) = p1(0,3,3) - p2(0,3,3);
dif(1,1,1) = p1(1,1,1) - p2(1,1,1);
dif(1,1,2) = p1(1,1,2) - p2(1,1,2);
dif(1,1,3) = p1(1,1,3) - p2(1,1,3);
dif(1,2,2) = p1(1,2,2) - p2(1,2,2);
dif(1,2,3) = p1(1,2,3) - p2(1,2,3);
dif(1,3,3) = p1(1,3,3) - p2(1,3,3);
dif(2,2,2) = p1(2,2,2) - p2(2,2,2);
dif(2,2,3) = p1(2,2,3) - p2(2,2,3);
dif(2,3,3) = p1(2,3,3) - p2(2,3,3);
dif(3,3,3) = p1(3,3,3) - p2(3,3,3);
return dif;
}
__host__ __device__ poly4_2 poly4_2::operator + (poly4_2 p2)
{
poly4_2 &p1 = *this;
poly4_2 sum;
sum(0,0) = p1(0,0) + p2(0,0);
sum(0,1) = p1(0,1) + p2(0,1);
sum(0,2) = p1(0,2) + p2(0,2);
sum(0,3) = p1(0,3) + p2(0,3);
sum(1,1) = p1(1,1) + p2(1,1);
sum(1,2) = p1(1,2) + p2(1,2);
sum(1,3) = p1(1,3) + p2(1,3);
sum(2,2) = p1(2,2) + p2(2,2);
sum(2,3) = p1(2,3) + p2(2,3);
sum(3,3) = p1(3,3) + p2(3,3);
return sum;
}
__host__ __device__ void poly4_2::operator += (poly4_2 p2)
{
poly4_2 &p1 = *this;
p1(0,0) += p2(0,0);
p1(0,1) += p2(0,1);
p1(0,2) += p2(0,2);
p1(0,3) += p2(0,3);
p1(1,1) += p2(1,1);
p1(1,2) += p2(1,2);
p1(1,3) += p2(1,3);
p1(2,2) += p2(2,2);
p1(2,3) += p2(2,3);
p1(3,3) += p2(3,3);
}
__host__ __device__ poly4_2 poly4_2::operator - (poly4_2 p2)
{
poly4_2 &p1 = *this;
poly4_2 dif;
dif(0,0) = p1(0,0) - p2(0,0);
dif(0,1) = p1(0,1) - p2(0,1);
dif(0,2) = p1(0,2) - p2(0,2);
dif(0,3) = p1(0,3) - p2(0,3);
dif(1,1) = p1(1,1) - p2(1,1);
dif(1,2) = p1(1,2) - p2(1,2);
dif(1,3) = p1(1,3) - p2(1,3);
dif(2,2) = p1(2,2) - p2(2,2);
dif(2,3) = p1(2,3) - p2(2,3);
dif(3,3) = p1(3,3) - p2(3,3);
return dif;
}
__host__ __device__ poly4_1 poly4_1::operator + (poly4_1 p2)
{
poly4_1 &p1 = *this;
poly4_1 sum;
sum(0) = p1(0) + p2(0);
sum(1) = p1(1) + p2(1);
sum(2) = p1(2) + p2(2);
sum(3) = p1(3) + p2(3);
return sum;
}
__host__ __device__ poly4_1 poly4_1::operator - (poly4_1 p2)
{
poly4_1 &p1 = *this;
poly4_1 dif;
dif(0) = p1(0) - p2(0);
dif(1) = p1(1) - p2(1);
dif(2) = p1(2) - p2(2);
dif(3) = p1(3) - p2(3);
return dif;
}
//=============================================================================
__host__ __device__ poly4_3 polydet4 (EmatrixSet_5pt E)
{
// Takes the determinant of the 3x3 matrix of linear polynomials E
poly4_3 det =
(E(1,1)*E(2,2) - E(2,1)*E(1,2)) * E(0,0) +
(E(2,1)*E(0,2) - E(0,1)*E(2,2)) * E(1,0) +
(E(0,1)*E(1,2) - E(1,1)*E(0,2)) * E(2,0);
#ifdef RH_DEBUG
printf ("Det =\n");
det.print();
#endif
return det;
}
#define FULL_TRACE
#ifdef FULL_TRACE
__host__ __device__ poly4_2 traceEEt (EmatrixSet_5pt E)
{
// Takes the trace of E E' -- returns a quadratic polynomial
// The trace of the product is the sum of the elementwise products of the entries
poly4_2 tr = E(0,0) * E(0, 0) + E(0,1) * E(0, 1) + E(0,2) * E(0, 2)
+ E(1,0) * E(1, 0) + E(1,1) * E(1, 1) + E(1,2) * E(1, 2)
+ E(2,0) * E(2, 0) + E(2,1) * E(2, 1) + E(2,2) * E(2, 2);
#ifdef RH_DEBUG
printf ("Trace is:\n");
tr.print();
#endif
return tr;
}
#else
__host__ __device__ poly4_2 traceEEt (EmatrixSet_5pt E)
{
// We know that the trace has a simple form, provided that the
// E-matrix basis is orthogonal.
poly4_2 tr;
tr.clear();
tr(0,0) = 1.0;
tr(1,1) = 1.0;
tr(2,2) = 1.0;
tr(3,3) = 1.0;
return tr;
}
#endif
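// Explanatory note (added, not part of the original comments): since each
// E(i,j) is a degree-1 polynomial (poly4_1) in (w, x, y, z), the trace
//    tr(E E') = sum_{i,j} E(i,j)*E(i,j)
// is a degree-2 polynomial, which is why both versions of traceEEt above
// return a poly4_2. The second version additionally assumes the E-matrix
// basis is orthonormal (as produced by the Gram-Schmidt step in
// null_space_solve_5x9), so the trace reduces to w^2 + x^2 + y^2 + z^2.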
__host__ __device__ void mono_coeff (poly4_3 B, EquationSet A, int n)
{
// Extracts the monomial coefficients in x and y (with z = 1) from
// a cubic homogeneous polynomial. Returns 4 vectors (degrees 0 to 3 in w)
// Make some constants to make the code easier to read
// Degrees of terms in w
const int w0 = 0;
const int w1 = 1;
const int w2 = 2;
const int w3 = 3;
// Linear variables
const int w = 0;
const int x = 1;
const int y = 2;
const int z = 3;
// Monomials
const int xx = 3;
const int xy = 4;
const int yy = 5;
const int xxx = 6;
const int xxy = 7;
const int xyy = 8;
const int yyy = 9;
// Terms in w^0
A[w0][n][ 0 ] = B(z, z, z);
A[w0][n][ x ] = B(x, z, z);
A[w0][n][ y ] = B(y, z, z);
A[w0][n][ xx ] = B(x, x, z);
A[w0][n][ yy ] = B(y, y, z);
A[w0][n][ xy ] = B(x, y, z);
A[w0][n][ xxx] = B(x, x, x);
A[w0][n][ xxy] = B(x, x, y);
A[w0][n][ xyy] = B(x, y, y);
A[w0][n][ yyy] = B(y, y, y);
// Terms in w^1
A[w1][n][ 0 ] = B(w, z, z);
A[w1][n][ x ] = B(w, x, z);
A[w1][n][ y ] = B(w, y, z);
A[w1][n][ xx ] = B(w, x, x);
A[w1][n][ yy ] = B(w, y, y);
A[w1][n][ xy ] = B(w, x, y);
// Terms in w^2
A[w2][n][ 0 ] = B(w, w, z);
A[w2][n][ x ] = B(w, w, x);
A[w2][n][ y ] = B(w, w, y);
// Terms in w^3
A[w3][n][ 0 ] = B(w, w, w);
}
__host__ __device__ void EEeqns_5pt (EmatrixSet_5pt E, EquationSet A)
{
//
// Computes the equations that will be used to input to polyeig.
// void EEeqns_5pt(E, A)
// where E has dimensions E(3, 3, 4). The output is a matrix
// of dimension A(4, 10, 10), where A(i, :, :) is the coefficient of w^{i-1}
//
// Makes all the equations from the essential matrix E
// First of all, set the equations to zero
memset (&(A[0][0][0]), 0, sizeof(EquationSet));
// Find the trace - this is a quadratic polynomial
poly4_2 tr = traceEEt(E);
// First equation is from the determinant
mono_coeff (polydet4(E), A, 0);
// Other equations from the equation 2 E*E'*E - tr(E*E') E = 0
// In the following loop, we compute EE'E(i,j) = sum_pq E(i,p)*E(q,p)*E(q,j)
// The way this is done is optimized for speed. We compute first the matrix
// EE'(i, q) and then use this to accumulate EE'E(i, j)
int eqn = 1; // Count on the next equation
for (int i=0; i<3; i++)
{
// An array of cubic polynomials, one for each j = 0 ... 2
poly4_3 EEE_i[3]; // Will hold (EE'E)(i,j)
for (int j=0; j<3; j++) EEE_i[j].clear();
// Compute each EE'(i,q) = sum_p E(i,p) E(q,p)
for (int q=0; q<3; q++)
{
// Accumulate EE(i, q)
poly4_2 EE_iq; EE_iq.clear();
for (int p=0; p<3; p++)
EE_iq += E(i,p) * E(q,p);
// Now, accumulate EEE(ij) = sum_q EE'(i,q) * E(q, j)
for (int j=0; j<3; j++)
EEE_i[j] += EE_iq * E(q,j);
}
// Now, EE'E(i,j) is computed for this i and all j
// We can complete the computation of the coefficients from EE'E(i, j)
for (int j=0; j<3; j++)
mono_coeff(EEE_i[j]*2.0 - tr* E(i,j), A, eqn++);
}
}
__host__ __device__ void null_space_solve_3x3_half_pivot (double A[3][3], double &x, double &y)
{
//
// Solve for the null-space of the matrix. The value returned is
// (x, y), where (1, x, y) is the generator of the null-space.
//
// This time we will do pivoting
int p1;
double f0 = fabs(A[0][2]), f1 = fabs(A[1][2]), f2 = fabs(A[2][2]);
if (f0 > f1) p1 = (f0>f2)? 0 : 2;
else p1 = (f1>f2) ? 1 : 2;
// The other two rows
int r1 = (p1+1)%3, r2 = (p1+2)%3;
// Now, use this to pivot
double fac = A[r1][2] / A[p1][2];
A[r1][0] -= fac * A[p1][0];
A[r1][1] -= fac * A[p1][1];
fac = A[r2][2] / A[p1][2];
A[r2][0] -= fac * A[p1][0];
A[r2][1] -= fac * A[p1][1];
// Second pivot - largest element in column 1
int p2 = fabs(A[r1][1]) > fabs(A[r2][1]) ? r1 : r2;
// Now, read off the values - back substitution
x = - A[p2][0] / A[p2][1];
y = -(A[p1][0] + A[p1][1]*x) / A[p1][2];
}
#if 0
//
// THIS DOES NOT COMPILE BECAUSE OF THE USE OF rhMatrix.
// IT IS POSSIBLE THAT IT SHOULD BE USED.
//
__host__ __device__ void null_space_solve_3x3 (double A[3][3], double &x, double &y)
{
// Solve for the null-space of the matrix
rhMatrix AA (3, 3, &(A[0][0]));
rhVector D(3);
rhMatrix V(3, 3);
// Take the SVD
svd(AA, D, V);
if (V[0][2] == 0.0)
{
// Just something that stops it from crashing
x = 0.0;
y = 0.0;
}
else
{
x = V[1][2] / V[0][2];
y = V[2][2] / V[0][2];
}
}
#endif
__host__ __device__ void null_space_solve_5x9_nopivot (double A[5][9], EmatrixSet_5pt &E)
{
// This will compute the set of solutions for the equations
// Sweep out one column at a time, starting with highest column number
// We do Gaussian elimination to convert M to the form M = [X | I]
// Then the null space will be [-I | X].
// For present, this is done without pivoting.
// Mostly, do not need to actually change right hand part (that becomes I)
const int lastrow = 4;
const int firstcol = 4; // First column to do elimination to make I
const int lastcol = 8;
// First sweep is to get rid of the above diagonal parts
for (int col=lastcol; col>firstcol; col--) // No need to do first col
{
// Remove column col
const int row = col-firstcol; // Row to pivot around
const double pivot = A[row][col];
// Sweep out all rows up to the current one
for (int i=0; i<row; i++)
{
// This factor of the pivot row is to subtract from row i
const double fac = A[i][col] / pivot;
// Constant terms
for (int j=0; j<col; j++)
A[i][j] -= fac * A[row][j];
}
}
// Now, do backward sweep to clear below the diagonal
for (int col=firstcol; col<lastcol; col++) // No need to do lastcol
{
// Remove column col
const int row = col-firstcol; // Row to pivot around
const double pivot = A[row][col];
// Sweep out all rows up to the current one
for (int i=row+1; i<=lastrow; i++)
{
// This factor of the pivot row is to subtract from row i
const double fac = A[i][col] / pivot;
// Constant terms
for (int j=0; j<firstcol; j++)
A[i][j] -= fac * A[row][j];
}
}
// Make this into a matrix of solutions
double fac;
E(0, 0) = poly4_1(1.0, 0.0, 0.0, 0.0);
E(0, 1) = poly4_1(0.0, 1.0, 0.0, 0.0);
E(0, 2) = poly4_1(0.0, 0.0, 1.0, 0.0);
E(1, 0) = poly4_1(0.0, 0.0, 0.0, 1.0);
fac = -1.0/A[0][4];
E(1, 1) = poly4_1(fac*A[0][0], fac*A[0][1], fac*A[0][2], fac*A[0][3]);
fac = -1.0/A[1][5];
E(1, 2) = poly4_1(fac*A[1][0], fac*A[1][1], fac*A[1][2], fac*A[1][3]);
fac = -1.0/A[2][6];
E(2, 0) = poly4_1(fac*A[2][0], fac*A[2][1], fac*A[2][2], fac*A[2][3]);
fac = -1.0/A[3][7];
E(2, 1) = poly4_1(fac*A[3][0], fac*A[3][1], fac*A[3][2], fac*A[3][3]);
fac = -1.0/A[4][8];
E(2, 2) = poly4_1(fac*A[4][0], fac*A[4][1], fac*A[4][2], fac*A[4][3]);
// #define USE_TEST_VALUES
#ifdef USE_TEST_VALUES
// Put an artificial value in
E(0,0)(0) = 2; E(0,1)(0) = 4; E(0,2)(0) = -1;
E(1,0)(0) = 4; E(1,1)(0) = 5; E(1,2)(0) = -8;
E(2,0)(0) = 2; E(2,1)(0) = -11; E(2,2)(0) = 8;
E(0,0)(1) = 0; E(0,1)(1) = -1; E(0,2)(1) = 2;
E(1,0)(1) = 1; E(1,1)(1) = 7; E(1,2)(1) = 1;
E(2,0)(1) = -2; E(2,1)(1) = 6; E(2,2)(1) = 7;
E(0,0)(2) = 2; E(0,1)(2) = -3; E(0,2)(2) = 7;
E(1,0)(2) = 1; E(1,1)(2) = -3; E(1,2)(2) = -9;
E(2,0)(2) = 4; E(2,1)(2) = 1; E(2,2)(2) = -9;
E(0,0)(3) = 5; E(0,1)(3) = 2; E(0,2)(3) = 7;
E(1,0)(3) = 1; E(1,1)(3) = -2; E(1,2)(3) = -4;
E(2,0)(3) = 5; E(2,1)(3) = -1; E(2,2)(3) = 8;
#endif
}
__host__ __device__ void null_space_solve_5x9 (double A[9][9], EmatrixSet_5pt &E)
{
// This will compute the set of solutions for the equations
// We do orthogonal reduction of the rows of the matrix
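// Added explanation (not in the original comments): rows 0-4 hold the five
// epipolar constraints; rows 5-8 are filled with pseudo-random values below
// and then Gram-Schmidt-orthogonalized against them, so after the reduction
// rows 5-8 form an orthonormal basis of the 4-dimensional null space of the
// constraint matrix, which is read off into E at the end.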
const int nrows = 9;
const int ncols = 9;
// First, fill out some random entries in the remaining rows
const double PPi = 3.18730379; // Basically a random number
double ran = PPi;
for (int i=5; i<nrows; i++)
{
for (int j=0; j<ncols; j++)
{
ran *= PPi;
ran = 2.0 * (ran - floor(ran)) - 1.0;
A[i][j] = ran;
}
}
// Now, do Gram-Schmidt
for (int row=0; row<nrows; row++)
{
// Normalize the row
double sum = 0.0;
for (int j=0; j<ncols; j++) sum += A[row][j]*A[row][j];
double fac = 1.0 / sqrt(sum);
for (int j=0; j<ncols; j++) A[row][j] *= fac;
// Use to sweep out the subsequent rows
for (int i=row+1; i<nrows; i++)
{
// Inner product of row i and row j
double prod = 0.0;
for (int j=0; j<ncols; j++)
prod += A[row][j]*A[i][j]; // Inner product
for (int j=0; j<ncols; j++) A[i][j] -= prod * A[row][j];
}
}
// Make this into a matrix of solutions
int count = 0;
for (int i=0; i<3; i++)
for (int j=0; j<3; j++)
{
E(i,j) = poly4_1(A[5][count], A[6][count], A[7][count], A[8][count]);
count++;
}
}
__host__ __device__ void Ematrix_5pt(Matches q, Matches qp, EmatrixSet_5pt &E, EquationSet &A)
{
// Computes the E-matrix from match inputs
// A matrix to solve linearly for the ematrix
double M[9][9];
memset (&(M[0][0]), 0, sizeof (M));
for (int i=0; i<5; i++)
{
M[i][0] = qp[i][0]*q[i][0];
M[i][1] = qp[i][0]*q[i][1];
M[i][2] = qp[i][0]*q[i][2];
M[i][3] = qp[i][1]*q[i][0];
M[i][4] = qp[i][1]*q[i][1];
M[i][5] = qp[i][1]*q[i][2];
M[i][6] = qp[i][2]*q[i][0];
M[i][7] = qp[i][2]*q[i][1];
M[i][8] = qp[i][2]*q[i][2];
}
// Solve using null_space_solve
null_space_solve_5x9 (M, E);
# ifdef RH_DEBUG
printf ("E = \n");
E.print();
# endif
// Now, get the equations
EEeqns_5pt(E, A);
# ifdef RH_DEBUG
print_equation_set (A, 3);
#endif
}
__host__ __device__ void sweep_up (EquationSet A, int row, int col, int degree)
{
// Use the given pivot point to sweep out above the pivot
const int num1 = 6; // number of nonzero columns of A in degree 1
const int num2 = 3; // number of nonzero columns of A in degree 2
const int num3 = 1; // number of nonzero columns of A in degree 3
// Find the pivot value
const double pivot = A[degree][row][col];
// Sweep out all rows up to the current one
for (int i=0; i<row; i++)
{
// This factor of the pivot row is to subtract from row i
const double fac = A[degree][i][col] / pivot;
// Constant terms
for (int j=0; j<=col; j++)
A[0][i][j] -= fac * A[0][row][j];
// Degree 1 terms
for (int j=0; j<num1; j++)
A[1][i][j] -= fac * A[1][row][j];
// Degree 2 terms
for (int j=0; j<num2; j++)
A[2][i][j] -= fac * A[2][row][j];
// Degree 3 terms
for (int j=0; j<num3; j++)
A[3][i][j] -= fac * A[3][row][j];
}
}
__host__ __device__ void sweep_down (EquationSet A, int row, int col, int degree, int lastrow)
{
// Use the given pivot point to sweep out below the pivot
const int num1 = 6; // number of nonzero columns of A in degree 1
const int num2 = 3; // number of nonzero columns of A in degree 2
const int num3 = 1; // number of nonzero columns of A in degree 3
// The value of the pivot point
const double pivot = A[degree][row][col];
// Sweep out all rows up to the current one
for (int i=row+1; i<=lastrow; i++)
{
// This factor of the pivot row is to subtract from row i
const double fac = A[degree][i][col] / pivot;
// Constant terms
for (int j=0; j<=col; j++)
A[0][i][j] -= fac * A[0][row][j];
// Degree 1 terms
for (int j=0; j<num1; j++)
A[1][i][j] -= fac * A[1][row][j];
// Degree 2 terms
for (int j=0; j<num2; j++)
A[2][i][j] -= fac * A[2][row][j];
// Degree 3 terms
for (int j=0; j<num3; j++)
A[3][i][j] -= fac * A[3][row][j];
}
}
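// Added note (not part of the original comments): sweep_up and sweep_down
// perform one step of Gaussian elimination simultaneously on every degree
// slice A[0..3], because an equation here is a polynomial in w whose
// coefficient of w^d is stored in A[d][row][*]; the row operation
// (equation_i -= fac * equation_row) must therefore be applied to all
// degrees to keep the polynomial equation consistent.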
__host__ __device__ void print_equation_set (EquationSet A, int maxdegree)
{
// Print out the matrix
printf ("Equation matrix\n");
for (int degree=0; degree<=maxdegree; degree++)
{
for (int i=0; i<10; i++)
{
for (int j=0; j<10; j++)
printf ("%7.1f ", A[degree][i][j]);
printf ("\n");
}
printf ("\n");
}
}
__host__ __device__ void print_Ematrix (Ematrix E)
{
// Not actually used in this directory at least.
for (int i=0; i<3; i++)
printf ("\t%12.5f %12.5f %12.5f\n", E[i][0], E[i][1], E[i][2]);
printf ("\n");
}
__host__ __device__ inline void swap (double &a, double &b)
{
double temp = a; a = b; b = temp;
}
__host__ __device__ void pivot (EquationSet A, int last, int deg, int dummy)
{
// Pivot so that the largest element in the column is in the diagonal
// Use the given pivot point to sweep out below the pivot
const int num1 = 6; // number of nonzero columns of A in degree 1
const int num2 = 3; // number of nonzero columns of A in degree 2
const int num3 = 1; // number of nonzero columns of A in degree 3
// Find the maximum value in the column
double maxval = fabs(A[deg][last][last]);
int row = last;
for (int i=0; i<last; i++)
{
if (fabs(A[deg][i][last]) > maxval)
{
row = i;
maxval = fabs(A[deg][i][last]);
}
}
// If the largest element is already in the pivot position, then return.
if (row == last) return;
// Otherwise, swap
// Constant terms
for (int j=0; j<=last; j++)
swap(A[0][last][j], A[0][row][j]);
// Degree 1 terms
for (int j=0; j<num1; j++)
swap(A[1][last][j], A[1][row][j]);
// Degree 2 terms
for (int j=0; j<num2; j++)
swap(A[2][last][j], A[2][row][j]);
// Degree 3 terms
for (int j=0; j<num3; j++)
swap(A[3][last][j], A[3][row][j]);
}
__host__ __device__ void reduce_Ematrix (EquationSet A)
{
// This reduces the equation set to 3 x 3. Before each sweep, pivot()
// swaps the row with the largest element in the column into the pivot
// position, so the sweeps do not divide by a near-zero pivot.
// The reduction relies on the particular form of the A matrix:
// there are several rows of zero elements in the different degrees,
// as given below.
// Sweeping out the constant terms to reduce to 6 x 6
pivot (A, 9, 0, 8); sweep_up (A, 9, 9, 0);
pivot (A, 8, 0, 7); sweep_up (A, 8, 8, 0);
pivot (A, 7, 0, 6); sweep_up (A, 7, 7, 0);
pivot (A, 6, 0, 5); sweep_up (A, 6, 6, 0);
// Now, the matrix is 6 x 6. Next we need to handle linear terms
pivot (A, 5, 0, 4); sweep_up (A, 5, 5, 0);
pivot (A, 4, 0, 3); sweep_up (A, 4, 4, 0);
pivot (A, 3, 0, 2); sweep_up (A, 3, 3, 0);
int lastrow = 5;
sweep_down (A, 3, 3, 0, lastrow);
sweep_down (A, 4, 4, 0, lastrow);
// Also sweep out the first-order terms
sweep_up (A, 2, 5, 1);
sweep_up (A, 1, 4, 1);
sweep_down (A, 0, 3, 1, lastrow);
sweep_down (A, 1, 4, 1, lastrow);
sweep_down (A, 2, 5, 1, lastrow);
// Now, sweep out the x terms by increasing the degree
for (int i=0; i<3; i++)
{
double fac = A[1][i][3+i] / A[0][3+i][3+i];
// Introduces 4-th degree term
A[4][i][0] = -A[3][i+3][0] * fac;
// Transfer terms of degree 0 to 3
for (int j=0; j<3; j++)
{
A[3][i][j] -= A[2][i+3][j] * fac;
A[2][i][j] -= A[1][i+3][j] * fac;
A[1][i][j] -= A[0][i+3][j] * fac;
}
}
}
__host__ __device__ void reduce_constant_terms (EquationSet A)
{
// This reduces the equation set to 6 x 6 by eliminating the
// constant-term columns at the end. pivot() is called before each
// sweep so that the sweeps do not divide by a near-zero pivot.
// Sweeping out the constant terms to reduce to 6 x 6
pivot (A, 9, 0, 8); sweep_up (A, 9, 9, 0);
pivot (A, 8, 0, 7); sweep_up (A, 8, 8, 0);
pivot (A, 7, 0, 6); sweep_up (A, 7, 7, 0);
pivot (A, 6, 0, 5); sweep_up (A, 6, 6, 0);
}
__host__ __device__ inline void one_cofactor (EquationSet A, Polynomial poly,
int r0, int r1, int r2)
{
// Computes one term of the 3x3 cofactor expansion
// Get a polynomial to hold a 2x2 determinant
double two[7];
memset (&(two[0]), 0, 7*sizeof(double));
// Compute the 2x2 determinant - results in a 6-degree polynomial
for (int i=0; i<=3; i++)
for (int j=0; j<=3; j++)
two [i+j] += A[i][r1][1]*A[j][r2][2] - A[i][r2][1]*A[j][r1][2];
// Now, multiply by degree 4 polynomial
for (int i=0; i<=6; i++)
for (int j=0; j<=4; j++)
poly [i+j] += A[j][r0][0]*two[i];
}
__host__ __device__ void compute_determinant (EquationSet A, Polynomial poly)
{
// Does the final determinant computation to return the determinant
// Input is a 3x3 matrix of polynomials A,
// Output is poly (degree 10)
// Clear out the polynomial
memset (&(poly[0]), 0, (PolynomialDegree+1)*sizeof(double));
// Now, the three cofactors
one_cofactor (A, poly, 0, 1, 2);
one_cofactor (A, poly, 1, 2, 0);
one_cofactor (A, poly, 2, 0, 1);
}
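// Added note (not part of the original comments): this is a cofactor
// expansion of the 3x3 polynomial matrix along its first column,
//   det = sum over cyclic (r0,r1,r2) of A[.][r0][0]*(A[.][r1][1]*A[.][r2][2] - A[.][r2][1]*A[.][r1][2]).
// Column 0 has degree 4 in w (after reduce_Ematrix) and columns 1 and 2 have
// degree 3, so each 2x2 minor has degree 6 and the full determinant has
// degree 10, matching PolynomialDegree.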
// Declaration of the function to find roots
__host__ __device__ int find_real_roots_sturm(
double *p, int order, double *roots, int *nroots, bool non_neg = false);
__host__ __device__ void compute_E_matrix (EmatrixSet_5pt &Es, EquationSet &A, double w, Ematrix &E)
{
// Compute the essential matrix corresponding to this root
double w2 = w*w;
double w3 = w2*w;
double w4 = w3*w;
// Form equations to solve
double M[3][3];
for (int i=0; i<3; i++)
{
for (int j=0; j<3; j++)
{
M[i][j] = A[0][i][j] + w*A[1][i][j] + w2*A[2][i][j] + w3*A[3][i][j];
}
// Only the first row has degree 4 terms
M[i][0] += w4*A[4][i][0];
}
// Now, find the solution
double x, y;
null_space_solve_3x3_half_pivot (M, x, y);
//-----------------------------------------------------
// Insurance that it worked
// If this failed, then try again with different method
#if 0
//
// POSSIBLY THIS SHOULD BE USED, BUT IT DOES NOT COMPILE.
//
if (notanum(x) || notanum(y))
{
// Do it again
for (int i=0; i<3; i++)
{
for (int j=0; j<3; j++)
{
M[i][j] = A[0][i][j] + w*A[1][i][j] + w2*A[2][i][j] + w3*A[3][i][j];
}
// Only the first row has degree 4 terms
M[i][0] += w4*A[4][i][0];
}
// Solve using safer SVD solver
null_space_solve_3x3 (M, x, y);
}
#endif
//-----------------------------------------------------
// Multiply out the solution to get the essential matrix
for (int i=0; i<3; i++)
for (int j=0; j<3; j++)
{
poly4_1 &p = Es(i, j);
E[i][j] = w*p(0) + x*p(1) + y*p(2) + p(3);
}
}
__host__ __device__ void compute_E_A_poly (
Matches q, Matches qp,
double EE[4][3][3],
double AA[5][3][3],
Polynomial poly)
{
// This is used by the Matlab interface.
// It takes the matches and returns the basis for the E-matrices (EE)
// along with a 3x3 matrix of polynomials, which allows us to solve
// for w. It also returns the polynomial to solve
// Get the matrix set
EquationSet A;
EmatrixSet_5pt E;
Ematrix_5pt(q, qp, E, A);
// Now, reduce its dimension to 3 x 3
reduce_Ematrix (A);
// Finally, get the 10-th degree polynomial out of this
if (poly) compute_determinant (A, poly);
// Now, copy to the simple arrays
if (EE)
for (int d=0; d<4; d++) for (int i=0; i<3; i++) for (int j=0; j<3; j++)
EE[d][i][j] = E(i,j)(d); // Do not transpose - we want Ematrices thus
if (AA)
for (int d=0; d<5; d++) for (int i=0; i<3; i++) for (int j=0; j<3; j++)
AA[d][i][j] = A[d][j][i]; // Transpose
}
__host__ __device__ static inline double pval (double *p, int deg, double x)
{
// Evaluates a polynomial at a given point x. Assumes deg >= 0
double val = p[deg];
for (int i=deg-1; i>=0; i--)
val = x*val + p[i];
return val;
}
__host__ __device__ static void compute_E_matrix_generic (
EmatrixSet_5pt &Es,
PolyMatrix A,
PolyDegree deg, // Degree of each entry in A
int rows[Nrows],
double w,
double scale,
Ematrix &E
)
{
// Compute the essential matrix corresponding to this root from
// the matrix of equations A, assumed to be in row-echelon form
// as defined by the array rows.
double a10 = pval(A[rows[1]][0], deg[rows[1]][0], w);
double a11 = pval(A[rows[1]][1], deg[rows[1]][1], w);
double a20 = pval(A[rows[2]][0], deg[rows[2]][0], w);
double a21 = pval(A[rows[2]][1], deg[rows[2]][1], w);
double a22 = pval(A[rows[2]][2], deg[rows[2]][2], w);
double x = -a10/a11;
double y = -(a20 + x*a21) / a22;
// Multiply out the solution to get the essential matrix
for (int i=0; i<3; i++)
for (int j=0; j<3; j++)
{
poly4_1 &p = Es(i, j);
E[i][j] = scale*w*p(0) + x*p(1) + y*p(2) + p(3);
}
}
__host__ __device__ void compute_E_matrices (
Matches q, Matches qp,
Ematrix Ematrices[10],
int &nroots,
bool optimized
)
{
// Get the matrix set
EquationSet A;
EmatrixSet_5pt E;
Ematrix_5pt(q, qp, E, A);
// print_equation_set (A, 3);
if (!optimized)
{
//------------------------------------------------------------------------
// This is the generic version of the solver as in our paper
//------------------------------------------------------------------------
int dim = Nrows;
// First of all, reduce to 6 x 6 by eliminating constant columns
reduce_constant_terms (A);
dim = 6;
// Set up array of degrees
PolyDegree degrees;
for (int i=0; i<dim; i++)
{
degrees[i][0] = 3;
degrees[i][1] = 2;
degrees[i][2] = 2;
degrees[i][3] = 1;
degrees[i][4] = 1;
degrees[i][5] = 1;
degrees[i][6] = 0;
degrees[i][7] = 0;
degrees[i][8] = 0;
degrees[i][9] = 0;
}
// Unfortunately, we need to rearrange the data since it is incompatible
PolyMatrix P;
for (int i=0; i<dim; i++)
for (int j=0; j<dim; j++)
for (int d=0; d<=degrees[i][j]; d++)
P[i][j][d] = A[d][i][j];
// print_polymatrix (P, 3);
// Go ahead and find the polynomial determinant
double scale_factor = 1.0;
do_scale (P, degrees, scale_factor, false, dim);
int rows[Nrows];
find_polynomial_determinant (P, degrees, rows, dim);
double *poly = P[rows[0]][0];
int poly_degree = degrees[rows[0]][0];
// Find the real roots
double roots[Maxdegree];
find_real_roots_sturm(poly, poly_degree, roots, &nroots);
// Now, get the ematrices
for (int i=0; i<nroots; i++)
compute_E_matrix_generic (E, P, degrees, rows,
roots[i], scale_factor, Ematrices[i]);
}
else
{
//------------------------------------------------------------------------
// This is the highly optimized version of the code - similar to Nister's
//------------------------------------------------------------------------
// Now, reduce its dimension to 3 x 3
reduce_Ematrix (A);
// Finally, get the 10-th degree polynomial out of this
Polynomial poly;
compute_determinant (A, poly);
// Find the roots
double roots[PolynomialDegree];
find_real_roots_sturm(poly, PolynomialDegree, roots, &nroots);
// Now, get the ematrices
for (int i=0; i<nroots; i++)
compute_E_matrix (E, A, roots[i], Ematrices[i]);
}
// #define PRINT_RESULTS
#ifdef PRINT_RESULTS
#undef PRINT_RESULTS
printf ("Polynomial\n");
for (int i=0; i<=PolynomialDegree; i++)
printf ("\t%14.6f\n", poly[i]/poly[0]);
#endif
// #define PRINT_RESULTS
#ifdef PRINT_RESULTS
#undef PRINT_RESULTS
// Print out the roots
printf ("Roots\n");
for (int i=0; i<nroots; i++)
printf ("\t%14.6f\n", roots[i]);
#endif
// #define PRINT_RESULTS
#ifdef PRINT_RESULTS
#undef PRINT_RESULTS
// Print out the essential matrices
printf ("Ematrices\n");
for (int m=0; m<nroots; m++)
{
const Ematrix &E = Ematrices[m];
for (int i=0; i<3; i++)
printf ("\t%12.5f %12.5f %12.5f\n", E[i][0], E[i][1], E[i][2]);
printf ("\n");
// Now, compute to see if it has worked
printf ("Verify: ");
for (int pt=0; pt<5; pt++)
{
double sum = 0.0;
for (int i=0; i<3; i++) for (int j=0; j<3; j++)
sum += qp[pt][i] * E[i][j] * q[pt][j];
printf ("%11.3e ", sum);
}
printf ("\n\n");
}
#endif
}
__host__ __device__ void compute_E_matrices_optimized (
Matches q, Matches qp,
Ematrix Ematrices[10],
int &nroots
)
{
// Get the matrix set
EquationSet A;
EmatrixSet_5pt E;
Ematrix_5pt(q, qp, E, A);
// Now, reduce its dimension to 3 x 3
reduce_Ematrix(A);
// Finally, get the 10-th degree polynomial out of this
Polynomial poly;
compute_determinant(A, poly);
// Find the roots
double roots[PolynomialDegree];
find_real_roots_sturm(poly, PolynomialDegree, roots, &nroots); // ~50MB
// Now, get the ematrices
for (int i=0; i<nroots; i++)
compute_E_matrix(E, A, roots[i], Ematrices[i]);
}
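// --------------------------------------------------------------------------
// Illustrative usage sketch (added; not part of the original file). The
// coordinate values below are made up; a real caller would pass five
// normalized image correspondences. Assumes the Ematrix and Matches types
// come from the headers included at the top of this file.
void example_five_point_usage()
{
   double q [5][3] = {{ 0.10,  0.20, 1.0}, {-0.30,  0.05, 1.0}, { 0.25, -0.15, 1.0},
                      {-0.05, -0.40, 1.0}, { 0.35,  0.30, 1.0}};
   double qp[5][3] = {{ 0.12,  0.18, 1.0}, {-0.28,  0.07, 1.0}, { 0.27, -0.12, 1.0},
                      {-0.02, -0.38, 1.0}, { 0.33,  0.33, 1.0}};
   Ematrix Ematrices[10];
   int nroots = 0;
   // true selects the optimized (Nister-style) path; false the generic solver
   compute_E_matrices (q, qp, Ematrices, nroots, true);
   for (int i = 0; i < nroots; i++)
      print_Ematrix (Ematrices[i]);
}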
|
the_stack
|
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
int nblock_size = 64;
int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static cudaError_t crc;
__global__ void emptyKernel() {}
/*--------------------------------------------------------------------*/
extern "C" void setgbsize(int nblock) {
/* set blocksize */
nblock_size = nblock;
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc() {
/* get major and minor computer capability */
return mmcc;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) {
/* allocate global float memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(float)*nsize);
if (crc) {
printf("cudaMalloc float Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_f = (float *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) {
/* allocate global integer memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(int)*nsize);
if (crc) {
printf("cudaMalloc int Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_i = (int *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate(void *g_d, int *irc) {
/* deallocate global memory on GPU */
crc = cudaFree(g_d);
if (crc) {
printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc));
*irc = 1;
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) {
/* copy float array from host memory to global GPU memory */
crc = cudaMemcpy((void *)g_f,f,sizeof(float)*nsize,
cudaMemcpyHostToDevice);
if (crc) {
printf("cudaMemcpyHostToDevice float Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) {
/* copy float array from global GPU memory to host memory */
crc = cudaMemcpy(f,(void *)g_f,sizeof(float)*nsize,
cudaMemcpyDeviceToHost);
if (crc) {
printf("cudaMemcpyDeviceToHost float Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel() {
int ngx, ngy;
ngx = nblock_size < 32768 ? nblock_size : 32768;
ngy = (ngrid_size - 1)/ngx + 1;
dim3 dimBlock(nblock_size,1);
dim3 dimGrid(ngx,ngy);
crc = cudaGetLastError();
emptyKernel<<<dimGrid,dimBlock>>>();
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("emptyKernel error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu(int dev, int *irc) {
/* initialize CUDA with device dev, or select the best GPU available */
/* searches through devices, selects the device with the most compute */
/* units, and saves the device id devid */
/* if dev is a valid device, it is used, otherwise the GPU with the */
/* most multi-processors is selected */
/* error code is modified only if there is an error */
int maxcpus = 0, jm = -1;
int j, ndevs, maxunits;
unsigned long msize;
double z;
struct cudaDeviceProp prop;
/* returns number of device */
crc = cudaGetDeviceCount(&ndevs);
if (crc) {
printf("cudaGetDeviceCount Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* get information about devices */
for (j = 0; j < ndevs; j++) {
crc = cudaGetDeviceProperties(&prop,j);
if (crc) {
printf("cudaGetDeviceProperties Error=%i:%s\n",crc,
cudaGetErrorString(crc));
prop.name[0] = 0;
}
maxunits = prop.multiProcessorCount;
if (dev <= 0) {
printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n",
j,prop.name,maxunits);
msize = prop.totalGlobalMem;
z = ((double) msize)/1073741824.0;
mmcc = 10*prop.major + prop.minor;
printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n",
msize,(float) z,mmcc);
printf(" Capability=%d\n",mmcc);
if (maxunits > maxcpus) {
maxcpus = maxunits;
jm = j;
}
}
}
devid = jm;
if (dev >= 0)
devid = dev % ndevs;
printf("using device j=%i\n",devid);
/* get properties for this device */
crc = cudaGetDeviceProperties(&prop,devid);
maxgsx = prop.maxGridSize[0];
mmcc = 10*prop.major + prop.minor;
/* set device */
crc = cudaSetDevice(devid);
if (crc) {
printf("cudaSetDevice Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* run empty kernel */
emptykernel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu() {
/* terminate CUDA */
crc = cudaThreadExit();
if (crc) {
printf("cudaThreadExit Error=%d:%s\n",crc,cudaGetErrorString(crc));
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gcopy1(float a[], float b[], int nx) {
/* 1d copy a = b */
/* one block of mx threads copies mx values */
/* ((nx-1)/mx+1) independent blocks */
/* nx = size of arrays in x */
/* local data */
int j, js, id, mx;
mx = blockDim.x;
j = threadIdx.x;
id = blockIdx.x;
js = j + mx*id;
if (js < nx) a[js] = b[js];
return;
}
/*--------------------------------------------------------------------*/
__global__ void gcopy2a(float a[], float b[], int nx, int ny) {
/* 2d copy a = b */
/* one block of mx threads copies mx values */
/* nbx*ny independent blocks */
/* local data */
int j, k, js, id, mx;
mx = blockDim.x;
j = threadIdx.x;
id = blockIdx.x;
k = blockIdx.y;
js = j + mx*id;
if ((js < nx) && (k < ny)) {
a[js+nx*k] = b[js+nx*k];
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gcopy2b(float a[], float b[], int nx, int ny) {
/* 2d copy a = b */
/* one block of mx threads copies nx values */
/* ny independent blocks */
/* local data */
int j, k, mx;
mx = blockDim.x;
k = blockIdx.x;
j = threadIdx.x;
while (j < nx) {
if (k < ny)
a[j+nx*k] = b[j+nx*k];
j += mx;
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gsaxpy2(float a[], float b[], float s, int nx, int ny) {
/* 2d vector multiply a = s*b + a */
/* one block of mx threads copies nx values */
/* ny independent blocks */
/* local data */
int j, k, mx;
mx = blockDim.x;
k = blockIdx.x;
j = threadIdx.x;
while (j < nx) {
if (k < ny)
a[j+nx*k] = s*b[j+nx*k] + a[j+nx*k];
j += mx;
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gcopy3(float a[], float b[], int nx, int ny) {
/* 2d copy a = b */
/* one block of mx*my threads copies mx*my values */
/* ((nx-1)/mx+1)*((ny-1)/my+1) independent blocks */
/* local data */
int j, k, js, ks, idx, idy, mx, my;
mx = blockDim.x; my = blockDim.y;
j = threadIdx.x; k = threadIdx.y;
idx = blockIdx.x; idy = blockIdx.y;
ks = k + my*idy;
js = j + mx*idx;
if ((js < nx) && (ks < ny))
a[js+nx*ks] = b[js+nx*ks];
return;
}
/*--------------------------------------------------------------------*/
__global__ void gtranspose2(float a[], float b[], int nx, int ny) {
/* a = transpose(b) */
/* one block of mx*mx threads transposes mx*mx values */
/* ((nx-1)/mx+1)*((ny-1)/mx+1) independent blocks */
/* local data */
int j, k, js, ks, idx, idy, joff, koff, mx, mxv;
extern __shared__ float s[];
mx = blockDim.x; mxv = mx + 1;
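/* note (added): the +1 row stride (mxv) pads the shared-memory tile so that
   the transposed read s[k+mxv*j] below hits different memory banks across
   a warp, avoiding shared-memory bank conflicts */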
j = threadIdx.x; k = threadIdx.y;
idx = blockIdx.x; idy = blockIdx.y;
koff = mx*idy;
joff = mx*idx;
ks = k + koff;
js = j + joff;
if ((js < nx) && (ks < ny))
s[j+mxv*k] = b[js+nx*ks];
/* synchronize threads */
__syncthreads();
js = k + joff;
ks = j + koff;
if ((js < nx) && (ks < ny))
a[ks+ny*js] = s[k+mxv*j];
return;
}
/*--------------------------------------------------------------------*/
__global__ void gsum1(float a[], float *sa, int nx) {
/* 1d serial sum reductions, each of length mx */
/* sa = sum(a) */
/* local data */
int j, js, jb, mx, joff, mxm;
float t;
extern __shared__ float s[];
mx = blockDim.x;
js = threadIdx.x;
jb = blockIdx.x;
joff = mx*jb;
j = js + joff;
/* copy global data to shared memory */
if (j < nx) s[js] = a[j];
/* synchronize to make sure each thread in block has the data */
__syncthreads();
if (js==0) {
mxm = nx - joff;
if (mxm > mx) mxm = mx;
/* perform serial local sum reduction: result in t */
t = 0.0f;
for (j = 0; j < mxm; j++) {
t += s[j];
}
/* accumulate results to global memory for each block */
/* for devices with compute capability 2.x */
atomicAdd(&sa[0],t);
}
return;
}
/*--------------------------------------------------------------------*/
__device__ void lsum2(float *sdata, int n) {
/* finds local sum of nths data items shared by threads */
/* using binary tree method. input is modified. */
/* local data */
int l, k;
float s;
l = threadIdx.x;
k = blockDim.x >> 1;
s = 0.0f;
if (l < n) s = sdata[l];
while (k > 0) {
if (l < k) {
if ((l+k) < n) {
s += sdata[l+k];
sdata[l] = s;
}
}
__syncthreads();
k >>= 1;
}
return;
}
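/* worked example (added): with blockDim.x = 8 and n = 5, k takes the values
   4, 2, 1. At k=4 thread 0 adds sdata[4] into sdata[0]; at k=2 thread 0 adds
   sdata[2] into sdata[0] and thread 1 adds sdata[3] into sdata[1]; at k=1
   thread 0 adds sdata[1] (now holding sdata[1]+sdata[3]) into sdata[0],
   which then holds the sum of all five inputs */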
/*--------------------------------------------------------------------*/
__global__ void gsum2(float a[], float d[], int nx) {
/* segmented 1d sum reductions, each of length mx */
/* forall (j = 1:nbx); d(j) = sum(a(1+mx*(j-1):min(nx,mx*j))) */
/* parallel summation */
/* local data */
int j, js, jb, mx, joff, mxm;
extern __shared__ float s[];
mx = blockDim.x;
js = threadIdx.x;
jb = blockIdx.x;
joff = mx*jb;
j = js + joff;
/* copy global data to shared memory */
if (j < nx) s[js] = a[j];
/* synchronize to make sure each thread in block has the data */
__syncthreads();
mxm = nx - joff;
if (mxm > mx) mxm = mx;
/* perform parallel local sum reduction: result in s[0] */
lsum2(s,mxm);
/* write out result to global memory for each block */
if (js==0) d[jb] = s[0];
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy1(float *a, float *b, int mx, int nx) {
/* 2d copy of length nx, with block size mx */
/* one block of mx threads copies mx values */
/* ((nx-1)/mx+1) independent blocks */
/* local data */
int nbx;
nbx = (nx - 1)/mx + 1;
dim3 dimBlock(mx);
dim3 dimGrid(nbx);
crc = cudaGetLastError();
gcopy1<<<dimGrid,dimBlock>>>(a,b,nx);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gcopy1 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy2a(float *a, float *b, int mx, int nx, int ny) {
/* 2d copy a = b */
/* one block of mx threads copies mx values */
/* nbx*ny independent blocks */
/* local data */
int nbx;
nbx = (nx - 1)/mx + 1;
dim3 dimBlock(mx);
dim3 dimGrid(nbx,ny);
crc = cudaGetLastError();
gcopy2a<<<dimGrid,dimBlock>>>(a,b,nx,ny);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gcopy2a error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy2b(float *a, float *b, int mx, int nx, int ny) {
/* 2d copy a = b */
/* one block of mx threads copies nx values */
/* ny independent blocks */
/* local data */
dim3 dimBlock(mx);
dim3 dimGrid(ny);
crc = cudaGetLastError();
gcopy2b<<<dimGrid,dimBlock>>>(a,b,nx,ny);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gcopy2b error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_saxpy2(float *a, float *b, float s, int mx, int nx,
int ny) {
/* 2d vector multiply a = s*b + a */
/* one block of mx threads copies nx values */
/* ny independent blocks */
/* local data */
dim3 dimBlock(mx);
dim3 dimGrid(ny);
crc = cudaGetLastError();
gsaxpy2<<<dimGrid,dimBlock>>>(a,b,s,nx,ny);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gsaxpy2 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy3(float *a, float *b, int mx, int my, int nx,
int ny) {
/* 2d copy a = b */
/* one block of mx*my threads copies mx*my values */
/* ((nx-1)/mx+1)*((ny-1)/my+1) independent blocks */
/* local data */
int nbx, nby;
nbx = (nx - 1)/mx + 1; nby = (ny - 1)/my + 1;
dim3 dimBlock(mx,my);
dim3 dimGrid(nbx,nby);
crc = cudaGetLastError();
gcopy3<<<dimGrid,dimBlock>>>(a,b,nx,ny);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gcopy3 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_transpose2(float *a, float *b, int mx, int nx,
int ny) {
/* 2d transpose of length nx, ny, with block size mx, mx */
/* one block of mx*mx threads transposes mx*mx values */
/* ((nx-1)/mx+1)*((ny-1)/mx+1) independent blocks */
/* local data */
int nbx, nby, ns;
nbx = (nx - 1)/mx + 1; nby = (ny - 1)/mx + 1;
dim3 dimBlock(mx,mx);
dim3 dimGrid(nbx,nby);
/* calculate size of shared memory */
ns = (mx + 1)*mx*sizeof(float);
crc = cudaGetLastError();
gtranspose2<<<dimGrid,dimBlock,ns>>>(a,b,nx,ny);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gtranspose2 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_sum1(float *a, float *sa, int mx, int nx) {
/* 1d serial sum reductions, each of length mx */
/* one block of mx threads sums mx values */
/* ((nx-1)/mx+1) independent blocks */
/* local data */
int nbx, ns;
float t;
nbx = (nx - 1)/mx + 1;
dim3 dimBlock(mx);
dim3 dimGrid(nbx);
t = 0.0f;
gpu_fcopyin(&t,sa,1);
/* calculate size of shared memory */
ns = mx*sizeof(float);
crc = cudaGetLastError();
gsum1<<<dimGrid,dimBlock,ns>>>(a,sa,nx);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gsum1 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_sum2(float *a, float *d, int mx, int nx) {
/* segmented 1d parallel sum reductions, each of length mx */
/* one block of mx threads sums mx values */
/* ((nx-1)/mx+1) independent blocks */
/* local data */
int nbx, ns;
nbx = (nx - 1)/mx + 1;
dim3 dimBlock(mx);
dim3 dimGrid(nbx);
/* calculate size of shared memory */
ns = mx*sizeof(float);
crc = cudaGetLastError();
gsum2<<<dimGrid,dimBlock,ns>>>(a,d,nx);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gsum2 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_sum3(float *a, float *d, float *sa, int mx,
int nx) {
/* segmented 1d parallel sum reductions */
/* one block of mx threads sums mx values */
/* ((nx-1)/mx+1) independent blocks */
/* local data */
int nxs, nbx, n, ns;
nxs = nx;
nbx = (nxs - 1)/mx + 1;
dim3 dimBlock(mx);
dim3 dimGrid(nbx);
/* calculate size of shared memory */
ns = mx*sizeof(float);
crc = cudaGetLastError();
gsum2<<<dimGrid,dimBlock,ns>>>(a,d,nxs);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gsum2:0 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
/* write out result */
if (nbx==1) {
dimGrid.x = 1;
crc = cudaGetLastError();
gcopy1<<<dimGrid,dimBlock>>>(sa,d,1);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gcopy1:0 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/* reduce further if necessary */
if (nbx > 1) {
nxs = nbx;
nbx = (nxs - 1)/mx + 1;
dimGrid.x = nbx;
crc = cudaGetLastError();
gsum2<<<dimGrid,dimBlock,ns>>>(d,sa,nxs);
if (nbx==1)
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gsum2:1 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
n = 0;
}
if (nbx==1)
return;
/* iterate if necessary */
while (nbx > 1) {
n += nbx;
nxs = nbx;
nbx = (nxs - 1)/mx + 1;
dimGrid.x = nbx;
crc = cudaGetLastError();
gsum2<<<dimGrid,dimBlock,ns>>>(&sa[n-nxs],&sa[n],nxs);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gsum2:n error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
}
/* write out result */
dimGrid.x = 1;
crc = cudaGetLastError();
gcopy1<<<dimGrid,dimBlock>>>(sa,&sa[n],1);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gcopy1:n error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
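/*--------------------------------------------------------------------*/
/* Illustrative usage sketch (added; not part of the original library): */
/* sum nx floats on the GPU with gpu_sum1. The sizes, block size and    */
/* fill value below are made-up example parameters.                     */
extern "C" int example_gpu_sum(void) {
   int irc = 0, nx = 1024, mx = 64;
   static float h_a[1024];
   float h_sum = 0.0f;
   float *g_a = NULL, *g_sum = NULL;
   for (int j = 0; j < nx; j++) h_a[j] = 1.0f;
   init_cu(0,&irc);                /* select device 0, run empty kernel   */
   gpu_fallocate(&g_a,nx,&irc);    /* device buffer for the input data    */
   gpu_fallocate(&g_sum,1,&irc);   /* device buffer for the scalar result */
   if (irc != 0) return irc;
   gpu_fcopyin(h_a,g_a,nx);        /* host -> device                      */
   gpu_sum1(g_a,g_sum,mx,nx);      /* block-wise sums + atomicAdd         */
   gpu_fcopyout(&h_sum,g_sum,1);   /* device -> host                      */
   printf("example_gpu_sum: sum = %f\n",h_sum); /* expect 1024.0 here     */
   gpu_deallocate((void *)g_a,&irc);
   gpu_deallocate((void *)g_sum,&irc);
   end_cu();
   return irc;
}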
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void setgbsize_(int *nblock) {
setgbsize(*nblock);
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc_() {
/* get major and minor computer capability */
return getmmcc();
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float memory on GPU, return pointer to Fortran */
float *fptr;
gpu_fallocate(&fptr,*nsize,irc);
*gp_f = (long )fptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize,
int *irc) {
/* allocate global integer memory on GPU, return pointer to Fortran */
int *iptr;
gpu_iallocate(&iptr,*nsize,irc);
*gp_i = (long )iptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) {
/* deallocate global memory on GPU, return pointer to Fortran */
void *d;
d = (void *)*gp_d;
gpu_deallocate(d,irc);
*gp_d = 0;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from main memory to global GPU memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from global GPU memory to main memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel_() {
emptykernel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu_(int *dev, int *irc) {
init_cu(*dev,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu_() {
end_cu();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy1_(unsigned long *gp_a, unsigned long *gp_b,
int *mx, int *nx) {
float *a, *b;
a = (float *)*gp_a;
b = (float *)*gp_b;
gpu_copy1(a,b,*mx,*nx);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy2a_(unsigned long *gp_a, unsigned long *gp_b,
int *mx, int *nx, int *ny) {
float *a, *b;
a = (float *)*gp_a;
b = (float *)*gp_b;
gpu_copy2a(a,b,*mx,*nx,*ny);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy2b_(unsigned long *gp_a, unsigned long *gp_b,
int *mx, int *nx, int *ny) {
float *a, *b;
a = (float *)*gp_a;
b = (float *)*gp_b;
gpu_copy2b(a,b,*mx,*nx,*ny);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_saxpy2_(unsigned long *gp_a, unsigned long *gp_b,
float *s, int *mx, int *nx, int *ny) {
float *a, *b;
a = (float *)*gp_a;
b = (float *)*gp_b;
gpu_saxpy2(a,b,*s,*mx,*nx,*ny);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_copy3_(unsigned long *gp_a, unsigned long *gp_b,
int *mx, int *my, int *nx, int *ny) {
float *a, *b;
a = (float *)*gp_a;
b = (float *)*gp_b;
gpu_copy3(a,b,*mx,*my,*nx,*ny);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_transpose2_(unsigned long *gp_a,
unsigned long *gp_b, int *mx, int *nx,
int *ny) {
float *a, *b;
a = (float *)*gp_a;
b = (float *)*gp_b;
gpu_transpose2(a,b,*mx,*nx,*ny);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_sum1_(unsigned long *gp_a, unsigned long *gp_sa,
int *mx, int *nx) {
float *a, *sa;
a = (float *)*gp_a;
sa = (float *)*gp_sa;
gpu_sum1(a,sa,*mx,*nx);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_sum2_(unsigned long *gp_a, unsigned long *gp_d,
int *mx, int *nx) {
float *a, *d;
a = (float *)*gp_a;
d = (float *)*gp_d;
gpu_sum2(a,d,*mx,*nx);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_sum3_(unsigned long *gp_a, unsigned long *gp_d,
unsigned long *gp_sa,int *mx, int *nx) {
float *a, *d, *sa;
a = (float *)*gp_a;
d = (float *)*gp_d;
sa = (float *)*gp_sa;
gpu_sum3(a,d,sa,*mx,*nx);
return;
}
|
the_stack
|
#pragma once
#include <gunrock/app/problem_base.cuh>
namespace gunrock {
namespace app {
namespace rw {
/**
 * @brief Specifying parameters for RW Problem
 * @param parameters The util::Parameter<...> structure holding all parameter info
 * \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
return retval;
}
/**
* @brief Template Problem structure.
* @tparam _GraphT Type of the graph
* @tparam _FLAG Problem flags
*/
template <typename _GraphT, ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
typedef typename GraphT::GpT GpT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// ----------------------------------------------------------------
// Dataslice structure
/**
 * @brief Data structure containing problem specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice {
// problem specific storage arrays:
util::Array1D<SizeT, VertexT> walks;
util::Array1D<SizeT, float> rand;
util::Array1D<SizeT, uint64_t> neighbors_seen;
util::Array1D<SizeT, uint64_t> steps_taken;
int walk_length;
int walks_per_node;
int walk_mode;
bool store_walks;
curandGenerator_t gen;
/*
* @brief Default constructor
*/
DataSlice() : BaseDataSlice() {
walks.SetName("walks");
rand.SetName("rand");
neighbors_seen.SetName("neighbors_seen");
steps_taken.SetName("steps_taken");
}
/*
* @brief Default destructor
*/
virtual ~DataSlice() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(walks.Release(target));
GUARD_CU(rand.Release(target));
GUARD_CU(neighbors_seen.Release(target));
GUARD_CU(steps_taken.Release(target));
GUARD_CU(BaseDataSlice ::Release(target));
return retval;
}
/**
 * @brief initializing rw-specific data on each gpu
* @param sub_graph Sub graph on the GPU.
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
 * @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &sub_graph, int num_gpus, int gpu_idx,
util::Location target, ProblemFlag flag, int walk_length_,
int walks_per_node_, int walk_mode_, bool store_walks_,
int seed) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
walk_length = walk_length_;
walks_per_node = walks_per_node_;
walk_mode = walk_mode_;
store_walks = store_walks_;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, seed);
if (store_walks_) {
GUARD_CU(walks.Allocate(sub_graph.nodes * walk_length * walks_per_node,
target));
} else {
GUARD_CU(walks.Allocate(1, target)); // Dummy allocation
}
GUARD_CU(rand.Allocate(sub_graph.nodes * walks_per_node, target));
GUARD_CU(
neighbors_seen.Allocate(sub_graph.nodes * walks_per_node, target));
GUARD_CU(steps_taken.Allocate(sub_graph.nodes * walks_per_node, target));
if (target & util::DEVICE) {
GUARD_CU(sub_graph.CsrT::Move(util::HOST, target, this->stream));
}
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT nodes = this->sub_graph->nodes;
int walks_per_node = this->walks_per_node;
int walk_length = this->walk_length;
// Ensure data are allocated
if (this->store_walks) {
GUARD_CU(
walks.EnsureSize_(nodes * walk_length * walks_per_node, target));
} else {
GUARD_CU(walks.EnsureSize_(1, target));
}
GUARD_CU(rand.EnsureSize_(nodes * walks_per_node, target));
GUARD_CU(neighbors_seen.EnsureSize_(nodes * walks_per_node, target));
GUARD_CU(steps_taken.EnsureSize_(nodes * walks_per_node, target));
// Reset data
if (this->store_walks) {
GUARD_CU(walks.ForEach(
[] __host__ __device__(VertexT & x) {
x = util::PreDefinedValues<VertexT>::InvalidValue;
},
nodes * walk_length * walks_per_node, target, this->stream));
} else {
GUARD_CU(walks.ForEach(
[] __host__ __device__(VertexT & x) {
x = util::PreDefinedValues<VertexT>::InvalidValue;
},
1, target, this->stream));
}
GUARD_CU(
rand.ForEach([] __host__ __device__(float &x) { x = (float)0.0; },
nodes * walks_per_node, target, this->stream));
GUARD_CU(neighbors_seen.ForEach(
[] __host__ __device__(uint64_t & x) { x = (uint64_t)0; },
nodes * walks_per_node, target, this->stream));
GUARD_CU(steps_taken.ForEach(
[] __host__ __device__(uint64_t & x) { x = (uint64_t)0; },
nodes * walks_per_node, target, this->stream));
return retval;
}
}; // DataSlice
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
int walk_length;
int walks_per_node;
int walk_mode;
bool store_walks;
int seed;
// ----------------------------------------------------------------
// Problem Methods
/**
* @brief RW default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
: BaseProblem(_parameters, _flag), data_slices(NULL) {
walk_length = _parameters.Get<int>("walk-length");
walks_per_node = _parameters.Get<int>("walks-per-node");
walk_mode = _parameters.Get<int>("walk-mode");
store_walks = _parameters.Get<bool>("store-walks");
seed = _parameters.Get<int>("seed");
}
/**
* @brief RW default destructor
*/
virtual ~Problem() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL) {
delete[] data_slices;
data_slices = NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
 * @brief Copy result walks and per-walk statistics computed on GPUs back to host-side arrays.
...
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(VertexT *h_walks, uint64_t *h_neighbors_seen,
uint64_t *h_steps_taken,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT nodes = this->org_graph->nodes;
if (this->num_gpus == 1) {
auto &data_slice = data_slices[0][0];
int walk_length = this->walk_length;
int walks_per_node = this->walks_per_node;
// Set device
if (target == util::DEVICE) {
GUARD_CU(util::SetDevice(this->gpu_idx[0]));
if (this->store_walks) {
GUARD_CU(data_slice.walks.SetPointer(
h_walks, nodes * walk_length * walks_per_node, util::HOST));
GUARD_CU(data_slice.walks.Move(util::DEVICE, util::HOST));
}
GUARD_CU(data_slice.neighbors_seen.SetPointer(
h_neighbors_seen, nodes * walks_per_node, util::HOST));
GUARD_CU(data_slice.neighbors_seen.Move(util::DEVICE, util::HOST));
GUARD_CU(data_slice.steps_taken.SetPointer(
h_steps_taken, nodes * walks_per_node, util::HOST));
GUARD_CU(data_slice.steps_taken.Move(util::DEVICE, util::HOST));
} else if (target == util::HOST) {
if (this->store_walks) {
GUARD_CU(data_slice.walks.ForEach(
h_walks,
[] __host__ __device__(const VertexT &device_val,
VertexT &host_val) {
host_val = device_val;
},
nodes * walk_length * walks_per_node, util::HOST));
}
GUARD_CU(data_slice.neighbors_seen.ForEach(
h_neighbors_seen,
[] __host__ __device__(const uint64_t &device_val,
uint64_t &host_val) {
host_val = device_val;
},
nodes * walks_per_node, util::HOST));
GUARD_CU(data_slice.steps_taken.ForEach(
h_steps_taken,
[] __host__ __device__(const uint64_t &device_val,
uint64_t &host_val) {
host_val = device_val;
},
nodes * walks_per_node, util::HOST));
}
} else { // num_gpus != 1
// ============ INCOMPLETE TEMPLATE - MULTIGPU ============
// // TODO: extract the results from multiple GPUs, e.g.:
// // util::Array1D<SizeT, ValueT *> th_distances;
// // th_distances.SetName("bfs::Problem::Extract::th_distances");
// // GUARD_CU(th_distances.Allocate(this->num_gpus, util::HOST));
// for (int gpu = 0; gpu < this->num_gpus; gpu++)
// {
// auto &data_slice = data_slices[gpu][0];
// if (target == util::DEVICE)
// {
// GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
// // GUARD_CU(data_slice.distances.Move(util::DEVICE,
// util::HOST));
// }
// // th_distances[gpu] = data_slice.distances.GetPointer(util::HOST);
// } //end for(gpu)
// for (VertexT v = 0; v < nodes; v++)
// {
// int gpu = this -> org_graph -> GpT::partition_table[v];
// VertexT v_ = v;
// if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) != 0)
// v_ = this -> org_graph -> GpT::convertion_table[v];
// // h_distances[v] = th_distances[gpu][v_];
// }
// // GUARD_CU(th_distances.Release());
}
return retval;
}
/**
   * @brief Initialization function.
   * @param[in] graph The graph that RW processes on
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus,
this->gpu_idx[gpu], target, this->flag,
this->walk_length, this->walks_per_node,
this->walk_mode, this->store_walks, this->seed));
}
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
// Reset data slices
for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu]->Reset(target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
return retval;
}
};
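// Illustrative usage sketch (not part of the original file). The driver-side flow
// for this Problem is: construct it from Parameters, Init() it with the graph,
// Reset() before each run, run the RW enactor, then Extract() the results into
// host buffers. Names such as `parameters`, `graph` and the host arrays below are
// hypothetical placeholders.
#if 0
  gunrock::app::rw::Problem<GraphT> problem(parameters);
  GUARD_CU(problem.Init(graph, util::DEVICE));
  GUARD_CU(problem.Reset(util::DEVICE));
  // ... run the random-walk enactor here ...
  GUARD_CU(problem.Extract(h_walks, h_neighbors_seen, h_steps_taken, util::DEVICE));
#endif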
} // namespace rw
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#include <iostream>
#include <sstream>
#include <map>
#include <vector>
#include <stdio.h>
#include <cublas_v2.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include "somoclu.h"
#ifdef _WIN32
#define popen _popen
#define pclose _pclose
#endif
// Error handling macro
#define CUDA_CHECK(call) \
if((call) != cudaSuccess) { \
cudaError_t err = cudaGetLastError(); \
stringstream sstm; \
sstm << "CUDA error calling \""#call"\", code is " << err; \
cuda_abort(sstm.str()); }
//Globals
cublasHandle_t handle;
thrust::device_vector<float> deviceData;
thrust::device_vector<float> deviceDataNorms;
thrust::device_vector<float> deviceCodebook;
thrust::device_vector<float> deviceCodebookNorms;
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T, T> {
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i) {
return i / C;
}
};
// note: functor inherits from unary_function
template <typename T>
struct square : public thrust::unary_function<T, T> {
__host__ __device__
T operator()(T x) const {
return x * x;
}
};
typedef thrust::tuple<int, float> argMinType;
struct argMin : public thrust::binary_function<argMinType, argMinType, argMinType> {
__host__ __device__
argMinType operator()(const argMinType& a, const argMinType& b) const {
if (thrust::get<1>(a) < thrust::get<1>(b)) {
return a;
}
else {
return b;
}
}
};
template <typename T>
thrust::device_vector<T> normsOfRowSpace(thrust::device_vector<T> A, int nRows, int nColumns) {
// allocate storage for row sums and indices
thrust::device_vector<T> row_sums(nRows);
thrust::device_vector<int> row_indices(nRows);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)) + (nRows * nColumns),
thrust::make_transform_iterator(A.begin(), square<T>()),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<T>());
return row_sums;
}
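// Worked example (illustrative): for a row-major 2x3 matrix A, the transform
// iterator maps linear indices 0..5 to the row keys 0,0,0,1,1,1, so reduce_by_key
// yields row_sums[0] = A[0]^2 + A[1]^2 + A[2]^2 and row_sums[1] = A[3]^2 + A[4]^2 + A[5]^2,
// i.e. the squared L2 norm of each row.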
thrust::device_vector<argMinType> minsOfRowSpace(thrust::device_vector<float> A, int nRows, int nColumns) {
// allocate storage for row sums and indices
thrust::device_vector<argMinType> row_sums(nRows);
thrust::device_vector<int> row_indices(nRows);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)) + (nRows * nColumns),
thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(0), A.begin())),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
argMin());
return row_sums;
}
template <int BLOCK_DIM>
__global__ void euclidean(float *anorm2, float *bnorm2, float *M, int height, int width) {
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yStartIndex = blockIdx.y * BLOCK_DIM;
if (xIndex < width) {
float bNormForX = bnorm2[xIndex];
unsigned int yEndIndex = (yStartIndex + BLOCK_DIM < height ? yStartIndex + BLOCK_DIM : height);
for (unsigned int yIndex = yStartIndex; yIndex < yEndIndex; yIndex++) {
unsigned int index = yIndex * width + xIndex;
M[index] = anorm2[yIndex] - 2 * M[index] + bNormForX;
}
}
}
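// The kernel above completes the vectorized squared Euclidean distance
//   ||a_y - b_x||^2 = ||a_y||^2 - 2 * <a_y, b_x> + ||b_x||^2,
// where M holds the inner products <a_y, b_x> (the Gram matrix produced by
// cublasSgemm in getBmusOnGpu), anorm2 the data-vector norms and bnorm2 the
// codebook-vector norms.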
template <typename T>
void printMatrix(thrust::device_vector<T> A, int nRows, int nColumns) {
for (size_t i = 0; i < nRows; i++) {
for (size_t j = 0; j < nColumns; j++) {
std::cout << A[i * nColumns + j] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
}
/** Clear the device memory and shut down CUBLAS
*
*/
void freeGpu() {
deviceData.clear();
deviceDataNorms.clear();
deviceCodebook.clear();
deviceCodebookNorms.clear();
thrust::device_vector<float>().swap(deviceData);
thrust::device_vector<float>().swap(deviceDataNorms);
thrust::device_vector<float>().swap(deviceCodebook);
thrust::device_vector<float>().swap(deviceCodebookNorms);
cublasStatus_t status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
cuda_abort("CuBLAS shutdown error");
}
}
/** Find the best matching units -- called from the map function
* @param bmus - array of best matching units
 * @param map.codebook - the codebook of the self-organizing map
* @param map.nSomX - dimensions of SOM map in the x direction
* @param map.nSomY - dimensions of SOM map in the y direction
* @param map.nDimensions - dimensions of a data instance
* @param nVectorsPerRank - the number of data points assigned to this GPU
*/
void getBmusOnGpu(int *bmus, som map, int nVectorsPerRank) {
deviceCodebook = thrust::device_vector<float>(map.codebook, map.codebook + map.nSomX * map.nSomY * map.nDimensions);
deviceCodebookNorms = normsOfRowSpace<float>(deviceCodebook, map.nSomX * map.nSomY, map.nDimensions);
thrust::device_vector<float> deviceGramMatrix(map.nSomX * map.nSomY * nVectorsPerRank, 0);
//Calculate the inner products of the data vectors and the weight vectors
float alpha = 1.0f;
float beta = 0.0f;
cublasStatus_t status = cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
map.nSomX * map.nSomY, nVectorsPerRank, map.nDimensions,
&alpha, thrust::raw_pointer_cast(&deviceCodebook[0]), map.nDimensions,
thrust::raw_pointer_cast(&deviceData[0]), map.nDimensions,
&beta, thrust::raw_pointer_cast(&deviceGramMatrix[0]), map.nSomX * map.nSomY);
if (status != CUBLAS_STATUS_SUCCESS) {
cuda_abort("Kernel execution error.");
}
//All components of the vectorized Euclidean distance are available
    // 32 is a magic number: this block size works best on the Tesla C2050
int BLOCK_DIM = 32;
dim3 grid((map.nSomX * map.nSomY + BLOCK_DIM - 1) / BLOCK_DIM, (nVectorsPerRank + BLOCK_DIM - 1) / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, 1, 1);
if (BLOCK_DIM == 32) {
euclidean<32> <<< grid, threads>>>(thrust::raw_pointer_cast(&deviceDataNorms[0]),
thrust::raw_pointer_cast(&deviceCodebookNorms[0]),
thrust::raw_pointer_cast(&deviceGramMatrix[0]),
nVectorsPerRank, map.nSomX * map.nSomY);
}
//Finding minimums
thrust::host_vector<argMinType> minsOfA = minsOfRowSpace(deviceGramMatrix, nVectorsPerRank, map.nSomX * map.nSomY);
CUDA_CHECK(cudaDeviceSynchronize());
//Getting back SOM coordinates from minimums
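    // The argmin index is a linear index into the row-major
    // (nVectorsPerRank x nSomX*nSomY) distance matrix, so `% (nSomX*nSomY)`
    // recovers the column, i.e. the flattened SOM unit. Worked example: with
    // nSomX = 10, nSomY = 10 and flat index 237, somCoordinate = 237 % 100 = 37,
    // giving bmus[2*i] = 37 % 10 = 7 and bmus[2*i+1] = 37 / 10 = 3.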
for (int i = 0; i < nVectorsPerRank; i++) {
argMinType tmp = minsOfA[i];
int somCoordinate = thrust::get<0>(tmp) % (map.nSomX * map.nSomY);
bmus[i * 2] = somCoordinate % map.nSomX;
bmus[i * 2 + 1] = somCoordinate / map.nSomX;
}
}
/** Initialize CUBLAS and device data
* @param hostData - the data in the main memory
 * @param nVectorsPerRank - the number of data points assigned to this GPU
 * @param map - the SOM structure (provides the dimensionality of a data instance)
*/
void initializeGpu(float *hostData, int nVectorsPerRank, som map) {
/* Initialize CUBLAS */
cublasStatus_t status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
cuda_abort("CuBLAS initialization error");
}
deviceData = thrust::device_vector<float>(hostData, hostData + nVectorsPerRank * map.nDimensions);
deviceDataNorms = normsOfRowSpace<float>(deviceData, nVectorsPerRank, map.nDimensions);
deviceCodebook = thrust::device_vector<float>(map.nSomX * map.nSomY * map.nDimensions, 0);
deviceCodebookNorms = thrust::device_vector<float>(map.nSomX * map.nSomY, 0);
}
/** Check and initialize a device attached to a node
* @param commRank - the MPI rank of this process
* @param commSize - the size of MPI comm world
*/
/// Note that this function was lifted from http://code.google.com/p/gpmr/
void setDevice(int commRank, int commSize) {
int devCount;
int deviceNum = 0;
CUDA_CHECK(cudaGetDeviceCount(&devCount));
#ifdef HAVE_MPI
#ifdef _WIN32
FILE * fp = popen("hostname.exe", "r");
#else
FILE * fp = popen("/bin/hostname", "r");
#endif
char buf[1024];
if (fgets(buf, 1023, fp) == NULL) strcpy(buf, "localhost");
pclose(fp);
string host = buf;
host = host.substr(0, host.size() - 1);
strcpy(buf, host.c_str());
if (commRank == 0) {
map<string, vector<int> > hosts;
map<string, int> devCounts;
hosts[buf].push_back(0);
devCounts[buf] = devCount;
MPI_Status stat;
MPI_Request req;
for (int i = 1; i < commSize; ++i) {
MPI_Recv(buf, 1024, MPI_CHAR, i, 0, MPI_COMM_WORLD, &stat);
MPI_Recv(&devCount, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &stat);
// check to make sure each process on each node reports the same number of devices.
hosts[buf].push_back(i);
if (devCounts.find(buf) != devCounts.end()) {
if (devCounts[buf] != devCount) {
printf("Error, device count mismatch %d != %d on %s\n", devCounts[buf], devCount, buf);
fflush(stdout);
}
}
else devCounts[buf] = devCount;
}
// check to make sure that we don't have more jobs on a node than we have GPUs.
for (map<string, vector<int> >::iterator it = hosts.begin(); it != hosts.end(); ++it) {
if (it->second.size() > static_cast<unsigned int>(devCounts[it->first])) {
stringstream sstm;
sstm << "Error, more jobs running on " << it->first.c_str() << " than devices - " << static_cast<int>(it->second.size()) << " jobs > " << devCounts[it->first] << " devices.";
cuda_abort(sstm.str());
}
}
// send out the device number for each process to use.
MPI_Irecv(&deviceNum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
for (map<string, vector<int> >::iterator it = hosts.begin(); it != hosts.end(); ++it) {
for (unsigned int i = 0; i < it->second.size(); ++i) {
int devID = i;
MPI_Send(&devID, 1, MPI_INT, it->second[i], 0, MPI_COMM_WORLD);
}
}
MPI_Wait(&req, &stat);
}
else {
// send out the hostname and device count for your local node, then get back the device number you should use.
MPI_Status stat;
MPI_Send(buf, strlen(buf) + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
MPI_Send(&devCount, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Recv(&deviceNum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &stat);
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
CUDA_CHECK(cudaSetDevice(deviceNum));
}
/** One epoch on the GPU, dense variant
*/
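// Batch SOM update computed below: for every codebook unit k and dimension d,
//   numerator[k][d] = sum_n h(dist(k, bmu(n))) * x_n[d]
//   denominator[k]  = sum_n h(dist(k, bmu(n)))
// and the new codebook weight is numerator / denominator, where h() is the
// neighborhood weight returned by getWeight(). With MPI the per-rank partial sums
// are reduced at the end of this function; without MPI the codebook is updated in place.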
void trainOneEpochDenseGPU(int itask, float *data, float *numerator,
float *denominator, som map,
unsigned int nVectorsPerRank, float radius,
float scale, bool compact_support, bool gaussian,
bool only_bmus, float std_coeff) {
int *bmus;
#ifdef HAVE_MPI
bmus = new int[nVectorsPerRank * 2];
#else
bmus = map.bmus;
#endif
getBmusOnGpu(bmus, map, nVectorsPerRank);
if (only_bmus) {
#ifdef HAVE_MPI
MPI_Gather(bmus, nVectorsPerRank * 2, MPI_INT, map.bmus, nVectorsPerRank * 2, MPI_INT, 0, MPI_COMM_WORLD);
delete [] bmus;
#endif
return;
}
#ifdef HAVE_MPI
float *localNumerator = new float[map.nSomY * map.nSomX * map.nDimensions];
float *localDenominator = new float[map.nSomY * map.nSomX];
#pragma omp for
for (omp_iter_t som_y = 0; som_y < map.nSomY; som_y++) {
for (unsigned int som_x = 0; som_x < map.nSomX; som_x++) {
localDenominator[som_y * map.nSomX + som_x] = 0.0;
for (unsigned int d = 0; d < map.nDimensions; d++)
localNumerator[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] = 0.0;
}
}
#pragma omp parallel default(shared)
#else // not HAVE_MPI
float *localNumerator;
float localDenominator = 0;
#pragma omp parallel default(shared) private(localDenominator) private(localNumerator)
#endif
{
#ifndef HAVE_MPI
localNumerator = new float[map.nDimensions];
#endif // HAVE_MPI
#pragma omp for
for (omp_iter_t som_y = 0; som_y < map.nSomY; som_y++) {
for (unsigned int som_x = 0; som_x < map.nSomX; som_x++) {
for (unsigned int n = 0; n < nVectorsPerRank; n++) {
if (itask * nVectorsPerRank + n < map.nVectors) {
float dist = 0.0f;
if (map.gridType == "rectangular") {
if (map.mapType == "planar") {
dist = euclideanDistanceOnPlanarMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1]);
}
else if (map.mapType == "toroid") {
dist = euclideanDistanceOnToroidMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1], map.nSomX, map.nSomY);
}
}
else {
if (map.mapType == "planar") {
dist = euclideanDistanceOnHexagonalPlanarMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1]);
}
else if (map.mapType == "toroid") {
dist = euclideanDistanceOnHexagonalToroidMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1], map.nSomX, map.nSomY);
}
}
float neighbor_fuct = getWeight(dist, radius, scale, compact_support, gaussian, std_coeff);
#ifdef HAVE_MPI
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] +=
1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
localDenominator[som_y * map.nSomX + som_x] += neighbor_fuct;
#else // In this case, we can update in place
if (n == 0) {
localDenominator = neighbor_fuct;
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[d] = 1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
} else {
localDenominator += neighbor_fuct;
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[d] += 1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
}
#endif // HAVE_MPI
}
} // Looping over data instances
#ifndef HAVE_MPI // We update in-place
for (unsigned int d = 0; d < map.nDimensions; d++) {
if (localDenominator != 0) {
float newWeight = localNumerator[d] / localDenominator;
map.codebook[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] = newWeight;
}
}
#endif
} // Looping over som_x
} // Looping over som_y
#ifndef HAVE_MPI
delete [] localNumerator;
#endif
} // OPENMP
#ifdef HAVE_MPI
MPI_Reduce(localNumerator, numerator,
map.nSomY * map.nSomX * map.nDimensions, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(localDenominator, denominator,
map.nSomY * map.nSomX, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Gather(bmus, nVectorsPerRank * 2, MPI_INT, map.bmus, nVectorsPerRank * 2, MPI_INT, 0, MPI_COMM_WORLD);
delete [] bmus;
delete [] localNumerator;
delete [] localDenominator;
#endif
}
/*****************
 * rtk #includes *
*****************/
#include "rtkCudaUtilities.hcu"
#include "rtkConfiguration.h"
#include "rtkCudaIntersectBox.hcu"
#include "rtkCudaWarpForwardProjectionImageFilter.hcu"
/*****************
* C #includes *
*****************/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
/*****************
* CUDA #includes *
*****************/
#include <cuda.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
// TEXTURES AND CONSTANTS //
texture<float, 3, cudaReadModeElementType> tex_xdvf;
texture<float, 3, cudaReadModeElementType> tex_ydvf;
texture<float, 3, cudaReadModeElementType> tex_zdvf;
texture<float, 3, cudaReadModeElementType> tex_vol;
__constant__ int3 c_projSize;
__constant__ float3 c_boxMin;
__constant__ float3 c_boxMax;
__constant__ float3 c_spacing;
__constant__ int3 c_volSize;
__constant__ float c_tStep;
__constant__ float c_matrices[SLAB_SIZE * 12]; // Can process stacks of at most SLAB_SIZE projections
__constant__ float c_sourcePos[SLAB_SIZE * 3]; // Can process stacks of at most SLAB_SIZE projections
__constant__ float c_IndexInputToPPInputMatrix[12];
__constant__ float c_IndexInputToIndexDVFMatrix[12];
__constant__ float c_PPInputToIndexInputMatrix[12];
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_( S T A R T )_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// KERNEL kernel_forwardProject
__global__ void
kernel_warped_forwardProject(float * dev_proj_in, float * dev_proj_out)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int numThread = j * c_projSize.x + i;
if (i >= c_projSize.x || j >= c_projSize.y)
return;
// Setting ray origin
Ray ray;
float3 pixelPos;
float tnear, tfar;
for (unsigned int proj = 0; proj < c_projSize.z; proj++)
{
// Setting ray origin
ray.o = make_float3(c_sourcePos[3 * proj], c_sourcePos[3 * proj + 1], c_sourcePos[3 * proj + 2]);
pixelPos = matrix_multiply(make_float3(i, j, 0), &(c_matrices[12 * proj]));
ray.d = pixelPos - ray.o;
ray.d = ray.d / sqrtf(dot(ray.d, ray.d));
// Detect intersection with box
if (!intersectBox(ray, &tnear, &tfar, c_boxMin, c_boxMax) || tfar < 0.f)
{
dev_proj_out[numThread + proj * c_projSize.x * c_projSize.y] =
dev_proj_in[numThread + proj * c_projSize.x * c_projSize.y];
}
else
{
if (tnear < 0.f)
tnear = 0.f; // clamp to near plane
// Step length in mm
float3 dirInMM = c_spacing * ray.d;
float vStep = c_tStep / sqrtf(dot(dirInMM, dirInMM));
float3 step = vStep * ray.d;
// First position in the box
float halfVStep = 0.5f * vStep;
tnear = tnear + halfVStep;
float3 pos = ray.o + tnear * ray.d;
float t;
float sample = 0.0f;
float sum = 0.0f;
float3 IndexInDVF, Displacement, PP, IndexInInput;
for (t = tnear; t <= tfar; t += vStep)
{
IndexInDVF = matrix_multiply(pos, c_IndexInputToIndexDVFMatrix);
// Get each component of the displacement vector by
// interpolation in the dvf
Displacement.x = tex3D(tex_xdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f);
Displacement.y = tex3D(tex_ydvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f);
Displacement.z = tex3D(tex_zdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f);
// Matrix multiply to get the physical coordinates of the current point in the output volume
// + the displacement
PP = matrix_multiply(pos, c_IndexInputToPPInputMatrix) + Displacement;
// Convert it to a continuous index
IndexInInput = matrix_multiply(PP, c_PPInputToIndexInputMatrix);
// Read from 3D texture from volume
sample = tex3D(tex_vol, IndexInInput.x, IndexInInput.y, IndexInInput.z);
// Accumulate, and move forward along the ray
sum += sample;
pos += step;
}
dev_proj_out[numThread + proj * c_projSize.x * c_projSize.y] =
dev_proj_in[numThread + proj * c_projSize.x * c_projSize.y] +
(sum + (tfar - t + halfVStep) / vStep * sample) * c_tStep;
}
}
}
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
// K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-( E N D )-_-_
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
///////////////////////////////////////////////////////////////////////////
// FUNCTION: CUDA_forward_project() //////////////////////////////////
void
CUDA_warp_forward_project(int projSize[3],
int volSize[3],
int dvfSize[3],
float * matrices,
float * dev_proj_in,
float * dev_proj_out,
float * dev_vol,
float t_step,
float * source_positions,
float box_min[3],
float box_max[3],
float spacing[3],
float * dev_input_dvf,
float IndexInputToIndexDVFMatrix[12],
float PPInputToIndexInputMatrix[12],
float IndexInputToPPInputMatrix[12])
{
// Create CUBLAS context
cublasHandle_t handle;
cublasCreate(&handle);
// constant memory
cudaMemcpyToSymbol(c_projSize, projSize, sizeof(int3));
cudaMemcpyToSymbol(c_boxMin, box_min, sizeof(float3));
cudaMemcpyToSymbol(c_boxMax, box_max, sizeof(float3));
cudaMemcpyToSymbol(c_spacing, spacing, sizeof(float3));
cudaMemcpyToSymbol(c_volSize, volSize, sizeof(int3));
cudaMemcpyToSymbol(c_tStep, &t_step, sizeof(float));
  // Copy the source positions (3 floats per projection) into constant memory
cudaMemcpyToSymbol(c_sourcePos, &(source_positions[0]), 3 * sizeof(float) * projSize[2]);
// Copy the projection matrices into constant memory
cudaMemcpyToSymbol(c_matrices, &(matrices[0]), 12 * sizeof(float) * projSize[2]);
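  // Note: c_sourcePos and c_matrices are sized for at most SLAB_SIZE projections,
  // so the two copies above assume projSize[2] <= SLAB_SIZE. A defensive check one
  // might add here (illustrative sketch, not in the original code):
  //   if (projSize[2] > SLAB_SIZE) { printf("Too many projections per slab\n"); exit(1); }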
// Prepare channel description for arrays
static cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
// Extent stuff, will be used for each component extraction
cudaExtent dvfExtent = make_cudaExtent(dvfSize[0], dvfSize[1], dvfSize[2]);
// Set texture parameters for the input volume
tex_vol.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex_vol.addressMode[1] = cudaAddressModeClamp;
tex_vol.addressMode[2] = cudaAddressModeClamp;
  tex_vol.normalized = false;                // do not access with normalized texture coordinates
tex_vol.filterMode = cudaFilterModeLinear; // linear interpolation
// Copy volume data to array, bind the array to the texture
cudaExtent volExtent = make_cudaExtent(volSize[0], volSize[1], volSize[2]);
cudaArray * array_vol;
cudaMalloc3DArray((cudaArray **)&array_vol, &channelDesc, volExtent);
CUDA_CHECK_ERROR;
// Copy data to 3D array
cudaMemcpy3DParms copyParams = cudaMemcpy3DParms();
copyParams.srcPtr = make_cudaPitchedPtr(dev_vol, volSize[0] * sizeof(float), volSize[0], volSize[1]);
copyParams.dstArray = (cudaArray *)array_vol;
copyParams.extent = volExtent;
copyParams.kind = cudaMemcpyDeviceToDevice;
  cudaMemcpy3D(&copyParams);
CUDA_CHECK_ERROR;
// Set texture parameters
tex_xdvf.addressMode[0] = cudaAddressModeBorder;
tex_xdvf.addressMode[1] = cudaAddressModeBorder;
tex_xdvf.addressMode[2] = cudaAddressModeBorder;
tex_xdvf.filterMode = cudaFilterModeLinear;
tex_xdvf.normalized = false; // don't access with normalized texture coords
tex_ydvf.addressMode[0] = cudaAddressModeBorder;
tex_ydvf.addressMode[1] = cudaAddressModeBorder;
tex_ydvf.addressMode[2] = cudaAddressModeBorder;
tex_ydvf.filterMode = cudaFilterModeLinear;
tex_ydvf.normalized = false;
tex_zdvf.addressMode[0] = cudaAddressModeBorder;
tex_zdvf.addressMode[1] = cudaAddressModeBorder;
tex_zdvf.addressMode[2] = cudaAddressModeBorder;
tex_zdvf.filterMode = cudaFilterModeLinear;
tex_zdvf.normalized = false;
// Allocate an intermediate memory space to extract x, y and z components of the DVF
float * DVFcomponent;
int numel = dvfSize[0] * dvfSize[1] * dvfSize[2];
cudaMalloc(&DVFcomponent, numel * sizeof(float));
float one = 1.0;
// Allocate the arrays used for textures
cudaArray ** DVFcomponentArrays = new cudaArray *[3];
CUDA_CHECK_ERROR;
// Copy image data to arrays. The tricky part is the make_cudaPitchedPtr.
// The best way to understand it is to read
// http://stackoverflow.com/questions/16119943/how-and-when-should-i-use-pitched-pointer-with-the-cuda-api
for (unsigned int component = 0; component < 3; component++)
{
// Reset the intermediate memory
cudaMemset((void *)DVFcomponent, 0, numel * sizeof(float));
// Fill it with the current component
float * pComponent = dev_input_dvf + component;
cublasSaxpy(handle, numel, &one, pComponent, 3, DVFcomponent, 1);
// Allocate the cudaArray and fill it with the current DVFcomponent
cudaMalloc3DArray((cudaArray **)&DVFcomponentArrays[component], &channelDesc, dvfExtent);
cudaMemcpy3DParms CopyParams = cudaMemcpy3DParms();
CopyParams.srcPtr = make_cudaPitchedPtr(DVFcomponent, dvfSize[0] * sizeof(float), dvfSize[0], dvfSize[1]);
CopyParams.dstArray = (cudaArray *)DVFcomponentArrays[component];
CopyParams.extent = dvfExtent;
CopyParams.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3D(&CopyParams);
CUDA_CHECK_ERROR;
}
// Intermediate memory is no longer needed
cudaFree(DVFcomponent);
// Bind 3D arrays to 3D textures
cudaBindTextureToArray(tex_xdvf, (cudaArray *)DVFcomponentArrays[0], channelDesc);
cudaBindTextureToArray(tex_ydvf, (cudaArray *)DVFcomponentArrays[1], channelDesc);
cudaBindTextureToArray(tex_zdvf, (cudaArray *)DVFcomponentArrays[2], channelDesc);
CUDA_CHECK_ERROR;
// Copy matrices into constant memory
cudaMemcpyToSymbol(
c_IndexInputToPPInputMatrix, IndexInputToPPInputMatrix, 12 * sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(
c_IndexInputToIndexDVFMatrix, IndexInputToIndexDVFMatrix, 12 * sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(
c_PPInputToIndexInputMatrix, PPInputToIndexInputMatrix, 12 * sizeof(float), 0, cudaMemcpyHostToDevice);
///////////////
// RUN
dim3 dimBlock = dim3(16, 16, 1);
dim3 dimGrid = dim3(iDivUp(projSize[0], dimBlock.x), iDivUp(projSize[1], dimBlock.y));
// Bind 3D array to 3D texture
cudaBindTextureToArray(tex_vol, (cudaArray *)array_vol, channelDesc);
CUDA_CHECK_ERROR;
kernel_warped_forwardProject<<<dimGrid, dimBlock>>>(dev_proj_in, dev_proj_out);
cudaUnbindTexture(tex_xdvf);
cudaUnbindTexture(tex_ydvf);
cudaUnbindTexture(tex_zdvf);
cudaUnbindTexture(tex_vol);
CUDA_CHECK_ERROR;
cudaFreeArray((cudaArray *)DVFcomponentArrays[0]);
cudaFreeArray((cudaArray *)DVFcomponentArrays[1]);
cudaFreeArray((cudaArray *)DVFcomponentArrays[2]);
delete[] DVFcomponentArrays;
cudaFreeArray((cudaArray *)array_vol);
CUDA_CHECK_ERROR;
// Destroy CUBLAS context
cublasDestroy(handle);
}
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/instance_normalization.hpp>
#include <nbla/variable.hpp>
// Kernels and ops
#include <nbla/cuda/function/kernel/instance_normalization.cuh>
#include <nbla/cuda/function/kernel/normalization.cuh>
#include <nbla/cuda/utils/reduce_ops/instance_normalization.cuh>
#include <nbla/cuda/utils/reduce_ops/welford.cuh>
namespace nbla {
template <typename T>
void InstanceNormalizationCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
InstanceNormalization<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
  // Broadcasting scale and bias is not supported in the CUDA backend.
if (this->need_beta_broadcast_ || this->need_gamma_broadcast_) {
this->fall_back_func_ = make_shared<InstanceNormalization<T>>(
this->ctx_, this->channel_axis_, this->batch_axis_, this->eps_,
this->no_scale_, this->no_bias_);
this->fall_back_func_->setup(inputs, outputs);
return;
}
// Setup input and output adaptor for channel-last memory format
need_adaptor_ = ChannelFirstAdaptor::need_adaptor(
inputs[0]->shape(), this->batch_axis_, this->channel_axis_);
if (need_adaptor_) {
adaptor_ = std::make_shared<ChannelFirstAdaptor>();
adaptor_->setup(inputs[0], &pre_adaptor_, &post_adaptor_, outputs[0],
inputs[0]->shape(), this->batch_axis_, this->channel_axis_,
this->ctx_);
reduce_size_ = pre_adaptor_.size(this->batch_axis_.size() + 1);
inv_reduce_size_ = 1.0f / reduce_size_;
outer_size_ = pre_adaptor_.size() / reduce_size_;
} else {
reduce_size_ = inputs[0]->size(this->channel_axis_ + 1);
inv_reduce_size_ = 1.0f / reduce_size_;
outer_size_ = inputs[0]->size() / reduce_size_;
}
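  // Shape example (illustrative): for a channel-first input of shape (B, C, H, W)
  // with channel_axis_ == 1, reduce_size_ = H * W (the spatial extent normalized
  // per instance) and outer_size_ = B * C (one mean/variance pair per
  // (batch, channel) slice).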
//----------------
// Reshape buffers
//----------------
// Batch stats
mean_.reshape({outer_size_}, true);
var_.reshape({outer_size_}, true);
// Internal buffers for backward calculation
sum_dy_.reshape({outer_size_}, true);
sum_dyx_.reshape({outer_size_}, true);
factor_a_.reshape({outer_size_}, true);
factor_b_.reshape({outer_size_}, true);
}
template <typename T>
void InstanceNormalizationCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
  // Currently, only channel-first kernels are provided. Channel-last execution
  // is performed by transforming the input and output memory format to
  // channel-first and using the channel-first implementation. The transformation
  // is performed by ChannelFirstAdaptor.
if (need_adaptor_) {
// Transpose input to [B, C, H, W] memory format.
adaptor_->convert_to_channel_first(inputs[0], &pre_adaptor_);
auto channel_first_inputs = inputs;
auto channel_first_outputs = outputs;
channel_first_inputs[0] = &pre_adaptor_;
channel_first_outputs[0] = &post_adaptor_;
// Instance normalization
forward_channel_first(channel_first_inputs, channel_first_outputs);
// Transpose output to original memory format.
adaptor_->convert_from_channel_first(&post_adaptor_, outputs[0]);
} else {
forward_channel_first(inputs, outputs);
}
}
template <typename T>
void InstanceNormalizationCuda<T>::forward_channel_first(
const Variables &inputs, const Variables &outputs) {
cuda_set_device(this->device_);
Variable *v_mean = &mean_;
Variable *v_var = &var_;
// Output mean and var when output_stats == true.
if (outputs.size() == 3) {
v_mean = outputs[1];
v_var = outputs[2];
}
// Calculate mean and variance
{
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *mean = v_mean->cast_data_and_get_pointer<Tc>(this->ctx_, true);
Tc *var = v_var->cast_data_and_get_pointer<Tc>(this->ctx_, true);
const int num_threads = reduce_size_ < NBLA_CUDA_IN_NUM_THREADS
? CUDA_WARP_SIZE
: NBLA_CUDA_IN_NUM_THREADS;
const auto grid =
std::min(outer_size_, static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS));
const auto block = num_threads;
WelfordOp<Tc, Size_t> op(x, mean, var, reduce_size_);
reduce_2d_x<<<grid, block>>>(op, outer_size_, reduce_size_);
NBLA_CUDA_KERNEL_CHECK();
}
// Instance normalization
{
const auto beta_idx = 1;
const auto gamma_idx = this->no_bias_ ? 1 : 2;
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
const Tc *mean = v_mean->get_data_pointer<Tc>(this->ctx_);
const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_);
const Tc *beta = this->no_bias_
? nullptr
: inputs[beta_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *gamma = this->no_scale_
? nullptr
: inputs[gamma_idx]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_);
const size_t elements_per_grid_y = NBLA_CUDA_IN_NUM_THREADS * 4;
dim3 grid;
grid.x =
std::min(outer_size_, static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS));
grid.y = std::min(NBLA_CEIL_SIZE_T_DIV(reduce_size_, elements_per_grid_y),
static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS));
grid.z = 1;
const auto block = NBLA_CUDA_IN_NUM_THREADS;
instance_norm_forward_normalization<<<grid, block>>>(
outer_size_, reduce_size_, x, mean, var, beta, gamma, y, this->eps_);
NBLA_CUDA_KERNEL_CHECK();
}
}
template <typename T>
void InstanceNormalizationCuda<T>::backward_impl(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
if (!(propagate_down[0] || (inputs.size() > 1 && propagate_down[1]) ||
(inputs.size() > 2 && propagate_down[2]))) {
return;
}
cuda_set_device(this->device_);
if (need_adaptor_) {
adaptor_->convert_from_channel_first_backward(&post_adaptor_, outputs[0],
true, false);
auto channel_first_inputs = inputs;
auto channel_first_outputs = outputs;
channel_first_inputs[0] = &pre_adaptor_;
channel_first_outputs[0] = &post_adaptor_;
auto channel_first_accum = accum;
channel_first_accum[0] = false;
backward_channel_first(channel_first_inputs, channel_first_outputs,
propagate_down, channel_first_accum);
post_adaptor_.data()->array()->clear();
post_adaptor_.grad()->array()->clear();
adaptor_->convert_to_channel_first_backward(inputs[0], &pre_adaptor_,
propagate_down[0], accum[0]);
pre_adaptor_.data()->array()->clear();
pre_adaptor_.grad()->array()->clear();
} else {
backward_channel_first(inputs, outputs, propagate_down, accum);
}
}
template <typename T>
void InstanceNormalizationCuda<T>::backward_channel_first(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
Variable *v_mean = &mean_;
Variable *v_var = &var_;
// Output mean and var when output_stats == true.
if (outputs.size() == 3) {
v_mean = outputs[1];
v_var = outputs[2];
}
// Calculate sum of dy and sum of dy * x.
{
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
Tc *sum_dy = sum_dy_.cast_data_and_get_pointer<Tc>(this->ctx_);
Tc *sum_dyx = sum_dyx_.cast_data_and_get_pointer<Tc>(this->ctx_);
const int num_threads = reduce_size_ < NBLA_CUDA_IN_NUM_THREADS
? CUDA_WARP_SIZE
: NBLA_CUDA_IN_NUM_THREADS;
const auto grid =
std::min(outer_size_, static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS));
const auto block = num_threads;
INGradOp<Tc, Size_t> op(x, dy, sum_dy, sum_dyx);
reduce_2d_x<<<grid, block>>>(op, outer_size_, reduce_size_);
NBLA_CUDA_KERNEL_CHECK();
}
  // Calculate a and b such that `dx = gamma / sqrt(var + eps) * dy + a * x + b`.
if (propagate_down[0]) {
const auto gamma_idx = this->no_bias_ ? 1 : 2;
const Tc *gamma = this->no_scale_
? nullptr
: inputs[gamma_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *mean = v_mean->get_data_pointer<Tc>(this->ctx_);
const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_);
const Tc *dmean = outputs.size() == 3
? v_mean->get_grad_pointer<Tc>(this->ctx_)
: nullptr;
const Tc *dvar =
outputs.size() == 3 ? v_var->get_grad_pointer<Tc>(this->ctx_) : nullptr;
const Tc *sum_dy = sum_dy_.get_data_pointer<Tc>(this->ctx_);
const Tc *sum_dyx = sum_dyx_.get_data_pointer<Tc>(this->ctx_);
Tc *factor_a = factor_a_.cast_data_and_get_pointer<Tc>(this->ctx_, true);
Tc *factor_b = factor_b_.cast_data_and_get_pointer<Tc>(this->ctx_, true);
const auto grid = std::min(static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS),
static_cast<Size_t>(NBLA_CEIL_SIZE_T_DIV(
outer_size_, NBLA_CUDA_IN_NUM_THREADS)));
const auto block = NBLA_CUDA_IN_NUM_THREADS;
instance_norm_backward_dx_factor<<<grid, block>>>(
outer_size_, inv_reduce_size_, gamma, mean, var, dmean, dvar, sum_dy,
sum_dyx, factor_a, factor_b, this->eps_);
NBLA_CUDA_KERNEL_CHECK();
}
// Calculate dx.
if (propagate_down[0]) {
const auto gamma_idx = this->no_bias_ ? 1 : 2;
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
const Tc *gamma = this->no_scale_
? nullptr
: inputs[gamma_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_);
const Tc *factor_a = factor_a_.get_data_pointer<Tc>(this->ctx_);
const Tc *factor_b = factor_b_.get_data_pointer<Tc>(this->ctx_);
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]);
const size_t elements_per_grid_y = NBLA_CUDA_IN_NUM_THREADS * 4;
dim3 grid;
grid.x =
std::min(outer_size_, static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS));
grid.y = std::min(NBLA_CEIL_SIZE_T_DIV(reduce_size_, elements_per_grid_y),
static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS));
grid.z = 1;
const auto block = NBLA_CUDA_IN_NUM_THREADS;
auto kernel = accum[0] ? instance_norm_backward_dx<true, Tc, Size_t>
: instance_norm_backward_dx<false, Tc, Size_t>;
kernel<<<grid, block>>>(outer_size_, reduce_size_, x, gamma, dy, var,
factor_a, factor_b, dx, this->eps_);
NBLA_CUDA_KERNEL_CHECK();
// Clear internal buffer
factor_a_.data()->array()->clear();
factor_b_.data()->array()->clear();
}
// Calculate dbeta and dgamma.
if ((inputs.size() > 1 && propagate_down[1]) ||
(inputs.size() > 2 && propagate_down[2])) {
const auto beta_idx = 1;
const auto gamma_idx = this->no_bias_ ? 1 : 2;
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
const Tc *gamma = this->no_scale_
? nullptr
: inputs[gamma_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
const Tc *sum_dy = sum_dy_.get_data_pointer<Tc>(this->ctx_);
const Tc *sum_dyx = sum_dyx_.get_data_pointer<Tc>(this->ctx_);
const Tc *mean = v_mean->get_data_pointer<Tc>(this->ctx_);
const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_);
Tc *dbeta = !this->no_bias_ && propagate_down[beta_idx]
? inputs[beta_idx]->cast_grad_and_get_pointer<Tc>(
this->ctx_, !accum[beta_idx])
: nullptr;
Tc *dgamma = !this->no_scale_ && propagate_down[gamma_idx]
? inputs[gamma_idx]->cast_grad_and_get_pointer<Tc>(
this->ctx_, !accum[gamma_idx])
: nullptr;
const auto grid = std::min(static_cast<Size_t>(NBLA_CUDA_IN_MAX_BLOCKS),
static_cast<Size_t>(NBLA_CEIL_SIZE_T_DIV(
outer_size_, NBLA_CUDA_IN_NUM_THREADS)));
const auto block = NBLA_CUDA_IN_NUM_THREADS;
// Select kernels by accum combination.
auto kernel = instance_norm_backward_dbeta_dgamma<true, true, Tc, Size_t>;
if (!this->no_bias_ && accum[beta_idx]) {
kernel =
!this->no_scale_ && accum[gamma_idx]
? instance_norm_backward_dbeta_dgamma<true, true, Tc, Size_t>
: instance_norm_backward_dbeta_dgamma<true, false, Tc, Size_t>;
} else {
kernel =
!this->no_scale_ && accum[gamma_idx]
? instance_norm_backward_dbeta_dgamma<false, true, Tc, Size_t>
: instance_norm_backward_dbeta_dgamma<false, false, Tc, Size_t>;
}
kernel<<<grid, block>>>(outer_size_, reduce_size_, x, gamma, dy, sum_dy,
sum_dyx, mean, var, dbeta, dgamma, this->eps_);
NBLA_CUDA_KERNEL_CHECK();
}
// Clear internal buffer
sum_dy_.data()->array()->clear();
sum_dyx_.data()->array()->clear();
}
}
#include "image/unpack.hpp"
#include "colorArrayDevice.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceBuffer2D.hpp"
#include "backend/cuda/surface.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "cuda/util.hpp"
#include "unpackKernel.cu"
#include <cuda_runtime.h>
#include <cassert>
const unsigned int CudaBlockSize = 16;
namespace VideoStitch {
namespace Image {
// ---------------- Convert RGBA -> other colorspace --------------------------
Status unpackRGB(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height,
GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
unpackKernelRGB<<<dimGrid, dimBlock, 0, s.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(), array.get(),
(unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackRGB(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
unpackSourceKernelRGB<<<dimGrid, dimBlock, 0, s.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(),
surf.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackRGBA(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t /*width*/,
std::size_t /*height*/, GPU::Stream s) {
return CUDA_ERROR(cudaMemcpy2DAsync(dst.get().raw(), (unsigned)dst.getPitch(), array.get(), dst.getWidth(),
dst.getWidth(), dst.getHeight(), cudaMemcpyDeviceToDevice, s.get()));
}
Status unpackRGBA(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
unpackSourceKernelRGBA<<<dimGrid, dimBlock, 0, s.get()>>>(
(uint32_t*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(uint32_t), // pitch is in bytes
surf.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackF32C1(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& array, std::size_t /*width*/,
std::size_t /*height*/, GPU::Stream s) {
return CUDA_ERROR(cudaMemcpy2DAsync(dst.get().raw(), (unsigned)dst.getPitch(), array.get(), dst.getWidth(),
dst.getWidth(), dst.getHeight(), cudaMemcpyDeviceToDevice, s.get()));
}
Status unpackF32C1(GPU::Buffer2D& dst, const GPU::Surface& surf, std::size_t width, std::size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
unpackSourceKernelF32C1<<<dimGrid, dimBlock, 0, s.get()>>>(
(float*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(float), // pitch is in bytes
surf.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackGrayscale16(GPU::Buffer2D& /* dst */, const GPU::Buffer<const uint32_t>& /* input */, size_t /* width*/,
size_t /* height */, GPU::Stream /* s */) {
// TODO
return {Origin::GPU, ErrType::UnsupportedAction,
"Color space conversion for Grayscale16 not implemented from buffer"};
}
Status unpackGrayscale16(GPU::Buffer2D& dst, const GPU::Surface& surf, size_t width, size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
unpackSourceKernelGrayscale16<<<dimGrid, dimBlock, 0, s.get()>>>(
(uint16_t*)dst.get().raw(), (unsigned)dst.getPitch() / sizeof(uint16_t), // pitch is in bytes
surf.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackDepth(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst,
const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) {
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x),
(unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1);
unpackKernelDepth<<<dimGrid, dimBlock, 0, s.get()>>>(
yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(),
(unsigned)vDst.getPitch(), (float*)array.get().raw(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackDepth(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Surface& surf,
std::size_t width, std::size_t height, GPU::Stream s) {
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x),
(unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1);
unpackSourceKernelDepth<<<dimGrid, dimBlock, 0, s.get()>>>(
yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(),
(unsigned)vDst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackYV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst,
const GPU::Buffer<const uint32_t>& array, std::size_t width, std::size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x),
(unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1);
unpackKernelYV12<<<dimGrid, dimBlock, 0, s.get()>>>(
yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(),
(unsigned)vDst.getPitch(), array.get(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackYV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst, const GPU::Surface& surf,
std::size_t width, std::size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x),
(unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1);
unpackSourceKernelYV12<<<dimGrid, dimBlock, 0, s.get()>>>(
yDst.get().raw(), (unsigned)yDst.getPitch(), uDst.get().raw(), (unsigned)uDst.getPitch(), vDst.get().raw(),
(unsigned)vDst.getPitch(), surf.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackNV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uvDst, const GPU::Surface& surf, std::size_t width,
std::size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x),
(unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1);
unpackSourceKernelNV12<<<dimGrid, dimBlock, 0, s.get()>>>(yDst.get().raw(), (unsigned)yDst.getPitch(),
uvDst.get().raw(), (unsigned)uvDst.getPitch(),
surf.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackNV12(GPU::Buffer2D& yDst, GPU::Buffer2D& uvDst, const GPU::Buffer<const uint32_t>& array,
std::size_t width, std::size_t height, GPU::Stream s) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x),
(unsigned)Cuda::ceilDiv((height + 1) / 2, dimBlock.y), 1);
unpackKernelNV12<<<dimGrid, dimBlock, 0, s.get()>>>(yDst.get().raw(), (unsigned)yDst.getPitch(), uvDst.get().raw(),
(unsigned)uvDst.getPitch(), array.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status unpackYUY2(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y),
1);
unpackYUY2Kernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(), src.get(),
(unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackYUY2(GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) {
return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"};
}
Status unpackUYVY(GPU::Buffer2D& dst, const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y),
1);
unpackUYVYKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().raw(), (unsigned)dst.getPitch(), src.get(),
(unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status unpackUYVY(GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t, GPU::Stream) {
return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"};
}
Status convertGrayscale(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
convertKernelGrayscale<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status unpackYUV422P10(GPU::Buffer2D& yDst, GPU::Buffer2D& uDst, GPU::Buffer2D& vDst,
const GPU::Buffer<const uint32_t>& src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv((width + 1) / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y),
1);
unpackYUV422P10Kernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
reinterpret_cast<uint16_t*>(yDst.get().raw()), (unsigned)yDst.getPitch() / 2,
reinterpret_cast<uint16_t*>(uDst.get().raw()), (unsigned)uDst.getPitch() / 2,
reinterpret_cast<uint16_t*>(vDst.get().raw()), (unsigned)vDst.getPitch() / 2, src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status unpackYUV422P10(GPU::Buffer2D&, GPU::Buffer2D&, GPU::Buffer2D&, const GPU::Surface&, std::size_t, std::size_t,
GPU::Stream) {
return Status{Origin::GPU, ErrType::ImplementationError, "Unpacking not implemented from Surface"};
}
Status unpackGrayscale(GPU::Buffer2D& dst, const GPU::Surface& src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
unpackKernelGrayscale<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(
dst.get().raw(), (unsigned)dst.getPitch(), src.get().surface(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
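// Illustrative usage sketch (not part of the original file; buffer/surface setup is
// hypothetical). A caller typically allocates the per-plane destinations and then
// dispatches the unpack matching the wanted pixel format, e.g. for YV12:
#if 0
  GPU::Buffer2D yDst, uDst, vDst;              // planar destinations (allocation omitted)
  GPU::Buffer<const uint32_t> rgbaFrame;       // packed RGBA source
  GPU::Stream stream;
  const Status st = unpackYV12(yDst, uDst, vDst, rgbaFrame, width, height, stream);
#endif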
// ---------------- Convert other colorspace -> RGBA --------------------------
Status convertRGBToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
convertRGBToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertRGB210ToRGBA(GPU::Surface& dst, GPU::Buffer<const uint32_t> src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
convertRGB210ToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertBGRToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
convertBGRToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertBGRUToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x),
(unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1);
convertBGRUToRGBAKernel<<<dimGrid2D, dimBlock2D, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertBayerRGGBToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x),
(unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1);
convertBayerRGGBToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1),
stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status convertBayerBGGRToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x),
(unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1);
convertBayerBGGRToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1),
stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status convertBayerGRBGToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x),
(unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1);
convertBayerGRBGToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1),
stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status convertBayerGBRGToRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimBlock2D(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width / 2, dimBlock2D.x),
(unsigned)Cuda::ceilDiv(height / 2, dimBlock2D.y), 1);
convertBayerGBRGToRGBAKernel<<<dimGrid2D, dimBlock2D, sizeof(uint32_t) * (dimBlock2D.x + 1) * (dimBlock2D.y + 1),
stream.get()>>>(dst.get(), src.get(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status convertUYVYToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock(16, 16, 1);
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
convertUYVYToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertYUV422P10ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
assert(!(width & 1));
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
convertYUV422P10ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
dst.get().surface(), src.as<const uint16_t>().get(), (unsigned)width, (unsigned)height);
return CUDA_STATUS;
}
Status convertYUY2ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock(16, 16, 1);
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
convertYUY2ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertYV12ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y),
1);
convertYV12ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertNV12ToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width, std::size_t height,
GPU::Stream stream) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y),
1);
convertNV12ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertYUV420ToMono(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
assert(!(width & 1));
assert(!(height & 1));
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width / 2, dimBlock.x), (unsigned)Cuda::ceilDiv(height / 2, dimBlock.y),
1);
unpackMonoKernelYUV420P<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
Status convertGrayscaleToRGBA(GPU::Surface& dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, GPU::Stream stream) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
convertGrayscaleKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), src.get(), (unsigned)width,
(unsigned)height);
return CUDA_STATUS;
}
} // namespace Image
} // namespace VideoStitch
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <algorithm>  // std::min used in GET_BLOCKS
#include "filter_sample_depthwise_cuda.h"
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N) {
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
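/* GET_BLOCKS caps the grid at kMaxGridNum blocks, and the grid-stride loop in
   CUDA_KERNEL_LOOP covers any elements beyond blockDim.x * gridDim.x. Minimal
   usage sketch (illustrative only; scale_kernel is not part of this file):

     __global__ void scale_kernel(int n, float* data) {
       CUDA_KERNEL_LOOP(i, n) { data[i] *= 2.f; }
     }
     // launch: scale_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(n, data);
*/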
#if !defined(_MSC_VER)
#define CUDA_UNROLL _Pragma("unroll")
#define CUDA_NOUNROLL _Pragma("nounroll")
#else
#define CUDA_UNROLL
#define CUDA_NOUNROLL
#endif
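// SampleDepthwiseArgs is defined in filter_sample_depthwise_cuda.h. A minimal
// sketch of the fields the kernels below read from it (names taken from the
// accesses; the actual definition may carry more):
//   struct SampleDepthwiseArgs {
//     int batch, channel;
//     int in_height, in_width, out_height, out_width;
//     int filter_height, filter_width, scope_height, scope_width;
//     int stride_height, stride_width, pad_height, pad_width;
//     int dilation_height, dilation_width;
//     int sampling_group;
//   };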
template <typename scalar_t>
__device__ inline scalar_t ldg(const scalar_t* address) {
#if __CUDA_ARCH__ >= 350
return __ldg(address);
#else
return *address;
#endif
}
template <typename scalar_t>
inline scalar_t __device__ CudaMax(scalar_t a, scalar_t b) {
return a > b ? a : b;
}
template <typename scalar_t>
inline scalar_t __device__ CudaMin(scalar_t a, scalar_t b) {
return a < b ? a : b;
}
// assuming h, w is remainder of division, thus h in [0, height), w in [0, width)
template <typename scalar_t>
__device__ scalar_t planar_bilinear(
const scalar_t *data,
const int height,
const int width,
const scalar_t h,
const scalar_t w) {
if (h > -1 && w > -1 && h < height && w < width) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
const scalar_t lh = h - h_low;
const scalar_t lw = w - w_low;
const scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t val = 0;
if (h_low >= 0 && w_low >= 0)
val += hh * hw * ldg(data + h_low * width + w_low);
if (h_low >=0 && w_high <= width - 1)
val += hh * lw * ldg(data + h_low * width + w_high);
if (h_high <= height - 1 && w_low >= 0)
val += lh * hw * ldg(data + h_high * width + w_low);
if (h_high <= height - 1 && w_high <= width - 1)
val += lh * lw * ldg(data + h_high * width + w_high);
return val;
} else {
return 0;
}
}
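// Worked example of the weighting above: for (h, w) = (1.25, 2.5) the four
// neighbours (1,2), (1,3), (2,2), (2,3) get weights hh*hw = 0.75*0.5 = 0.375,
// hh*lw = 0.375, lh*hw = 0.125 and lh*lw = 0.125, which sum to 1; taps outside
// [0, height) x [0, width) are skipped. planar_bilinear_backward_data below
// scatters partial_sum with these same four weights into filter_gradient via
// atomicAdd.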
template <typename scalar_t>
__device__ void planar_bilinear_backward_data(
const scalar_t partial_sum,
const int height,
const int width,
const scalar_t h,
const scalar_t w,
scalar_t* filter_gradient) {
if (h > -1 && w > -1 && h < height && w < width) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
const scalar_t lh = h - h_low;
const scalar_t lw = w - w_low;
const scalar_t hh = 1 - lh, hw = 1 - lw;
if (h_low >= 0 && w_low >= 0)
atomicAdd(filter_gradient + h_low * width + w_low, hh * hw * partial_sum);
if (h_low >=0 && w_high <= width - 1)
atomicAdd(filter_gradient + h_low * width + w_high, hh * lw * partial_sum);
if (h_high <= height - 1 && w_low >= 0)
atomicAdd(filter_gradient + h_high * width + w_low, lh * hw * partial_sum);
if (h_high <= height - 1 && w_high <= width - 1)
atomicAdd(filter_gradient + h_high * width + w_high, lh * lw * partial_sum);
}
}
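// Gradient of planar_bilinear w.r.t. the sampling coordinate: with
// val = hh*hw*f00 + hh*lw*f01 + lh*hw*f10 + lh*lw*f11 and lh = h - h_low,
// d val/dh = -hw*f00 - lw*f01 + hw*f10 + lw*f11 (symmetrically for w).
// planar_bilinear_backward_coord returns partial_sum * d val/dh for
// bp_dir == 0 and partial_sum * d val/dw for bp_dir == 1.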
template <typename scalar_t>
__device__ scalar_t planar_bilinear_backward_coord(
const scalar_t partial_sum,
const scalar_t* filter,
const int height,
const int width,
const scalar_t h,
const scalar_t w,
const int bp_dir) {
if (h > -1 && w > -1 && h < height && w < width) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
const scalar_t lh = h - h_low;
const scalar_t lw = w - w_low;
const scalar_t hh = 1 - lh, hw = 1 - lw;
if (bp_dir == 0) {
scalar_t gradient_h = 0;
if (h_low >= 0 && w_low >= 0)
gradient_h -= hw * partial_sum * ldg(filter + h_low * width + w_low);
if (h_low >=0 && w_high <= width - 1)
gradient_h -= lw * partial_sum * ldg(filter + h_low * width + w_high);
if (h_high <= height - 1 && w_low >= 0)
gradient_h += hw * partial_sum * ldg(filter + h_high * width + w_low);
if (h_high <= height - 1 && w_high <= width - 1)
gradient_h += lw * partial_sum * ldg(filter + h_high * width + w_high);
return gradient_h;
} else {
scalar_t gradient_w = 0;
if (h_low >= 0 && w_low >= 0)
gradient_w -= hh * partial_sum * ldg(filter + h_low * width + w_low);
if (h_low >=0 && w_high <= width - 1)
gradient_w += hh * partial_sum * ldg(filter + h_low * width + w_high);
if (h_high <= height - 1 && w_low >= 0)
gradient_w -= lh * partial_sum * ldg(filter + h_high * width + w_low);
if (h_high <= height - 1 && w_high <= width - 1)
gradient_w += lh * partial_sum * ldg(filter + h_high * width + w_high);
return gradient_w;
}
} else {
return 0;
}
}
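// deformable_im2col_bilinear applies the same four-tap bilinear weighting as
// planar_bilinear but has no outer range check; callers are expected to verify
// that (h, w) lies inside (-1, height) x (-1, width) before sampling.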
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(
const scalar_t *bottom_data,
const int height,
const int width,
scalar_t h,
scalar_t w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = ldg(bottom_data + h_low * width + w_low);
scalar_t v2 = 0;
if (h_low >=0 && w_high <= width - 1)
v2 = ldg(bottom_data + h_low * width + w_high);
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = ldg(bottom_data + h_high * width + w_low);
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = ldg(bottom_data + h_high * width + w_high);
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ void deformable_im2col_bilinear_backward(
const scalar_t partial_sum,
const scalar_t h,
const scalar_t w,
const int height,
const int width,
scalar_t* data_gradient) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
if (h_low >= 0 && w_low >= 0)
atomicAdd(data_gradient + h_low * width + w_low, hh * hw * partial_sum);
if (h_low >=0 && w_high <= width - 1)
atomicAdd(data_gradient + h_low * width + w_high, hh * lw * partial_sum);
if (h_high <= height - 1 && w_low >= 0)
atomicAdd(data_gradient + h_high * width + w_low, lh * hw * partial_sum);
if (h_high <= height - 1 && w_high <= width - 1)
atomicAdd(data_gradient + h_high * width + w_high, lh * lw * partial_sum);
return;
}
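// get_coordinate_weight computes the derivative of the bilinear sample at
// (argmax_h, argmax_w) with respect to argmax_h (bp_dir == 0) or argmax_w
// (bp_dir == 1); callers multiply the result by the incoming gradient to
// backpropagate into the sampling offsets.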
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int height,
const int width,
const scalar_t *im_data,
const int bp_dir) {
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * width + argmax_w_high];
}
return weight;
}
template<typename scalar_t, int kFilterHeight, int kFilterWidth>
__global__ __launch_bounds__(1024, 2) void SampleDepthwiseConv2dForwardKernel(
int n,
const scalar_t* input,
const scalar_t* rotation_ratio,
const scalar_t* filter,
const SampleDepthwiseArgs args,
scalar_t* output) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = kFilterHeight > 0 ? kFilterHeight : args.filter_height;
const int filter_width = kFilterWidth > 0 ? kFilterWidth : args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int out_c = (thread_id / out_width / out_height) % channel;
const int out_b = thread_id / out_width / out_height / channel;
const int in_c = out_c;
const int input_offset_temp =
(out_b * channel + in_c) * (in_height * in_width);
const int group_id = in_c % sampling_group;
const int rotation_offset_temp =
(out_b * sampling_group + group_id) * (filter_height * filter_width * 2) *
out_height * out_width + (out_h * out_width + out_w);
const int filter_offset_temp = in_c * scope_height * scope_width;
// Finally, we can iterate over the spatial dimensions and perform the
// convolution, writing into the output at the end.
const int input_h_start = out_h * stride_height - pad_height;
const int input_w_start = out_w * stride_width - pad_width;
const int input_h_end = input_h_start + (filter_height - 1) * dilation_height;
const int input_w_end = input_w_start + (filter_width - 1) * dilation_width;
scalar_t sum = 0;
if (input_h_start >= 0 && input_w_start >= 0 &&
input_h_end < in_height && input_w_end < in_width) {
// Loop that doesn't need to check for boundary conditions.
CUDA_UNROLL for (int f_h = 0; f_h < filter_height; ++f_h) {
const int in_h = input_h_start + f_h * dilation_height;
CUDA_UNROLL for (int f_w = 0; f_w < filter_width; ++f_w) {
const int in_w = input_w_start + f_w * dilation_width;
const int input_offset = (input_offset_temp) + (in_h * in_width) + in_w;
const int rotation_offset_fhw = rotation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width;
const scalar_t rotation_ratio_h =
ldg(rotation_ratio + rotation_offset_fhw);
const scalar_t rotation_ratio_w =
ldg(rotation_ratio + rotation_offset_fhw + out_height * out_width);
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
sum += ldg(input + input_offset) * planar_bilinear(
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w);
}
}
} else {
// Loop that needs to check for boundary conditions.
CUDA_UNROLL for (int f_h = 0; f_h < filter_height; ++f_h) {
const int in_h = input_h_start + f_h * dilation_height;
CUDA_UNROLL for (int f_w = 0; f_w < filter_width; ++f_w) {
const int in_w = input_w_start + f_w * dilation_width;
// NOTE(Hang Gao @ 07/25): how much runtime will it save?
if (in_h >= 0 && in_h < in_height && in_w >= 0 && in_w < in_width) {
const int input_offset = input_offset_temp + (in_h * in_width) + in_w;
const int rotation_offset_fhw = rotation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width;
const scalar_t rotation_ratio_h =
ldg(rotation_ratio + rotation_offset_fhw);
const scalar_t rotation_ratio_w =
ldg(rotation_ratio + rotation_offset_fhw + out_height * out_width);
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
sum += ldg(input + input_offset) * planar_bilinear(
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w);
}
}
}
}
output[thread_id] = sum;
}
}
void SampleDepthwiseConv2dForward(
const at::Tensor input,
const at::Tensor rotation_ratio,
const at::Tensor filter,
const SampleDepthwiseArgs args,
at::Tensor output) {
int num_kernels = args.batch * args.channel * args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.type(), "SampleDepthwiseConv2dForward_GPU", ([&] {
const scalar_t *input_ = input.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
const scalar_t *filter_ = filter.data<scalar_t>();
scalar_t *output_ = output.data<scalar_t>();
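// Specialize the common 3x3 case so kFilterHeight/kFilterWidth are compile-time
// constants and the CUDA_UNROLL loops fully unroll; otherwise fall back to the
// generic <-1, -1> instantiation that reads the sizes from args.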
if (args.filter_height == 3 && args.filter_width == 3) {
SampleDepthwiseConv2dForwardKernel<scalar_t, 3, 3>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
input_,
rotation_ratio_,
filter_,
args,
output_);
} else {
SampleDepthwiseConv2dForwardKernel<scalar_t, -1, -1>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
input_,
rotation_ratio_,
filter_,
args,
output_);
}
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SampleDepthwiseConv2dForwardKernel: %s\n", cudaGetErrorString(err));
}
}
template<typename scalar_t, int kFilterHeight, int kFilterWidth>
__global__ __launch_bounds__(1024, 2) void DeformableSampleDepthwiseConv2dForwardKernel(
int n,
const scalar_t* input,
const scalar_t* offset,
const scalar_t* rotation_ratio,
const scalar_t* filter,
const SampleDepthwiseArgs args,
scalar_t* output) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = kFilterHeight > 0 ? kFilterHeight : args.filter_height;
const int filter_width = kFilterWidth > 0 ? kFilterWidth : args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int out_c = (thread_id / out_width / out_height) % channel;
const int out_b = thread_id / out_width / out_height / channel;
const int in_c = out_c;
const int input_offset_temp =
(out_b * channel + in_c) * (in_height * in_width);
const int deformation_offset_temp =
out_b * (filter_height * filter_width * 2) * out_height * out_width +
(out_h * out_width + out_w);
const int group_id = in_c % sampling_group;
const int rotation_offset_temp = (out_b * sampling_group + group_id) *
(filter_height * filter_width * 2) * out_height * out_width +
(out_h * out_width + out_w);
const int filter_offset_temp = in_c * scope_height * scope_width;
// Finally, we can iterate over the spatial dimensions and perform the
// convolution, writing into the output at the end.
const int input_h_start = out_h * stride_height - pad_height;
const int input_w_start = out_w * stride_width - pad_width;
scalar_t sum = 0;
CUDA_UNROLL for (int f_h = 0; f_h < filter_height; ++f_h) {
const int in_h = input_h_start + f_h * dilation_height;
CUDA_UNROLL for (int f_w = 0; f_w < filter_width; ++f_w) {
const int in_w = input_w_start + f_w * dilation_width;
const int deformation_offset_fhw = deformation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width;
const int rotation_offset_fhw = rotation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width;
const scalar_t input_h = in_h +
ldg(offset + deformation_offset_fhw);
const scalar_t input_w = in_w +
ldg(offset + deformation_offset_fhw + out_height * out_width);
if (input_h > -1 && input_w > -1 && input_h < in_height && input_w < in_width) {
const scalar_t rotation_ratio_h =
ldg(rotation_ratio + rotation_offset_fhw);
const scalar_t rotation_ratio_w =
ldg(rotation_ratio + rotation_offset_fhw + out_height * out_width);
const scalar_t cur_input = deformable_im2col_bilinear(
input + input_offset_temp,
in_height,
in_width,
input_h,
input_w);
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
sum += cur_input * planar_bilinear(
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w);
}
}
}
output[thread_id] = sum;
}
}
void DeformableSampleDepthwiseConv2dForward(
const at::Tensor input,
const at::Tensor offset,
const at::Tensor rotation_ratio,
const at::Tensor filter,
const SampleDepthwiseArgs args,
at::Tensor output) {
int num_kernels = args.batch * args.channel * args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.type(), "DeformableSampleDepthwiseConv2dForward_GPU", ([&] {
const scalar_t *input_ = input.data<scalar_t>();
const scalar_t *offset_ = offset.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
const scalar_t *filter_ = filter.data<scalar_t>();
scalar_t *output_ = output.data<scalar_t>();
if (args.filter_height == 3 && args.filter_width == 3) {
DeformableSampleDepthwiseConv2dForwardKernel<scalar_t, 3, 3>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
input_,
offset_,
rotation_ratio_,
filter_,
args,
output_);
} else {
DeformableSampleDepthwiseConv2dForwardKernel<scalar_t, -1, -1>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
input_,
offset_,
rotation_ratio_,
filter_,
args,
output_);
}
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in DeformableSampleDepthwiseConv2dForwardKernel: %s\n", cudaGetErrorString(err));
}
}
template<typename scalar_t>
__global__ __launch_bounds__(1024, 2) void SampleDepthwiseConv2dBackwardDataKernel(
int n,
const scalar_t* out_grad,
const scalar_t* rotation_ratio,
const scalar_t* filter,
const SampleDepthwiseArgs args,
scalar_t* in_grad) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = args.filter_height;
const int filter_width = args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
// Compute the indexes of this thread in the input.
const int in_w = thread_id % in_width;
const int in_h = (thread_id / in_width) % in_height;
const int channel_idx = (thread_id / in_width / in_height) % channel;
const int batch_idx = thread_id / channel / in_width / in_height;
const int out_h_start = CudaMax<int>(
0, (in_h + pad_height - (filter_height - 1) * dilation_height +
stride_height - 1) / stride_height);
const int out_h_end = CudaMin<int>(
out_height - 1, (in_h + pad_height) / stride_height);
const int out_w_start = CudaMax<int>(
0, (in_w + pad_width - (filter_width - 1) * dilation_width +
stride_width - 1) / stride_width);
const int out_w_end = CudaMin<int>(
out_width - 1, (in_w + pad_width) / stride_width);
const int group_id = channel_idx % sampling_group;
const int rotation_offset_temp =
(batch_idx * sampling_group + group_id) *
(filter_height * filter_width * 2) * out_height * out_width;
const int filter_offset_temp = channel_idx * scope_height * scope_width;
const int out_grad_offset_temp =
(batch_idx * channel + channel_idx) * (out_height * out_width);
scalar_t sum = 0.0f;
for (int out_h = out_h_start; out_h <= out_h_end; ++out_h) {
int f_h = in_h + pad_height - out_h * stride_height;
if (f_h % dilation_height == 0) {
f_h /= dilation_height;
const int out_grad_offset_h = out_grad_offset_temp + out_h * out_width;
for (int out_w = out_w_start; out_w <= out_w_end; ++out_w) {
int f_w = in_w + pad_width - out_w * stride_width;
if (f_w % dilation_width == 0) {
f_w /= dilation_width;
const int out_grad_offset = out_grad_offset_h + out_w;
const int rotation_offset_fhw = rotation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width +
(out_h * out_width + out_w);
const scalar_t rotation_ratio_h =
ldg(rotation_ratio + rotation_offset_fhw);
const scalar_t rotation_ratio_w =
ldg(rotation_ratio + rotation_offset_fhw + out_height * out_width);
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
sum += ldg(out_grad + out_grad_offset) * planar_bilinear(
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w);
}
}
}
}
in_grad[thread_id] = sum;
}
}
void SampleDepthwiseConv2dBackwardData(
const at::Tensor out_grad,
const at::Tensor rotation_ratio,
const at::Tensor filter,
const SampleDepthwiseArgs args,
at::Tensor in_grad) {
int num_kernels = args.batch * args.channel * args.in_height * args.in_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "SampleDepthwiseConv2dBackwardData_GPU", ([&] {
const scalar_t *out_grad_ = out_grad.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
const scalar_t *filter_ = filter.data<scalar_t>();
scalar_t *in_grad_ = in_grad.data<scalar_t>();
SampleDepthwiseConv2dBackwardDataKernel<scalar_t>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
rotation_ratio_,
filter_,
args,
in_grad_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SampleDepthwiseConv2dBackwardDataKernel: %s\n", cudaGetErrorString(err));
}
}
template<typename scalar_t>
__global__ __launch_bounds__(1024, 2) void DeformableSampleDepthwiseConv2dBackwardDataKernel(
int n,
const scalar_t* out_backprop,
const scalar_t* offset,
const scalar_t* rotation_ratio,
const scalar_t* filter,
const SampleDepthwiseArgs args,
scalar_t* in_grad) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = args.filter_height;
const int filter_width = args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
// Compute the indexes of this thread in the output.
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int in_c = (thread_id / out_width / out_height) % channel;
// NOTE(Hang Gao @ 07/26): why feed data like this? -- because the deformable
// offsets make each filter tap sample a data-dependent input location, so
// threads are indexed over (batch, f_h, f_w, channel, output position) and
// scatter into in_grad with atomicAdd instead of gathering per input pixel.
const int f_w = (thread_id / out_width / out_height / channel) % filter_width;
const int f_h = (thread_id / out_width / out_height / channel / filter_width) % filter_height;
const int out_b = (thread_id / out_width / out_height / channel / filter_width) / filter_height;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_row = out_h * stride_height - pad_height + f_h * dilation_height;
const int in_col = out_w * stride_width - pad_width + f_w * dilation_width;
const int deformable_offset_temp =
(out_b * (filter_height * filter_width) + (f_h * filter_width + f_w)) * 2 *
out_height * out_width + (out_h * out_width + out_w);
const int group_id = in_c % sampling_group;
const int rotation_offset_temp =
((out_b * sampling_group + group_id) * (filter_height * filter_width) +
(f_h * filter_width + f_w)) * 2 * out_height * out_width +
(out_h * out_width + out_w);
const scalar_t input_h = in_row + ldg(offset + deformable_offset_temp);
const scalar_t input_w = in_col + ldg(
offset + deformable_offset_temp + out_height * out_width);
// Avoid repeated computation.
if (input_h > -1 && input_w > -1 && input_h < in_height && input_w < in_width) {
const int input_offset_temp = (out_b * channel + in_c) * (in_height * in_width);
const int filter_offset_temp = in_c * scope_height * scope_width;
const scalar_t out_bp = ldg(
out_backprop +
(out_b * channel + in_c) * (out_height * out_width) +
(out_h * out_width + out_w));
const scalar_t rotation_ratio_h = ldg(
rotation_ratio + rotation_offset_temp);
const scalar_t rotation_ratio_w = ldg(
rotation_ratio + rotation_offset_temp + out_height * out_width);
scalar_t cur_weight = 0;
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
cur_weight = planar_bilinear(
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w);
const scalar_t partial_sum = cur_weight * out_bp;
deformable_im2col_bilinear_backward(
partial_sum,
input_h,
input_w,
in_height,
in_width,
in_grad + input_offset_temp);
}
}
}
void DeformableSampleDepthwiseConv2dBackwardData(
const at::Tensor out_grad,
const at::Tensor offset,
const at::Tensor rotation_ratio,
const at::Tensor filter,
const SampleDepthwiseArgs args,
at::Tensor in_grad) {
int num_kernels = args.batch * args.filter_height * args.filter_width *
args.channel * args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "DeformableSampleDepthwiseConv2dBackwardData_GPU", ([&] {
const scalar_t *out_grad_ = out_grad.data<scalar_t>();
const scalar_t *offset_ = offset.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
const scalar_t *filter_ = filter.data<scalar_t>();
scalar_t *in_grad_ = in_grad.data<scalar_t>();
DeformableSampleDepthwiseConv2dBackwardDataKernel<scalar_t>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
offset_,
rotation_ratio_,
filter_,
args,
in_grad_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf(
"error in DeformableSampleDepthwiseConv2dBackwardDataKernel: %s\n",
cudaGetErrorString(err));
}
}
template <typename scalar_t, int kFilterWidth, int kFilterHeight>
__global__ __launch_bounds__(1024, 2) void SampleDepthwiseConv2dBackwardFilterKernel(
int n,
const scalar_t* out_backprop,
const scalar_t* input,
const scalar_t* rotation_ratio,
const SampleDepthwiseArgs args,
scalar_t* filter_backprop) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = kFilterHeight > 0 ? kFilterHeight : args.filter_height;
const int filter_width = kFilterWidth > 0 ? kFilterWidth : args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
// Compute the indexes of this thread in the output.
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int out_c = (thread_id / out_width / out_height) % channel;
const int out_b = thread_id / out_width / out_height / channel;
const int in_c = out_c;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_row_start = out_h * stride_height - pad_height;
const int in_col_start = out_w * stride_width - pad_width;
const int in_row_end = in_row_start + (filter_height - 1) * dilation_height;
const int in_col_end = in_col_start + (filter_width - 1) * dilation_width;
const int input_offset_temp = (out_b * channel + in_c) * (in_height * in_width);
const int group_id = in_c % sampling_group;
const int rotation_offset_temp = (out_b * sampling_group + group_id) *
(filter_height * filter_width * 2) * out_height * out_width +
(out_h * out_width + out_w);
const int filter_offset_temp = in_c * scope_height * scope_width;
const scalar_t out_bp = ldg(out_backprop + thread_id);
if (in_row_start >= 0 && in_col_start >= 0 &&
in_row_end < in_height && in_col_end < in_width) {
CUDA_UNROLL for (int f_h = 0; f_h < filter_height; ++f_h) {
const int in_row = in_row_start + f_h * dilation_height;
// Avoid repeated computation.
const int input_offset_local = input_offset_temp + in_row * in_width;
CUDA_UNROLL for (int f_w = 0; f_w < filter_width; ++f_w) {
const int in_col = in_col_start + f_w * dilation_width;
const int input_offset = input_offset_local + in_col;
const int rotation_offset_fhw =
rotation_offset_temp + (f_h * filter_width + f_w) * 2 * out_height * out_width;
const scalar_t rotation_ratio_h = ldg(
rotation_ratio + rotation_offset_fhw);
const scalar_t rotation_ratio_w = ldg(
rotation_ratio + rotation_offset_fhw + out_height * out_width);
scalar_t partial_sum = ldg(input + input_offset) * out_bp;
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
planar_bilinear_backward_data(
partial_sum,
scope_height,
scope_width,
filter_h,
filter_w,
filter_backprop + filter_offset_temp);
}
}
} else {
CUDA_UNROLL for (int f_h = 0; f_h < filter_height; ++f_h) {
const int in_row = in_row_start + f_h * dilation_height;
// Avoid repeated computation.
const int input_offset_local = input_offset_temp + in_row * in_width;
CUDA_UNROLL for (int f_w = 0; f_w < filter_width; ++f_w) {
const int in_col = in_col_start + f_w * dilation_width;
if (in_row >= 0 && in_row < in_height && in_col >= 0 && in_col < in_width) {
const int input_offset = input_offset_local + in_col;
const int rotation_offset_fhw = rotation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width;
const scalar_t rotation_ratio_h = ldg(
rotation_ratio + rotation_offset_fhw);
const scalar_t rotation_ratio_w = ldg(
rotation_ratio + rotation_offset_fhw + out_height * out_width);
scalar_t partial_sum = ldg(input + input_offset) * out_bp;
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
planar_bilinear_backward_data(
partial_sum,
scope_height,
scope_width,
filter_h,
filter_w,
filter_backprop + filter_offset_temp);
}
}
}
}
}
}
void SampleDepthwiseConv2dBackwardFilter(
const at::Tensor out_grad,
const at::Tensor input,
const at::Tensor rotation_ratio,
const SampleDepthwiseArgs args,
at::Tensor filter_grad) {
int num_kernels = args.batch * args.channel * args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "SampleDepthwiseConv2dBackwardFilter_GPU", ([&] {
const scalar_t *out_grad_ = out_grad.data<scalar_t>();
const scalar_t *input_ = input.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
scalar_t *filter_grad_ = filter_grad.data<scalar_t>();
if (args.filter_height == 3 && args.filter_width == 3) {
SampleDepthwiseConv2dBackwardFilterKernel<scalar_t, 3, 3>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
input_,
rotation_ratio_,
args,
filter_grad_);
} else {
SampleDepthwiseConv2dBackwardFilterKernel<scalar_t, -1, -1>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
input_,
rotation_ratio_,
args,
filter_grad_);
}
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SampleDepthwiseConv2dBackwardFilterKernel: %s\n", cudaGetErrorString(err));
}
}
// A Cuda kernel to compute the depthwise convolution backprop w.r.t. filter.
template <typename scalar_t, int kFilterWidth, int kFilterHeight>
__global__ __launch_bounds__(1024, 2) void DeformableSampleDepthwiseConv2dBackwardFilterKernel(
int n,
const scalar_t* out_backprop,
const scalar_t* input,
const scalar_t* offset,
const scalar_t* rotation_ratio,
const SampleDepthwiseArgs args,
scalar_t* filter_backprop) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = kFilterHeight > 0 ? kFilterHeight : args.filter_height;
const int filter_width = kFilterWidth > 0 ? kFilterWidth : args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
// Compute the indexes of this thread in the output.
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int out_c = (thread_id / out_width / out_height) % channel;
const int out_b = thread_id / out_width / out_height / channel;
const int in_c = out_c;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_row_start = out_h * stride_height - pad_height;
const int in_col_start = out_w * stride_width - pad_width;
const int input_offset_temp = (out_b * channel + in_c) * (in_height * in_width);
const int deformation_offset_temp = out_b * (filter_height * filter_width * 2) *
out_height * out_width + (out_h * out_width + out_w);
const int group_id = in_c % sampling_group;
const int rotation_offset_temp = (out_b * sampling_group + group_id) *
(filter_height * filter_width * 2) * out_height * out_width +
(out_h * out_width + out_w);
const int filter_offset_temp = in_c * scope_height * scope_width;
const scalar_t out_bp = ldg(out_backprop + thread_id);
CUDA_UNROLL for (int f_h = 0; f_h < filter_height; ++f_h) {
const int in_row = in_row_start + f_h * dilation_height;
// Avoid repeated computation.
CUDA_UNROLL for (int f_w = 0; f_w < filter_width; ++f_w) {
const int in_col = in_col_start + f_w * dilation_width;
const int deformation_offset_fhw = deformation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width;
const int rotation_offset_fhw = rotation_offset_temp +
(f_h * filter_width + f_w) * 2 * out_height * out_width;
const scalar_t input_h = in_row + ldg(
offset + deformation_offset_fhw);
const scalar_t input_w = in_col + ldg(
offset + deformation_offset_fhw + out_height * out_width);
if (input_h > -1 && input_w > -1 && input_h < in_height && input_w < in_width) {
const scalar_t rotation_ratio_h = ldg(
rotation_ratio + rotation_offset_fhw);
const scalar_t rotation_ratio_w = ldg(
rotation_ratio + rotation_offset_fhw + out_height * out_width);
const scalar_t partial_sum = deformable_im2col_bilinear(
input + input_offset_temp,
in_height,
in_width,
input_h,
input_w) * out_bp;
const scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
const scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
planar_bilinear_backward_data(
partial_sum,
scope_height,
scope_width,
filter_h,
filter_w,
filter_backprop + filter_offset_temp);
}
}
}
}
}
void DeformableSampleDepthwiseConv2dBackwardFilter(
const at::Tensor out_grad,
const at::Tensor input,
const at::Tensor offset,
const at::Tensor rotation_ratio,
const SampleDepthwiseArgs args,
at::Tensor filter_grad) {
int num_kernels = args.batch * args.channel * args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "DeformableSampleDepthwiseConv2dBackwardFilter_GPU", ([&] {
const scalar_t *out_grad_ = out_grad.data<scalar_t>();
const scalar_t *input_ = input.data<scalar_t>();
const scalar_t *offset_ = offset.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
scalar_t *filter_grad_ = filter_grad.data<scalar_t>();
if (args.filter_height == 3 && args.filter_width == 3) {
DeformableSampleDepthwiseConv2dBackwardFilterKernel<scalar_t, 3, 3>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
input_,
offset_,
rotation_ratio_,
args,
filter_grad_);
} else {
DeformableSampleDepthwiseConv2dBackwardFilterKernel<scalar_t, -1, -1>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
input_,
offset_,
rotation_ratio_,
args,
filter_grad_);
}
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in DeformableSampleDepthwiseConv2dBackwardFilterKernel: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ __launch_bounds__(1024, 2) void DeformableSampleDepthwiseConv2dBackwardOffsetKernel(
int n,
const scalar_t* out_backprop,
const scalar_t* input,
const scalar_t* offset,
const scalar_t* rotation_ratio,
const scalar_t* filter,
const SampleDepthwiseArgs args,
scalar_t* offset_backprop) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = args.filter_height;
const int filter_width = args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
// Compute the indexes of this thread in the output.
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int bp_dir = (thread_id / out_width / out_height) % 2;
const int f_w = (thread_id / out_width / out_height / 2) % filter_width;
const int f_h = (thread_id / out_width / out_height / 2 / filter_width) % filter_height;
const int out_b = (thread_id / out_width / out_height / 2 / filter_width) / filter_height;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_row = out_h * stride_height - pad_height + f_h * dilation_height;
const int in_col = out_w * stride_width - pad_width + f_w * dilation_width;
const int deformable_offset_temp =
(out_b * (filter_height * filter_width) + (f_h * filter_width + f_w)) * 2 *
out_height * out_width +
(out_h * out_width + out_w);
const scalar_t input_h = in_row + ldg(
offset + deformable_offset_temp);
const scalar_t input_w = in_col + ldg(
offset + deformable_offset_temp + out_height * out_width);
scalar_t coord_gradient = 0;
// Avoid repeated computation.
if (input_h > -1 && input_w > -1 && input_h < in_height && input_w < in_width) {
for (int in_c = 0; in_c < channel; in_c++) {
const int group_id = in_c % sampling_group;
const int rotation_offset_temp = ((out_b * sampling_group + group_id) *
(filter_height * filter_width) + (f_h * filter_width + f_w)) * 2 *
out_height * out_width + (out_h * out_width + out_w);
const scalar_t rotation_ratio_h = ldg(
rotation_ratio + rotation_offset_temp);
const scalar_t rotation_ratio_w = ldg(
rotation_ratio + rotation_offset_temp + out_height * out_width);
scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
const int input_offset_temp = (out_b * channel + in_c) * (in_height * in_width);
const int filter_offset_temp = in_c * scope_height * scope_width;
const scalar_t out_bp = ldg(
out_backprop +
(out_b * channel + in_c) * (out_height * out_width) +
(out_h * out_width + out_w));
scalar_t cur_weight = planar_bilinear(
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w);
scalar_t partial_sum = cur_weight * out_bp;
coord_gradient += get_coordinate_weight(
input_h,
input_w,
in_height,
in_width,
input + input_offset_temp,
bp_dir) * partial_sum;
}
}
offset_backprop[thread_id] = coord_gradient;
}
}
void DeformableSampleDepthwiseConv2dBackwardOffset(
const at::Tensor out_grad,
const at::Tensor input,
const at::Tensor offset,
const at::Tensor rotation_ratio,
const at::Tensor filter,
const SampleDepthwiseArgs args,
at::Tensor offset_grad) {
int num_kernels = args.batch * args.filter_height * args.filter_width * 2 *
args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "DeformableSampleDepthwiseConv2dBackwardOffset_GPU", ([&] {
const scalar_t *out_grad_ = out_grad.data<scalar_t>();
const scalar_t *input_ = input.data<scalar_t>();
const scalar_t *offset_ = offset.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
const scalar_t *filter_ = filter.data<scalar_t>();
scalar_t *offset_grad_ = offset_grad.data<scalar_t>();
DeformableSampleDepthwiseConv2dBackwardOffsetKernel<scalar_t>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
input_,
offset_,
rotation_ratio_,
filter_,
args,
offset_grad_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in DeformableSampleDepthwiseConv2dBackwardOffsetKernel: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ __launch_bounds__(1024, 2) void SampleDepthwiseConv2dBackwardRotationKernel(
int n,
const scalar_t* out_backprop,
const scalar_t* input,
const scalar_t* rotation_ratio,
const scalar_t* filter,
const SampleDepthwiseArgs args,
scalar_t* rotation_backprop) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = args.filter_height;
const int filter_width = args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
// Compute the indexes of this thread in the output.
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int bp_dir = (thread_id / out_width / out_height) % 2;
const int f_w = (thread_id / out_width / out_height / 2) % filter_width;
const int f_h = (thread_id / out_width / out_height / 2 / filter_width) % filter_height;
const int group_id = (thread_id / out_width / out_height / 2 /
filter_width / filter_height) % sampling_group;
const int out_b = (thread_id / out_width / out_height / 2 /
filter_width / filter_height) / sampling_group;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_row = out_h * stride_height - pad_height + f_h * dilation_height;
const int in_col = out_w * stride_width - pad_width + f_w * dilation_width;
const int rotation_offset_temp =
((out_b * sampling_group + group_id) * (filter_height * filter_width) +
(f_h * filter_width + f_w)) * 2 * out_height * out_width +
(out_h * out_width + out_w);
const scalar_t rotation_ratio_h = ldg(
rotation_ratio + rotation_offset_temp);
const scalar_t rotation_ratio_w = ldg(
rotation_ratio + rotation_offset_temp + out_height * out_width);
scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
scalar_t coord_gradient = 0;
// Avoid repeated computation.
if (in_row >= 0 && in_row < in_height && in_col >= 0 && in_col < in_width) {
for (int in_c = group_id; in_c < channel; in_c += sampling_group) {
const int input_offset_temp =
(out_b * channel + in_c) * (in_height * in_width) +
(in_row * in_width + in_col);
const int filter_offset_temp = in_c * scope_height * scope_width;
const scalar_t out_bp = ldg(
out_backprop +
(out_b * channel + in_c) * (out_height * out_width) +
(out_h * out_width + out_w));
scalar_t partial_sum = ldg(input + input_offset_temp) * out_bp;
coord_gradient += planar_bilinear_backward_coord(
partial_sum,
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w,
bp_dir);
}
}
rotation_backprop[thread_id] = coord_gradient;
}
}
void SampleDepthwiseConv2dBackwardRotation(
const at::Tensor out_grad,
const at::Tensor input,
const at::Tensor rotation_ratio,
const at::Tensor filter,
const SampleDepthwiseArgs args,
at::Tensor rotation_grad) {
int num_kernels = args.batch *
args.sampling_group * args.filter_height * args.filter_width * 2 *
args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "SampleDepthwiseConv2dBackwardRotation_GPU", ([&] {
const scalar_t *out_grad_ = out_grad.data<scalar_t>();
const scalar_t *input_ = input.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
const scalar_t *filter_ = filter.data<scalar_t>();
scalar_t *rotation_grad_ = rotation_grad.data<scalar_t>();
SampleDepthwiseConv2dBackwardRotationKernel<scalar_t>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
input_,
rotation_ratio_,
filter_,
args,
rotation_grad_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SampleDepthwiseConv2dBackwardRotationKernel: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ __launch_bounds__(1024, 2) void DeformableSampleDepthwiseConv2dBackwardRotationKernel(
int n,
const scalar_t* out_backprop,
const scalar_t* input,
const scalar_t* offset,
const scalar_t* rotation_ratio,
const scalar_t* filter,
const SampleDepthwiseArgs args,
scalar_t* rotation_backprop) {
const int channel = args.channel;
const int in_height = args.in_height;
const int in_width = args.in_width;
const int filter_height = args.filter_height;
const int filter_width = args.filter_width;
const int stride_height = args.stride_height;
const int stride_width = args.stride_width;
const int pad_height = args.pad_height;
const int pad_width = args.pad_width;
const int dilation_height = args.dilation_height;
const int dilation_width = args.dilation_width;
const int out_height = args.out_height;
const int out_width = args.out_width;
const int scope_height = args.scope_height;
const int scope_width = args.scope_width;
const int sampling_group = args.sampling_group;
CUDA_KERNEL_LOOP(thread_id, n) {
// Compute the indexes of this thread in the output.
const int out_w = thread_id % out_width;
const int out_h = (thread_id / out_width) % out_height;
const int bp_dir = (thread_id / out_width / out_height) % 2;
const int f_w = (thread_id / out_width / out_height / 2) % filter_width;
const int f_h = (thread_id / out_width / out_height / 2 / filter_width) % filter_height;
const int group_id = (thread_id / out_width / out_height / 2 /
filter_width / filter_height) % sampling_group;
const int out_b = (thread_id / out_width / out_height / 2 /
filter_width / filter_height) / sampling_group;
// Decide if all input is valid, if yes, we can skip the boundary checks
// for each input.
const int in_row = out_h * stride_height - pad_height + f_h * dilation_height;
const int in_col = out_w * stride_width - pad_width + f_w * dilation_width;
const int deformable_offset_temp =
(out_b * (filter_height * filter_width) + (f_h * filter_width + f_w)) * 2 *
out_height * out_width + (out_h * out_width + out_w);
const int rotation_offset_temp =
((out_b * sampling_group + group_id) * (filter_height * filter_width) +
(f_h * filter_width + f_w)) * 2 * out_height * out_width +
(out_h * out_width + out_w);
const scalar_t input_h = in_row + ldg(
offset + deformable_offset_temp);
const scalar_t input_w = in_col + ldg(
offset + deformable_offset_temp + out_height * out_width);
scalar_t coord_gradient = 0;
// Avoid repeated computation.
if (input_h > -1 && input_w > -1 && input_h < in_height && input_w < in_width) {
const scalar_t rotation_ratio_h = ldg(
rotation_ratio + rotation_offset_temp);
const scalar_t rotation_ratio_w = ldg(
rotation_ratio + rotation_offset_temp + out_height * out_width);
scalar_t filter_h = f_h + rotation_ratio_h +
(scope_height - filter_height) / 2.0;
scalar_t filter_w = f_w + rotation_ratio_w +
(scope_width - filter_width) / 2.0;
for (int in_c = group_id; in_c < channel; in_c += sampling_group) {
const int input_offset_temp = (out_b * channel + in_c) * (in_height * in_width);
const int filter_offset_temp = in_c * scope_height * scope_width;
const scalar_t out_bp = ldg(
out_backprop +
(out_b * channel + in_c) * (out_height * out_width) +
(out_h * out_width + out_w));
scalar_t partial_sum = deformable_im2col_bilinear(
input + input_offset_temp,
in_height,
in_width,
input_h,
input_w) * out_bp;
coord_gradient += planar_bilinear_backward_coord(
partial_sum,
filter + filter_offset_temp,
scope_height,
scope_width,
filter_h,
filter_w,
bp_dir);
}
}
rotation_backprop[thread_id] = coord_gradient;
}
}
void DeformableSampleDepthwiseConv2dBackwardRotation(
const at::Tensor out_grad,
const at::Tensor input,
const at::Tensor offset,
const at::Tensor rotation_ratio,
const at::Tensor filter,
const SampleDepthwiseArgs args,
at::Tensor rotation_grad) {
int num_kernels = args.batch *
args.sampling_group * args.filter_height * args.filter_width * 2 *
args.out_height * args.out_width;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
out_grad.type(), "DeformableSampleDepthwiseConv2dBackwardRotation_GPU", ([&] {
const scalar_t *out_grad_ = out_grad.data<scalar_t>();
const scalar_t *input_ = input.data<scalar_t>();
const scalar_t *offset_ = offset.data<scalar_t>();
const scalar_t *rotation_ratio_ = rotation_ratio.data<scalar_t>();
const scalar_t *filter_ = filter.data<scalar_t>();
scalar_t *rotation_grad_ = rotation_grad.data<scalar_t>();
DeformableSampleDepthwiseConv2dBackwardRotationKernel<scalar_t>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
out_grad_,
input_,
offset_,
rotation_ratio_,
filter_,
args,
rotation_grad_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in DeformableSampleDepthwiseConv2dBackwardRotationKernel: %s\n", cudaGetErrorString(err));
}
}
/*
    Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <stdio.h>
#include "sharedmem.cuh"
#if 1
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
// Macros to append an SM version identifier to a function name
// This allows us to compile a file multiple times for different architecture
// versions
// The second macro is necessary to evaluate the value of the SMVERSION macro
// rather than appending "SMVERSION" itself
#define FUNCVERSION(x, y) x ## _ ## y
#define XFUNCVERSION(x, y) FUNCVERSION(x, y)
#define FUNC(NAME) XFUNCVERSION(NAME, SMVERSION)
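// Example: compiling with -DSMVERSION=sm_20 (the value is supplied by the build,
// not by this header) makes FUNC(reduce0) expand via XFUNCVERSION/FUNCVERSION to
// reduce0_sm_20, so each architecture-specific compilation of this file gets
// uniquely named kernels.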
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
template <class T>
__global__ void
FUNC(reduce0)(T *g_idata, T *g_odata, unsigned int n)
{
SharedMemory<T> smem;
T *sdata = smem.getPointer();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts. */
template <class T>
__global__ void
FUNC(reduce1)(T *g_idata, T *g_odata, unsigned int n)
{
SharedMemory<T> smem;
T *sdata = smem.getPointer();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
template <class T>
__global__ void
FUNC(reduce2)(T *g_idata, T *g_odata, unsigned int n)
{
SharedMemory<T> smem;
T *sdata = smem.getPointer();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory
*/
template <class T>
__global__ void
FUNC(reduce3)(T *g_idata, T *g_odata, unsigned int n)
{
SharedMemory<T> smem;
T *sdata = smem.getPointer();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n)
sdata[tid] += g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version unrolls the last warp to avoid synchronization where it
isn't needed
*/
template <class T, unsigned int blockSize>
__global__ void
FUNC(reduce4)(T *g_idata, T *g_odata, unsigned int n)
{
SharedMemory<T> smem;
T *sdata = smem.getPointer();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
if (i + blockSize < n)
sdata[tid] += g_idata[i+blockSize];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid < 32) if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } EMUSYNC;
if (tid < 16) if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } EMUSYNC;
if (tid < 8) if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; } EMUSYNC;
if (tid < 4) if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; } EMUSYNC;
if (tid < 2) if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; } EMUSYNC;
if (tid < 1) if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; } EMUSYNC;
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version is completely unrolled. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time.
*/
template <class T, unsigned int blockSize>
__global__ void
FUNC(reduce5)(T *g_idata, T *g_odata, unsigned int n)
{
SharedMemory<T> smem;
T *sdata = smem.getPointer();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
if (i + blockSize < n)
sdata[tid] += g_idata[i+blockSize];
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } EMUSYNC;
if (tid < 16) if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } EMUSYNC;
if (tid < 8) if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; } EMUSYNC;
if (tid < 4) if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; } EMUSYNC;
if (tid < 2) if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; } EMUSYNC;
if (tid < 1) if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; } EMUSYNC;
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
FUNC(reduce6)(T *g_idata, T *g_odata, unsigned int n)
{
SharedMemory<T> smem;
T *sdata = smem.getPointer();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
sdata[tid] += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
sdata[tid] += g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; } EMUSYNC;
if (tid < 16) if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; } EMUSYNC;
if (tid < 8) if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; } EMUSYNC;
if (tid < 4) if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; } EMUSYNC;
if (tid < 2) if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; } EMUSYNC;
if (tid < 1) if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; } EMUSYNC;
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
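// A minimal host-side sketch (not part of the original sample) of how the
// per-block partial sums written to g_odata by the kernels above could be
// combined into a single result. The helper name finalReduceOnHost is
// hypothetical; error checking is omitted for brevity.
template <class T>
T finalReduceOnHost(const T *d_odata, int blocks)
{
    T *h_odata = new T[blocks];
    cudaMemcpy(h_odata, d_odata, blocks * sizeof(T), cudaMemcpyDeviceToHost);
    T total = 0;
    for (int i = 0; i < blocks; ++i)
        total += h_odata[i]; // sum the per-block partial results on the CPU
    delete [] h_odata;
    return total;
}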
extern "C"
bool isPow2(unsigned int x);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
FUNC(reduce)(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = 2 * threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 0:
FUNC(reduce0)<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
FUNC(reduce1)<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
FUNC(reduce2)<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 3:
FUNC(reduce3)<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
switch (threads)
{
case 512:
FUNC(reduce4)<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
FUNC(reduce4)<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
FUNC(reduce4)<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
FUNC(reduce4)<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
FUNC(reduce4)<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
FUNC(reduce4)<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
FUNC(reduce4)<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
FUNC(reduce4)<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
FUNC(reduce4)<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
FUNC(reduce4)<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
break;
case 5:
switch (threads)
{
case 512:
FUNC(reduce5)<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
FUNC(reduce5)<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
FUNC(reduce5)<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
FUNC(reduce5)<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
FUNC(reduce5)<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
FUNC(reduce5)<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
FUNC(reduce5)<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
FUNC(reduce5)<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
FUNC(reduce5)<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
FUNC(reduce5)<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
break;
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
FUNC(reduce6)<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
FUNC(reduce6)<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
FUNC(reduce6)<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
FUNC(reduce6)<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
FUNC(reduce6)<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
FUNC(reduce6)<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
FUNC(reduce6)<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
FUNC(reduce6)<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
FUNC(reduce6)<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
FUNC(reduce6)<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
else
{
switch (threads)
{
case 512:
FUNC(reduce6)<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
FUNC(reduce6)<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
FUNC(reduce6)<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
FUNC(reduce6)<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
FUNC(reduce6)<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
FUNC(reduce6)<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
FUNC(reduce6)<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
FUNC(reduce6)<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
FUNC(reduce6)<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
FUNC(reduce6)<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
break;
}
}
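// A hedged sketch (assumption, not taken from this file) of one way a caller
// might choose `threads` and `blocks` for the two-loads-per-thread kernels
// (reduce4/5/6) dispatched by the wrapper above. The 512-thread cap mirrors
// the largest case handled by the switch statements; both helper names are
// hypothetical.
#ifndef REDUCE_LAUNCH_SKETCH
#define REDUCE_LAUNCH_SKETCH
static unsigned int sketchNextPow2(unsigned int x)
{
    --x;
    x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
    return ++x;
}
static void sketchChooseLaunchConfig(int n, int &threads, int &blocks)
{
    // each thread first sums two elements, hence the division by 2
    threads = (n < 2 * 512) ? (int)sketchNextPow2((n + 1) / 2) : 512;
    blocks = (n + threads * 2 - 1) / (threads * 2);
}
#endif // REDUCE_LAUNCH_SKETCH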
extern "C"
void FUNC(reduceInt)(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata)
{
FUNC(reduce)<int>(size, threads, blocks, whichKernel, d_idata, d_odata);
}
extern "C"
void FUNC(reduceFloat)(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata)
{
FUNC(reduce)<float>(size, threads, blocks, whichKernel, d_idata, d_odata);
}
extern "C"
void FUNC(reduceDouble)(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata)
{
FUNC(reduce)<double>(size, threads, blocks, whichKernel, d_idata, d_odata);
}
#endif // #ifndef _REDUCE_KERNEL_H_
//#define UNROLL_INNER
//#define IMUL(a, b) __mul24(a, b)
#include <utility_kernels.h>
namespace vision {
// OpenGL mapped input textures
texture<uchar4, cudaTextureType2D, cudaReadModeElementType> d_rgba_texture;
texture<float, cudaTextureType2D, cudaReadModeElementType> d_float_texture0;
texture<float, cudaTextureType2D, cudaReadModeElementType> d_float_texture1;
// Round a / b to nearest higher integer value
int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
__device__ static float rgbaToGray(uchar4 rgba) {
return (0.299f * (float)rgba.x + 0.587f * (float)rgba.y +
0.114f * (float)rgba.z);
}
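// Note: 0.299 / 0.587 / 0.114 are the ITU-R BT.601 luma coefficients commonly
// used for RGB-to-grayscale conversion.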
__global__ void deInterleave_kernel(float *d_X_out, float *d_Y_out,
float2 *d_XY_in, int pitch_out,
int pitch_in, int width, int height) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < width) & (y < height)) { // are we in the image?
float2 XY = *((float2 *)((char *)d_XY_in + y * pitch_in) + x);
*((float *)((char *)d_X_out + y *pitch_out) + x) = XY.x;
*((float *)((char *)d_Y_out + y *pitch_out) + x) = XY.y;
}
}
__global__ void deInterleave_kernel2(float *d_X_out, float *d_Y_out,
char *d_XY_in, int pitch_out, int pitch_in,
int width, int height) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < width) & (y < height)) { // are we in the image?
float *data = (float *)(d_XY_in + y * pitch_in) + 2 * x;
*((float *)((char *)d_X_out + y *pitch_out) + x) = data[0];
*((float *)((char *)d_Y_out + y *pitch_out) + x) = data[1];
}
}
__global__ void IMOMask_kernel(float *d_IMOMask, float *d_IMO,
const float *d_disparity, float offset,
int n_cols, int n_rows) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
unsigned int ind = x + y * n_cols;
if (!(bool)(d_IMOMask[ind])) {
d_IMO[ind] = nanf("");
}
}
}
__global__ void matchValidity_kernel(float *d_flow, float *d_disparity,
int n_cols, int n_rows) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
unsigned int ind = x + y * n_cols;
bool valid = (isfinite(d_flow[ind]) && isfinite(d_disparity[ind]));
if (!valid) {
d_flow[ind] = nanf("");
d_flow[ind + n_cols * n_rows] = nanf("");
d_disparity[ind] = nanf("");
}
}
}
// Convert float to RGBA kernel
#define GAIN (1.0f / STEREO_MAXD)
__global__ void convertFloatToRGBA_kernel(uchar4 *out_image,
const float *in_image, int width,
int height, float lowerLim,
float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = in_image[__mul24(y, width) + x];
// first draw unmatched pixels in white
if (!isfinite(val)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
val -= lowerLim;
val /= (upperLim - lowerLim);
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (val < 0.25f) {
r = 0;
g = 4.0f * val;
} else if (val < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - val);
} else if (val < 0.75f) {
r = 4.0f * (val - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - val);
b = 0;
}
temp.x = 255.0 * r;
temp.y = 255.0 * g;
temp.z = 255.0 * b;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
}
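// Hedged refactoring sketch (not used by the original kernels): the
// [0,1] -> jet-colormap mapping above is repeated verbatim in several kernels
// in this file; it could be factored into a device helper such as this
// hypothetical one, which reproduces the same branch structure.
__device__ static uchar4 jetColorSketch(float val)
{
    float r = 1.0f, g = 1.0f, b = 1.0f;
    if (val < 0.25f) {
        r = 0.0f; g = 4.0f * val;
    } else if (val < 0.5f) {
        r = 0.0f; b = 1.0f + 4.0f * (0.25f - val);
    } else if (val < 0.75f) {
        r = 4.0f * (val - 0.5f); b = 0.0f;
    } else {
        g = 1.0f + 4.0f * (0.75f - val); b = 0.0f;
    }
    return make_uchar4((unsigned char)(255.0f * r), (unsigned char)(255.0f * g),
                       (unsigned char)(255.0f * b), 255);
}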
__global__ void convertPitchedFloatToRGBA_kernel(uchar4 *out_image,
const float *in_image,
int width, int height,
int pitch, float lowerLim,
float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = *((float *)((char *)in_image + y * pitch) + x);
// first draw unmatched pixels in white
if (!isfinite(val)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
val -= lowerLim;
val /= (upperLim - lowerLim);
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (val < 0.25f) {
r = 0;
g = 4.0f * val;
} else if (val < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - val);
} else if (val < 0.75f) {
r = 4.0f * (val - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - val);
b = 0;
}
temp.x = 255.0 * r;
temp.y = 255.0 * g;
temp.z = 255.0 * b;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
}
__global__ void convertKinectFloatToRGBA_kernel(uchar4 *out_image,
const float *in_image,
int width, int height,
int pitch, float lowerLim,
float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = *((float *)((char *)in_image + y * pitch) + x);
val = (val == 0.0f) ? nanf("") : val;
// first draw unmatched pixels in white
if (!isfinite(val)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
val -= lowerLim;
val /= (upperLim - lowerLim);
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (val < 0.25f) {
r = 0;
g = 4.0f * val;
} else if (val < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - val);
} else if (val < 0.75f) {
r = 4.0f * (val - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - val);
b = 0;
}
temp.x = 255.0 * r;
temp.y = 255.0 * g;
temp.z = 255.0 * b;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
}
__global__ void convertFloatToRGBA_kernel(uchar4 *out_image,
const float *in_image, int width,
int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
int IND = y * width + x;
float val = in_image[IND];
temp.x = val;
temp.y = val;
temp.z = val;
temp.w = 255;
out_image[IND] = temp;
}
}
__global__ void convertFlowToRGBA_kernel(uchar4 *d_flowx_out,
uchar4 *d_flowy_out,
const float *d_flowx_in,
const float *d_flowy_in, int width,
int height, float lowerLim,
float upperLim, float minMag) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 tempx, tempy;
if (x < width && y < height) {
float ux = d_flowx_in[__mul24(y, width) + x];
float uy = d_flowy_in[__mul24(y, width) + x];
float mag = sqrtf(ux * ux + uy * uy);
// first draw unmatched pixels in white
if (!isfinite(ux) || (mag < minMag)) {
tempx.x = 255;
tempx.y = 255;
tempx.z = 255;
tempx.w = 255;
tempy.x = 255;
tempy.y = 255;
tempy.z = 255;
tempy.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
ux -= lowerLim;
ux /= (upperLim - lowerLim);
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (ux < 0.25f) {
r = 0;
g = 4.0f * ux;
} else if (ux < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - ux);
} else if (ux < 0.75f) {
r = 4.0f * (ux - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - ux);
b = 0;
}
tempx.x = 255.0 * r;
tempx.y = 255.0 * g;
tempx.z = 255.0 * b;
tempx.w = 255;
uy -= lowerLim;
uy /= (upperLim - lowerLim);
r = 1.0f;
g = 1.0f;
b = 1.0f;
if (uy < 0.25f) {
r = 0;
g = 4.0f * uy;
} else if (uy < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - uy);
} else if (uy < 0.75f) {
r = 4.0f * (uy - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - uy);
b = 0;
}
tempy.x = 255.0 * r;
tempy.y = 255.0 * g;
tempy.z = 255.0 * b;
tempy.w = 255;
}
d_flowx_out[__mul24(y, width) + x] = tempx;
d_flowy_out[__mul24(y, width) + x] = tempy;
}
}
// Convert pitched float to RGBA grayscale
__global__ void convertPitchedFloatToGrayRGBA_kernel(uchar4 *out_image,
const float *in_image,
int width, int height,
int pitch, float lowerLim,
float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
// float val = in_image[__mul24(y,pitch)+x];
float val = *((float *)((char *)in_image + y * pitch) + x);
// rescale value from [lowerLim,upperLim] to [0,255]
val -= lowerLim;
val /= (upperLim - lowerLim);
val *= 255.0;
temp.x = val;
temp.y = val;
temp.z = val;
temp.w = 255;
out_image[__mul24(y, width) + x] = temp;
}
}
// Convert float array to RGBA grayscale
__global__ void convertFloatArrayToGrayRGBA_kernel(uchar4 *out_image, int width,
int height, float lower_lim,
float upper_lim) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = tex2D(d_float_texture0, (float)x + 0.5f, (float)y + 0.5f);
// rescale value from [lowerLim,upperLim] to [0,255]
val -= lower_lim;
val /= (upper_lim - lower_lim);
val *= 255.0;
temp.x = val;
temp.y = val;
temp.z = val;
temp.w = 255;
out_image[y * width + x] = temp;
}
}
// Merge two grayscale images into RGBA anaglyph
__global__ void createAnaglyph_kernel(uchar4 *out_image,
const float *left_image,
const float *right_image, int width,
int height, int pre_shift) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int x_right = x - pre_shift;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
temp.x = left_image[__mul24(y, width) + x];
if (x_right > 0 && x_right < width) {
temp.y = right_image[__mul24(y, width) + x_right];
temp.z = temp.y;
} else {
temp.y = 0;
temp.z = 0;
}
temp.w = 255;
out_image[__mul24(y, width) + x] = temp;
}
}
__global__ void createAnaglyph_kernel(uchar4 *out_image,
const uchar4 *left_image,
const uchar4 *right_image, int width,
int height, int pre_shift) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int x_right = x - pre_shift;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
temp.x = rgbaToGray(left_image[y * width + x]);
if (x_right > 0 && x_right < width) {
temp.y = rgbaToGray(right_image[y * width + x_right]);
temp.z = temp.y;
} else {
temp.y = 0;
temp.z = 0;
}
temp.w = 255;
out_image[y * width + x] = temp;
}
}
// convert 2D vectors to an RGBA angle image and an RGBA magnitude image
__global__ void convert2DVectorToAngleMagnitude_kernel(
uchar4 *d_angle_image, uchar4 *d_magnitude_image, float *d_vector_X,
float *d_vector_Y, int width, int height, float lower_ang, float upper_ang,
float lower_mag, float upper_mag) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp_angle, temp_magnitude;
if (x < width && y < height) {
float vector_X = d_vector_X[__mul24(y, width) + x];
float vector_Y = d_vector_Y[__mul24(y, width) + x];
// compute angle and magnitude
float angle = atan2f(vector_Y, vector_X);
float magnitude = vector_X * vector_X + vector_Y * vector_Y;
magnitude = sqrtf(magnitude);
// first draw unmatched pixels in white
if (!isfinite(magnitude)) {
temp_angle.x = 255;
temp_angle.y = 255;
temp_angle.z = 255;
temp_angle.w = 255;
temp_magnitude.x = 255;
temp_magnitude.y = 255;
temp_magnitude.z = 255;
temp_magnitude.w = 255;
} else {
// rescale angle and magnitude from [lower,upper] to [0,1] and convert to
// RGBA jet colorspace
angle -= lower_ang;
angle /= (upper_ang - lower_ang);
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (angle < 0.25f) {
r = 0;
g = 4.0f * angle;
} else if (angle < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - angle);
} else if (angle < 0.75f) {
r = 4.0f * (angle - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - angle);
b = 0;
}
temp_angle.x = 255.0 * r;
temp_angle.y = 255.0 * g;
temp_angle.z = 255.0 * b;
temp_angle.w = 255;
magnitude -= lower_mag;
magnitude /= (upper_mag - lower_mag);
r = 1.0f;
g = 1.0f;
b = 1.0f;
if (magnitude < 0.25f) {
r = 0;
g = 4.0f * magnitude;
} else if (magnitude < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - magnitude);
} else if (magnitude < 0.75f) {
r = 4.0f * (magnitude - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - magnitude);
b = 0;
}
temp_magnitude.x = 255.0 * r;
temp_magnitude.y = 255.0 * g;
temp_magnitude.z = 255.0 * b;
temp_magnitude.w = 255;
}
d_angle_image[__mul24(y, width) + x] = temp_angle;
d_magnitude_image[__mul24(y, width) + x] = temp_magnitude;
}
}
// threshold floats using lowerLim and upperLim into RGBA black/white image
__global__ void convertFloatToRGBAbinary_kernel(uchar4 *out_image,
const float *in_image,
int width, int height,
float lowerLim,
float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = in_image[__mul24(y, width) + x];
// draw everything invalid or out of lim in white
if (!isfinite(val) || (val < lowerLim) || (val > upperLim)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
temp.x = 0.0f;
temp.y = 0.0f;
temp.z = 0.0f;
temp.w = 0.0f;
}
out_image[__mul24(y, width) + x] = temp;
}
}
// blend float image with float label (membership specified by lowerLim and
// upperLim)
__global__ void blendFloatImageFloatLabelToRGBA_kernel(
uchar4 *out_image, const float *in_image, const float *label, int width,
int height, float lowerLim, float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
unsigned char img =
(unsigned char)(0.5f * in_image[__mul24(y, width) + x] + 128.0f);
float val = label[__mul24(y, width) + x];
// draw everything invalid or out of lim in white
if (!isfinite(val) || (val < lowerLim) || (val > upperLim)) {
// don't blend
temp.x = img;
temp.y = img;
temp.z = img;
temp.w = 255;
} else {
// blend
temp.x = 0.6f * img;
temp.y = 0.6f * img;
temp.z = img;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
}
//__global__ void blendFloatImageFloatArrayToRGBA_kernel(uchar4 *out_image,
// const float *in_image, int width, int height, float w_r, float w_g, float
// w_b)
//{
// const int x = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
// const int y = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
// uchar4 temp;
// if(x < width && y < height) {
// unsigned char img = (unsigned char)(in_image[__mul24(y,width)+x]);
// uchar4 t = tex2D(d_rgba_texture,(float)x + 0.5f,(float)y + 0.5f);
// float model = rgbaToGray(t);
// if (t.w==0) { // don't blend
// temp.x = img;
// temp.y = img;
// temp.z = img;
// } else { // blend
//// model = (model-1.0f)*255.0f;
// temp.x = w_r*model;
// temp.y = w_g*img;
// temp.z = w_b*img;
// }
// out_image[__mul24(y,width)+x] = temp;
// }
//}
__global__ void blendFloatImageRGBAArrayToRGBA_kernel(uchar4 *out_image,
const float *in_image,
int width, int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float img = in_image[y * width + x];
uchar4 t = tex2D(d_rgba_texture, (float)x + 0.5f, (float)y + 0.5f);
float model = rgbaToGray(t);
unsigned char delta = (unsigned char)(abs(img - model));
unsigned char avg = (unsigned char)(0.5f * (img + model));
// increase image brightness
unsigned char u_img = (unsigned char)(img);
if (t.w == 0) { // outside mask -> don't blend
temp.x = u_img;
temp.y = u_img;
temp.z = u_img;
} else { // blend
temp.x = 0;
temp.y = avg;
temp.z = delta;
}
out_image[y * width + x] = temp;
}
}
__global__ void blendFloatImageFloatArrayToRGBA_kernel(uchar4 *out_image,
const float *in_image,
int pitch_out,
int pitch_in, int width,
int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float img = *((const float *)((const char *)in_image + y * pitch_in) + x);
float model = tex2D(d_float_texture0, (float)x + 0.5f, (float)y + 0.5f);
bool valid = (model != 0.0f);
model = model - 1.0f;
model *= 255.0;
unsigned char delta = (unsigned char)(abs(img - model));
unsigned char avg = (unsigned char)(0.5f * (img + model));
// increase image brightness
unsigned char u_img = (unsigned char)(img);
if (!valid) { // outside mask (or just black) -> don't blend
temp.x = u_img;
temp.y = u_img;
temp.z = u_img;
} else { // blend
temp.x = 0;
temp.y = avg;
temp.z = delta;
}
*((uchar4 *)((char *)out_image + y * pitch_out) + x) = temp;
}
}
__global__ void blendMultiColor_kernel(uchar4 *out_image, const float *in_image,
int width, int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float img = in_image[y * width + x];
// determine gl coord
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
float model = tex2D(d_float_texture0, xt, yt);
int segment_ind = (int)rintf(tex2D(d_float_texture1, xt, yt));
bool valid = (model != 0.0f) && (segment_ind != 0);
model = model - 1.0f;
model *= 255.0;
unsigned char u_img = (unsigned char)(img);
if (!valid) {
// outside mask (or just black) -> don't blend
temp.x = u_img;
temp.y = u_img;
temp.z = u_img;
} else if (segment_ind == 10) { // shelf
temp.x = (unsigned char)(0.8f * img + 0.2f * model);
temp.y = 0;
temp.z = 0;
} else if (segment_ind >= 20) { // robot
temp.x = 0;
temp.y = 0;
temp.z = (unsigned char)(0.8f * img + 0.2f * model);
} else { // object
temp.x = 0;
temp.y = (unsigned char)(0.5f * img + 0.5f * model);
temp.z = (unsigned char)(abs(img - model));
}
out_image[y * width + x] = temp;
}
}
__global__ void augmentedReality_kernel(float *out_image, const float *in_image,
int width, int height, int pitch) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
float img_back = *((float *)((char *)in_image + y * pitch) + x);
uchar4 img_front = tex2D(d_rgba_texture, (float)x + 0.5f, (float)y + 0.5f);
*((float *)((char *)out_image + y *pitch) + x) =
(img_front.w == 255) ? rgbaToGray(img_front) : img_back;
}
}
__global__ void augmentedRealityFloatArray_kernel(float *out_image,
const float *in_image,
int width, int height,
int pitch) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
float img_back = *((float *)((char *)in_image + y * pitch) + x);
float img_front = tex2D(d_float_texture0, (float)x + 0.5f, (float)y + 0.5f);
bool valid = (img_front != 0.0f);
img_front = img_front - 1.0f;
img_front *= 255.0;
*((float *)((char *)out_image + y *pitch) + x) =
valid ? img_front : img_back;
}
}
__global__ void augmentedRealityFloatArraySelectiveBlend_kernel(
float *out_image, const float *in_image, int width, int height, int pitch,
int max_segment_ind) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
float img_back = *((float *)((char *)in_image + y * pitch) + x);
// determine gl coord
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
float img_front = tex2D(d_float_texture0, xt, yt);
int segment_ind = (int)rintf(tex2D(d_float_texture1, xt, yt));
bool valid = (segment_ind > 0) && (segment_ind <= max_segment_ind);
img_front = img_front - 1.0f;
img_front *= 255.0;
*((float *)((char *)out_image + y *pitch) + x) =
valid ? img_front : img_back;
}
}
__global__ void colorBlend_kernel(uchar4 *out_image, const uchar4 *in_image,
int width, int height, float alpha_scale) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
uchar4 out;
int IND = y * width + x;
uchar4 img_back = in_image[IND];
uchar4 img_front = tex2D(d_rgba_texture, (float)x + 0.5f, (float)y + 0.5f);
float alpha_front = alpha_scale * 0.0039215686274510f *
(float)img_front.w; // divide by 255 and scale
float alpha_back = 1.0f - alpha_front;
out.x = alpha_front * img_front.x + alpha_back * img_back.x;
out.y = alpha_front * img_front.y + alpha_back * img_back.y;
out.z = alpha_front * img_front.z + alpha_back * img_back.z;
out.w = 255;
out_image[IND] = out;
}
}
__global__ void invalidateFlow_kernel(float *modFlowX, float *modFlowY,
const float *constFlowX,
const float *constFlowY, int width,
int height, float cons_thres) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x < width && y < height) {
int ind = __mul24(y, width) + x;
float mFX = modFlowX[ind];
float mFY = modFlowY[ind];
float cFX = constFlowX[ind];
float cFY = constFlowY[ind];
float err = (mFX - cFX) * (mFX - cFX) + (mFY - cFY) * (mFY - cFY);
err = sqrtf(err);
if (err > cons_thres) {
mFX = nanf("");
mFY = nanf("");
}
modFlowX[ind] = mFX;
modFlowY[ind] = mFY;
}
}
__global__ void colorInvalids_kernel(uchar4 *out_image, const float *in_image,
int width, int height) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x < width && y < height) {
int ind = __mul24(y, width) + x;
uchar4 temp = out_image[ind];
float value = in_image[ind];
if (!isfinite(value)) { // color
temp.x *= 0.5f;
temp.y *= 0.5f;
}
out_image[ind] = temp;
}
}
__global__ void convertKinectDisparityToRegularDisparity_kernel(
float *d_regularDisparity, int d_regularDisparityPitch,
const float *d_KinectDisparity, int d_KinectDisparityPitch, int width,
int height) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < width) & (y < height)) { // are we in the image?
float d_in =
*((float *)((char *)d_KinectDisparity + y * d_KinectDisparityPitch) +
x);
float d_out = (d_in == 0.0f) ? nanf("") : -d_in;
*((float *)((char *)d_regularDisparity + y *d_regularDisparityPitch) + x) =
d_out;
}
}
__global__ void convertKinectDisparityInPlace_kernel(float *d_disparity,
int pitch, int width,
int height,
float depth_scale) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < width) & (y < height)) { // are we in the image?
float *d_in = (float *)((char *)d_disparity + y * pitch) + x;
*d_in = (*d_in == 0.0f) ? nanf("") : (-depth_scale / *d_in);
}
}
__global__ void colorDistDiff_kernel(uchar4 *out_image, const float *disparity,
int disparity_pitch,
const float *disparity_prior, int width,
int height, float f, float b, float ox,
float oy, float dist_thres) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
int ind = y * width + x;
uchar4 temp = out_image[ind];
float disp = *((float *)((char *)disparity + y * disparity_pitch) + x);
float disp_model = disparity_prior[ind];
// 3D reconstruct and measure Euclidean distance
float xt = __fdividef((x - ox), f);
float yt = -__fdividef((y - oy), f); // coord. transform
float Zm = -(f * b) / disp_model;
float Xm = xt * Zm;
float Ym = yt * Zm;
float Zd = -(f * b) / disp;
float Xd = xt * Zd;
float Yd = yt * Zd;
float d_md = sqrtf((Xm - Xd) * (Xm - Xd) + (Ym - Yd) * (Ym - Yd) +
(Zm - Zd) * (Zm - Zd));
bool color = (d_md > dist_thres) | (isfinite(disp) & ~isfinite(disp_model));
if (color) { // color
temp.x *= 0.5f;
temp.y *= 0.5f;
}
out_image[ind] = temp;
}
}
// Calling functions
void convertFloatToRGBA(uchar4 *d_out_image, const float *d_in_image, int width,
int height, float lowerLim, float upperLim) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertFloatToRGBA_kernel << <dimGrid, dimBlock>>>
(d_out_image, d_in_image, width, height, lowerLim, upperLim);
}
void convertPitchedFloatToRGBA(uchar4 *d_out_image, const float *d_in_image,
int width, int height, int pitch, float lowerLim,
float upperLim) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertPitchedFloatToRGBA_kernel << <dimGrid, dimBlock>>>
(d_out_image, d_in_image, width, height, pitch, lowerLim, upperLim);
}
void convertKinectFloatToRGBA(uchar4 *d_out_image, const float *d_in_image,
int width, int height, int pitch, float lowerLim,
float upperLim) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertKinectFloatToRGBA_kernel << <dimGrid, dimBlock>>>
(d_out_image, d_in_image, width, height, pitch, lowerLim, upperLim);
}
void convertFloatToRGBA(uchar4 *d_out_image, const float *d_in_image, int width,
int height) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertFloatToRGBA_kernel << <dimGrid, dimBlock>>>
(d_out_image, d_in_image, width, height);
}
void convertFlowToRGBA(uchar4 *d_flowx_out, uchar4 *d_flowy_out,
const float *d_flowx_in, const float *d_flowy_in,
int width, int height, float lowerLim, float upperLim,
float minMag) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertFlowToRGBA_kernel << <dimGrid, dimBlock>>>
(d_flowx_out, d_flowy_out, d_flowx_in, d_flowy_in, width, height,
lowerLim, upperLim, minMag);
}
void convertPitchedFloatToGrayRGBA(uchar4 *d_out_image, const float *d_in_image,
int width, int height, int pitch,
float lowerLim, float upperLim) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertPitchedFloatToGrayRGBA_kernel << <dimGrid, dimBlock>>>
(d_out_image, d_in_image, width, height, pitch, lowerLim, upperLim);
}
void convertFloatArrayToGrayRGBA(uchar4 *d_out_image, cudaArray *in_array,
int width, int height, float lower_lim,
float upper_lim) {
// Bind textures to arrays
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_float_texture0, in_array, channelFloat);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertFloatArrayToGrayRGBA_kernel << <dimGrid, dimBlock>>>
(d_out_image, width, height, lower_lim, upper_lim);
cudaUnbindTexture(d_float_texture0);
}
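// Hedged modernization sketch (assumption, not part of the original file): the
// texture references used throughout this file belong to the legacy texture
// API, which recent CUDA toolkits have removed. A roughly equivalent binding
// with the texture-object API could look like the hypothetical helper below;
// kernels would then take a cudaTextureObject_t parameter and sample it with
// tex2D<float>(tex, x, y).
static cudaTextureObject_t sketchMakeFloatArrayTexture(cudaArray *array)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = array;
    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;
    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex; // destroy later with cudaDestroyTextureObject(tex)
}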
void createAnaglyph(uchar4 *d_out_image, const float *d_left_image,
const float *d_right_image, int width, int height,
int pre_shift) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
createAnaglyph_kernel << <dimGrid, dimBlock>>>
(d_out_image, d_left_image, d_right_image, width, height, pre_shift);
}
void createAnaglyph(uchar4 *d_out_image, const uchar4 *d_left_image,
const uchar4 *d_right_image, int width, int height,
int pre_shift) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
createAnaglyph_kernel << <dimGrid, dimBlock>>>
(d_out_image, d_left_image, d_right_image, width, height, pre_shift);
}
void convert2DVectorToAngleMagnitude(uchar4 *d_angle_image,
uchar4 *d_magnitude_image,
float *d_vector_X, float *d_vector_Y,
int width, int height, float lower_ang,
float upper_ang, float lower_mag,
float upper_mag) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convert2DVectorToAngleMagnitude_kernel << <dimGrid, dimBlock>>>
(d_angle_image, d_magnitude_image, d_vector_X, d_vector_Y, width, height,
lower_ang, upper_ang, lower_mag, upper_mag);
}
void convertFloatToRGBAbinary(uchar4 *out_image, const float *in_image,
int width, int height, float lowerLim,
float upperLim) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertFloatToRGBAbinary_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height, lowerLim, upperLim);
}
void mutuallyValidateFlowStereo(float *d_flow, float *d_disparity, int width,
int height) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
matchValidity_kernel << <dimGrid, dimBlock>>>
(d_flow, d_disparity, width, height);
}
void deInterleave(float *d_X_out, float *d_Y_out, float2 *d_XY_in,
int pitch_out, int pitch_in, int width, int height) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
// deInterleave_kernel<<<dimGrid,dimBlock>>>(d_X_out, d_Y_out, d_XY_in,
// pitch_out, pitch_in, width, height);
deInterleave_kernel2 << <dimGrid, dimBlock>>>
(d_X_out, d_Y_out, (char *)d_XY_in, pitch_out, pitch_in, width, height);
}
void applyIMOMask(float *d_IMOMask, float *d_IMO, const float *d_disparity,
float offset, int width, int height) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
IMOMask_kernel << <dimGrid, dimBlock>>>
(d_IMOMask, d_IMO, d_disparity, offset, width, height);
}
void blendFloatImageFloatLabelToRGBA(uchar4 *out_image, const float *in_image,
const float *label, int width, int height,
float lowerLim, float upperLim) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
blendFloatImageFloatLabelToRGBA_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, label, width, height, lowerLim, upperLim);
}
void blendFloatImageRGBAArrayToRGBA(uchar4 *out_image, const float *in_image,
cudaArray *in_array, int width, int height,
float w_r, float w_g, float w_b) {
// Bind textures to arrays
cudaChannelFormatDesc channelUChar4 = cudaCreateChannelDesc<uchar4>();
cudaBindTextureToArray(d_rgba_texture, in_array, channelUChar4);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
blendFloatImageRGBAArrayToRGBA_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height);
cudaUnbindTexture(d_rgba_texture);
}
void blendFloatImageFloatArrayToRGBA(uchar4 *out_image, const float *in_image,
cudaArray *in_array, int pitch_out,
int pitch_in, int width, int height) {
// Bind textures to arrays
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_float_texture0, in_array, channelFloat);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
blendFloatImageFloatArrayToRGBA_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, pitch_out, pitch_in, width, height);
cudaUnbindTexture(d_float_texture0);
}
void blendMultiColor(uchar4 *out_image, const float *in_image,
cudaArray *in_texture, cudaArray *in_segment_index,
int width, int height) {
// Bind textures to arrays
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_float_texture0, in_texture, channelFloat);
cudaBindTextureToArray(d_float_texture1, in_segment_index, channelFloat);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
blendMultiColor_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height);
cudaUnbindTexture(d_float_texture1);
cudaUnbindTexture(d_float_texture0);
}
void augmentedReality(float *out_image, const float *in_image,
cudaArray *in_array, int width, int height, int pitch) {
// Bind textures to arrays
cudaChannelFormatDesc channelUChar4 = cudaCreateChannelDesc<uchar4>();
cudaBindTextureToArray(d_rgba_texture, in_array, channelUChar4);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
augmentedReality_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height, pitch);
cudaUnbindTexture(d_rgba_texture);
}
void augmentedRealityFloatArray(float *out_image, const float *in_image,
cudaArray *in_array, int width, int height,
int pitch) {
// Bind textures to arrays
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_float_texture0, in_array, channelFloat);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
augmentedRealityFloatArray_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height, pitch);
cudaUnbindTexture(d_float_texture0);
}
void augmentedRealityFloatArraySelectiveBlend(float *out_image,
const float *in_image,
const cudaArray *texture,
const cudaArray *segment_index,
int width, int height, int pitch,
int max_segment_ind) {
// Bind textures to arrays
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_float_texture0, texture, channelFloat);
cudaBindTextureToArray(d_float_texture1, segment_index, channelFloat);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
augmentedRealityFloatArraySelectiveBlend_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height, pitch, max_segment_ind);
cudaUnbindTexture(d_float_texture1);
cudaUnbindTexture(d_float_texture0);
}
void colorBlend(uchar4 *out_image, const uchar4 *in_image, cudaArray *in_array,
int width, int height, float alpha_scale) {
// Bind texture to array
cudaChannelFormatDesc channelUChar4 = cudaCreateChannelDesc<uchar4>();
cudaBindTextureToArray(d_rgba_texture, in_array, channelUChar4);
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
colorBlend_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height, alpha_scale);
cudaUnbindTexture(d_rgba_texture);
}
void invalidateFlow(float *modFlowX, float *modFlowY, const float *constFlowX,
const float *constFlowY, int width, int height,
float cons_thres) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
invalidateFlow_kernel << <dimGrid, dimBlock>>>
(modFlowX, modFlowY, constFlowX, constFlowY, width, height, cons_thres);
}
void colorInvalids(uchar4 *out_image, const float *in_image, int width,
int height) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
colorInvalids_kernel << <dimGrid, dimBlock>>>
(out_image, in_image, width, height);
}
void convertKinectDisparityToRegularDisparity(float *d_regularDisparity,
int d_regularDisparityPitch,
const float *d_KinectDisparity,
int d_KinectDisparityPitch,
int width, int height) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertKinectDisparityToRegularDisparity_kernel << <dimGrid, dimBlock>>>
(d_regularDisparity, d_regularDisparityPitch, d_KinectDisparity,
d_KinectDisparityPitch, width, height);
}
void convertKinectDisparityInPlace(float *d_disparity, int pitch, int width,
int height, float depth_scale) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
convertKinectDisparityInPlace_kernel << <dimGrid, dimBlock>>>
(d_disparity, pitch, width, height, depth_scale);
}
void colorDistDiff(uchar4 *out_image, const float *disparity,
int disparity_pitch, const float *disparity_prior, int width,
int height, float focal_length, float baseline,
float nodal_point_x, float nodal_point_y, float dist_thres) {
dim3 dimBlock(16, 8, 1);
dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1);
colorDistDiff_kernel << <dimGrid, dimBlock>>>
(out_image, disparity, disparity_pitch, disparity_prior, width, height,
focal_length, baseline, nodal_point_x, nodal_point_y, dist_thres);
}
} // end namespace vision
#include "Segmentation.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread-block dimensions.
#define DEF_BLOCK_X 256
#define DEF_BLOCK_Y 1
// Kernel: _countW1Ker (count the vectors within the W1 neighborhood of each vector)
// For the current vector, this kernel computes three distance measures to every
// other vector (the Euclidean distance of the coordinates, the Euclidean distance
// of the feature values, and the angle between the vectors). Whenever all three
// fall within the W1 bandwidth, the count for the current index is incremented,
// yielding the final tally.
static __global__ void // kernel functions have no return value
_countW1Ker(
Segmentation segmentation, // segmentation operation class
FeatureVecArray infeaturevecarray, // input feature vectors
int *w1counts // number of vectors within the W1 neighborhood
);
// Kernel: _labelVectorsKer (label the class of each vector)
// Given the indices of already-labeled vectors, this kernel compares each vector
// against them; if their distance lies within the W2 bandwidth, the vector is
// marked as belonging to the same class. When this kernel is launched, the
// horizontal coordinate indexes the vectors to be labeled and the vertical
// coordinate indexes the vectors that are already labeled.
static __global__ void // kernel functions have no return value
_labelVectorsKer(
Segmentation segmentation, // segmentation operation class
FeatureVecArray infeaturevecarray, // input feature vectors
unsigned char *tmpbl, // label values to be set
int *tmpvecs, // index array of the vectors labeled so far
int tmpsize // size of the array of vectors labeled so far
);
// Kernel: _countAppointW1Ker (count the vectors within W1 of the designated vectors)
// Using the supplied label array, this kernel selectively computes, for each
// unlabeled vector, the three distance measures to the other unlabeled vectors
// (coordinate Euclidean distance, feature-value Euclidean distance, and vector
// angle). Whenever all three fall within the W1 bandwidth, the count for the
// current index is incremented, yielding the final tally.
static __global__ void // kernel functions have no return value
_countAppointW1Ker(
Segmentation segmentation, // segmentation operation class
FeatureVecArray infeaturevecarray, // input feature vectors
unsigned char *tmplbl, // temporary label array
int *w1counts // number of vectors within the W1 neighborhood
);
// Kernel: _segregateKer (final segmentation of the vectors)
// Based on the preliminary classification, this kernel counts, for every vector,
// how many category-1 and category-2 vectors lie within its W2 neighborhood and
// uses those counts to decide which category the vector is finally assigned to.
static __global__ void // kernel functions have no return value
_segregateKer(
Segmentation segmentation, // segmentation operation class
FeatureVecArray infeaturevecarray, // input feature vectors
unsigned char *tmp1lbl, // temporary label array for category 1
unsigned char *tmp2lbl, // temporary label array for category 2
int *outlabel // output array of classification results
);
// Kernel: _countW1Ker (count the vectors within the W1 neighborhood of each vector)
static __global__ void _countW1Ker(
Segmentation segmentation, FeatureVecArray infeaturevecarray,
int *w1counts)
{
// Compute the position in the point set handled by the current thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// If index exceeds the number of points to process, return without doing anything
if (index >= infeaturevecarray.count)
return;
int x1 = infeaturevecarray.x[index]; // x coordinate of the current vector
int y1 = infeaturevecarray.y[index]; // y coordinate of the current vector
float cv1 = infeaturevecarray.CV[index]; // CV of the current vector
float sd1 = infeaturevecarray.SD[index]; // SD of the current vector
float nc1 = infeaturevecarray.NC[index]; // NC of the current vector
int count = 0; // number of vectors within W1 of the current vector
// Count the vectors within the W1 neighborhood of the current vector
for (int i = 0; i < infeaturevecarray.count; i++) {
int x2 = infeaturevecarray.x[i]; // x coordinate of the compared vector
int y2 = infeaturevecarray.y[i]; // y coordinate of the compared vector
float cv2 = infeaturevecarray.CV[i]; // CV of the compared vector
float sd2 = infeaturevecarray.SD[i]; // SD of the compared vector
float nc2 = infeaturevecarray.NC[i]; // NC of the compared vector
// Euclidean distance between the coordinates of the two vectors
float d1 = sqrtf((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
// Euclidean distance between the feature values of the two vectors
float d2 = sqrt((cv2 - cv1) * (cv2 - cv1) + (sd2 - sd1) * (sd2 - sd1) +
(nc2 - nc1) * (nc2 - nc1));
// Cosine of the angle between the two vectors
float d3 = (x1 * x2 + y1 * y2 + cv1 * cv2 + sd1 * sd2 + nc1 * nc2) /
sqrt(x1 * x1 + y1 * y1 + cv1 * cv1 + sd1 * sd1 + nc1 * nc1) /
sqrt(x2 * x2 + y2 * y2 + cv2 * cv2 + sd2 * sd2 + nc2 * nc2);
// Check whether the distances between the two vectors lie within the W1 bandwidth
if (d1 < segmentation.getBw1().spaceWidth &&
d2 < segmentation.getBw1().rangeWidth &&
d3 < segmentation.getBw1().angleWidth)
count++;
}
// Write the tally into the count array
w1counts[index] = count;
}
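// Hedged refactoring sketch (not part of the original file): the three distance
// measures recomputed in every kernel of this file could be factored into a
// device helper such as this hypothetical one. It uses sqrtf throughout, where
// the original inline code mixes sqrtf and sqrt.
__device__ static void featureDistancesSketch(
        int x1, int y1, float cv1, float sd1, float nc1,
        int x2, int y2, float cv2, float sd2, float nc2,
        float &d1, float &d2, float &d3)
{
    // Euclidean distance between the two pixel positions
    d1 = sqrtf((float)((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1)));
    // Euclidean distance between the (CV, SD, NC) feature triples
    d2 = sqrtf((cv2 - cv1) * (cv2 - cv1) + (sd2 - sd1) * (sd2 - sd1) +
               (nc2 - nc1) * (nc2 - nc1));
    // cosine of the angle between the full (x, y, CV, SD, NC) vectors
    d3 = (x1 * x2 + y1 * y2 + cv1 * cv2 + sd1 * sd2 + nc1 * nc2) /
         sqrtf((float)(x1 * x1 + y1 * y1) + cv1 * cv1 + sd1 * sd1 + nc1 * nc1) /
         sqrtf((float)(x2 * x2 + y2 * y2) + cv2 * cv2 + sd2 * sd2 + nc2 * nc2);
}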
// Kernel: _labelVectorsKer (label the class of each vector)
static __global__ void _labelVectorsKer(
Segmentation segmentation, FeatureVecArray infeaturevecarray,
unsigned char *tmplbl, int *tmpvecs, int tmpsize)
{
// Compute the position in the point set handled by the current thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// If index exceeds the number of points to process, return without doing anything
if (index >= infeaturevecarray.count)
return;
// Compute the index of the already-labeled vector to compare against
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
// If the vertical coordinate exceeds tmpsize, return without doing anything
if (index_y >= tmpsize)
return;
// Fetch the index of the current seed vector
int seedindex = tmpvecs[index_y];
int x1 = infeaturevecarray.x[index]; // x coordinate of the current vector
int y1 = infeaturevecarray.y[index]; // y coordinate of the current vector
float cv1 = infeaturevecarray.CV[index]; // CV of the current vector
float sd1 = infeaturevecarray.SD[index]; // SD of the current vector
float nc1 = infeaturevecarray.NC[index]; // NC of the current vector
int x2 = infeaturevecarray.x[seedindex]; // x coordinate of the seed vector
int y2 = infeaturevecarray.y[seedindex]; // y coordinate of the seed vector
float cv2 = infeaturevecarray.CV[seedindex]; // CV of the seed vector
float sd2 = infeaturevecarray.SD[seedindex]; // SD of the seed vector
float nc2 = infeaturevecarray.NC[seedindex]; // NC of the seed vector
// Euclidean distance between the coordinates of the two vectors
float d1 = sqrtf((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
// Euclidean distance between the feature values of the two vectors
float d2 = sqrt((cv2 - cv1) * (cv2 - cv1) + (sd2 - sd1) * (sd2 - sd1) +
(nc2 - nc1) * (nc2 - nc1));
// Cosine of the angle between the two vectors
float d3 = (x1 * x2 + y1 * y2 + cv1 * cv2 + sd1 * sd2 + nc1 * nc2) /
sqrt(x1 * x1 + y1 * y1 + cv1 * cv1 + sd1 * sd1 + nc1 * nc1) /
sqrt(x2 * x2 + y2 * y2 + cv2 * cv2 + sd2 * sd2 + nc2 * nc2);
// If the distances between the two vectors lie within the W2 bandwidth,
// mark the vector as belonging to the same class
if (d1 < segmentation.getBw2().spaceWidth &&
d2 < segmentation.getBw2().rangeWidth &&
d3 < segmentation.getBw2().angleWidth)
tmplbl[index] = 1;
}
// Kernel: _countAppointW1Ker (count the vectors within W1 of the designated vectors)
static __global__ void _countAppointW1Ker(
Segmentation segmentation, FeatureVecArray infeaturevecarray,
unsigned char *tmplbl, int *w1counts)
{
// Compute the position in the point set handled by the current thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// If index exceeds the number of points to process, return without doing anything
if (index >= infeaturevecarray.count)
return;
// Initialize the neighbor count of every vector to 0
w1counts[index] = 0;
// If the current index is already labeled, return without doing anything
if (tmplbl[index] == 1)
return;
int x1 = infeaturevecarray.x[index]; // x coordinate of the current vector
int y1 = infeaturevecarray.y[index]; // y coordinate of the current vector
float cv1 = infeaturevecarray.CV[index]; // CV of the current vector
float sd1 = infeaturevecarray.SD[index]; // SD of the current vector
float nc1 = infeaturevecarray.NC[index]; // NC of the current vector
int count = 0; // number of vectors within W1 of the current vector
// Count the vectors within the W1 neighborhood of the current vector
for (int i = 0; i < infeaturevecarray.count; i++) {
// Skip vectors that are already labeled
if (tmplbl[i] == 1)
continue;
int x2 = infeaturevecarray.x[i]; // x coordinate of the compared vector
int y2 = infeaturevecarray.y[i]; // y coordinate of the compared vector
float cv2 = infeaturevecarray.CV[i]; // CV of the compared vector
float sd2 = infeaturevecarray.SD[i]; // SD of the compared vector
float nc2 = infeaturevecarray.NC[i]; // NC of the compared vector
// Euclidean distance between the coordinates of the two vectors
float d1 = sqrtf((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
// Euclidean distance between the feature values of the two vectors
float d2 = sqrt((cv2 - cv1) * (cv2 - cv1) + (sd2 - sd1) * (sd2 - sd1) +
(nc2 - nc1) * (nc2 - nc1));
// Cosine of the angle between the two vectors
float d3 = (x1 * x2 + y1 * y2 + cv1 * cv2 + sd1 * sd2 + nc1 * nc2) /
sqrt(x1 * x1 + y1 * y1 + cv1 * cv1 + sd1 * sd1 + nc1 * nc1) /
sqrt(x2 * x2 + y2 * y2 + cv2 * cv2 + sd2 * sd2 + nc2 * nc2);
// Check whether the distances between the two vectors lie within the W1 bandwidth
if (d1 < segmentation.getBw1().spaceWidth &&
d2 < segmentation.getBw1().rangeWidth &&
d3 < segmentation.getBw1().angleWidth)
count++;
}
// Write the tally into the count array
w1counts[index] = count;
}
// Kernel: _segregateKer (final segmentation of the vectors)
static __global__ void _segregateKer(
Segmentation segmentation, FeatureVecArray infeaturevecarray,
unsigned char *tmp1lbl, unsigned char *tmp2lbl, int *outlabel)
{
// Compute the position in the point set handled by the current thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// If index exceeds the number of points to process, return without doing anything
if (index >= infeaturevecarray.count)
return;
// Initialize the output label of every vector to 0
outlabel[index] = 0;
int x1 = infeaturevecarray.x[index]; // x coordinate of the current vector
int y1 = infeaturevecarray.y[index]; // y coordinate of the current vector
float cv1 = infeaturevecarray.CV[index]; // CV of the current vector
float sd1 = infeaturevecarray.SD[index]; // SD of the current vector
float nc1 = infeaturevecarray.NC[index]; // NC of the current vector
long sprtcount1 = 0; // number of provisional category-1 vectors within W2
long sprtcount2 = 0; // number of provisional category-2 vectors within W2
float distsum1 = 0.0; // sum of squared feature distances between the current
// vector and the provisional category-1 vectors within W2
float distsum2 = 0.0; // sum of squared feature distances between the current
// vector and the provisional category-2 vectors within W2
// Count the category-1 and category-2 vectors within W2 of the current vector
for (int i = 0; i < infeaturevecarray.count; i++) {
int x2 = infeaturevecarray.x[i]; // x coordinate of the compared vector
int y2 = infeaturevecarray.y[i]; // y coordinate of the compared vector
float cv2 = infeaturevecarray.CV[i]; // CV of the compared vector
float sd2 = infeaturevecarray.SD[i]; // SD of the compared vector
float nc2 = infeaturevecarray.NC[i]; // NC of the compared vector
// Euclidean distance between the coordinates of the two vectors
float d1 = sqrtf((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
// Euclidean distance between the feature values of the two vectors
float d2 = sqrt((cv2 - cv1) * (cv2 - cv1) + (sd2 - sd1) * (sd2 - sd1) +
(nc2 - nc1) * (nc2 - nc1));
// Cosine of the angle between the two vectors
float d3 = (x1 * x2 + y1 * y2 + cv1 * cv2 + sd1 * sd2 + nc1 * nc2) /
sqrt(x1 * x1 + y1 * y1 + cv1 * cv1 + sd1 * sd1 + nc1 * nc1) /
sqrt(x2 * x2 + y2 * y2 + cv2 * cv2 + sd2 * sd2 + nc2 * nc2);
// Check whether the distances between the two vectors lie within the W2 bandwidth
if (d1 < segmentation.getBw2().spaceWidth &&
d2 < segmentation.getBw2().rangeWidth &&
d3 < segmentation.getBw2().angleWidth) {
if (tmp1lbl[i] == 1) {
sprtcount1++;
distsum1 += d2 * d2;
}
if (tmp2lbl[i] == 1) {
sprtcount2++;
distsum2 += d2 * d2;
}
}
}
// If only category-1 vectors surround the current vector, its final label is 1
if (sprtcount1 > 0 && sprtcount2 == 0) {
outlabel[index] = 1;
return;
}
// If only category-2 vectors surround the current vector, its final label is 2
if (sprtcount2 > 0 && sprtcount1 == 0) {
outlabel[index] = 2;
return;
}
// If category 1 dominates category 2 around the current vector, the final label is 1
if (sprtcount1 > segmentation.getBeta() * sprtcount2) {
outlabel[index] = 1;
return;
}
// If category 2 dominates category 1 around the current vector, the final label is 2
if (sprtcount2 > segmentation.getBeta() * sprtcount1) {
outlabel[index] = 2;
return;
}
// Otherwise decide by the normalized sums of squared feature-value differences
if (distsum1 / powf(sprtcount1, segmentation.getAlpha()) <=
distsum2 / powf(sprtcount2, segmentation.getAlpha()))
outlabel[index] = 1;
else
outlabel[index] = 2;
}
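// Note (interpretation of the code above): when neither class is absent and
// neither dominates by the beta factor, the vector joins the class k with the
// smaller value of distsum_k / powf(sprtcount_k, alpha), i.e. the sum of
// squared feature-space distances to that provisional class, normalized by the
// class population raised to alpha.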
// Macro: FREE_LOCAL_MEMORY_SEGREGATE (release locally allocated device/host memory)
// This macro frees the device- and host-side memory allocated during segregate.
#define FREE_LOCAL_MEMORY_SEGREGATE do { \
if ((w1counts) != NULL) \
delete [] (w1counts); \
if ((w1countsdev) != NULL) \
cudaFree((w1countsdev)); \
if ((tmp1vecs) != NULL) \
delete [] (tmp1vecs); \
if ((tmp2vecs) != NULL) \
delete [] (tmp2vecs); \
if ((tmp1vecsdev) != NULL) \
cudaFree((tmp1vecsdev)); \
if ((tmp2vecsdev) != NULL) \
cudaFree((tmp2vecsdev)); \
if ((tmp1lbl) != NULL) \
delete [] (tmp1lbl); \
if ((tmp1lbldev) != NULL) \
cudaFree((tmp1lbldev)); \
if ((tmp2lbl) != NULL) \
delete [] (tmp2lbl); \
if ((tmp2lbldev) != NULL) \
cudaFree((tmp2lbldev)); \
if ((outlabeldev) != NULL) \
cudaFree((outlabeldev)); \
}while (0)
// Host member method: segregate (image segmentation)
__host__ int Segmentation::segregate(FeatureVecArray *featurevecarray,
int *outlabel)
{
// If any input pointer is NULL, report the error and return immediately
if (featurevecarray == NULL || outlabel == NULL)
return NULL_POINTER;
// If the input data are not valid, report the error and return immediately
if (featurevecarray->count <= 0)
return INVALID_DATA;
int errcode; // local variable: error code
int *w1counts = NULL; // number of vectors within W1 of each vector
int *w1countsdev = NULL; // device-side pointer corresponding to w1counts
int count = featurevecarray->count; // number of vectors
// Tentative arrays for categories 1 and 2; they store the indices of the vectors
// provisionally labeled 1 and 2
int *tmp1vecs = NULL; // indices of vectors provisionally assigned to category 1
int *tmp2vecs = NULL; // indices of vectors provisionally assigned to category 2
int *tmp1vecsdev = NULL; // device copy of the category-1 index array
int *tmp2vecsdev = NULL; // device copy of the category-2 index array
// Tentative label arrays for categories 1 and 2; they mark whether the vector at
// the corresponding index belongs to that category
unsigned char *tmp1lbl = NULL; // host-side label array for category 1
unsigned char *tmp1lbldev = NULL; // device-side label array for category 1
unsigned char *tmp2lbl = NULL; // host-side label array for category 2
unsigned char *tmp2lbldev = NULL; // device-side label array for category 2
// Device-side array of final labels
int *outlabeldev = NULL; // device-side final label array
// Allocate the W1 count array on the host
w1counts = new int[count];
if (w1counts == NULL)
return UNKNOW_ERROR;
// Allocate the W1 count array on the device
errcode = cudaMalloc((void **)&w1countsdev, count * sizeof(int));
if (errcode != cudaSuccess) {
// Free the allocated memory to prevent leaks
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Compute the thread-block size and block count for the kernel launch
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (count + blocksize.x - 1) / blocksize.x;
gridsize.y = 1;
// Launch the kernel that counts the vectors within W1 of each vector
_countW1Ker<<<gridsize, blocksize>>>(*this, *featurevecarray,
w1countsdev);
// If the CUDA call failed, return the error code
if (cudaGetLastError() != cudaSuccess) {
// Free the allocated memory to prevent leaks
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the counts back to the host
errcode = cudaMemcpy(w1counts, w1countsdev,
count * sizeof(int),
cudaMemcpyDeviceToHost);
// If the copy failed, report the error and return
if (errcode != cudaSuccess) {
// Free the allocated memory to prevent leaks
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
int seed1index = 0; // index of seed point 1
int maxcounts = 0; // largest number of vectors found within the W1 distance
// Scan the count array; the index with the largest count becomes seed point 1
for (int i = 0; i < count; i++) {
if (w1counts[i] > maxcounts) {
maxcounts = w1counts[i];
seed1index = i;
}
}
int tmp1size = 0; // number of elements currently in the category-1 array, initially 0
int tmp2size = 0; // number of elements currently in the category-2 array, initially 0
// Allocate host space for the category-1 tentative array
tmp1vecs = new int[count];
if (tmp1vecs == NULL) {
// Free the allocated memory to prevent leaks
FREE_LOCAL_MEMORY_SEGREGATE;
return UNKNOW_ERROR;
}
// Add seed point 1 to the category-1 array and increment tmp1size
tmp1vecs[tmp1size++] = seed1index;
// Allocate device space for the category-1 tentative array
errcode = cudaMalloc((void **)&tmp1vecsdev, sizeof(int) * count);
if (errcode != cudaSuccess) {
// Free the allocated memory to prevent leaks
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the meaningful part of the host-side array to the device
errcode = cudaMemcpy(tmp1vecsdev, tmp1vecs, sizeof(int) * tmp1size,
cudaMemcpyHostToDevice);
if (errcode != cudaSuccess) {
// Free the allocated memory to prevent leaks
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Allocate host space for the category-1 label array
tmp1lbl = new unsigned char[count];
if (tmp1lbl == NULL) {
// Free the allocated memory to prevent leaks
FREE_LOCAL_MEMORY_SEGREGATE;
return UNKNOW_ERROR;
}
// Initialize the category-1 label array
for (int i = 0; i < count; i++) {
tmp1lbl[i] = 0;
}
tmp1lbl[seed1index] = 1;
// Allocate device space for the category-1 label array
errcode = cudaMalloc((void **)&tmp1lbldev, sizeof(unsigned char) * count);
if (errcode != cudaSuccess) {
// 释放申请的内存,防止内存泄漏
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// 将 host 端的标记数组拷贝到 device 端
errcode = cudaMemcpy(tmp1lbldev, tmp1lbl, sizeof(unsigned char) * count,
cudaMemcpyHostToDevice);
if (errcode != cudaSuccess) {
// 释放申请的内存,防止内存泄漏
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// 循环标记暂时属于类别1的向量,直到暂时属于类别1的向量的个数不再增加
while (1) {
// Compute the block size and grid size for the kernel launch.
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (count + blocksize.x - 1) / blocksize.x;
gridsize.y = (tmp1size + blocksize.y - 1) / blocksize.y;
// Launch the kernel that labels the class-1 vectors.
_labelVectorsKer<<<gridsize, blocksize>>>(*this, *featurevecarray,
tmp1lbldev, tmp1vecsdev,
tmp1size);
// Check whether the kernel launch failed.
errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the flag array back to the host.
errcode = cudaMemcpy(tmp1lbl, tmp1lbldev, sizeof(unsigned char) * count,
cudaMemcpyDeviceToHost);
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Number of vectors labeled so far in this iteration.
int tmp1sizenow = 0;
// Scan the flag array and append every labeled index to the tentative array.
for (int i = 0; i < count; i++) {
if (tmp1lbl[i] == 1)
tmp1vecs[tmp1sizenow++] = i;
}
// If the size did not change between iterations, exit the loop.
if (tmp1sizenow == tmp1size)
break;
// Update the recorded size with the current one.
tmp1size = tmp1sizenow;
// Copy the updated index array to the device.
errcode = cudaMemcpy(tmp1vecsdev, tmp1vecs, tmp1size * sizeof(int),
cudaMemcpyHostToDevice);
}
// Compute the block size and grid size for the kernel launch.
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (count + blocksize.x - 1) / blocksize.x;
gridsize.y = 1;
// Launch the kernel that finds the seed point of class 2. There is no need to allocate a new
// count array: the one from the previous step is reused and overwritten, since its data is no longer needed.
_countAppointW1Ker<<<gridsize, blocksize>>>(*this, *featurevecarray,
tmp1lbldev, w1countsdev);
// If the CUDA call failed, return an error code.
if (cudaGetLastError() != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the counts back to the host.
errcode = cudaMemcpy(w1counts, w1countsdev,
count * sizeof(int),
cudaMemcpyDeviceToHost);
// If the copy failed, return an error.
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
int seed2index = 0; // index of seed point 2
maxcounts = 0; // largest number of vectors found within distance W1
// Scan the count array; the index with the largest count becomes seed point 2.
for (int i = 0; i < count; i++) {
if (w1counts[i] > maxcounts) {
maxcounts = w1counts[i];
seed2index = i;
}
}
// Allocate host memory for the class-2 tentative array.
tmp2vecs = new int[count];
if (tmp2vecs == NULL) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return UNKNOW_ERROR;
}
// Add seed point 2 to the class-2 tentative array and increment tmp2size.
tmp2vecs[tmp2size++] = seed2index;
// Allocate device memory for the class-2 tentative array.
errcode = cudaMalloc((void **)&tmp2vecsdev, sizeof(int) * count);
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the meaningful part of the host tentative array to the device.
errcode = cudaMemcpy(tmp2vecsdev, tmp2vecs, sizeof(int) * tmp2size,
cudaMemcpyHostToDevice);
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Allocate host memory for the class-2 flag array.
tmp2lbl = new unsigned char[count];
if (tmp2lbl == NULL) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return UNKNOW_ERROR;
}
// Initialize the class-2 flag array.
for (int i = 0; i < count; i++) {
tmp2lbl[i] = 0;
}
tmp2lbl[seed2index] = 1;
// Allocate device memory for the class-2 flag array.
errcode = cudaMalloc((void **)&tmp2lbldev, sizeof(unsigned char) * count);
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the host flag array to the device.
errcode = cudaMemcpy(tmp2lbldev, tmp2lbl, sizeof(unsigned char) * count,
cudaMemcpyHostToDevice);
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Iteratively label vectors tentatively belonging to class 2 until the class-2 count stops growing.
while (1) {
// Compute the block size and grid size for the kernel launch.
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (count + blocksize.x - 1) / blocksize.x;
gridsize.y = (tmp2size + blocksize.y - 1) / blocksize.y;
// Launch the kernel that labels the class-2 vectors.
_labelVectorsKer<<<gridsize, blocksize>>>(*this, *featurevecarray,
tmp2lbldev, tmp2vecsdev,
tmp2size);
// Check whether the kernel launch failed.
errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the flag array back to the host.
errcode = cudaMemcpy(tmp2lbl, tmp2lbldev, sizeof(unsigned char) * count,
cudaMemcpyDeviceToHost);
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Number of vectors labeled so far in this iteration.
int tmp2sizenow = 0;
// Scan the flag array and append every labeled index to the tentative array.
for (int i = 0; i < count; i++) {
if (tmp2lbl[i] == 1)
tmp2vecs[tmp2sizenow++] = i;
}
// If the size did not change between iterations, exit the loop.
if (tmp2sizenow == tmp2size)
break;
// Update the recorded size with the current one.
tmp2size = tmp2sizenow;
// Copy the updated index array to the device.
errcode = cudaMemcpy(tmp2vecsdev, tmp2vecs, tmp2size * sizeof(int),
cudaMemcpyHostToDevice);
}
// Compute the block size and grid size for the kernel launch.
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (count + blocksize.x - 1) / blocksize.x;
gridsize.y = 1;
// Allocate device memory for the final label array.
errcode = cudaMalloc((void **)&outlabeldev, sizeof(int) * count);
if (errcode != cudaSuccess) {
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Launch the kernel that performs the final classification.
_segregateKer<<<gridsize, blocksize>>>(*this, *featurevecarray,
tmp1lbldev, tmp2lbldev, outlabeldev);
// Check whether the kernel launch failed.
errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Copy the final label array back to the host.
errcode = cudaMemcpy(outlabel, outlabeldev, sizeof(int) * count,
cudaMemcpyDeviceToHost);
if (errcode != cudaSuccess) {
// Free the allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return CUDA_ERROR;
}
// Clean up allocated memory to avoid leaks.
FREE_LOCAL_MEMORY_SEGREGATE;
return NO_ERROR;
}
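// Hedged usage sketch (not part of the original source): one way a host caller
// might drive Segmentation::segregate. Only the 'count' member of FeatureVecArray
// is assumed here, since it is the only field segregate reads directly; building
// the FeatureVecArray itself is left to the surrounding code base.
static int runSegregateDemo(Segmentation &seg, FeatureVecArray *fva)
{
    if (fva == NULL || fva->count <= 0)
        return INVALID_DATA;
    // One output label per input feature vector.
    int *labels = new int[fva->count];
    int err = seg.segregate(fva, labels);
    if (err == NO_ERROR) {
        // labels[i] now holds the final class assigned to feature vector i.
    }
    delete [] labels;
    return err;
}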
/*
Utility functions
*/
//Computes the storage index of the tree-traverse stack
template<int SHIFT>
__forceinline__ __device__ int ACCS(const int i)
{
return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x;
}
#define BTEST(x) (-(int)(x))
__forceinline__ __device__ float4 get_float4(float4 const volatile &v)
{
return make_float4(v.x, v.y, v.z, v.w);
}
/*
End utility functions
*/
/*
Compute the properties of the current group on the fly
*/
__forceinline__ __device__ void computeGroupProps(real4 &group_pos,
real4 &curGroupSize,
real4 pos_i,
int* shmem)
{
const int tid = threadIdx.x;
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&sh_rmin[NTHREAD];
float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
//Set the shared memory with the data
// if (tid >= nb_i)
{
// sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
// sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
}
// else
{
sh_rmin[tid].x = r_min.x = pos_i.x; sh_rmin[tid].y = r_min.y = pos_i.y; sh_rmin[tid].z = r_min.z = pos_i.z;
sh_rmax[tid].x = r_max.x = pos_i.x; sh_rmax[tid].y = r_max.y = pos_i.y; sh_rmax[tid].z = r_max.z = pos_i.z;
}
__syncthreads();
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax); } __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax); } __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax); } __syncthreads();
if(blockDim.x >= 64) if (tid < 32) {sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin, sh_rmax); }
if(blockDim.x >= 32) if (tid < 16) {sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin, sh_rmax); }
if(tid < 8)
{
sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin, sh_rmax);
}
__syncthreads();
r_min.x = sh_rmin[0].x;
r_min.y = sh_rmin[0].y;
r_min.z = sh_rmin[0].z;
r_max.x = sh_rmax[0].x;
r_max.y = sh_rmax[0].y;
r_max.z = sh_rmax[0].z;
//Compute the group center and size
group_pos.x = 0.5*(r_min.x + r_max.x);
group_pos.y = 0.5*(r_min.y + r_max.y);
group_pos.z = 0.5*(r_min.z + r_max.z);
float3 grpSize = (float3){fmaxf(fabs(group_pos.x-r_min.x), fabs(group_pos.x-r_max.x)),
fmaxf(fabs(group_pos.y-r_min.y), fabs(group_pos.y-r_max.y)),
fmaxf(fabs(group_pos.z-r_min.z), fabs(group_pos.z-r_max.z))};
//Store the box size and opening criteria
curGroupSize.x = grpSize.x;
curGroupSize.y = grpSize.y;
curGroupSize.z = grpSize.z;
float l = max(grpSize.x, max(grpSize.y, grpSize.z));
group_pos.w = l;
}
/*
Compute the softening of the group
*/
__forceinline__ __device__ float computeGroupSoftening(real4 *body_vel,
int body_i,
int* shmem)
{
float group_eps;
#ifdef INDSOFT
const int tid = threadIdx.x;
float eps2 = body_vel[body_i].w; // per-particle softening of this body
group_eps = eps2;
volatile float *reduc = (float*) &shmem[0];
reduc[threadIdx.x] = eps2;
//Find the maximum softening value for the particles in this group
__syncthreads();
// do reduction in shared mem
if(blockDim.x >= 512) if (tid < 256) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 256]);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 128]);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 64]);} __syncthreads();
if(blockDim.x >= 64) if (tid < 32) { reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 32]);}
if(blockDim.x >= 32) if (tid < 16) { reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 16]);}
if(tid < 8)
{
reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 8]);
reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 4]);
reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 2]);
reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 1]);
}
__syncthreads();
group_eps = reduc[0];
#else
group_eps = 0;
#endif
return group_eps;
}
/*
Prefix sum functions
*/
template<class T>
struct ADDOP {
__device__ static inline T identity() {return (T)(0);}
__device__ static inline T apply(T a, T b) {return (T)(a + b);};
__device__ static inline T unapply(T a, T b) {return (T)(a - b);};
__device__ static inline T mask(bool flag, T b) {return (T)(-(int)(flag) & b);};
};
template<class OP, class T>
// __device__ T inclusive_scan_warp(volatile T *ptr, T mysum, const unsigned int idx = threadIdx.x) {
__device__ __forceinline__ T inclusive_scan_warp(volatile T *ptr, T mysum, const unsigned int idx ) {
const unsigned int lane = idx & 31;
if (lane >= 1) ptr[idx] = mysum = OP::apply(ptr[idx - 1], mysum);
if (lane >= 2) ptr[idx] = mysum = OP::apply(ptr[idx - 2], mysum);
if (lane >= 4) ptr[idx] = mysum = OP::apply(ptr[idx - 4], mysum);
if (lane >= 8) ptr[idx] = mysum = OP::apply(ptr[idx - 8], mysum);
if (lane >= 16) ptr[idx] = mysum = OP::apply(ptr[idx - 16], mysum);
return ptr[idx];
}
__device__ __forceinline__ int inclusive_scan_warp(volatile int *ptr, int mysum, const unsigned int idx) {
const unsigned int lane = idx & 31;
if (lane >= 1) ptr[idx] = mysum = ptr[idx - 1] + mysum;
if (lane >= 2) ptr[idx] = mysum = ptr[idx - 2] + mysum;
if (lane >= 4) ptr[idx] = mysum = ptr[idx - 4] + mysum;
if (lane >= 8) ptr[idx] = mysum = ptr[idx - 8] + mysum;
if (lane >= 16) ptr[idx] = mysum = ptr[idx - 16] + mysum;
return ptr[idx];
}
template<class OP, class T>
__device__ __inline__ T inclusive_scan_block(volatile T *ptr, const T v0, const unsigned int idx) {
const unsigned int lane = idx & 31;
const unsigned int warpid = idx >> 5;
// step 0: Write the value from this thread to memory
ptr[idx] = v0;
T mysum = v0;
__syncthreads();
// step 1: Intra-warp scan in each warp
// T val = inclusive_scan_warp<OP, T>(ptr, mysum, idx);
T val = inclusive_scan_warp(ptr, mysum, idx);
__syncthreads();
// step 2: Collect per-warp particle results
if (lane == 31) ptr[warpid] = ptr[idx];
__syncthreads();
mysum = ptr[idx];
// step 3: Use 1st warp to scan per-warp results
if (warpid == 0) inclusive_scan_warp<OP, T>(ptr,mysum, idx);
__syncthreads();
// step 4: Accumulate results from Steps 1 and 3;
if (warpid > 0) val = OP::apply(ptr[warpid - 1], val);
__syncthreads();
// Step 5: Write and return the final result
ptr[idx] = val;
__syncthreads();
return val; //ptr[blockDim.x - 1];
}
template<class OP, class T>
// __device__ T inclusive_scan_block(volatile T *ptr, const unsigned int idx = threadIdx.x) {
__device__ T inclusive_scan_block(volatile T *ptr, const unsigned int idx) {
const unsigned int lane = idx & 31;
const unsigned int warpid = idx >> 5;
T mysum = ptr[idx];
__syncthreads();
// step 1: Intra-warp scan in each warp
T val = inclusive_scan_warp<OP, T>(ptr, mysum, idx);
__syncthreads();
// step 2: Collect per-warp particle results
if (lane == 31) ptr[warpid] = ptr[idx];
__syncthreads();
mysum = ptr[idx];
// step 3: Use 1st warp to scan per-warp results
if (warpid == 0) inclusive_scan_warp<OP, T>(ptr,mysum, idx);
__syncthreads();
// step 4: Accumulate results from Steps 1 and 3;
if (warpid > 0) val = OP::apply(ptr[warpid - 1], val);
__syncthreads();
// Step 5: Write and return the final result
ptr[idx] = val;
__syncthreads();
return val; //ptr[blockDim.x - 1];
}
template<class OP, class T>
// __device__ T inclusive_scan_array(volatile T *ptr_global, const int N, const unsigned int idx = threadIdx.x) {
__device__ T inclusive_scan_array(volatile T *ptr_global, const int N, const unsigned int idx) {
T y = OP::identity();
volatile T *ptr = ptr_global;
for (int p = 0; p < N; p += blockDim.x) {
ptr = &ptr_global[p];
inclusive_scan_block<OP, T>(ptr, idx);
ptr[idx] = OP::apply(ptr[idx], y);
__syncthreads();
y = ptr[blockDim.x - 1];
__syncthreads();
}
return y;
}
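// Hedged self-test (not part of the original code): exercises the templated
// block-wide inclusive scan above with ADDOP<int>. The launch configuration of
// one block with 128 threads is an assumption made only for this sketch. Every
// thread contributes the value 1, so after the scan thread i must hold i + 1.
__global__ void test_inclusive_scan_block(int *out)
{
  __shared__ int s_vals[128];
  const unsigned int idx = threadIdx.x;
  s_vals[idx] = 1;                 // each thread contributes 1
  __syncthreads();
  int prefix = inclusive_scan_block< ADDOP<int>, int >(s_vals, idx);
  out[idx] = prefix;               // expected: idx + 1 (launch as <<<1, 128>>>)
}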
/*
Opening criteria functions
*/
//1) Minimum distance opening criteria
#ifdef INDSOFT
__device__ bool split_node_grav_md(float4 nodeCenter, float4 nodeSize, float4 groupCenter, float4 groupSize,
float group_eps, float node_eps)
#else
__device__ bool split_node_grav_md(float4 nodeCenter, float4 nodeSize, float4 groupCenter, float4 groupSize)
#endif
{
//Compute the distance between the group and the cell
float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x),
fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y),
fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)};
dr.x += fabs(dr.x); dr.x *= 0.5f;
dr.y += fabs(dr.y); dr.y *= 0.5f;
dr.z += fabs(dr.z); dr.z *= 0.5f;
//Distance squared, no need to do sqrt since opening criteria has been squared
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
#ifdef INDSOFT
//Following Inti's idea: fewer unnecessary node openings now
if(ds2 <= ((group_eps + node_eps ) * (group_eps + node_eps) )) return true;
#endif
return (ds2 <= fabs(nodeCenter.w));
}
// modified by M.I.
//2) with rsearch
#ifdef INDSOFT
__device__ bool split_node_grav_md_rsearch(float4 nodeCenter,
float4 nodeSize,
float4 groupCenter,
float4 groupSize,
float group_eps,
float node_eps,
float rsearch_sq)
#else
__device__ bool split_node_grav_md_rsearch(float4 nodeCenter,
float4 nodeSize,
float4 groupCenter,
float4 groupSize,
float rsearch_sq)
#endif
{
//Compute the distance between the group and the cell
float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x),
fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y),
fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)};
dr.x += fabs(dr.x); dr.x *= 0.5f;
dr.y += fabs(dr.y); dr.y *= 0.5f;
dr.z += fabs(dr.z); dr.z *= 0.5f;
//Distance squared, no need to do sqrt since opening criteria has been squared
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
#ifdef INDSOFT
//Following Inti's idea: fewer unnecessary node openings now
if(ds2 <= ((group_eps + node_eps ) * (group_eps + node_eps) )) return true;
#endif
return ( (ds2 <= fabs(nodeCenter.w)) || ds2 < rsearch_sq);
}
// 3) Improved Barnes-Hut criterion
#ifdef INDSOFT
__device__ bool split_node_grav_impbh(float4 nodeCOM, float4 groupCenter, float4 groupSize,
float group_eps, float node_eps)
#else
__device__ bool split_node_grav_impbh(float4 nodeCOM, float4 groupCenter, float4 groupSize)
#endif
{
//Compute the distance between the group and the cell
float3 dr = {fabs(groupCenter.x - nodeCOM.x) - (groupSize.x),
fabs(groupCenter.y - nodeCOM.y) - (groupSize.y),
fabs(groupCenter.z - nodeCOM.z) - (groupSize.z)};
dr.x += fabs(dr.x); dr.x *= 0.5f;
dr.y += fabs(dr.y); dr.y *= 0.5f;
dr.z += fabs(dr.z); dr.z *= 0.5f;
//Distance squared, no need to do sqrt since opening criteria has been squared
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
#ifdef INDSOFT
//Extra test
if(ds2 <= ((group_eps + node_eps ) * (group_eps + node_eps) )) return true;
#endif
return (ds2 <= fabs(nodeCOM.w));
}
// by M.I.
// 4) Improved Barnes-Hut criterion with rsearch
#ifdef INDSOFT
__device__ bool split_node_grav_impbh_rsearch(float4 nodeCOM,
float4 nodeCenter,
float4 nodeSize,
float4 groupCenter,
float4 groupSize,
float group_eps,
float node_eps,
float rsearch_sq)
#else
__device__ bool split_node_grav_impbh_rsearch(float4 nodeCOM,
float4 nodeCenter,
float4 nodeSize,
float4 groupCenter,
float4 groupSize,
float rsearch_sq)
#endif
{
//Compute the distance between the group and the cell
float3 dr_impbh = {fabs(groupCenter.x - nodeCOM.x) - (groupSize.x),
fabs(groupCenter.y - nodeCOM.y) - (groupSize.y),
fabs(groupCenter.z - nodeCOM.z) - (groupSize.z)};
dr_impbh.x += fabs(dr_impbh.x); dr_impbh.x *= 0.5f;
dr_impbh.y += fabs(dr_impbh.y); dr_impbh.y *= 0.5f;
dr_impbh.z += fabs(dr_impbh.z); dr_impbh.z *= 0.5f;
//Distance squared, no need to do sqrt since opening criteria has been squared
float ds2_impbh = dr_impbh.x*dr_impbh.x + dr_impbh.y*dr_impbh.y + dr_impbh.z*dr_impbh.z;
//Compute the distance between the group and the cell
float3 dr_md = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x),
fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y),
fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)};
dr_md.x += fabs(dr_md.x); dr_md.x *= 0.5f;
dr_md.y += fabs(dr_md.y); dr_md.y *= 0.5f;
dr_md.z += fabs(dr_md.z); dr_md.z *= 0.5f;
//Distance squared, no need to do sqrt since opening criteria has been squared
float ds2_md = dr_md.x*dr_md.x + dr_md.y*dr_md.y + dr_md.z*dr_md.z;
#ifdef INDSOFT
//Extra test
if(ds2_impbh <= ((group_eps + node_eps ) * (group_eps + node_eps) )) return true;
#endif
return ( (ds2_impbh <= fabs(nodeCOM.w)) || ds2_md < rsearch_sq);
}
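// Hedged standalone check (not part of the original tree-walk code): a one-thread
// kernel that evaluates the minimum-distance opening criterion on a hand-made
// node/group pair. nodeCenter.w is assumed to already hold the squared opening
// criterion, as the comments above state; the numbers below are made up.
#ifndef INDSOFT
__global__ void demo_split_node_grav_md(int *result)
{
  float4 nodeCenter  = make_float4(0.0f, 0.0f, 0.0f, 4.0f);  // squared criterion = 4
  float4 nodeSize    = make_float4(1.0f, 1.0f, 1.0f, 0.0f);
  float4 groupCenter = make_float4(3.0f, 0.0f, 0.0f, 0.0f);
  float4 groupSize   = make_float4(0.5f, 0.5f, 0.5f, 0.0f);
  // Gap along x = |3 - 0| - (0.5 + 1) = 1.5, so ds2 = 2.25 <= 4: the node must be opened.
  *result = split_node_grav_md(nodeCenter, nodeSize, groupCenter, groupSize) ? 1 : 0;
}
#endif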
#include <thrust/unique.h>
#include <cub/block/block_radix_sort.cuh>
#include "filtering_util.h"
#include "rxmesh/attribute.h"
#include "rxmesh/context.h"
#include "rxmesh/kernels/query_dispatcher.cuh"
#include "rxmesh/util/vector.h"
constexpr float EPS = 10e-6;
/**
* compute_vertex_normal()
*/
template <typename T, uint32_t blockThreads>
__global__ static void compute_vertex_normal(const rxmesh::Context context,
rxmesh::VertexAttribute<T> coords,
rxmesh::VertexAttribute<T> normals)
{
using namespace rxmesh;
auto vn_lambda = [&](FaceHandle face_id, VertexIterator& fv) {
// this face's three vertices
VertexHandle v0(fv[0]), v1(fv[1]), v2(fv[2]);
// get the face's three vertices coordinates
Vector<3, T> c0(coords(fv[0], 0), coords(fv[0], 1), coords(fv[0], 2));
Vector<3, T> c1(coords(fv[1], 0), coords(fv[1], 1), coords(fv[1], 2));
Vector<3, T> c2(coords(fv[2], 0), coords(fv[2], 1), coords(fv[2], 2));
// compute the face normal
Vector<3, T> n = cross(c1 - c0, c2 - c0);
n.normalize();
// add the face's normal to its vertices
for (uint32_t v = 0; v < 3; ++v) { // for every vertex in this face
for (uint32_t i = 0; i < 3; ++i) { // for the vertex 3 coordinates
atomicAdd(&normals(fv[v], i), n[i]);
}
}
};
query_block_dispatcher<Op::FV, blockThreads>(context, vn_lambda);
}
/**
* compute_new_coordinates()
*/
template <typename T>
__device__ __inline__ void compute_new_coordinates(
const rxmesh::VertexHandle& v_id,
const rxmesh::VertexHandle vv[],
const uint8_t num_vv,
rxmesh::Vector<3, T>& v,
const rxmesh::Vector<3, T>& n,
const T sigma_c_sq,
const rxmesh::VertexAttribute<T>& input_coords,
rxmesh::VertexAttribute<T>& filtered_coords)
{
T sigma_s_sq = compute_sigma_s_sq(v_id, vv, num_vv, v, n, input_coords);
T sum = 0;
T normalizer = 0;
for (uint8_t i = 0; i < num_vv; ++i) {
rxmesh::Vector<3, T> q(input_coords(vv[i], 0),
input_coords(vv[i], 1),
input_coords(vv[i], 2));
q -= v;
T t = q.norm();
T h = dot(q, n);
T wc = exp(-0.5 * t * t / sigma_c_sq);
T ws = exp(-0.5 * h * h / sigma_s_sq);
sum += wc * ws * h;
normalizer += wc * ws;
}
v += (n * (sum / normalizer));
filtered_coords(v_id, 0) = v[0];
filtered_coords(v_id, 1) = v[1];
filtered_coords(v_id, 2) = v[2];
}
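// Hedged illustration (not from the original source): the bilateral weight used in
// compute_new_coordinates, written out for a single neighbour so it can be checked
// by hand. With t = h = 1 and sigma_c_sq = sigma_s_sq = 1 both factors are
// expf(-0.5f) ~= 0.6065f, so the combined weight is ~0.3679f.
__host__ __device__ inline float bilateral_weight(float t,
                                                  float h,
                                                  float sigma_c_sq,
                                                  float sigma_s_sq)
{
    const float wc = expf(-0.5f * t * t / sigma_c_sq);  // closeness term
    const float ws = expf(-0.5f * h * h / sigma_s_sq);  // similarity term
    return wc * ws;
}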
/**
* bilateral_filtering_low_level_API()
* TODO refactor this to use handles
*/
/*template <typename T, uint32_t blockThreads, uint32_t maxVVSize>
__launch_bounds__(blockThreads) __global__
static void bilateral_filtering_low_level_API(
const rxmesh::Context context,
rxmesh::Attribute<T> input_coords,
rxmesh::Attribute<T> filtered_coords,
rxmesh::Attribute<T> vertex_normals)
{
constexpr uint32_t special = 0xFFFFFFFE;
using namespace rxmesh;
uint32_t vv[maxVVSize];
uint32_t vv_patch[maxVVSize];
uint16_t vv_local[maxVVSize];
uint8_t num_vv = 0;
T sigma_c_sq = 0;
T radius = 0;
Vector<3, T> vertex, normal;
uint32_t v_id = INVALID32;
__shared__ uint32_t s_num_patches;
__shared__ uint32_t s_block_patches[blockThreads];
s_block_patches[threadIdx.x] = INVALID32;
__shared__ uint32_t s_current_num_patches;
if (threadIdx.x == 0) {
s_current_num_patches = 0;
s_num_patches = 0;
}
uint32_t patch_id = blockIdx.x;
// This lambda function gets the 1-ring, computes the search radius, and then
// keeps processing the 1-ring of 1-ring as long as the vertices being
// processed are within the same patch (patch_id). If a vertex within the
// k-ring is not in the patch, it will be added to s_block_patches so the
// whole block would process this patch later.
auto compute_vv_1st_level = [&](uint32_t p_id, Iterator& iter) {
v_id = p_id;
vertex[0] = input_coords(v_id, 0);
vertex[1] = input_coords(v_id, 1);
vertex[2] = input_coords(v_id, 2);
normal[0] = vertex_normals(v_id, 0);
normal[1] = vertex_normals(v_id, 1);
normal[2] = vertex_normals(v_id, 2);
normal.normalize();
vv[0] = v_id;
vv_patch[0] = INVALID32;
++num_vv;
sigma_c_sq = 1e10;
for (uint32_t v = 0; v < iter.size(); ++v) {
const uint32_t vv_id = iter[v];
const Vector<3, T> q(input_coords(vv_id, 0),
input_coords(vv_id, 1),
input_coords(vv_id, 2));
T len = dist2(vertex, q);
if (len < sigma_c_sq) {
sigma_c_sq = len;
}
}
radius = 4.0 * sigma_c_sq;
// add 1-ring if it is within the radius
for (uint32_t v = 0; v < iter.size(); ++v) {
uint32_t vv_id = iter[v];
const Vector<3, T> vvc(input_coords(vv_id, 0),
input_coords(vv_id, 1),
input_coords(vv_id, 2));
T dist = dist2(vertex, vvc);
if (dist <= radius) {
uint8_t id = num_vv++;
assert(id < maxVVSize);
vv[id] = vv_id;
vv_local[id] = iter.neighbour_local_id(v);
vv_patch[id] = special;
}
}
// process the 1-ring vertices that are in this patch and within
// the radius
uint8_t num_vv_start = 1;
uint8_t num_vv_end = num_vv;
while (true) {
for (uint16_t v = num_vv_start; v < num_vv_end; ++v) {
// This condition means that this vertex is owned by this
// patch, and thus we can process it now since we have its
// results
if (vv_local[v] < iter.m_num_src_in_patch) {
assert(vv_patch[v] == special);
assert(context.get_vertex_patch()[vv[v]] == patch_id);
// to indicate that it's processed
vv_patch[v] = INVALID32;
Iterator vv_iter(iter);
vv_iter.set(vv_local[v], 0);
for (uint32_t i = 0; i < vv_iter.size(); ++i) {
uint32_t vvv_id = vv_iter[i];
uint16_t vvv_local_id = vv_iter.neighbour_local_id(i);
// make sure that it is not a duplicate
if (!linear_search(vv, vvv_id, num_vv)) {
const Vector<3, T> vvv(input_coords(vvv_id, 0),
input_coords(vvv_id, 1),
input_coords(vvv_id, 2));
T dist = dist2(vvv, vertex);
if (dist <= radius) {
uint8_t id = num_vv++;
assert(id < maxVVSize);
vv[id] = vvv_id;
vv_local[id] = vvv_local_id;
vv_patch[id] = special;
}
}
}
} else {
// if the vertex is not owned by this patch, we add its
// patch so we can process it later.
uint32_t pp = context.get_vertex_patch()[vv[v]];
// but we first check if this thread has added this
// patch before (this will reduce the duplicates
// significantly)
if (!linear_search(vv_patch, pp, num_vv)) {
uint32_t id = atomicAdd(&s_num_patches, 1u);
assert(id < blockThreads);
s_block_patches[id] = pp;
}
vv_patch[v] = pp;
vv_local[v] = INVALID16;
}
}
// means we have not added anything new
if (num_vv_end == num_vv) {
break;
}
// otherwise, it means we have added new vertices that might
// fall in this patch, so we better process them now.
num_vv_start = num_vv_end;
num_vv_end = num_vv;
}
};
query_block_dispatcher<Op::VV, blockThreads>(context, compute_vv_1st_level);
__syncthreads();
while (s_num_patches > 0) {
__syncthreads();
// Filter out duplicate patches
// sort
typedef cub::BlockRadixSort<uint32_t, blockThreads, 1> BlockRadixSort;
__shared__ typename BlockRadixSort::TempStorage temp_storage;
uint32_t thread_key[1];
thread_key[0] = s_block_patches[threadIdx.x];
if (threadIdx.x < s_current_num_patches ||
threadIdx.x >= s_num_patches) {
thread_key[0] = INVALID32;
}
BlockRadixSort(temp_storage).Sort(thread_key);
s_block_patches[threadIdx.x] = thread_key[0];
__syncthreads();
// uniquify
uint32_t num_current_patches = s_num_patches - s_current_num_patches;
uint32_t* new_end =
thrust::unique(thrust::device,
s_block_patches,
s_block_patches + num_current_patches);
__syncthreads();
if (threadIdx.x == 0) {
s_current_num_patches = new_end - s_block_patches;
s_num_patches = s_current_num_patches;
}
__syncthreads();
for (uint32_t p = 0; p < s_current_num_patches; ++p) {
patch_id = s_block_patches[p];
uint32_t num_src_in_patch, *input_mapping, *output_mapping;
uint16_t *offset_all_patches, *output_all_patches;
detail::template query_block_dispatcher<Op::VV, blockThreads>(
context,
patch_id,
[](uint32_t) { return true; },
false,
true,
num_src_in_patch,
input_mapping,
output_mapping,
offset_all_patches,
output_all_patches);
// means that this thread has been assigned a vertex in
// compute_vv_1st_level
if (v_id != INVALID32) {
// search within this thread list (vv) to see if any
// unprocessed vertex falls in this patch
for (uint16_t v = 1; v < num_vv; ++v) {
if (vv_patch[v] == patch_id) {
// the global index of this vertex
uint32_t vv_id = vv[v];
// search for its local index
uint16_t vv_local_id = vv_local[v];
if (vv_local_id == INVALID16) {
for (uint16_t j = 0; j < num_src_in_patch; ++j) {
if (vv_id == output_mapping[j]) {
vv_local_id = j;
break;
}
}
}
assert(vv_local_id != INVALID16);
// so that we don't process it again
vv_patch[v] = INVALID32;
Iterator vv_iter(vv_local_id,
output_all_patches,
offset_all_patches,
output_mapping,
0,
num_src_in_patch);
for (uint32_t i = 0; i < vv_iter.size(); ++i) {
uint32_t vvv_id = vv_iter[i];
uint32_t vvv_local_id =
vv_iter.neighbour_local_id(i);
// make sure that it is not a duplicate
if (!linear_search(vv, vvv_id, num_vv)) {
const Vector<3, T> vvv(input_coords(vvv_id, 0),
input_coords(vvv_id, 1),
input_coords(vvv_id, 2));
T dist = dist2(vvv, vertex);
if (dist <= radius) {
uint8_t id = num_vv++;
assert(id < maxVVSize);
vv[id] = vvv_id;
uint32_t pp;
if (vvv_local_id < num_src_in_patch) {
pp = patch_id;
} else {
pp = context.get_vertex_patch()[vvv_id];
}
// search if this thread has added this
// patch before so we reduce the
// duplicates
if (pp != patch_id) {
if (!linear_search(
vv_patch, pp, num_vv)) {
uint32_t d =
atomicAdd(&s_num_patches, 1u);
assert(d < blockThreads);
s_block_patches[d] = pp;
}
vv_local[id] = INVALID16;
} else {
vv_local[id] = vvv_local_id;
}
vv_patch[id] = pp;
}
}
}
}
}
}
__syncthreads();
}
__syncthreads();
if (s_current_num_patches == s_num_patches) {
break;
}
}
if (v_id != INVALID32) {
compute_new_coordinates(v_id,
vv,
num_vv,
vertex,
normal,
sigma_c_sq,
input_coords,
filtered_coords);
}
}*/
template <typename T, uint32_t blockThreads, uint32_t maxVVSize>
__global__ static void bilateral_filtering(
const rxmesh::Context context,
rxmesh::VertexAttribute<T> input_coords,
rxmesh::VertexAttribute<T> filtered_coords,
rxmesh::VertexAttribute<T> vertex_normals)
{
using namespace rxmesh;
VertexHandle vv[maxVVSize];
uint32_t num_vv = 0;
T sigma_c_sq = 0;
T radius = 0;
Vector<3, T> vertex, normal;
VertexHandle v_id;
auto first_ring = [&](VertexHandle& p_id, VertexIterator& iter) {
v_id = p_id;
vertex[0] = input_coords(v_id, 0);
vertex[1] = input_coords(v_id, 1);
vertex[2] = input_coords(v_id, 2);
normal[0] = vertex_normals(v_id, 0);
normal[1] = vertex_normals(v_id, 1);
normal[2] = vertex_normals(v_id, 2);
normal.normalize();
vv[0] = v_id;
++num_vv;
sigma_c_sq = 1e10;
for (uint32_t v = 0; v < iter.size(); ++v) {
const VertexHandle vv_id = iter[v];
const Vector<3, T> q(input_coords(vv_id, 0),
input_coords(vv_id, 1),
input_coords(vv_id, 2));
T len = dist2(vertex, q);
if (len < sigma_c_sq) {
sigma_c_sq = len;
}
}
radius = 4.0 * sigma_c_sq;
// add 1-ring if it is within the radius
for (uint32_t v = 0; v < iter.size(); ++v) {
const VertexHandle vv_id = iter[v];
const Vector<3, T> vvc(input_coords(vv_id, 0),
input_coords(vv_id, 1),
input_coords(vv_id, 2));
T dist = dist2(vertex, vvc);
if (dist <= radius) {
uint8_t id = num_vv++;
assert(id < maxVVSize);
vv[id] = vv_id;
}
}
};
query_block_dispatcher<Op::VV, blockThreads>(context, first_ring);
__syncthreads();
uint32_t next_id = 1;
while (true) {
VertexHandle next_vertex;
if (v_id.is_valid() && next_id < num_vv) {
next_vertex = vv[next_id];
}
auto n_rings = [&](const VertexHandle& id, const VertexIterator& iter) {
assert(id == next_vertex);
for (uint32_t i = 0; i < iter.size(); ++i) {
VertexHandle vvv_id = iter[i];
if (vvv_id != v_id) {
// make sure that we don't store duplicate outputs
if (!linear_search(vv, vvv_id, num_vv)) {
const Vector<3, T> vvv(input_coords(vvv_id, 0),
input_coords(vvv_id, 1),
input_coords(vvv_id, 2));
T dist = dist2(vvv, vertex);
if (dist <= radius) {
uint32_t id = num_vv++;
assert(id < maxVVSize);
vv[id] = vvv_id;
}
}
}
}
};
higher_query_block_dispatcher<Op::VV, blockThreads>(
context, next_vertex, n_rings);
bool is_done = (next_id >= num_vv) || !v_id.is_valid();
if (__syncthreads_and(is_done)) {
break;
}
next_id++;
}
if (v_id.is_valid()) {
compute_new_coordinates(v_id,
vv,
num_vv,
vertex,
normal,
sigma_c_sq,
input_coords,
filtered_coords);
}
}
#include "nnbilinearsampler.hpp"
#include "datacu.hpp"
#include "impl/cudnnhelper.hpp"
#include <assert.h>
#include <algorithm>
using namespace std ;
using namespace vl ;
using namespace vl::nn ;
using namespace vl::impl ;
#if CUDNN_VERSION < 5000
#warning "bilinearsampler_cudnn.cu will be disabled as it requires CUDNN v5 or higher."
namespace vl { namespace impl {
template<vl::DataType dataType>
vl::ErrorCode
vl::impl::nnbilinearsampler_cudnn<dataType>::forward(Context& context,
Tensor output,
Tensor data,
Tensor grid)
{
return vl::VLE_Unsupported ;
}
template<vl::DataType dataType>
vl::ErrorCode
vl::impl::nnbilinearsampler_cudnn<dataType>::backward(Context& context,
Tensor derInputData,
Tensor derGrid,
Tensor data,
Tensor grid,
Tensor derOutput)
{
return vl::VLE_Unsupported ;
}
}}
#else // CUDNN_VERSION
// check if the descriptors, etc. were successfully created:
#define CHECK(x) \
{ \
cudnnError = x ; \
if (cudnnError != CUDNN_STATUS_SUCCESS) { \
error = op.context.setError(op.context.getCudaHelper().catchCudnnError(cudnnError, \
STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \
goto done ; \
} }
// -------------------------------------------------------------------
// Forward
// -------------------------------------------------------------------
template<DataType dataType>
struct BilinearSamplerForwardCudnn
{
vl::ErrorCode operator()(BilinearSampler &op,
Tensor &output,
Tensor const &input,
Tensor const &grid)
{
assert(output) ;
assert(input) ;
assert(grid) ;
typedef typename DataTypeTraits<dataType>::type type ;
cudnnTensorDescriptor_t outputDesc, dataDesc ;
cudnnSpatialTransformerDescriptor_t samplerDesc ;
bool outputDescInitialized = false ;
bool dataDescInitialized = false ;
bool samplerDescInitialized = false ;
// get the sizes:
int inCardinality = input.getSize();
int inDepth = input.getDepth();
int inHeight = input.getHeight();
int inWidth = input.getWidth();
int outCardinality = output.getSize();
int outDepth = output.getDepth();
int outWidth = output.getWidth();
int outHeight = output.getHeight();
cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::dataType ;
vl::DataType dynDataType = output.getDataType() ;
assert(dynDataType == dataType) ;
cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ;
vl::ErrorCode error = vl::VLE_Success ;
cudnnHandle_t handle ;
// get number of transforms/image == groupSize:
int groupSize = outCardinality / inCardinality ;
int dimOut[4] = { 1, outDepth, outWidth, outHeight } ; // one-image
// Get CuDNN
CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ;
// Get tensor descriptors:
CHECK(cudnnCreateTensorDescriptor(&outputDesc)) ;
outputDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(outputDesc,
cudnnDataType,
1, outDepth, outWidth, outHeight, // sizes: n,c,w,h
outHeight * outWidth * outDepth, //strides
outHeight * outWidth,
outHeight,
1)) ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
cudnnDataType,
1, inDepth, inWidth, inHeight, // sizes: n,c,w,h
inHeight * inWidth * inDepth, //strides
inHeight * inWidth,
inHeight,
1)) ;
// Get bilinear-sampler descriptor:
CHECK(cudnnCreateSpatialTransformerDescriptor(&samplerDesc)) ;
samplerDescInitialized = true ;
CHECK(cudnnSetSpatialTransformerNdDescriptor(samplerDesc,
CUDNN_SAMPLER_BILINEAR,
cudnnDataType,
4,
dimOut)) ;
{
type alpha = 1.0f ;
type beta = 0.0f ;
const ptrdiff_t dataOffset = inHeight * inWidth * inDepth ;
const ptrdiff_t gridOffset = 2 * outWidth * outHeight ;
const ptrdiff_t outOffset = outHeight * outWidth * outDepth ;
type const* data_ptr = (type const*) input.getMemory() ;
type const* grid_ptr = (type const*) grid.getMemory() ;
type * out_ptr = (type *) output.getMemory() ;
for (int im=0; im < inCardinality; im++) {
for (int ig=0; ig < groupSize; ig++) {
cudnnSpatialTfSamplerForward(handle,
samplerDesc,
&alpha,
dataDesc, data_ptr,
grid_ptr,
&beta,
outputDesc, out_ptr) ;
grid_ptr += gridOffset ;
out_ptr += outOffset ;
}
data_ptr += dataOffset ;
}
}
done:
if (samplerDescInitialized) { cudnnDestroySpatialTransformerDescriptor(samplerDesc) ; }
if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; }
if (outputDescInitialized) { cudnnDestroyTensorDescriptor(outputDesc) ; }
return op.context.passError(error, __func__) ;
}
};
// -------------------------------------------------------------------
// Backward
// -------------------------------------------------------------------
template<DataType dataType>
struct BilinearSamplerBackwardCudnn
{
vl::ErrorCode operator()
(BilinearSampler &op,
Tensor &derInput,
Tensor &derGrid,
Tensor const &input,
Tensor const &grid,
Tensor const &derOutput)
{
typedef typename DataTypeTraits<dataType>::type type ;
/* no derInputDataDesc needed as same as dataDesc <-- nice! */
cudnnTensorDescriptor_t dataDesc, derOutputDesc ;
cudnnSpatialTransformerDescriptor_t samplerDesc ;
bool dataDescInitialized = false ;
bool derOutputDescInitialized = false ;
bool samplerDescInitialized = false ;
// get the sizes:
int inCardinality = input.getSize();
int inDepth = input.getDepth();
int inHeight = input.getHeight();
int inWidth = input.getWidth();
int outCardinality = derOutput.getSize();
int outDepth = derOutput.getDepth();
int outWidth = derOutput.getWidth();
int outHeight = derOutput.getHeight();
cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::dataType ;
vl::DataType dynDataType = derOutput.getDataType() ;
assert(dynDataType == dataType) ;
cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ;
vl::ErrorCode error = vl::VLE_Success ;
cudnnHandle_t handle ;
// get number of transforms/image == groupSize:
int groupSize = outCardinality / inCardinality;
int dimOut[4] = { 1, outDepth, outWidth, outHeight };
// Get CuDNN
CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ;
// Get tensor descriptors:
CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ;
derOutputDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(derOutputDesc,
cudnnDataType,
1, outDepth, outWidth, outHeight, // sizes: n,c,w,h
outHeight * outWidth * outDepth, //strides
outHeight * outWidth,
outHeight,
1)) ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
cudnnDataType,
1, inDepth, inWidth, inHeight, // sizes: n,c,w,h
inHeight * inWidth * inDepth, //strides
inHeight * inWidth,
inHeight,
1)) ;
// Get bilinear-sampler descriptor:
CHECK(cudnnCreateSpatialTransformerDescriptor(&samplerDesc)) ;
samplerDescInitialized = true ;
CHECK(cudnnSetSpatialTransformerNdDescriptor(samplerDesc,
CUDNN_SAMPLER_BILINEAR,
cudnnDataType,
4,
dimOut));
/* do the work */
{
type alpha = 1.0f ;
type dataBeta = 1.0f ; // assuming that the derInputData has been initialized to zero
type gridBeta = 0.0f ;
const ptrdiff_t dataOffset = inHeight * inWidth * inDepth ;
const ptrdiff_t gridOffset = 2 * outWidth * outHeight ;
const ptrdiff_t outOffset = outHeight * outWidth * outDepth ;
type const* data_ptr = (type const*) input.getMemory() ;
type * derInputData_ptr = (type *) derInput.getMemory() ;
type const* grid_ptr = (type const*) grid.getMemory() ;
type * derGrid_ptr = (type *) derGrid.getMemory() ;
type * derOut_ptr = (type *) derOutput.getMemory() ;
for (int im=0; im < inCardinality; im++) {
for (int ig=0; ig < groupSize; ig++) {
cudnnSpatialTfSamplerBackward(handle,
samplerDesc,
&alpha,
dataDesc, data_ptr,
&dataBeta,
dataDesc, derInputData_ptr,
&alpha,
derOutputDesc, derOut_ptr,
grid_ptr,
&gridBeta,
derGrid_ptr) ;
grid_ptr += gridOffset ;
derGrid_ptr += gridOffset ;
derOut_ptr += outOffset ;
}
data_ptr += dataOffset ;
derInputData_ptr += dataOffset ;
}
}
/* cleanup */
done:
if (samplerDescInitialized) { cudnnDestroySpatialTransformerDescriptor(samplerDesc) ; }
if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; }
if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; }
return op.context.passError(error, __func__) ;
}
} ;
#endif // CUDNN >= v5.0
#include "src/common.hpp"
#include "src/scf.hpp"
#include "src/integrate.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
using namespace std;
using namespace etics;
Integrator IntegratorObj;
// GLOBAL VARIABLES
Real ConstantStep = 0.001953125;
Real T, Step, dT1, dT2, Tcrit;
int N;
extern Real mass;
extern int k3gs, k3bs, k4gs, k4bs;
/*extern*/ Particle *hostP;
thrust::host_vector<Particle> PPP;
/*extern*/ thrust::device_vector<Particle> PPPPP;
// /*extern*/ thrust::device_vector<vec3> F0xxxxxx;
// /*extern*/ thrust::device_vector<Real> PotPotPot; // ugly name
// /*extern*/ thrust::device_vector<vec3> F1xxxxxx;
/*extern*/ vec3 *F1_ptr;
// extern Particle *P_h;
// extern thrust::device_vector<Particle> P;
//
// extern thrust::device_vector<vec3> F0;
// extern thrust::device_vector<Real> Potential;
// extern thrust::device_vector<vec3> F1;
void CommitParticles();
// void InitSCF(int N);
// void ForceSCF(int N, Real *Potential, Particle *PPPPP, vec3 *F);
void DriftStep();
void KickStep();
void CommitForces();
int InitilizeIntegratorMemory();
#define PTR(x) (thrust::raw_pointer_cast((x).data()))
int initialize_code() {
#warning initscf should be here!!! the problem is that N is required at this point, so fix it
return 0;
}
int recommit_parameters() {
return 0;
}
int commit_parameters() {
return 0;
}
int new_particle(int *id, double mass, double x, double y, double z, double vx, double vy, double vz, double radius) {
Particle p;
p.m = mass;
p.pos = vec3(x, y, z);
p.vel = vec3(vx, vy, vz);
PPP.push_back(p);
*id = N;
N++;
return 0;
}
int commit_particles() {
// cerr << "calling commit_particles" << endl;
cerr << "we did commit_particles()" << endl;
etics::scf::Init(N, 180, 64, 2605, 384);
#warning hardcoded launch configuration
IntegratorObj = Integrator(&PPP[0], N);
return 0;
}
struct CenterMassFunctor {
Real ConstantMass;
__host__ __device__ CenterMassFunctor(Real _ConstantMass) : ConstantMass(_ConstantMass) {}
__host__ __device__ vec3 operator() (const Particle &p) const {return p.pos;}
};
struct ShiftFunctor {
vec3 Shift;
__host__ __device__ ShiftFunctor(vec3 _Shift) : Shift(_Shift) {}
__host__ __device__ Particle operator() (Particle &p) const {
p.pos += Shift;
p.CalculateR2();
return p;
}
};
int counttt = 0;
bool FirstStep = true;
int evolve_model(double t) {
// PPPPP = PPP;
cerr << "call evolve_model t_end = " << t << " dt = " << t - T << "****************" << endl;
// vec3 CenterMass = thrust::transform_reduce(PPPPP.begin(), PPPPP.end(), CenterMassFunctor(mass), vec3(0,0,0), thrust::plus<vec3>());
// CenterMass = CenterMass * (1.0/N); //ugly should divide by the total mass
// cerr << "CENTER OF MASS " << CenterMass.x << endl;
//
// // thrust::transform(PPPPP.begin(), PPPPP.end(), PPPPP.begin(), ShiftFunctor(-CenterMass));
//
// vec3 CenterMass2 = thrust::transform_reduce(PPPPP.begin(), PPPPP.end(), CenterMassFunctor(mass), vec3(0,0,0), thrust::plus<vec3>());
// CenterMass2 = CenterMass2 * (1.0/N); //ugly should divide by the total mass
// cerr << "CENTER OF MASS after correction " << CenterMass2.x << endl;
//
Step = ConstantStep;
while (T <= t) {
// Take the drift step.
IntegratorObj.DriftStep(Step);
// Calculate the forces in the new positions.
// ForceSCF(N, PTR(PotPotPot), PTR(PPPPP), PTR(F1xxxxxx));
IntegratorObj.CalculateGravity();
// Finish by taking the kick step.
// The kick functor also "commits" the predicted forces into the "acc" member.
IntegratorObj.KickStep(Step);
// N particles were implicitly propagated in this iteration.
// Advance global time.
T += Step;
}
//
// vec3 CenterMass3 = thrust::transform_reduce(PPPPP.begin(), PPPPP.end(), CenterMassFunctor(mass), vec3(0,0,0), thrust::plus<vec3>());
// CenterMass3 = CenterMass3 * (1.0/N); //ugly should divide by the total mass
// cerr << "CENTER OF MASS after evolve " << CenterMass3.x << endl;
//
// cerr << "done evolve; transform" << endl;
// // thrust::transform(PPPPP.begin(), PPPPP.end(), PPPPP.begin(), ShiftFunctor(+CenterMass)); // antishift
//
// vec3 CenterMass4 = thrust::transform_reduce(PPPPP.begin(), PPPPP.end(), CenterMassFunctor(mass), vec3(0,0,0), thrust::plus<vec3>());
// CenterMass4 = CenterMass4 * (1.0/N); //ugly should divide by the total mass
// cerr << "CENTER OF MASS after antishift " << CenterMass4.x << endl;
//
// cerr << "done transform; download to RAM" << endl;
IntegratorObj.CopyParticlesToHost(&PPP[0]);
//
// cerr << "done download; return" << endl;
return 0;
}
int set_begin_time(double time_begin) {
// cerr << "called set_begin_time(" << time_begin << endl;
return 0;
}
int get_begin_time(double *time_begin) {
*time_begin = 0;
return 0;
}
int get_mass(int index_of_the_particle, double *mass) {
*mass = PPP[index_of_the_particle].m;
return 0;
}
int get_time(double *time) {
*time = T;
return 0;
}
int set_mass(int index_of_the_particle, double mass) {
// cerr << "calling set_mass" << endl;
PPP[index_of_the_particle].m = mass;
return 0;
}
int get_index_of_first_particle(int *index_of_the_particle) {
// cerr << "calling get_index_of_first_particle" << endl;
*index_of_the_particle = 0;
return 0;
}
int get_total_radius(double *radius) {
return -2;
}
int get_potential_at_point(double soft, double x, double y, double z, double *phi) {
return -2;
}
int get_total_mass(double *mass) {
return -2;
}
int set_eps2(double epsilon_squared) {
return -1;
}
int get_eps2(double *epsilon_squared) {
*epsilon_squared = 0;
return -1;
}
int get_number_of_particles(int *number_of_particles) {
// cerr << "calling get_number_of_particles" << endl;
*number_of_particles = PPP.size();
return 0;
}
int get_index_of_next_particle(int index_of_the_particle, int *index_of_the_next_particle) {
*index_of_the_next_particle = index_of_the_particle + 1;
return 0;
}
int delete_particle(int index_of_the_particle) {
return -2;
}
int get_potential(int index_of_the_particle, double *potential) {
return -2;
}
int synchronize_model() {
// cerr << "calling synchronize_model" << endl;
return 0;
}
int set_state(int index_of_the_particle, double mass, double radius, double x, double y, double z, double vx, double vy, double vz) {
cerr << "calling set_state" << endl;
// cerr << "calling set_state" << endl;
PPP[index_of_the_particle].pos = vec3(x, y, z);
PPP[index_of_the_particle].vel = vec3(vx, vy, vz);
return 0;
}
int get_state(int index_of_the_particle, double *mass, double *radius, double *x, double *y, double *z, double *vx, double *vy, double *vz) {
// cerr << "calling get_state" << endl;
Particle p = PPP[index_of_the_particle];
*mass = p.m;
*x = p.pos.x;
*y = p.pos.y;
*z = p.pos.z;
*vx = p.vel.x;
*vy = p.vel.y;
*vz = p.vel.z;
return 0;
}
int get_time_step(double *time_step) {
// cerr << "calling get_time_step" << endl;
*time_step = ConstantStep;
return 0;
}
int set_time_step(double time_step) {
cerr << "calling set_time_step" << endl;
ConstantStep = time_step;
return 0;
}
int get_launch_config(int **launch_config) {
return 0;
}
int set_launch_config(int *launch_config) {
// k3gs = launch_config[0];
// k3bs = launch_config[1];
// k4gs = launch_config[2];
// k4bs = launch_config[3];
return -2;
}
int recommit_particles() {
// cerr << "calling recommit_particles" << endl;
#warning put something here
cerr << "hhhhhhhhhhhhhhhhhhhhhhhhhhhhhh" << endl;
PPPPP = PPP;
return -2;
}
int set_acceleration(int index_of_the_particle, double ax, double ay, double az) {
return -2;
}
int get_center_of_mass_position(double *x, double *y, double *z) {
// vec3 CenterMass = thrust::transform_reduce(PPPPP.begin(), PPPPP.end(), CenterMassFunctor(mass), vec3(0,0,0), thrust::plus<vec3>());
// CenterMass = CenterMass * (1.0/N); //ugly should divide by the total mass
// *x = CenterMass.x;
// *y = CenterMass.y;
// *z = CenterMass.z;
return 0;
}
int get_center_of_mass_velocity(double *vx, double *vy, double *vz) {
return -2;
}
int get_radius(int index_of_the_particle, double *radius) {
*radius = 0;
return 0;
}
int set_radius(int index_of_the_particle, double radius) {
// should store the radius somewhere but completely ignored by code
// cerr << "calling set_radius" << endl;
return 0;
}
int cleanup_code() {
IntegratorObj.~Integrator();
cerr << "bye" << endl;
return 0;
}
int get_gravity_at_point(double soft, double x, double y, double z, double *forcex, double *forcey, double *forcez) {
return -2;
}
int get_velocity(int index_of_the_particle, double *vx, double *vy, double *vz) {
*vx = PPP[index_of_the_particle].vel.x;
*vy = PPP[index_of_the_particle].vel.y;
*vz = PPP[index_of_the_particle].vel.z;
return 0;
}
int get_position(int index_of_the_particle, double *x, double *y, double *z) {
*x = PPP[index_of_the_particle].pos.x;
*y = PPP[index_of_the_particle].pos.y;
*z = PPP[index_of_the_particle].pos.z;
return 0;
}
bool already_printed = false;
int set_position(int index_of_the_particle, double x, double y, double z) {
if (already_printed == false) {
cerr << "calling set_position" << endl;
cerr << "---------index_of_the_particle=" << index_of_the_particle << endl;
cerr << "--------- x" << PPP[index_of_the_particle].pos.x << "--->" << x << endl;
cerr << "--------- y" << PPP[index_of_the_particle].pos.y << "--->" << y << endl;
cerr << "--------- z" << PPP[index_of_the_particle].pos.z << "--->" << z << endl;
already_printed = true;
}
PPP[index_of_the_particle].pos = vec3(x, y, z);
counttt++;
return 0;
}
int get_acceleration(int index_of_the_particle, double *ax, double *ay, double *az) {
return -2;
}
int set_velocity(int index_of_the_particle, double vx, double vy, double vz) {
// cerr << "calling set_velocity" << endl;
PPP[index_of_the_particle].vel = vec3(vx, vy, vz);
return 0;
}
int get_kinetic_energy(double *kinetic_energy) {
*kinetic_energy = IntegratorObj.KineticEnergy();
return 0;
}
int get_potential_energy(double *potential_energy) {
*potential_energy = IntegratorObj.PotentialEnergy();
return 0;
}
int update_force_potential_arrays(double tttt) {
#warning time shouldnt be a parameter to this one
// ForceSCF(N, PTR(PotPotPot), PTR(PPPPP), PTR(F0xxxxxx));
return 0;
}
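// Hedged driver sketch (not part of the original interface file): the call sequence
// an AMUSE-style client might use against the functions defined above. The two-body
// initial condition, the end time of 1.0 and the function name run_minimal_demo are
// made up for this example only.
int run_minimal_demo() {
    initialize_code();
    int id0, id1;
    new_particle(&id0, 0.5, -1.0, 0.0, 0.0, 0.0, -0.25, 0.0, 0.0);
    new_particle(&id1, 0.5,  1.0, 0.0, 0.0, 0.0,  0.25, 0.0, 0.0);
    commit_particles();          // uploads particles and initializes the SCF integrator
    evolve_model(1.0);           // advance to t = 1.0 with the constant step size
    double x, y, z;
    get_position(id0, &x, &y, &z);
    cerr << "particle " << id0 << " at (" << x << ", " << y << ", " << z << ")" << endl;
    return cleanup_code();
}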
//#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(double _x, double _y){
x = _x, y = _y;
}
__device__ void set(float _x, float _y){
x = _x; y = _y;
}
__device__ Point operator +(const Point &b)const{
return Point(x + b.x, y + b.y);
}
__device__ Point operator -(const Point &b)const{
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b){
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){
int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) &&
min(q1.x,q2.x) <= max(p1.x,p2.x) &&
min(p1.y,p2.y) <= max(q1.y,q2.y) &&
min(q1.y,q2.y) <= max(p1.y,p2.y);
return ret;
}
__device__ inline int check_in_box2d(const float *box, const Point &p){
//params: box (5) [x1, y1, x2, y2, angle]
const float MARGIN = 1e-5;
float center_x = (box[0] + box[2]) / 2;
float center_y = (box[1] + box[3]) / 2;
float angle_cos = cos(-box[4]), angle_sin = sin(-box[4]); // rotate the point in the opposite direction of box
float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x;
float rot_y = -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
#ifdef DEBUG
printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2], box[3], box[4]);
printf("center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, %.3f)\n", center_x, center_y,
angle_cos, angle_sin, p.x, p.y, rot_x, rot_y);
#endif
return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN && rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if(fabs(s5 - s1) > EPS){
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
}
else{
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){
float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x;
float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){
return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x);
}
__device__ inline float box_overlap(const float *box_a, const float *box_b){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = box_a[4];
float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = box_b[4];
Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
#ifdef DEBUG
printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle,
b_x1, b_y1, b_x2, b_y2, b_angle);
printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y);
#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++){
#ifdef DEBUG
printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]);
if (flag){
poly_center = poly_center + cross_points[cnt];
cnt++;
}
}
}
// check corners
for (int k = 0; k < 4; k++){
if (check_in_box2d(box_a, box_b_corners[k])){
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
}
if (check_in_box2d(box_b, box_a_corners[k])){
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++){
for (int i = 0; i < cnt - j - 1; i++){
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
#ifdef DEBUG
printf("cnt=%d\n", cnt);
for (int i = 0; i < cnt; i++){
printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y);
}
#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++){
area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
__device__ inline float iou_bev(const float *box_a, const float *box_b){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
float s_overlap = box_overlap(box_a, box_b);
return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 5;
const float * cur_box_b = boxes_b + b_idx * 5;
float s_overlap = box_overlap(cur_box_a, cur_box_b);
ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 5;
const float * cur_box_b = boxes_b + b_idx * 5;
float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
//params: boxes (N, 5) [x1, y1, x2, y2, ry]
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
__device__ inline float iou_normal(float const * const a, float const * const b) {
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0]) * (a[3] - a[1]);
float Sb = (b[2] - b[0]) * (b[3] - b[1]);
return interS / fmaxf(Sa + Sb - interS, EPS);
}
__global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
//params: boxes (N, 5) [x1, y1, x2, y2, ry]
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap);
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_iou);
}
void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
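// A minimal host-side sketch (not part of the original launchers above) of how
// the bitmask produced by nms_kernel is typically reduced into a keep list once
// copied back from the device. It assumes the boxes were sorted by score before
// the kernel ran; `remv` and `keep` are hypothetical caller-provided buffers of
// DIVUP(boxes_num, THREADS_PER_BLOCK_NMS) words and boxes_num ints respectively.
inline int nmsMaskReduceHost(const unsigned long long *mask_host, int boxes_num,
                             unsigned long long *remv, int *keep){
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    for (int j = 0; j < col_blocks; j++) remv[j] = 0ULL;
    int num_kept = 0;
    for (int i = 0; i < boxes_num; i++){
        const int nblock = i / THREADS_PER_BLOCK_NMS;
        const int inblock = i % THREADS_PER_BLOCK_NMS;
        if (remv[nblock] & (1ULL << inblock)) continue; // already suppressed by a higher-scoring box
        keep[num_kept++] = i;
        // suppress every later box whose IoU with box i exceeds the threshold
        for (int j = nblock; j < col_blocks; j++) remv[j] |= mask_host[i * col_blocks + j];
    }
    return num_kept;
}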
|
the_stack
|
#include "doctest_proxy.hpp"
#include <Core/Array/ArrayView.hpp>
#include <Core/CUDA/CUDAArray.hpp>
#include <Core/CUDA/CUDAArrayView.hpp>
using namespace CubbyFlow;
TEST_CASE("[CUDAArray2] - Constructors")
{
{
CUDAArray2<float> arr;
CHECK_EQ(0u, arr.Width());
CHECK_EQ(0u, arr.Height());
}
{
CUDAArray2<float> arr(CUDAStdArray<size_t, 2>(3, 7));
CHECK_EQ(3u, arr.Width());
CHECK_EQ(7u, arr.Height());
for (size_t i = 0; i < 21; ++i)
{
CHECK_EQ(0.f, arr[i]);
}
}
{
CUDAArray2<float> arr(CUDAStdArray<size_t, 2>(1, 9), 1.5f);
CHECK_EQ(1u, arr.Width());
CHECK_EQ(9u, arr.Height());
for (size_t i = 0; i < 9; ++i)
{
CHECK_EQ(1.5f, arr[i]);
}
}
{
CUDAArray2<float> arr(5, 2);
CHECK_EQ(5u, arr.Width());
CHECK_EQ(2u, arr.Height());
for (size_t i = 0; i < 10; ++i)
{
CHECK_EQ(0.f, arr[i]);
}
}
{
CUDAArray2<float> arr(3, 4, 7.f);
CHECK_EQ(3u, arr.Width());
CHECK_EQ(4u, arr.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ(7.f, arr[i]);
}
}
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CHECK_EQ(4u, arr.Width());
CHECK_EQ(3u, arr.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr[i]);
}
}
{
Array2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(arr);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(arr);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArrayView2<float> arrView(arr.data(), arr.Size());
CHECK_EQ(4u, arrView.Width());
CHECK_EQ(3u, arrView.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arrView[i]);
}
}
}
TEST_CASE("[CUDAArray2] - At")
{
{
float values[12] = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f,
6.f, 7.f, 8.f, 9.f, 10.f, 11.f };
CUDAArray2<float> arr(4, 3);
for (size_t i = 0; i < 12; ++i)
{
arr[i] = values[i];
}
// Test row-major
CHECK_EQ(0.f, arr(0, 0));
CHECK_EQ(1.f, arr(1, 0));
CHECK_EQ(2.f, arr(2, 0));
CHECK_EQ(3.f, arr(3, 0));
CHECK_EQ(4.f, arr(0, 1));
CHECK_EQ(5.f, arr(1, 1));
CHECK_EQ(6.f, arr(2, 1));
CHECK_EQ(7.f, arr(3, 1));
CHECK_EQ(8.f, arr(0, 2));
CHECK_EQ(9.f, arr(1, 2));
CHECK_EQ(10.f, arr(2, 2));
CHECK_EQ(11.f, arr(3, 2));
}
}
TEST_CASE("[CUDAArray2] - CopyFrom")
{
// From Array
{
Array2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5);
arr2.CopyFrom(arr);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
// From ArrayView
{
Array2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5);
arr2.CopyFrom(arr.View());
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
// From CUDAArray
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5);
arr2.CopyFrom(arr);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
// From CUDAArrayView
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5);
arr2.CopyFrom(arr.View());
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
}
TEST_CASE("[CUDAArray2] - CopyTo")
{
// To Array
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
Array2<float> arr2(2, 5);
arr.CopyTo(arr2);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
// To ArrayView
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
Array2<float> arr2(4, 3);
ArrayView2<float> arrView2 = arr2.View();
arr.CopyTo(arrView2);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
// To CUDAArray
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5);
arr.CopyTo(arr2);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
// To CUDAArrayView
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(4, 3);
CUDAArrayView2<float> arrView2 = arr2.View();
arr.CopyTo(arrView2);
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
}
TEST_CASE("[CUDAArray2] - Fill")
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
arr.Fill(42.0f);
CHECK_EQ(4u, arr.Width());
CHECK_EQ(3u, arr.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ(42.0f, arr[i]);
}
}
TEST_CASE("[CUDAArray2] - Resize")
{
{
CUDAArray2<float> arr;
arr.Resize(CUDAStdArray<size_t, 2>(2, 9));
CHECK_EQ(2u, arr.Width());
CHECK_EQ(9u, arr.Height());
for (size_t i = 0; i < 18; ++i)
{
CHECK_EQ(0.f, arr[i]);
}
arr.Resize(CUDAStdArray<size_t, 2>(8, 13), 4.f);
cudaDeviceSynchronize();
CHECK_EQ(8u, arr.Width());
CHECK_EQ(13u, arr.Height());
for (size_t i = 0; i < 8; ++i)
{
for (size_t j = 0; j < 13; ++j)
{
if (i < 2 && j < 9)
{
CHECK_EQ(0.f, arr(i, j));
}
else
{
CHECK_EQ(4.f, arr(i, j));
}
}
}
}
{
CUDAArray2<float> arr;
arr.Resize(7, 6);
CHECK_EQ(7u, arr.Width());
CHECK_EQ(6u, arr.Height());
for (size_t i = 0; i < 42; ++i)
{
CHECK_EQ(0.f, arr[i]);
}
arr.Resize(1, 9, 3.f);
CHECK_EQ(1u, arr.Width());
CHECK_EQ(9u, arr.Height());
for (size_t i = 0; i < 1; ++i)
{
for (size_t j = 0; j < 9; ++j)
{
if (j < 6)
{
CHECK_EQ(0.f, arr(i, j));
}
else
{
CHECK_EQ(3.f, arr(i, j));
}
}
}
}
}
TEST_CASE("[CUDAArray2] - Clear")
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
arr.Clear();
CHECK_EQ(0u, arr.Width());
CHECK_EQ(0u, arr.Height());
}
TEST_CASE("[CUDAArray2] - Swap")
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5, 42.f);
arr.Swap(arr2);
CHECK_EQ(2u, arr.Width());
CHECK_EQ(5u, arr.Height());
for (size_t i = 0; i < 10; ++i)
{
CHECK_EQ(42.0f, arr[i]);
}
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
TEST_CASE("[CUDAArray2] - View")
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
auto view = arr.View();
CHECK_EQ(4u, view.Width());
CHECK_EQ(3u, view.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, view[i]);
}
for (size_t j = 0; j < 3; ++j)
{
for (size_t i = 0; i < 4; ++i)
{
CHECK_EQ(arr(i, j), view(i, j));
}
}
const auto& arrRef = arr;
auto constView = arrRef.View();
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, constView[i]);
}
for (size_t j = 0; j < 3; ++j)
{
for (size_t i = 0; i < 4; ++i)
{
CHECK_EQ(arr(i, j), constView(i, j));
}
}
for (size_t j = 0; j < 3; ++j)
{
for (size_t i = 0; i < 4; ++i)
{
view(i, j) = float(i + 4 * j);
}
}
for (size_t j = 0; j < 3; ++j)
{
for (size_t i = 0; i < 4; ++i)
{
CHECK_EQ(float(i + 4 * j), arr(i, j));
CHECK_EQ(float(i + 4 * j), constView(i, j));
}
}
}
TEST_CASE("[CUDAArray2] - AssignmentOperator")
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5, 42.f);
arr2 = arr;
CHECK_EQ(4u, arr.Width());
CHECK_EQ(3u, arr.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr[i]);
}
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
TEST_CASE("[CUDAArray2] - MoveOperator")
{
CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
{ 5.f, 6.f, 7.f, 8.f },
{ 9.f, 10.f, 11.f, 12.f } });
CUDAArray2<float> arr2(2, 5, 42.f);
arr2 = std::move(arr);
CHECK_EQ(0u, arr.Width());
CHECK_EQ(0u, arr.Height());
CHECK_EQ(nullptr, arr.data());
CHECK_EQ(4u, arr2.Width());
CHECK_EQ(3u, arr2.Height());
for (size_t i = 0; i < 12; ++i)
{
CHECK_EQ((float)i + 1.f, arr2[i]);
}
}
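// A hedged sketch (not in the original suite) of a matching move-constructor
// test; it assumes CUDAArray2's move constructor, like the move assignment
// checked above, leaves the moved-from array empty with a null data pointer.
TEST_CASE("[CUDAArray2] - MoveConstructor")
{
    CUDAArray2<float> arr({ { 1.f, 2.f, 3.f, 4.f },
                            { 5.f, 6.f, 7.f, 8.f },
                            { 9.f, 10.f, 11.f, 12.f } });
    CUDAArray2<float> arr2(std::move(arr));
    CHECK_EQ(0u, arr.Width());
    CHECK_EQ(0u, arr.Height());
    CHECK_EQ(nullptr, arr.data());
    CHECK_EQ(4u, arr2.Width());
    CHECK_EQ(3u, arr2.Height());
    for (size_t i = 0; i < 12; ++i)
    {
        CHECK_EQ((float)i + 1.f, arr2[i]);
    }
}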
|
the_stack
|
namespace cunumeric {
using namespace Legion;
using namespace legate;
////////////////////////////////////
// Direct convolution implementation
////////////////////////////////////
// Convolution should be able to hit FMA throughput limits
// on the GPU due to the amount of FLOPs needed to be performed
// given the amount of data loaded. This is especially true of
// larger convolution filters. In order to hit these limits though
// we need to make sure that the GPU is fed data appropriately.
// We have two different kernels to handle different sized filters.
// Small Tile Case
// In the small tile case, a reasonably sized input tile including
// all the boundary values for a given filter tile can fit in the
// shared memory of the SM, allowing the threadblock to fully
// compute an entire tile of output points in a single pass.
// If the tile is small enough, we even try to get multiple CTAs/SM
// in order to better pipeline data loading with compute.
// Large Tile Case
// For inputs where the filter is very large and it is impossible
// to fit a reasonably sized tile into shared memory, we tile both
// the output and the filter and make multiple passes over the data
// to create reasonably sized input tiles that fit in shared memory.
// If possible we also attempt to tile for the L2 cache as well so
// that threadblocks walking through memory together can hopefully
// hit in the L2 more often than not when loading data
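// A minimal sketch (hypothetical helper, not used by the kernels below) of the
// shared-memory footprint the small-tile path assumes: the output tile padded
// by the filter radius (centers[d]) on both sides of every dimension. The same
// product is computed later when choosing between the small- and large-tile paths.
template <typename VAL, int DIM>
__host__ static inline size_t small_tile_smem_footprint(const Point<DIM>& tile,
                                                        const unsigned centers[DIM])
{
  size_t bytes = sizeof(VAL);
  for (int d = 0; d < DIM; d++) bytes *= (tile[d] + 2 * centers[d]);
  return bytes;
}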
template <int DIM>
struct ConvolutionInitArgs {
public:
FastDivmodU64 pitches[DIM];
};
template <typename VAL, int DIM>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 4)
convolution_init(const AccessorWO<VAL, DIM> out,
const Point<DIM> subrect_lo,
const ConvolutionInitArgs<DIM> args,
const size_t volume)
{
size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= volume) return;
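// Decompose the flat thread index into an N-D point: pitches[d] holds the
// product of the extents of the dimensions after d, so each divmod peels off
// one coordinate. For example, assuming a 2-D subrect of extent (H, W), the
// pitches are {W, 1} and offset 7 with W = 3 maps to the point (2, 1).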
Point<DIM> point = subrect_lo;
#pragma unroll
for (int d = 0; d < DIM; d++) point[d] += args.pitches[d].divmod(offset, offset);
out[point] = VAL{0};
}
// We want to run the convolution kernel with as large a shared memory
// tile as possible to avoid duplicate loading of data and maximize
// compute intensity. Therefore we're always going to run with 1 CTA
// per SM, but we still want enough thread-level parallelism, so we
// set this to the maximum number of warps in a threadblock
// Note that a lot of this code assumes this is a power of 2
#define CONVOLUTION_THREADS 1024
// The idea behind THREAD_OUTPUTS is to figure out how many registers
// we will be willing to assign to hold the partial output accumulations
// in each thread without using too many registers. Every GPU (with one
// exception) has 64K 32-bit registers per SM. We key off that and want
// to allocate a quarter of those registers for holding the partial accumulations
// We assume here that sizeof(VAL) is a power of 2
#define THREAD_OUTPUTS(TYPE) 1
//((4/*bytes/reg*/ * ((65536/8)/CONVOLUTION_THREADS)/*regs/thread*/) / sizeof(TYPE))
template <int DIM, int POINTS>
struct ConvolutionLargeTileArgs {
FastDivmod l1_output_tile_pitches[DIM];
FastDivmod l1_input_pitches[DIM];
FastDivmod l1_filter_pitches[DIM];
FastDivmod l1_output_pitches[DIM];
Point<DIM> l2_output_limits;
Point<DIM, unsigned> point_offsets[POINTS];
Point<DIM, unsigned> l2_output_tile;
Point<DIM, unsigned> l2_filter_tile;
Point<DIM, unsigned> l1_output_tile;
Point<DIM, unsigned> l1_filter_tile;
unsigned total_l2_outputs;
unsigned total_l1_outputs;
unsigned total_l1_filters;
unsigned total_l1_points;
unsigned l1_filter_points;
unsigned l1_input_points;
unsigned shared_input_offset;
unsigned uniform_input_stride;
unsigned shared_input_bound;
};
template <typename VAL, int DIM, int POINTS>
__global__ static void __launch_bounds__(CONVOLUTION_THREADS, 1)
convolution_large_tile(const AccessorWO<VAL, DIM> out,
const AccessorRO<VAL, DIM> filter,
const AccessorRO<VAL, DIM> in,
const Rect<DIM> root_rect,
const Rect<DIM> subrect,
const Rect<DIM> l2_filter_rect,
const Point<DIM> l2_input_start,
const Point<DIM> l2_input_stop,
const Point<DIM> l1_input_start,
const Point<DIM, unsigned> zero,
const Point<DIM, unsigned> one,
const ConvolutionLargeTileArgs<DIM, POINTS> args)
{
// Deal with compiler shared memory stupidity
extern __shared__ uint8_t buffer[];
// Technically this is illegal C++, but there's no other way to do it
VAL* sharedmem = (VAL*)buffer;
Point<DIM, unsigned> thread_offset;
int offset = threadIdx.x;
#pragma unroll
for (int d = 0; d < DIM; d++) thread_offset[d] = args.l1_output_pitches[d].divmod(offset, offset);
Point<DIM> l2_output_offset = zero;
for (unsigned l2_outidx = 0; l2_outidx < args.total_l2_outputs; l2_outidx++) {
// Do a quick check here to see if all the inputs are contained for this tile
// l2_input_start = subrect.lo + args.extents - l2_filter_rect.hi - one - centers
// l2_input_stop = subrect.lo + l2_output_tile - one + args.extents - l2_filter_rect.lo - one -
// centers
const Rect<DIM> l2_input_rect(l2_input_start + l2_output_offset,
l2_input_stop + l2_output_offset);
const bool input_contained = root_rect.contains(l2_input_rect);
// Iterate the L1 output tiles that this threadblock should compute for the L2 output
for (unsigned l1_outidx = blockIdx.x; l1_outidx < args.total_l1_outputs;
l1_outidx += gridDim.x) {
Point<DIM, unsigned> l1_output_offset;
offset = l1_outidx;
#pragma unroll
for (int d = 0; d < DIM; d++)
l1_output_offset[d] =
args.l1_output_tile_pitches[d].divmod(offset, offset) * args.l1_output_tile[d];
// Handle the boundary case where an L1 tile is not contained in the L2 tile
// because the L2 tile is overlapping a boundary. Note this decision is the
// same for all the threads in the threadblock, so there is no bad divergence
bool output_contained = true;
#pragma unroll
for (int d = 0; d < DIM; d++) {
if ((subrect.lo[d] + l2_output_offset[d] + l1_output_offset[d]) <= subrect.hi[d]) continue;
output_contained = false;
break;
}
if (!output_contained) continue;
// Initialize our point data
VAL acc[POINTS];
#pragma unroll
for (int p = 0; p < POINTS; p++) acc[p] = VAL{0};
// Iterate over the l1 filter tiles
Point<DIM, unsigned> l1_filter_offset = zero;
for (unsigned l1_fidx = 0; l1_fidx < args.total_l1_filters; l1_fidx++) {
// Wait for any previous readers to be done
__syncthreads();
// Load the filter into shared memory
// Unroll this a few times to get some memory level parallelism
#pragma unroll 4
for (unsigned fidx = threadIdx.x; fidx < args.l1_filter_points; fidx += blockDim.x) {
Point<DIM> filter_point = l2_filter_rect.lo + l1_filter_offset;
offset = fidx;
#pragma unroll
for (int d = 0; d < DIM; d++)
filter_point[d] += args.l1_filter_pitches[d].divmod(offset, offset);
if (l2_filter_rect.contains(filter_point))
sharedmem[fidx] = filter[filter_point];
else
sharedmem[fidx] = VAL{0};
}
// Load the input into shared memory
// Compute the input start point
// input_start = subrect.lo + extents - l2_filter_rect.lo - l1_filter_tile - centers
Point<DIM> input_start = l1_input_start + l2_output_offset + l1_output_offset;
input_start -= l1_filter_offset;
// Unroll this a few times to get some memory level parallelism
#pragma unroll 4
for (unsigned idx = threadIdx.x; idx < args.l1_input_points; idx += blockDim.x) {
Point<DIM> input_point = input_start;
offset = idx;
#pragma unroll
for (int d = 0; d < DIM; d++)
input_point[d] += args.l1_input_pitches[d].divmod(offset, offset);
if (input_contained || root_rect.contains(input_point))
sharedmem[args.shared_input_offset + idx] = in[input_point];
else
sharedmem[args.shared_input_offset + idx] = VAL{0};
}
// Wait for everything to be loaded into shared memory
__syncthreads();
// Iterate the points in the filter
// We can safely iterate all the filter points and input points
// because we wrote zeros into shared memory for everything that
// was out of bounds
Point<DIM, unsigned> filter_point = zero;
if (args.uniform_input_stride) {
// Each point is a constant offset in shared from the others
unsigned input_offset = args.shared_input_offset;
#pragma unroll
for (int d = 0; d < DIM; d++)
input_offset +=
args.l1_input_pitches[d].divisor * (thread_offset[d] + args.l1_filter_tile[d] - 1);
if (args.shared_input_bound) {
for (unsigned fidx = 0; fidx < args.l1_filter_points; fidx++) {
// Use shared memory broadcasting functionality to avoid bank conflicts
const VAL filter_value = sharedmem[fidx];
unsigned point_offset = input_offset;
#pragma unroll
for (int p = 0; p < POINTS; p++) {
if (args.shared_input_bound <= point_offset) break;
acc[p] = acc[p] + filter_value * sharedmem[point_offset];
point_offset += args.uniform_input_stride;
}
// Step to the next filter point and update the input stride
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
filter_point[d]++;
input_offset -= args.l1_input_pitches[d].divisor;
if (filter_point[d] == args.l1_filter_tile[d]) {
input_offset += args.l1_filter_tile[d] * args.l1_input_pitches[d].divisor;
filter_point[d] = 0;
} else {
break;
}
}
}
} else {
for (unsigned fidx = 0; fidx < args.l1_filter_points; fidx++) {
// Use shared memory broadcasting functionality to avoid bank conflicts
const VAL filter_value = sharedmem[fidx];
unsigned point_offset = input_offset;
#pragma unroll
for (int p = 0; p < POINTS; p++) {
acc[p] = acc[p] + filter_value * sharedmem[point_offset];
point_offset += args.uniform_input_stride;
}
// Step to the next filter point and update the input stride
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
filter_point[d]++;
input_offset -= args.l1_input_pitches[d].divisor;
if (filter_point[d] == args.l1_filter_tile[d]) {
input_offset += args.l1_filter_tile[d] * args.l1_input_pitches[d].divisor;
filter_point[d] = 0;
} else {
break;
}
}
}
}
} else {
// Need to compute the input offset uniquely for each point
Point<DIM, unsigned> input_point = thread_offset + args.l1_filter_tile - one;
unsigned point_offsets[POINTS];
#pragma unroll
for (int p = 0; p < POINTS; p++) {
point_offsets[p] = args.shared_input_offset;
#pragma unroll
for (int d = 0; d < DIM; d++)
point_offsets[p] +=
(input_point[d] + args.point_offsets[p][d]) * args.l1_input_pitches[d].divisor;
}
unsigned filter_offset = 0;
if (args.shared_input_bound) {
for (unsigned fidx = 0; fidx < args.l1_filter_points; fidx++) {
// Use shared memory broadcasting functionality to avoid bank conflicts
const VAL filter_value = sharedmem[fidx];
#pragma unroll
for (int p = 0; p < POINTS; p++) {
unsigned point_offset = point_offsets[p] - filter_offset;
if (args.shared_input_bound <= point_offset) continue;
acc[p] = acc[p] + filter_value * sharedmem[point_offset];
}
// Step to the next filter point
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
filter_point[d]++;
filter_offset += args.l1_input_pitches[d].divisor;
if (filter_point[d] == args.l1_filter_tile[d]) {
filter_offset -= args.l1_filter_tile[d] * args.l1_input_pitches[d].divisor;
filter_point[d] = 0;
} else {
break;
}
}
}
} else {
for (unsigned fidx = 0; fidx < args.l1_filter_points; fidx++) {
// Use shared memory broadcasting functionality to avoid bank conflicts
const VAL filter_value = sharedmem[fidx];
#pragma unroll
for (int p = 0; p < POINTS; p++) {
unsigned point_offset = point_offsets[p] - filter_offset;
acc[p] = acc[p] + filter_value * sharedmem[point_offset];
}
// Step to the next filter point
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
filter_point[d]++;
filter_offset += args.l1_input_pitches[d].divisor;
if (filter_point[d] == args.l1_filter_tile[d]) {
filter_offset -= args.l1_filter_tile[d] * args.l1_input_pitches[d].divisor;
filter_point[d] = 0;
} else {
break;
}
}
}
}
}
// Step to the next L1 filter tile
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
l1_filter_offset[d] += args.l1_filter_tile[d];
if (args.l2_filter_tile[d] <= l1_filter_offset[d])
l1_filter_offset[d] = 0;
else
break;
}
}
// Now we can stream our accumulators back to the output
Point<DIM> output = subrect.lo + l2_output_offset + (l1_output_offset + thread_offset);
if (input_contained) {
// If the input was contained, then so is the output
if (args.total_l1_points) {
unsigned index = threadIdx.x;
#pragma unroll
for (int p = 0; p < POINTS; p++) {
if (args.total_l1_points <= index) break;
VAL* ptr = out.ptr(output + args.point_offsets[p]);
// Make sure we don't pollute the L2 cache
VAL value = load_streaming<VAL>(ptr);
store_streaming<VAL>(ptr, value + acc[p]);
index += blockDim.x;
}
} else {
#pragma unroll
for (int p = 0; p < POINTS; p++) {
VAL* ptr = out.ptr(output + args.point_offsets[p]);
// Make sure we don't pollute the L2 cache
VAL value = load_streaming<VAL>(ptr);
store_streaming<VAL>(ptr, value + acc[p]);
}
}
} else {
// Input was not contained, so the output might not be either, do checks
if (args.total_l1_points) {
unsigned index = threadIdx.x;
#pragma unroll
for (int p = 0; p < POINTS; p++) {
if (args.total_l1_points <= index) break;
Point<DIM> point = output + args.point_offsets[p];
if (!subrect.contains(point)) break;
VAL* ptr = out.ptr(point);
// Make sure we don't pollute the L2 cache
VAL value = load_streaming<VAL>(ptr);
store_streaming<VAL>(ptr, value + acc[p]);
index += blockDim.x;
}
} else {
#pragma unroll
for (int p = 0; p < POINTS; p++) {
Point<DIM> point = output + args.point_offsets[p];
if (!subrect.contains(point)) continue;
VAL* ptr = out.ptr(point);
// Make sure we don't pollute the L2 cache
VAL value = load_streaming<VAL>(ptr);
store_streaming<VAL>(ptr, value + acc[p]);
}
}
}
}
// Step to the next output tile
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
l2_output_offset[d] += args.l2_output_tile[d];
if (args.l2_output_limits[d] <= l2_output_offset[d])
l2_output_offset[d] = 0;
else
break;
}
}
}
template <int DIM>
struct ConvolutionSmallTileArgs {
FastDivmodU64 grid_pitches[DIM];
FastDivmodU64 block_pitches[DIM];
FastDivmodU64 input_pitches[DIM];
unsigned block_tiles[DIM];
unsigned filter_centers[DIM];
unsigned filter_extents[DIM];
Point<DIM> delta_lo, delta_hi;
size_t filter_volume;
size_t tile_volume;
size_t input_volume;
};
template <typename VAL, int DIM>
__global__ static void __launch_bounds__(512, 2)
convolution_small_tile1(const AccessorWO<VAL, DIM> out,
const AccessorRO<VAL, DIM> filter,
const AccessorRO<VAL, DIM> in,
const Rect<DIM> root_rect,
const Rect<DIM> subrect,
const Rect<DIM> filter_rect,
const ConvolutionSmallTileArgs<DIM> args)
{
// Deal with compiler shared memory stupidity
extern __shared__ uint8_t buffer[];
// Technically this is illegal C++, but there's no other way to do it
VAL* input = (VAL*)buffer;
// Compute the origin point of the block
size_t offset = blockIdx.x;
Point<DIM> block_point = subrect.lo;
#pragma unroll
for (int d = 0; d < DIM; d++)
block_point[d] += args.grid_pitches[d].divmod(offset, offset) * args.block_tiles[d];
// Load in the shared memory for this block
Point<DIM> tile_point;
const Rect<DIM> input_bounds(block_point - args.delta_lo, block_point + args.delta_hi);
const bool input_contained = root_rect.contains(input_bounds);
if (input_contained) {
// All the points are contained, so no need for point-wise tests
// Unroll this four times to try to pipeline loads
#pragma unroll 4
for (unsigned idx = threadIdx.x; idx < args.input_volume; idx += blockDim.x) {
offset = idx;
#pragma unroll
for (int d = 0; d < DIM; d++) tile_point[d] = args.input_pitches[d].divmod(offset, offset);
VAL value = in[input_bounds.lo + tile_point];
// Write the value into shared memory
input[idx] = value;
}
} else {
// Need to do point-wise tests
// Unroll this four times to try to pipeline loads
#pragma unroll 4
for (unsigned idx = threadIdx.x; idx < args.input_volume; idx += blockDim.x) {
offset = idx;
#pragma unroll
for (int d = 0; d < DIM; d++) tile_point[d] = args.input_pitches[d].divmod(offset, offset);
if (!root_rect.contains(input_bounds.lo + tile_point)) continue;
VAL value = in[input_bounds.lo + tile_point];
// Write the value into shared memory
input[idx] = value;
}
}
// Wait for everything to be loaded into shared memory
__syncthreads();
// Loop over points in the tile and compute the outputs
coord_t f_coords[DIM];
Point<DIM> out_point, in_point, filter_point;
for (unsigned idx = threadIdx.x; idx < args.tile_volume; idx += blockDim.x) {
// Compute the local coordinates
offset = idx;
#pragma unroll
for (int d = 0; d < DIM; d++) {
tile_point[d] = args.block_pitches[d].divmod(offset, offset);
out_point[d] = block_point[d] + tile_point[d];
}
if (!subrect.contains(out_point)) continue;
#pragma unroll
for (int d = 0; d < DIM; d++) f_coords[d] = 0;
VAL acc{0};
for (unsigned idx = 0; idx < args.filter_volume; idx++) {
#pragma unroll
for (int d = 0; d < DIM; d++)
in_point[d] = out_point[d] + f_coords[d] - args.filter_centers[d];
if (input_contained || root_rect.contains(in_point)) {
offset = 0;
#pragma unroll
for (int d = 0; d < DIM; d++)
offset += (tile_point[d] + f_coords[d]) * args.input_pitches[d].divisor;
#pragma unroll
for (int d = 0; d < DIM; d++) filter_point[d] = args.filter_extents[d] - f_coords[d] - 1;
acc = acc + input[offset] * filter[filter_point];
}
// Step the filter coordinates
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
f_coords[d]++;
if (f_coords[d] == args.filter_extents[d])
f_coords[d] = 0;
else
break;
}
}
store_streaming(out.ptr(out_point), acc);
}
}
// This version of the kernel is identical to the one above but with
// different launch bounds to handle a bigger CTA with more shared memory
template <typename VAL, int DIM>
__global__ static void __launch_bounds__(1024, 1)
convolution_small_tile2(const AccessorWO<VAL, DIM> out,
const AccessorRO<VAL, DIM> filter,
const AccessorRO<VAL, DIM> in,
const Rect<DIM> root_rect,
const Rect<DIM> subrect,
const Rect<DIM> filter_rect,
const ConvolutionSmallTileArgs<DIM> args)
{
// Deal with compiler shared memory stupidity
extern __shared__ uint8_t buffer[];
// Technically this is illegal C++, but there's no other way to do it
VAL* input = (VAL*)buffer;
// Compute the origin point of the block
size_t offset = blockIdx.x;
Point<DIM> block_point = subrect.lo;
#pragma unroll
for (int d = 0; d < DIM; d++)
block_point[d] += args.grid_pitches[d].divmod(offset, offset) * args.block_tiles[d];
// Load in the shared memory for this block
Point<DIM> tile_point;
const Rect<DIM> input_bounds(block_point - args.delta_lo, block_point + args.delta_hi);
const bool input_contained = root_rect.contains(input_bounds);
if (input_contained) {
// All the points are contained, so no need for point-wise tests
// Unroll this four times to try to pipeline loads
#pragma unroll 4
for (unsigned idx = threadIdx.x; idx < args.input_volume; idx += blockDim.x) {
offset = idx;
#pragma unroll
for (int d = 0; d < DIM; d++) tile_point[d] = args.input_pitches[d].divmod(offset, offset);
VAL value = in[input_bounds.lo + tile_point];
// Write the value into shared memory
input[idx] = value;
}
} else {
// Need to do point-wise tests
// Unroll this four times to try to pipeline loads
#pragma unroll 4
for (unsigned idx = threadIdx.x; idx < args.input_volume; idx += blockDim.x) {
offset = idx;
#pragma unroll
for (int d = 0; d < DIM; d++) tile_point[d] = args.input_pitches[d].divmod(offset, offset);
if (!root_rect.contains(input_bounds.lo + tile_point)) continue;
VAL value = in[input_bounds.lo + tile_point];
// Write the value into shared memory
input[idx] = value;
}
}
// Wait for everything to be loaded into shared memory
__syncthreads();
// Loop over points in the tile and compute the outputs
coord_t f_coords[DIM];
Point<DIM> out_point, in_point, filter_point;
for (unsigned idx = threadIdx.x; idx < args.tile_volume; idx += blockDim.x) {
// Compute the local coordinates
offset = idx;
#pragma unroll
for (int d = 0; d < DIM; d++) {
tile_point[d] = args.block_pitches[d].divmod(offset, offset);
out_point[d] = block_point[d] + tile_point[d];
}
if (!subrect.contains(out_point)) continue;
#pragma unroll
for (int d = 0; d < DIM; d++) f_coords[d] = 0;
VAL acc{0};
for (unsigned idx = 0; idx < args.filter_volume; idx++) {
#pragma unroll
for (int d = 0; d < DIM; d++)
in_point[d] = out_point[d] + f_coords[d] - args.filter_centers[d];
if (input_contained || root_rect.contains(in_point)) {
offset = 0;
#pragma unroll
for (int d = 0; d < DIM; d++)
offset += (tile_point[d] + f_coords[d]) * args.input_pitches[d].divisor;
#pragma unroll
for (int d = 0; d < DIM; d++) filter_point[d] = args.filter_extents[d] - f_coords[d] - 1;
acc = acc + input[offset] * filter[filter_point];
}
// Step the filter coordinates
#pragma unroll
for (int d = DIM - 1; d >= 0; d--) {
f_coords[d]++;
if (f_coords[d] == args.filter_extents[d])
f_coords[d] = 0;
else
break;
}
}
store_streaming(out.ptr(out_point), acc);
}
}
template <typename VAL, int DIM>
__host__ static inline void launch_small_tile_kernel(AccessorWO<VAL, DIM> out,
AccessorRO<VAL, DIM> filter,
AccessorRO<VAL, DIM> in,
const Rect<DIM>& root_rect,
const Rect<DIM>& subrect,
const Rect<DIM>& filter_rect,
const cudaDeviceProp& properties,
const unsigned extents[DIM],
const unsigned centers[DIM],
Point<DIM>& tile,
unsigned smem_size,
size_t max_smem_size)
{
// Make the tile as big as possible so that it fits in shared memory
// Try to keep it rectangular to minimize surface-to-volume ratio
// and improve the reuse of data
// If the current tile is less than half the shared memory in the SM then
// decrease the upper bound so we can get 2 CTAs/SM
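// For example, assuming 96 KB of shared memory per SM and a tile that
// currently needs 40 KB, half_smem is 48 KB; provided the per-block opt-in
// limit is above 48 KB, the bound is lowered so two CTAs can be resident.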
bool halved = false;
const unsigned half_smem = properties.sharedMemPerMultiprocessor / 2;
if ((smem_size <= (half_smem)) && (half_smem < max_smem_size)) {
max_smem_size = half_smem;
halved = true;
}
Point<DIM> padding;
for (int d = 0; d < DIM; d++) padding[d] = 2 * centers[d];
Point<DIM> bounds = subrect.hi - subrect.lo + Point<DIM>::ONES();
smem_size = roundup_tile<VAL, DIM>(tile, bounds, padding, max_smem_size);
// At this point we've got the tile size that we're going to compute
// and the amount of dynamic shared memory that we need
// Compute the arguments needed for the kernel launch
ConvolutionSmallTileArgs<DIM> args;
size_t blocks = 1;
size_t tile_pitch = 1;
unsigned input_pitch = 1;
args.filter_volume = 1;
for (int d = DIM - 1; d >= 0; d--) {
size_t blocks_along_dim = ((subrect.hi[d] - subrect.lo[d]) + tile[d]) / tile[d];
args.grid_pitches[d] = FastDivmodU64(blocks);
blocks *= blocks_along_dim;
args.block_tiles[d] = tile[d];
args.block_pitches[d] = FastDivmodU64(tile_pitch);
tile_pitch *= tile[d];
args.delta_lo[d] = centers[d];
args.delta_hi[d] = tile[d] + centers[d] - 1;
args.input_pitches[d] = FastDivmodU64(input_pitch);
input_pitch *= (args.delta_lo[d] + args.delta_hi[d] + 1);
args.filter_centers[d] = centers[d];
args.filter_extents[d] = extents[d];
args.filter_volume *= extents[d];
}
args.tile_volume = tile_pitch;
args.input_volume = input_pitch;
assert((input_pitch * sizeof(VAL)) == smem_size);
auto stream = get_cached_stream();
if (halved) {
if (tile_pitch < 512)
convolution_small_tile1<VAL, DIM><<<blocks, tile_pitch, smem_size, stream>>>(
out, filter, in, root_rect, subrect, filter_rect, args);
else
convolution_small_tile1<VAL, DIM><<<blocks, 512, smem_size, stream>>>(
out, filter, in, root_rect, subrect, filter_rect, args);
} else {
if (tile_pitch < 1024)
convolution_small_tile2<VAL, DIM><<<blocks, tile_pitch, smem_size, stream>>>(
out, filter, in, root_rect, subrect, filter_rect, args);
else
convolution_small_tile2<VAL, DIM><<<blocks, 1024, smem_size, stream>>>(
out, filter, in, root_rect, subrect, filter_rect, args);
}
CHECK_CUDA_STREAM(stream);
}
template <typename VAL, int32_t DIM>
__host__ void direct_convolution(AccessorWO<VAL, DIM> out,
AccessorRO<VAL, DIM> filter,
AccessorRO<VAL, DIM> in,
const Rect<DIM>& root_rect,
const Rect<DIM>& subrect,
const Rect<DIM>& filter_rect)
{
constexpr int THREADVALS = THREAD_OUTPUTS(VAL);
// Get the maximum amount of shared memory per threadblock
int device;
CHECK_CUDA(cudaGetDevice(&device));
cudaDeviceProp properties;
CHECK_CUDA(cudaGetDeviceProperties(&properties, device));
size_t max_smem_size = properties.sharedMemPerBlockOptin;
// Only need to do these calls the first time on each device so
// we use a bit mask to track which devices we've done it for
static unsigned long long mask = 0;
if (!(mask & (1ULL << device))) {
if (properties.sharedMemPerBlock < max_smem_size) {
CHECK_CUDA(cudaFuncSetAttribute(convolution_small_tile1<VAL, DIM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
max_smem_size));
CHECK_CUDA(cudaFuncSetAttribute(convolution_small_tile2<VAL, DIM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
max_smem_size));
CHECK_CUDA(cudaFuncSetAttribute(convolution_large_tile<VAL, DIM, THREADVALS>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
max_smem_size));
}
if (sizeof(VAL) >= 8) {
// Only need to set this on the first invocation
CHECK_CUDA(cudaFuncSetSharedMemConfig(convolution_small_tile1<VAL, DIM>,
cudaSharedMemBankSizeEightByte));
CHECK_CUDA(cudaFuncSetSharedMemConfig(convolution_small_tile2<VAL, DIM>,
cudaSharedMemBankSizeEightByte));
CHECK_CUDA(cudaFuncSetSharedMemConfig(convolution_large_tile<VAL, DIM, THREADVALS>,
cudaSharedMemBankSizeEightByte));
}
// Make sure we have enough bits for every device
assert(device < (8 * sizeof(mask)));
// Make sure not to race with updates from other GPUs
__sync_fetch_and_add(&mask, (1ULL << device));
}
unsigned extents[DIM];
unsigned centers[DIM];
for (int d = 0; d < DIM; d++) {
assert(filter_rect.lo[d] == 0);
extents[d] = filter_rect.hi[d] + 1;
centers[d] = static_cast<coord_t>(extents[d] / 2);
}
Point<DIM> tile;
for (int d = DIM - 1; d >= 0; d--) {
// Make sure that each tile is at least double the size of the filter
// so that we can get some savings in bandwidth needed
tile[d] = 2 * centers[d];
if (d == (DIM - 1)) {
// In order to maximize bandwidth, we want to make sure we're loading at
// least 128B of contiguous memory along the last axis (row-major) of input
const unsigned min_contig_elmts = 128 / sizeof(VAL);
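// (e.g. 32 contiguous elements for float, 16 for double)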
if ((tile[d] + 2 * centers[d]) < min_contig_elmts)
tile[d] = min_contig_elmts - 2 * centers[d];
}
}
unsigned smem_size = sizeof(VAL);
for (int d = 0; d < DIM; d++) smem_size *= (tile[d] + 2 * centers[d]);
if (smem_size <= max_smem_size) {
// Small tile case:
launch_small_tile_kernel<VAL, DIM>(out,
filter,
in,
root_rect,
subrect,
filter_rect,
properties,
extents,
centers,
tile,
smem_size,
max_smem_size);
} else {
// Large tile case:
// If we're going to do this, we need to initialize the output to zeros
// so we can kick that off to the GPU while we figure out how to launch
// the rest of the kernels to do the convolution
size_t strides[DIM];
VAL* out_ptr = out.ptr(subrect, strides);
// Check to see if the output is dense
bool out_dense = true;
size_t out_pitch = 1;
for (int d = DIM - 1; d >= 0; d--) {
if (strides[d] != out_pitch) {
out_dense = false;
break;
}
out_pitch *= (subrect.hi[d] - subrect.lo[d] + 1);
}
if (out_dense) {
size_t bytes = sizeof(VAL) * out_pitch;
CHECK_CUDA(cudaMemsetAsync(out_ptr, 0, bytes));
} else {
out_pitch = 1;
ConvolutionInitArgs<DIM> args;
for (int d = DIM - 1; d >= 0; d--) {
args.pitches[d] = FastDivmodU64(out_pitch);
out_pitch *= (subrect.hi[d] - subrect.lo[d] + 1);
}
size_t blocks = (out_pitch + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
convolution_init<VAL, DIM><<<blocks, THREADS_PER_BLOCK>>>(out, subrect.lo, args, out_pitch);
}
// Figure out the shape of the L1 output tile based on the number of
// points that we can fit into registers
Point<DIM> l1_output_tile;
const unsigned max_l1_output_volume = CONVOLUTION_THREADS * THREADVALS;
// Make sure the max_l1_output_volume doesn't consume more than half of shared memory
unsigned target_l1_output_volume = max_l1_output_volume;
while ((max_smem_size / 2) < (target_l1_output_volume * sizeof(VAL)))
target_l1_output_volume /= 2;
const Point<DIM> output_bounds = subrect.hi - subrect.lo + Point<DIM>::ONES();
const unsigned l1_output_volume =
compute_output_tile<VAL, DIM>(l1_output_tile,
output_bounds,
128 /*cache line size*/ / sizeof(VAL),
target_l1_output_volume);
// At this point we've got our output tile, compute how big a filter
// tile we can make and still fit both the filter tile and the
// input tile into the maximum amount of shared memory for this GPU
Point<DIM> l1_filter_tile;
const Point<DIM> filter_bounds = filter_rect.hi - filter_rect.lo + Point<DIM>::ONES();
unsigned dynamic_smem =
compute_filter_tile<VAL, DIM>(l1_filter_tile, filter_bounds, l1_output_tile, max_smem_size);
unsigned input_smem_offset = 1;
for (int d = 0; d < DIM; d++) input_smem_offset *= l1_filter_tile[d];
// Tile the number of SMs on this GPU to compute the shape of the
// L2 output tile for this kernel
// We assume here that the number of SMs is easily factorable
// into primes of 2, 3, and 5. It would be strange these days to find a
// GPU with a number of SMs that can't be factored this way; if we do
// encounter one, we report a warning.
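// For example, assuming 108 SMs (2^2 * 3^3), the loops below peel off two
// factors of 2 and three factors of 3 and spread them greedily across the
// dimensions, always growing the currently smallest l2_tiles entry.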
unsigned l2_tiles[DIM];
for (int d = 0; d < DIM; d++) l2_tiles[d] = 1;
if (DIM > 1) {
unsigned twos = 0, threes = 0, fives = 0;
unsigned remainder = properties.multiProcessorCount;
while ((remainder > 1) && ((remainder % 2) == 0)) {
twos++;
remainder /= 2;
}
while ((remainder > 1) && ((remainder % 3) == 0)) {
threes++;
remainder /= 3;
}
while ((remainder > 1) && ((remainder % 5) == 0)) {
fives++;
remainder /= 5;
}
if (remainder > 1) {
fprintf(stdout,
"WARNING: %d is an unusual number of SMs "
"for GPU convolution. Please report your GPU kind and "
"the number of SMs in a Legate NumPy issue.",
properties.multiProcessorCount);
l2_tiles[DIM - 1] = remainder;
}
for (unsigned idx = 0; idx < fives; idx++) {
int smallest = 0;
for (int d = 1; d < DIM; d++) {
if (l2_tiles[smallest] < l2_tiles[d]) continue;
smallest = d;
}
l2_tiles[smallest] *= 5;
}
for (unsigned idx = 0; idx < threes; idx++) {
int smallest = 0;
for (int d = 1; d < DIM; d++) {
if (l2_tiles[smallest] < l2_tiles[d]) continue;
smallest = d;
}
l2_tiles[smallest] *= 3;
}
for (unsigned idx = 0; idx < twos; idx++) {
int smallest = 0;
for (int d = 1; d < DIM; d++) {
if (l2_tiles[smallest] < l2_tiles[d]) continue;
smallest = d;
}
l2_tiles[smallest] *= 2;
}
} else {
l2_tiles[0] = properties.multiProcessorCount;
}
// Now that we've got a tiling of the l1 output blocks across
// the SMs compute how big it is in memory and see if it is less
// than a quarter of the L2 cache so we can block for the L2
Point<DIM> l2_output_tile;
size_t l2_output_tile_size = sizeof(VAL);
for (int d = 0; d < DIM; d++) {
l2_output_tile[d] = l2_tiles[d] * l1_output_tile[d];
l2_output_tile_size *= l2_output_tile[d];
}
Point<DIM> l2_filter_tile;
size_t total_l2_filters = 1;
if (l2_output_tile_size <= (properties.l2CacheSize / 4)) {
for (int d = 0; d < DIM; d++) l2_filter_tile[d] = 1;
// Compute the L2 filter tile size so that the L2 filter and the
// corresponding L2 input tile will fit in the L2 cache
compute_filter_tile<VAL, DIM>(
l2_filter_tile, filter_bounds, l2_output_tile, 3 * properties.l2CacheSize / 4);
for (int d = 0; d < DIM; d++)
total_l2_filters *= (filter_bounds[d] + l2_filter_tile[d] - 1) / l2_filter_tile[d];
} else {
// It's likely this tile is too big to block for the L2 cache
// so we're not going to bother blocking for the L2 and just
// run everything out of the framebuffer memory. The upside is
// that we'll only need to make a single pass over the input
for (int d = 0; d < DIM; d++) l2_filter_tile[d] = filter_rect.hi[d] - filter_rect.lo[d] + 1;
}
// Construct the arguments for the kernel launches
ConvolutionLargeTileArgs<DIM, THREADVALS> args;
int pitch = 1;
for (int d = DIM - 1; d >= 0; d--) {
args.l1_input_pitches[d] = FastDivmod(pitch);
pitch *= (l1_output_tile[d] + 2 * (l1_filter_tile[d] / 2));
}
pitch = 1;
for (int d = DIM - 1; d >= 0; d--) {
args.l1_filter_pitches[d] = FastDivmod(pitch);
pitch *= l1_filter_tile[d];
}
pitch = 1;
for (int d = DIM - 1; d >= 0; d--) {
args.l1_output_pitches[d] = FastDivmod(pitch);
pitch *= l1_output_tile[d];
}
args.l2_output_tile = l2_output_tile;
args.l2_filter_tile = l2_filter_tile;
args.l1_output_tile = l1_output_tile;
args.l1_filter_tile = l1_filter_tile;
args.l2_output_limits = output_bounds;
args.shared_input_offset = input_smem_offset;
args.total_l2_outputs = 1;
args.total_l1_outputs = 1;
args.total_l1_filters = 1;
args.l1_filter_points = 1;
args.l1_input_points = 1;
pitch = 1;
for (int d = DIM - 1; d >= 0; d--) {
args.total_l2_outputs *= (output_bounds[d] + l2_output_tile[d] - 1) / l2_output_tile[d];
args.l1_output_tile_pitches[d] = FastDivmod(pitch);
pitch *= (l2_output_tile[d] + l1_output_tile[d] - 1) / l1_output_tile[d];
args.total_l1_filters *= (l2_filter_tile[d] + l1_filter_tile[d] - 1) / l1_filter_tile[d];
args.l1_filter_points *= l1_filter_tile[d];
args.l1_input_points *= (l1_output_tile[d] + 2 * (l1_filter_tile[d] / 2));
}
args.total_l1_outputs = pitch;
// Figure out how to tile the points across the l1_output_tile
if (DIM > 1) {
unsigned regsteps[DIM];
for (int d = 0; d < DIM; d++) regsteps[d] = 0;
unsigned remainder = THREADVALS;
// Handle the case here where we aren't going to use all
// the points in the registers so we need to scale back
if (l1_output_volume < max_l1_output_volume) {
assert((max_l1_output_volume % l1_output_volume) == 0);
remainder /= (max_l1_output_volume / l1_output_volume);
if (remainder == 0) remainder = 1;
}
for (int d = 0; d < DIM; d++) {
if (remainder == 1) {
regsteps[d] = l1_output_tile[d];
} else if (remainder <= l1_output_tile[d]) {
// All powers of two so should always divide
assert((l1_output_tile[d] % remainder) == 0);
regsteps[d] = l1_output_tile[d] / remainder;
remainder = 1;
} else {
// All powers of two so should always divide
assert((remainder % l1_output_tile[d]) == 0);
regsteps[d] = 1;
remainder /= l1_output_tile[d];
}
}
assert(remainder == 1);
Point<DIM, unsigned> offset = Point<DIM, unsigned>::ZEROES();
for (int p = 0; p < THREADVALS; p++) {
args.point_offsets[p] = offset;
// Step to the next offset
for (int d = DIM - 1; d >= 0; d--) {
offset[d] += regsteps[d];
if (offset[d] == l1_output_tile[d]) {
if ((d == 0) && (p != (THREADVALS - 1)))
// Allow overflow in this case to handle the case
// where we have more points than we need for the l1 output tile
assert(l1_output_volume < max_l1_output_volume);
else
offset[d] = 0;
} else
break;
}
}
args.uniform_input_stride = regsteps[0] * args.l1_input_pitches[0].divisor;
// Check to make sure this is the uniform input stride case
for (int d = 1; d < DIM; d++) {
if (regsteps[d] == l1_output_tile[d]) continue;
args.uniform_input_stride = 0;
break;
}
} else {
assert(THREADVALS <= l1_output_tile[0]);
unsigned remainder = THREADVALS;
// Handle the case here where we aren't going to use all
// the points in the registers so we need to scale back
if (l1_output_volume < max_l1_output_volume) {
assert((max_l1_output_volume % l1_output_volume) == 0);
remainder /= (max_l1_output_volume / l1_output_volume);
if (remainder == 0) remainder = 1;
}
assert((l1_output_tile[0] % remainder) == 0);
unsigned regstep = l1_output_tile[0] / remainder;
for (int p = 0; p < THREADVALS; p++) args.point_offsets[p][0] = p * regstep;
args.uniform_input_stride = regstep * args.l1_input_pitches[0].divisor;
}
if (l1_output_volume < max_l1_output_volume) {
args.shared_input_bound = dynamic_smem / sizeof(VAL);
args.total_l1_points = l1_output_volume;
} else {
args.shared_input_bound = 0;
args.total_l1_points = 0;
}
// Launch as many kernels as we need to walk over the entire filter
// Given the L2 filter tile that we came up with
auto stream = get_cached_stream();
const Point<DIM, unsigned> zero = Point<DIM, unsigned>::ZEROES();
const Point<DIM, unsigned> one = Point<DIM, unsigned>::ONES();
if (total_l2_filters > 1) {
Point<DIM> l2_filter_lo = filter_rect.lo;
for (unsigned idx = 0; idx < total_l2_filters; idx++) {
Rect<DIM> l2_filter_rect(l2_filter_lo, l2_filter_lo + l2_filter_tile - one);
l2_filter_rect = l2_filter_rect.intersection(filter_rect);
const Point<DIM> l1_input_start =
subrect.lo + Point<DIM>(extents) - l2_filter_lo - l1_filter_tile - Point<DIM>(centers);
const Point<DIM> l2_input_start =
subrect.lo + Point<DIM>(extents) - l2_filter_rect.hi - one - Point<DIM>(centers);
const Point<DIM> l2_input_stop = subrect.lo + l2_output_tile - one + Point<DIM>(extents) -
l2_filter_rect.lo - one - Point<DIM>(centers);
convolution_large_tile<VAL, DIM, THREADVALS>
<<<properties.multiProcessorCount, CONVOLUTION_THREADS, dynamic_smem, stream>>>(
out,
filter,
in,
root_rect,
subrect,
l2_filter_rect,
l2_input_start,
l2_input_stop,
l1_input_start,
zero,
one,
args);
// Step to the next filter
for (int d = DIM - 1; d >= 0; d--) {
l2_filter_lo[d] += l2_filter_tile[d];
if (filter_rect.hi[d] < l2_filter_lo[d])
l2_filter_lo[d] = filter_rect.lo[d];
else
break;
}
}
} else {
assert(total_l2_filters == 1);
const Point<DIM> l1_input_start =
subrect.lo + Point<DIM>(extents) - filter_rect.lo - l1_filter_tile - Point<DIM>(centers);
const Point<DIM> l2_input_start = subrect.lo - Point<DIM>(centers);
const Point<DIM> l2_input_stop = subrect.lo + l2_output_tile - one + Point<DIM>(extents) -
filter_rect.lo - one - Point<DIM>(centers);
convolution_large_tile<VAL, DIM, THREADVALS>
<<<properties.multiProcessorCount, CONVOLUTION_THREADS, dynamic_smem, stream>>>(
out,
filter,
in,
root_rect,
subrect,
filter_rect,
l2_input_start,
l2_input_stop,
l1_input_start,
zero,
one,
args);
}
CHECK_CUDA_STREAM(stream);
}
}
///////////////////////////////////////
// FFT-based convolution implementation
///////////////////////////////////////
template <int DIM>
struct FFTPitches {
size_t pitches[DIM];
__host__ inline size_t& operator[](unsigned idx) { return pitches[idx]; }
__device__ __forceinline__ size_t operator[](unsigned idx) const { return pitches[idx]; }
};
template <int DIM>
struct CopyPitches {
FastDivmodU64 pitches[DIM];
__host__ inline FastDivmodU64& operator[](unsigned idx) { return pitches[idx]; }
__device__ __forceinline__ const FastDivmodU64& operator[](unsigned idx) const
{
return pitches[idx];
}
};
template <typename VAL, int DIM>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 4)
copy_into_buffer(const AccessorRO<VAL, DIM> accessor,
const Buffer<VAL, DIM> buffer,
const Point<DIM> lo,
const CopyPitches<DIM> copy_pitches,
const size_t volume)
{
size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= volume) return;
Point<DIM> point;
for (int d = 0; d < DIM; d++) point[d] = copy_pitches[d].divmod(offset, offset);
buffer[point] = accessor[lo + point];
}
template <typename VAL, int DIM>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 4)
copy_from_buffer(const VAL* buffer,
const AccessorWO<VAL, DIM> accessor,
const Point<DIM> buffer_lo,
const Point<DIM> accessor_lo,
const CopyPitches<DIM> copy_pitches,
const FFTPitches<DIM> fft_pitches,
const size_t volume,
const VAL scaling)
{
size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= volume) return;
Point<DIM> point;
size_t buffer_offset = 0;
for (int d = 0; d < DIM; d++) {
point[d] = copy_pitches[d].divmod(offset, offset);
buffer_offset += (buffer_lo[d] + point[d]) * fft_pitches[d];
}
accessor[accessor_lo + point] = scaling * buffer[buffer_offset];
}
template <typename VAL>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 4)
complex_multiply(complex<VAL>* inout, complex<VAL>* in, const size_t volume)
{
size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= volume) return;
inout[offset] *= in[offset];
}
__host__ inline void cufft_execute_forward(cufftHandle plan, float* idata, float* odata)
{
CHECK_CUFFT(cufftExecR2C(plan, (cufftReal*)idata, (cufftComplex*)odata));
}
__host__ inline void cufft_execute_forward(cufftHandle plan, double* idata, double* odata)
{
CHECK_CUFFT(cufftExecD2Z(plan, (cufftDoubleReal*)idata, (cufftDoubleComplex*)odata));
}
__host__ inline void cufft_execute_backward(cufftHandle plan, float* idata, float* odata)
{
CHECK_CUFFT(cufftExecC2R(plan, (cufftComplex*)idata, (cufftReal*)odata));
}
__host__ inline void cufft_execute_backward(cufftHandle plan, double* idata, double* odata)
{
CHECK_CUFFT(cufftExecZ2D(plan, (cufftDoubleComplex*)idata, (cufftDoubleReal*)odata));
}
template <typename VAL>
struct ForwardPlanType;
template <>
struct ForwardPlanType<float> {
static constexpr cufftType value = CUFFT_R2C;
};
template <>
struct ForwardPlanType<double> {
static constexpr cufftType value = CUFFT_D2Z;
};
template <typename VAL>
struct BackwardPlanType;
template <>
struct BackwardPlanType<float> {
static constexpr cufftType value = CUFFT_C2R;
};
template <>
struct BackwardPlanType<double> {
static constexpr cufftType value = CUFFT_Z2D;
};
template <typename VAL, int DIM>
__host__ static inline void cufft_convolution(AccessorWO<VAL, DIM> out,
AccessorRO<VAL, DIM> filter,
AccessorRO<VAL, DIM> in,
const Rect<DIM>& root_rect,
const Rect<DIM>& subrect,
const Rect<DIM>& filter_rect)
{
int device;
CHECK_CUDA(cudaGetDevice(&device));
cudaDeviceProp properties;
CHECK_CUDA(cudaGetDeviceProperties(&properties, device));
size_t max_smem_size = properties.sharedMemPerBlockOptin;
// Only need to do these calls the first time on each device so
// we use a bit mask to track which devices we've done it for
static unsigned long long mask = 0;
if (!(mask & (1ULL << device))) {
if (properties.sharedMemPerBlock < max_smem_size) {
CHECK_CUDA(cudaFuncSetAttribute(convolution_small_tile1<VAL, DIM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
max_smem_size));
CHECK_CUDA(cudaFuncSetAttribute(convolution_small_tile2<VAL, DIM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
max_smem_size));
}
if (sizeof(VAL) >= 8) {
// Only need to set this on the first invocation
CHECK_CUDA(cudaFuncSetSharedMemConfig(convolution_small_tile1<VAL, DIM>,
cudaSharedMemBankSizeEightByte));
CHECK_CUDA(cudaFuncSetSharedMemConfig(convolution_small_tile2<VAL, DIM>,
cudaSharedMemBankSizeEightByte));
}
// Make sure we have enough bits for every device
assert(device < (8 * sizeof(mask)));
// Make sure not to race with updates from other GPUs
__sync_fetch_and_add(&mask, (1ULL << device));
}
unsigned extents[DIM];
unsigned centers[DIM];
for (int d = 0; d < DIM; d++) {
assert(filter_rect.lo[d] == 0);
extents[d] = filter_rect.hi[d] + 1;
centers[d] = static_cast<coord_t>(extents[d] / 2);
}
Point<DIM> tile;
for (int d = DIM - 1; d >= 0; d--) {
// Make sure that each tile is at least double the size of the filter
// so that we can get some savings in bandwidth needed
tile[d] = 2 * centers[d];
if (d == (DIM - 1)) {
// In order to maximize bandwidth, we want to make sure we're loading at
// least 128B of contiguous memory along the last axis (row-major) of input
const unsigned min_contig_elmts = 128 / sizeof(VAL);
if ((tile[d] + 2 * centers[d]) < min_contig_elmts)
tile[d] = min_contig_elmts - 2 * centers[d];
}
}
unsigned smem_size = sizeof(VAL);
for (int d = 0; d < DIM; d++) smem_size *= (tile[d] + 2 * centers[d]);
if (smem_size <= max_smem_size) {
launch_small_tile_kernel<VAL, DIM>(out,
filter,
in,
root_rect,
subrect,
filter_rect,
properties,
extents,
centers,
tile,
smem_size,
max_smem_size);
} else {
// Instead of doing the large tile case, we can do this by transforming
// both the input and the filter to the frequency domain with an FFT,
// performing the convolution as a point-wise multiplication there, and
// then transforming the result back to the spatial domain
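// Concretely: zero-pad the signal and the filter into fftsize-shaped buffers,
// run a real-to-complex FFT on each, multiply elementwise in the frequency
// domain, run the inverse complex-to-real FFT, and scale the result by
// 1 / prod(fftsize) since cuFFT leaves the inverse transform unnormalized.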
auto stream = get_cached_stream();
// First compute how big our temporary allocation needs to be
// We'll need two of them to store the zero-padded data for the inputs
const Point<DIM> zero = Point<DIM>::ZEROES();
const Point<DIM> one = Point<DIM>::ONES();
Rect<DIM> offset_bounds;
for (int d = 0; d < DIM; d++) {
offset_bounds.lo[d] = subrect.lo[d] - centers[d];
offset_bounds.hi[d] = subrect.hi[d] + extents[d] - 1 - centers[d];
}
Rect<DIM> input_bounds = root_rect.intersection(offset_bounds);
const Point<DIM> signal_bounds = input_bounds.hi - input_bounds.lo + one;
const Point<DIM> filter_bounds = filter_rect.hi - filter_rect.lo + one;
Point<DIM> fftsize = signal_bounds + filter_bounds;
for (int d = 0; d < DIM; d++) {
// Technically we can shrink this by one and still be sound but we'll
// only do that if it will make the number even
if ((fftsize[d] % 2) == 1) fftsize[d]--;
}
// Cufft needs the last dimension to have fftsize/2+1 complex elements for
// the temporary buffer
// Since we know fftsize is even, we just need to add two to it for the output
Point<DIM> buffersize = fftsize;
buffersize[DIM - 1] += 2;
size_t buffervolume = 1;
for (int d = 0; d < DIM; d++) buffervolume *= buffersize[d];
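// The +2 on the last dimension comes from cuFFT's real-to-complex layout: an R2C
// transform of (even) length N along the last axis produces N / 2 + 1 complex values,
// i.e. N + 2 reals, so the real buffer is padded to hold the transform in place.
// Illustrative numbers: fftsize = {64, 64} gives buffersize = {64, 66} and
// buffervolume = 4224 reals, which is 2112 complex values after the forward FFT.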
// Zero pad and copy in the input data
auto signal_buffer = create_buffer<VAL, DIM>(buffersize, Memory::GPU_FB_MEM, 128 /*alignment*/);
VAL* signal_ptr = signal_buffer.ptr(zero);
CHECK_CUDA(cudaMemsetAsync(signal_ptr, 0, buffervolume * sizeof(VAL), stream));
// Check to see if the input pointer is dense and we can do this with a CUDA memcpy
size_t strides[DIM];
const VAL* input_ptr = in.ptr(input_bounds, strides);
size_t pitch = 1;
CopyPitches<DIM> copy_pitches;
for (int d = DIM - 1; d >= 0; d--) {
copy_pitches[d] = FastDivmodU64(pitch);
pitch *= signal_bounds[d];
}
size_t blocks = (pitch + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
copy_into_buffer<VAL, DIM><<<blocks, THREADS_PER_BLOCK, 0, stream>>>(
in, signal_buffer, input_bounds.lo, copy_pitches, pitch);
// Zero pad and copy in the filter data
auto filter_buffer = create_buffer<VAL, DIM>(buffersize, Memory::GPU_FB_MEM, 128 /*alignment*/);
VAL* filter_ptr = filter_buffer.ptr(zero);
CHECK_CUDA(cudaMemsetAsync(filter_ptr, 0, buffervolume * sizeof(VAL), stream));
const VAL* filt_ptr = filter.ptr(filter_rect, strides);
pitch = 1;
for (int d = DIM - 1; d >= 0; d--) {
copy_pitches[d] = FastDivmodU64(pitch);
pitch *= filter_bounds[d];
}
blocks = (pitch + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
copy_into_buffer<VAL, DIM><<<blocks, THREADS_PER_BLOCK, 0, stream>>>(
filter, filter_buffer, filter_rect.lo, copy_pitches, pitch);
CHECK_CUDA_STREAM(stream);
auto forward_plan = get_cufft_plan(ForwardPlanType<VAL>::value, fftsize);
auto backward_plan = get_cufft_plan(BackwardPlanType<VAL>::value, fftsize);
// Set the stream and working area for the plans
CHECK_CUFFT(cufftSetStream(forward_plan.handle(), stream));
CHECK_CUFFT(cufftSetStream(backward_plan.handle(), stream));
auto workarea_size = std::max(forward_plan.workareaSize(), backward_plan.workareaSize());
// Allocate a temporary work area for the plans if they need one
Buffer<uint8_t, 1> workarea_buffer;
if (workarea_size > 0) {
const Point<1> zero1d(0);
workarea_buffer =
create_buffer<uint8_t, 1>(workarea_size, Memory::GPU_FB_MEM, 128 /*alignment*/);
void* workarea = workarea_buffer.ptr(zero1d);
CHECK_CUFFT(cufftSetWorkArea(forward_plan.handle(), workarea));
CHECK_CUFFT(cufftSetWorkArea(backward_plan.handle(), workarea));
}
// FFT the input data
cufft_execute_forward(forward_plan.handle(), signal_ptr, signal_ptr);
// FFT the filter data
cufft_execute_forward(forward_plan.handle(), filter_ptr, filter_ptr);
CHECK_CUDA_STREAM(stream);
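// Convolution theorem: the FFT of a convolution is the element-wise product of the
// FFTs, so the convolution reduces to one complex multiply over the spectra. The
// zero-padding of both buffers to signal size + filter size keeps the circular
// wrap-around of the FFT-based convolution out of the region copied back below.
// buffervolume real values hold buffervolume / 2 complex values, hence `volume` below.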
// Perform the pointwise multiplication
{
size_t volume = (buffervolume / 2);
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
complex_multiply<VAL><<<blocks, THREADS_PER_BLOCK, 0, stream>>>(
(complex<VAL>*)signal_ptr, (complex<VAL>*)filter_ptr, volume);
}
// Inverse FFT for the output
// Do this out-of-place for better performance
cufft_execute_backward(backward_plan.handle(), signal_ptr, filter_ptr);
// Copy the result data out of the temporary buffer and scale
// because CUFFT inverse does not perform the scale for us
pitch = 1;
FFTPitches<DIM> fft_pitches;
for (int d = DIM - 1; d >= 0; d--) {
fft_pitches[d] = pitch;
pitch *= fftsize[d];
}
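// At this point pitch == fftsize[0] * ... * fftsize[DIM - 1], the total number of FFT
// points. cuFFT's inverse transform is unnormalized (a forward + inverse round trip
// scales every element by that product), so copy_from_buffer multiplies by 1 / pitch
// while copying the result out.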
const VAL scaling_factor = VAL(1) / pitch;
Point<DIM> buffer_offset;
for (int d = 0; d < DIM; d++)
buffer_offset[d] =
centers[d] - (((extents[d] % 2) == 0) ? 1 : 0) +
((offset_bounds.lo[d] < root_rect.lo[d]) ? (subrect.lo[d] - root_rect.lo[d]) : centers[d]);
Point<DIM> output_bounds = subrect.hi - subrect.lo + one;
pitch = 1;
for (int d = DIM - 1; d >= 0; d--) {
copy_pitches[d] = FastDivmodU64(pitch);
pitch *= output_bounds[d];
}
blocks = (pitch + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
copy_from_buffer<VAL, DIM><<<blocks, THREADS_PER_BLOCK, 0, stream>>>(
filter_ptr, out, buffer_offset, subrect.lo, copy_pitches, fft_pitches, pitch, scaling_factor);
CHECK_CUDA_STREAM(stream);
#if 0
// This is useful debugging code for finding the output
VAL *buffer = (VAL*)malloc(buffervolume*sizeof(VAL));
CHECK_CUDA( cudaMemcpyAsync(buffer, filter_ptr, buffervolume*sizeof(VAL), cudaMemcpyDeviceToHost, stream) );
CHECK_CUDA( cudaStreamSynchronize(stream) );
for (unsigned idx = 0; idx < buffervolume; idx++) {
if ((idx % fftsize[DIM-1]) == 0)
printf("\n");
printf("%.8g ", buffer[idx]*scaling_factor);
}
printf("\n");
free(buffer);
#endif
}
}
/////////////
// Dispatcher
/////////////
template <typename VAL, int DIM>
struct UseCUFFT {
static constexpr bool value = 1 <= DIM && DIM <= 3 && std::is_floating_point<VAL>::value;
};
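// cuFFT only provides 1D/2D/3D transforms, so the FFT-based path is restricted to
// floating-point types in 1-3 dimensions; every other instantiation falls through to
// direct_convolution via the SFINAE overloads below.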
template <LegateTypeCode CODE, int DIM>
struct ConvolveImplBody<VariantKind::GPU, CODE, DIM> {
using VAL = legate_type_of<CODE>;
template <typename _VAL, int32_t _DIM, std::enable_if_t<UseCUFFT<_VAL, _DIM>::value>* = nullptr>
__host__ void dispatch(AccessorWO<_VAL, _DIM> out,
AccessorRO<_VAL, _DIM> filter,
AccessorRO<_VAL, _DIM> in,
const Rect<_DIM>& root_rect,
const Rect<_DIM>& subrect,
const Rect<_DIM>& filter_rect) const
{
cufft_convolution<_VAL, _DIM>(out, filter, in, root_rect, subrect, filter_rect);
}
template <typename _VAL, int32_t _DIM, std::enable_if_t<!UseCUFFT<_VAL, _DIM>::value>* = nullptr>
__host__ void dispatch(AccessorWO<_VAL, _DIM> out,
AccessorRO<_VAL, _DIM> filter,
AccessorRO<_VAL, _DIM> in,
const Rect<_DIM>& root_rect,
const Rect<_DIM>& subrect,
const Rect<_DIM>& filter_rect) const
{
direct_convolution<_VAL, _DIM>(out, filter, in, root_rect, subrect, filter_rect);
}
__host__ void operator()(AccessorWO<VAL, DIM> out,
AccessorRO<VAL, DIM> filter,
AccessorRO<VAL, DIM> in,
const Rect<DIM>& root_rect,
const Rect<DIM>& subrect,
const Rect<DIM>& filter_rect) const
{
dispatch(out, filter, in, root_rect, subrect, filter_rect);
}
};
/*static*/ void ConvolveTask::gpu_variant(TaskContext& context)
{
convolve_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
#include <pcl/gpu/utils/device/limits.hpp>
#include <pcl/gpu/utils/device/algorithm.hpp>
#include <pcl/gpu/utils/device/warp.hpp>
#include <pcl/gpu/utils/device/static_check.hpp>
//#include <pcl/gpu/utils/device/funcattrib.hpp>
#include <pcl/gpu/utils/safe_call.hpp>
#include <thrust/tuple.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include "thrust/device_ptr.h"
#include <thrust/transform.h>
#include <thrust/sort.h>
#include <thrust/transform_scan.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/unique.h>
#include <thrust/gather.h>
using namespace thrust;
using namespace std;
namespace pcl
{
namespace device
{
__global__ void size_check() { Static<sizeof(uint64_type) == 8>::check(); };
template<bool use_max>
struct IndOp
{
__device__ __forceinline__ tuple<float, int> operator()(const tuple<float, int>& e1, const tuple<float, int>& e2) const
{
tuple<float, int> res;
if (use_max)
res.get<0>() = fmax(e1.get<0>(), e2.get<0>());
else
res.get<0>() = fmin(e1.get<0>(), e2.get<0>());
res.get<1>() = (res.get<0>() == e1.get<0>()) ? e1.get<1>() : e2.get<1>();
return res;
}
};
struct X
{
__device__ __forceinline__
tuple<float, int>
operator()(const tuple<PointType, int>& in) const
{
return tuple<float, int>(in.get<0>().x, in.get<1>());
}
};
struct Y
{
__device__ __forceinline__ float operator()(const PointType& in) const { return in.y; }
};
struct Z
{
__device__ __forceinline__ float operator()(const PointType& in) const { return in.z; }
};
struct LineDist
{
float3 x1, x2;
LineDist(const PointType& p1, const PointType& p2) : x1(tr(p1)), x2(tr(p2)) {}
__device__ __forceinline__
tuple<float, int> operator()(const tuple<PointType, int>& in) const
{
float3 x0 = tr(in.get<0>());
float dist = norm(cross(x0 - x1, x0 - x2))/norm(x1 - x2);
return tuple<float, int>(dist, in.get<1>());
}
};
struct PlaneDist
{
float3 x1, n;
PlaneDist(const PointType& p1, const PointType& p2, const PointType& p3) : x1(tr(p1))
{
float3 x2 = tr(p2), x3 = tr(p3);
n = normalized(cross(x2 - x1, x3 - x1));
}
__device__ __forceinline__
tuple<float, int> operator()(const tuple<PointType, int>& in) const
{
float3 x0 = tr(in.get<0>());
float dist = fabs(dot(n, x0 - x1));
return tuple<float, int>(dist, in.get<1>());
}
};
template<typename It, typename Unary, typename Init, typename Binary>
int transform_reduce_index(It beg, It end, Unary unop, Init init, Binary binary)
{
counting_iterator<int> cbeg(0);
counting_iterator<int> cend = cbeg + thrust::distance(beg, end);
tuple<float, int> t = transform_reduce(
make_zip_iterator(make_tuple(beg, cbeg)),
make_zip_iterator(make_tuple(end, cend)),
unop, init, binary);
return t.get<1>();
}
template<typename It, typename Unary>
int transform_reduce_min_index(It beg, It end, Unary unop)
{
tuple<float, int> min_tuple(std::numeric_limits<float>::max(), 0);
return transform_reduce_index(beg, end, unop, min_tuple, IndOp<false>());
}
template<typename It, typename Unary>
int transform_reduce_max_index(It beg, It end, Unary unop)
{
tuple<float, int> max_tuple(std::numeric_limits<float>::lowest(), 0);
return transform_reduce_index(beg, end, unop, max_tuple, IndOp<true>());
}
}
}
pcl::device::PointStream::PointStream(const Cloud& cloud_) : cloud(cloud_)
{
cloud_size = cloud.size();
facets_dists.create(cloud_size);
perm.create(cloud_size);
device_ptr<int> pbeg(perm.ptr());
thrust::sequence(pbeg, pbeg + cloud_size);
}
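// computeInitalSimplex below builds the quickhull seed tetrahedron from four extreme
// points: the points with minimal and maximal x, the point farthest from the line
// through them, and the point farthest from the plane through those three. cloud_diag
// is the axis-aligned bounding-box diagonal, used later so that larger distances map
// to smaller sort keys (distant points come first within a facet segment).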
void pcl::device::PointStream::computeInitalSimplex()
{
device_ptr<const PointType> beg(cloud.ptr());
device_ptr<const PointType> end = beg + cloud_size;
int minx = transform_reduce_min_index(beg, end, X());
int maxx = transform_reduce_max_index(beg, end, X());
PointType p1 = *(beg + minx);
PointType p2 = *(beg + maxx);
int maxl = transform_reduce_max_index(beg, end, LineDist(p1, p2));
PointType p3 = *(beg + maxl);
int maxp = transform_reduce_max_index(beg, end, PlaneDist(p1, p2, p3));
PointType p4 = *(beg + maxp);
simplex.x1 = tr(p1); simplex.x2 = tr(p2); simplex.x3 = tr(p3); simplex.x4 = tr(p4);
simplex.i1 = minx; simplex.i2 = maxx; simplex.i3 = maxl; simplex.i4 = maxp;
float maxy = transform_reduce(beg, end, Y(), std::numeric_limits<float>::lowest(), maximum<float>());
float miny = transform_reduce(beg, end, Y(), std::numeric_limits<float>::max(), minimum<float>());
float maxz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::lowest(), maximum<float>());
float minz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::max(), minimum<float>());
float dx = (p2.x - p1.x);
float dy = (maxy - miny);
float dz = (maxz - minz);
cloud_diag = sqrt(dx*dx + dy*dy + dz*dz);
simplex.p1 = compute_plane(simplex.x4, simplex.x2, simplex.x3, simplex.x1);
simplex.p2 = compute_plane(simplex.x3, simplex.x1, simplex.x4, simplex.x2);
simplex.p3 = compute_plane(simplex.x2, simplex.x1, simplex.x4, simplex.x3);
simplex.p4 = compute_plane(simplex.x1, simplex.x2, simplex.x3, simplex.x4);
}
namespace pcl
{
namespace device
{
__global__ void init_fs(int i1, int i2, int i3, int i4, PtrStep<int> verts_inds)
{
*(int4*)verts_inds.ptr(0) = make_int4(i2, i1, i1, i1);
*(int4*)verts_inds.ptr(1) = make_int4(i3, i3, i2, i2);
*(int4*)verts_inds.ptr(2) = make_int4(i4, i4, i4, i3);
}
}
}
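// verts_inds stores facets column-wise: verts_inds.ptr(r)[f] is the index of the r-th
// vertex of facet f. The three int4 stores above therefore initialize the four faces
// of the seed tetrahedron as (i2,i3,i4), (i1,i3,i4), (i1,i2,i4) and (i1,i2,i3).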
void pcl::device::FacetStream::setInitialFacets(const InitalSimplex& s)
{
init_fs<<<1, 1>>>(s.i1, s.i2, s.i3, s.i4, verts_inds);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
facet_count = 4;
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
struct InitalClassify
{
float diag;
float4 pl1, pl2, pl3, pl4;
InitalClassify(const float4& p1, const float4& p2, const float4& p3, const float4& p4, float diagonal)
: diag(diagonal), pl1(p1), pl2(p2), pl3(p3), pl4(p4)
{
pl1 *= compue_inv_normal_norm(pl1);
pl2 *= compue_inv_normal_norm(pl2);
pl3 *= compue_inv_normal_norm(pl3);
pl4 *= compue_inv_normal_norm(pl4);
}
__device__ __forceinline__
uint64_type
operator()(const PointType& p) const
{
float4 x = p;
x.w = 1;
float d0 = dot(pl1, x);
float d1 = dot(pl2, x);
float d2 = dot(pl3, x);
float d3 = dot(pl4, x);
float dists[] = { d0, d1, d2, d3 };
int negs_inds[4];
int neg_count = 0;
int idx = numeric_limits<int>::max();
float dist = 0;
#pragma unroll
for(int i = 0; i < 4; ++i)
if (dists[i] < 0)
negs_inds[neg_count++] = i;
if (neg_count == 3)
{
int i1 = negs_inds[1];
int i2 = negs_inds[2];
int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1;
negs_inds[1] = ir;
--neg_count;
}
if (neg_count == 2)
{
int i1 = negs_inds[0];
int i2 = negs_inds[1];
int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1;
negs_inds[0] = ir;
--neg_count;
}
if (neg_count == 1)
{
idx = negs_inds[0];
dist = diag - fabs(dists[idx]); // to ensure that sorting order is inverse, i.e. distant points go first
}
//if (neg_count == 0)
// then internal point ==>> idx = INT_MAX
uint64_type res = idx;
res <<= 32;
return res + *reinterpret_cast<unsigned int*>(&dist);
}
};
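// Key encoding used above and throughout the algorithm: the upper 32 bits of the
// 64-bit key hold the index of the facet the point lies outside of (INT_MAX for
// interior points), and the lower 32 bits hold the bit pattern of (diag - |distance|).
// Since that value is non-negative, its IEEE-754 bits compare in the same order as its
// value, so sort_by_key groups points by facet and puts the farthest point of each
// facet first within its segment.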
__global__ void initalClassifyKernel(const InitalClassify ic, const PointType* points, int cloud_size, uint64_type* output)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < cloud_size)
output[index] = ic(points[index]);
}
}
}
void pcl::device::PointStream::initalClassify()
{
//thrust::device_ptr<const PointType> beg(cloud.ptr());
//thrust::device_ptr<const PointType> end = beg + cloud_size;
thrust::device_ptr<uint64_type> out(facets_dists.ptr());
InitalClassify ic(simplex.p1, simplex.p2, simplex.p3, simplex.p4, cloud_diag);
//thrust::transform(beg, end, out, ic);
//printFuncAttrib(initalClassifyKernel);
initalClassifyKernel<<<divUp(cloud_size, 256), 256>>>(ic, cloud, cloud_size, facets_dists);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
thrust::device_ptr<int> pbeg(perm.ptr());
thrust::sort_by_key(out, out + cloud_size, pbeg);
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
__device__ int new_cloud_size;
struct SearchFacetHeads
{
uint64_type *facets_dists;
int cloud_size;
int facet_count;
int *perm;
const PointType* points;
mutable int* head_points;
//bool logger;
__device__ __forceinline__
void operator()(int facet) const
{
const uint64_type* b = facets_dists;
const uint64_type* e = b + cloud_size;
bool last_thread = facet == facet_count;
int search_value = !last_thread ? facet : numeric_limits<int>::max();
int index = lower_bound(b, e, search_value, LessThanByFacet()) - b;
if (last_thread)
new_cloud_size = index;
else
{
bool not_found = index == cloud_size || (facet != (facets_dists[index] >> 32));
head_points[facet] = not_found ? -1 : perm[index];
}
}
};
__global__ void searchFacetHeadsKernel(const SearchFacetHeads sfh)
{
int facet = threadIdx.x + blockDim.x * blockIdx.x;
if (facet <= sfh.facet_count)
sfh(facet);
}
}
}
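// facets_dists is sorted by the packed key, so all points outside facet f form one
// contiguous segment and the first element of that segment is the farthest point.
// Each thread runs a lower_bound for its facet index to find that segment start and
// records perm[index] as the facet's head point (-1 if the segment is empty). The
// extra thread with facet == facet_count searches for INT_MAX instead, which marks
// where the interior points begin and therefore the new effective cloud size.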
int pcl::device::PointStream::searchFacetHeads(size_t facet_count, DeviceArray<int>& head_points)
{
SearchFacetHeads sfh;
sfh.facets_dists = facets_dists;
sfh.cloud_size = (int)cloud_size;
sfh.facet_count = (int)facet_count;
sfh.perm = perm;
sfh.points = cloud.ptr();
sfh.head_points = head_points;
//thrust::counting_iterator<int> b(0);
//thrust::counting_iterator<int> e = b + facet_count + 1;
//thrust::for_each(b, e, sfh);
searchFacetHeadsKernel<<<divUp(facet_count+1, 256), 256>>>(sfh);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int new_size;
cudaSafeCall( cudaMemcpyFromSymbol( (void*)&new_size, pcl::device::new_cloud_size, sizeof(new_size)) );
return new_size;
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
struct NotMinus1
{
__device__ __forceinline__
int operator()(const int& v) const { return (v == -1) ? 0 : 1; }
};
struct Compaction
{
enum
{
CTA_SIZE = 256,
WARPS = CTA_SIZE/ Warp::WARP_SIZE
};
int* head_points_in;
PtrStep<int> verts_inds_in;
int *scan_buffer;
int facet_count;
mutable int* head_points_out;
mutable PtrStep<int> verts_inds_out;
mutable PtrStep<int> empty_facets;
mutable int *empty_count;
__device__ __forceinline__
void operator()() const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (__all(idx >= facet_count))
return;
int empty = 0;
if(idx < facet_count)
{
int head_idx = head_points_in[idx];
if (head_idx != -1)
{
int offset = scan_buffer[idx];
head_points_out[offset] = head_idx;
verts_inds_out.ptr(0)[offset] = verts_inds_in.ptr(0)[idx];
verts_inds_out.ptr(1)[offset] = verts_inds_in.ptr(1)[idx];
verts_inds_out.ptr(2)[offset] = verts_inds_in.ptr(2)[idx];
}
else
empty = 1;
}
int total = __popc(__ballot(empty));
if (total > 0)
{
int offset = Warp::binaryExclScan(__ballot(empty));
volatile __shared__ int warp_buffer[WARPS];
int laneid = Warp::laneId();
int warpid = Warp::id();
if (laneid == 0)
{
int old = atomicAdd(empty_count, total);
warp_buffer[warpid] = old;
}
int old = warp_buffer[warpid];
if (empty)
{
empty_facets.ptr(0)[old + offset] = verts_inds_in.ptr(0)[idx];
empty_facets.ptr(1)[old + offset] = verts_inds_in.ptr(1)[idx];
empty_facets.ptr(2)[old + offset] = verts_inds_in.ptr(2)[idx];
int a1 = verts_inds_in.ptr(0)[idx], a2 = verts_inds_in.ptr(1)[idx], a3 = verts_inds_in.ptr(2)[idx];
}
}
}
};
__global__ void compactionKernel( const Compaction c ) { c(); }
}
}
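// compactFacets drops facets whose outside set is empty (head point == -1). An
// exclusive scan over the 0/1 flags from NotMinus1 gives every surviving facet its
// output slot; the kernel also appends the dropped facets to empty_facets, using a
// warp ballot plus a single atomicAdd per warp to reserve space in that list.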
void pcl::device::FacetStream::compactFacets()
{
int old_empty_count;
empty_count.download(&old_empty_count);
thrust::device_ptr<int> b(head_points.ptr());
thrust::device_ptr<int> e = b + facet_count;
thrust::device_ptr<int> o(scan_buffer.ptr());
thrust::transform_exclusive_scan(b, e, o, NotMinus1(), 0, thrust::plus<int>());
Compaction c;
c.verts_inds_in = verts_inds;
c.head_points_in = head_points;
c.scan_buffer = scan_buffer;
c.facet_count = facet_count;
c.head_points_out = head_points2;
c.verts_inds_out = verts_inds2;
c.empty_facets = empty_facets;
c.empty_count = empty_count;
int block = Compaction::CTA_SIZE;
int grid = divUp(facet_count, block);
compactionKernel<<<grid, block>>>(c);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
verts_inds.swap(verts_inds2);
head_points.swap(head_points2);
int new_empty_count;
empty_count.download(&new_empty_count);
facet_count -= new_empty_count - old_empty_count;
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
struct Classify
{
uint64_type* facets_dists;
int* scan_buffer;
int* head_points;
int* perm;
PtrStep<int> verts_inds;
const PointType *points;
float diag;
int facet_count;
__device__ __forceinline__
void operator()(int point_idx) const
{
int perm_index = perm[point_idx];
int facet = facets_dists[point_idx] >> 32;
facet = scan_buffer[facet];
int hi = head_points[facet];
if (hi == perm_index)
{
uint64_type res = numeric_limits<int>::max();
res <<= 32;
facets_dists[point_idx] = res;
}
else
{
int i1 = verts_inds.ptr(0)[facet];
int i2 = verts_inds.ptr(1)[facet];
int i3 = verts_inds.ptr(2)[facet];
float3 hp = tr( points[ hi ] );
float3 v1 = tr( points[ i1 ] );
float3 v2 = tr( points[ i2 ] );
float3 v3 = tr( points[ i3 ] );
float4 p0 = compute_plane(hp, v1, v2, /*opposite*/v3); // j
float4 p1 = compute_plane(hp, v2, v3, /*opposite*/v1); // facet_count + j
float4 p2 = compute_plane(hp, v3, v1, /*opposite*/v2); // facet_count + j*2
p0 *= compue_inv_normal_norm(p0);
p1 *= compue_inv_normal_norm(p1);
p2 *= compue_inv_normal_norm(p2);
float4 p = points[perm_index];
p.w = 1;
float d0 = dot(p, p0);
float d1 = dot(p, p1);
float d2 = dot(p, p2);
float dists[] = { d0, d1, d2 };
int negs_inds[3];
int neg_count = 0;
int new_idx = numeric_limits<int>::max();
float dist = 0;
int indeces[] = { facet, facet + facet_count, facet + facet_count * 2 };
#pragma unroll
for(int i = 0; i < 3; ++i)
if (dists[i] < 0)
negs_inds[neg_count++] = i;
if (neg_count == 3)
{
int i1 = negs_inds[1];
int i2 = negs_inds[2];
int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1;
negs_inds[1] = ir;
--neg_count;
}
if (neg_count == 2)
{
int i1 = negs_inds[0];
int i2 = negs_inds[1];
int ir = fabs(dists[i1]) < fabs(dists[i2]) ? i2 : i1;
negs_inds[0] = ir;
--neg_count;
}
if (neg_count == 1)
{
new_idx = negs_inds[0];
dist = diag - fabs(dists[new_idx]); // to ensure that sorting order is inverse, i.e. distant points go first
new_idx = indeces[new_idx];
}
// if (neg_count == 0)
// new_idx = INT_MAX ==>> internal point
uint64_type res = new_idx;
res <<= 32;
res += *reinterpret_cast<unsigned int*>(&dist);
facets_dists[point_idx] = res;
} /* if (hi == perm_index) */
}
};
__global__ void classifyKernel(const Classify c, int cloud_size)
{
int point_idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( point_idx < cloud_size )
c(point_idx);
}
}
}
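// Re-classification after a split: each point's old (pre-compaction) facet id is read
// from the key's upper 32 bits and remapped through scan_buffer to the compacted id.
// If the point is that facet's head it becomes interior (key = INT_MAX). Otherwise it
// is tested against the three planes through the head point and the facet's edges,
// i.e. the child facets that splitFacets creates, and the key is rebuilt with the
// child's index (facet, facet + facet_count or facet + 2 * facet_count) and the
// inverted distance, ready for the next sort_by_key.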
void pcl::device::PointStream::classify(FacetStream& fs)
{
Classify c;
c.facets_dists = facets_dists;
c.scan_buffer = fs.scan_buffer;
c.head_points = fs.head_points;
c.perm = perm;
c.verts_inds = fs.verts_inds;
c.points = cloud;
c.diag = cloud_diag;
c.facet_count = fs.facet_count;
//thrust::counting_iterator<int> b(0);
//thrust::for_each(b, b + cloud_size, c);
classifyKernel<<<divUp(cloud_size, 256), 256>>>(c, cloud_size);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
thrust::device_ptr<uint64_type> beg(facets_dists.ptr());
thrust::device_ptr<uint64_type> end = beg + cloud_size;
thrust::device_ptr<int> pbeg(perm.ptr());
thrust::sort_by_key(beg, end, pbeg);
}
namespace pcl
{
namespace device
{
struct SplitFacets
{
int* head_points;
int facet_count;
mutable PtrStep<int> verts_inds;
__device__ __forceinline__
void operator()(int facet) const
{
int hi = head_points[facet];
int i1 = verts_inds.ptr(0)[facet];
int i2 = verts_inds.ptr(1)[facet];
int i3 = verts_inds.ptr(2)[facet];
make_facet(hi, i1, i2, facet);
make_facet(hi, i2, i3, facet + facet_count);
make_facet(hi, i3, i1, facet + facet_count * 2);
}
__device__ __forceinline__
void make_facet(int i1, int i2, int i3, int out_idx) const
{
verts_inds.ptr(0)[out_idx] = i1;
verts_inds.ptr(1)[out_idx] = i2;
verts_inds.ptr(2)[out_idx] = i3;
}
};
__global__ void splitFacetsKernel(const SplitFacets sf)
{
int facet = threadIdx.x + blockIdx.x * blockDim.x;
if (facet < sf.facet_count)
sf(facet);
}
}
}
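// splitFacets replaces every facet with three facets that share its head point; child
// k of facet j is written to slot j + k * facet_count, matching the indices assigned
// in Classify above, and facet_count is tripled afterwards.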
void pcl::device::FacetStream::splitFacets()
{
SplitFacets sf;
sf.head_points = head_points;
sf.verts_inds = verts_inds;
sf.facet_count = facet_count;
//thrust::counting_iterator<int> b(0);
//thrust::for_each(b, b + facet_count, sf);
splitFacetsKernel<<<divUp(facet_count, 256), 256>>>(sf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
facet_count *= 3;
}
size_t pcl::device::remove_duplicates(DeviceArray<int>& indeces)
{
thrust::device_ptr<int> beg(indeces.ptr());
thrust::device_ptr<int> end = beg + indeces.size();
thrust::sort(beg, end);
return (size_t)(thrust::unique(beg, end) - beg);
}
namespace pcl
{
namespace device
{
__global__ void gatherKernel(const PtrSz<int> indeces, const PointType* src, PointType* dst)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < indeces.size)
dst[idx] = src[indeces.data[idx]];
}
}
}
void pcl::device::pack_hull(const DeviceArray<PointType>& points, const DeviceArray<int>& indeces, DeviceArray<PointType>& output)
{
output.create(indeces.size());
//device_ptr<const PointType> in(points.ptr());
//thrust::device_ptr<const int> mb(indeces.ptr());
//thrust::device_ptr<const int> me = mb + indeces.size();
//device_ptr<PointType> out(output.ptr());
//thrust::gather(mb, me, in, out);
gatherKernel<<<divUp(indeces.size(), 256), 256>>>(indeces, points, output);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudev.hpp"
using namespace cv::cudev;
void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat& mask, double, Stream& stream, int);
namespace
{
template <typename SrcType, typename ScalarType, typename DstType> struct SubScalarOp : unary_function<SrcType, DstType>
{
ScalarType val;
__device__ __forceinline__ DstType operator ()(SrcType a) const
{
return saturate_cast<DstType>(saturate_cast<ScalarType>(a) - val);
}
};
template <typename SrcType, typename ScalarType, typename DstType> struct SubScalarOpInv : unary_function<SrcType, DstType>
{
ScalarType val;
__device__ __forceinline__ DstType operator ()(SrcType a) const
{
return saturate_cast<DstType>(val - saturate_cast<ScalarType>(a));
}
};
template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy
{
};
template <> struct TransformPolicy<double> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename SrcType, typename ScalarDepth, typename DstType>
void subScalarImpl(const GpuMat& src, cv::Scalar value, bool inv, GpuMat& dst, const GpuMat& mask, Stream& stream)
{
typedef typename MakeVec<ScalarDepth, VecTraits<SrcType>::cn>::type ScalarType;
cv::Scalar_<ScalarDepth> value_ = value;
if (inv)
{
SubScalarOpInv<SrcType, ScalarType, DstType> op;
op.val = VecTraits<ScalarType>::make(value_.val);
if (mask.data)
gridTransformUnary_< TransformPolicy<ScalarDepth> >(globPtr<SrcType>(src), globPtr<DstType>(dst), op, globPtr<uchar>(mask), stream);
else
gridTransformUnary_< TransformPolicy<ScalarDepth> >(globPtr<SrcType>(src), globPtr<DstType>(dst), op, stream);
}
else
{
SubScalarOp<SrcType, ScalarType, DstType> op;
op.val = VecTraits<ScalarType>::make(value_.val);
if (mask.data)
gridTransformUnary_< TransformPolicy<ScalarDepth> >(globPtr<SrcType>(src), globPtr<DstType>(dst), op, globPtr<uchar>(mask), stream);
else
gridTransformUnary_< TransformPolicy<ScalarDepth> >(globPtr<SrcType>(src), globPtr<DstType>(dst), op, stream);
}
}
}
void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat& mask, double, Stream& stream, int)
{
typedef void (*func_t)(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat& mask, Stream& stream);
static const func_t funcs[7][7][4] =
{
{
{subScalarImpl<uchar, float, uchar>, subScalarImpl<uchar2, float, uchar2>, subScalarImpl<uchar3, float, uchar3>, subScalarImpl<uchar4, float, uchar4>},
{subScalarImpl<uchar, float, schar>, subScalarImpl<uchar2, float, char2>, subScalarImpl<uchar3, float, char3>, subScalarImpl<uchar4, float, char4>},
{subScalarImpl<uchar, float, ushort>, subScalarImpl<uchar2, float, ushort2>, subScalarImpl<uchar3, float, ushort3>, subScalarImpl<uchar4, float, ushort4>},
{subScalarImpl<uchar, float, short>, subScalarImpl<uchar2, float, short2>, subScalarImpl<uchar3, float, short3>, subScalarImpl<uchar4, float, short4>},
{subScalarImpl<uchar, float, int>, subScalarImpl<uchar2, float, int2>, subScalarImpl<uchar3, float, int3>, subScalarImpl<uchar4, float, int4>},
{subScalarImpl<uchar, float, float>, subScalarImpl<uchar2, float, float2>, subScalarImpl<uchar3, float, float3>, subScalarImpl<uchar4, float, float4>},
{subScalarImpl<uchar, double, double>, subScalarImpl<uchar2, double, double2>, subScalarImpl<uchar3, double, double3>, subScalarImpl<uchar4, double, double4>}
},
{
{subScalarImpl<schar, float, uchar>, subScalarImpl<char2, float, uchar2>, subScalarImpl<char3, float, uchar3>, subScalarImpl<char4, float, uchar4>},
{subScalarImpl<schar, float, schar>, subScalarImpl<char2, float, char2>, subScalarImpl<char3, float, char3>, subScalarImpl<char4, float, char4>},
{subScalarImpl<schar, float, ushort>, subScalarImpl<char2, float, ushort2>, subScalarImpl<char3, float, ushort3>, subScalarImpl<char4, float, ushort4>},
{subScalarImpl<schar, float, short>, subScalarImpl<char2, float, short2>, subScalarImpl<char3, float, short3>, subScalarImpl<char4, float, short4>},
{subScalarImpl<schar, float, int>, subScalarImpl<char2, float, int2>, subScalarImpl<char3, float, int3>, subScalarImpl<char4, float, int4>},
{subScalarImpl<schar, float, float>, subScalarImpl<char2, float, float2>, subScalarImpl<char3, float, float3>, subScalarImpl<char4, float, float4>},
{subScalarImpl<schar, double, double>, subScalarImpl<char2, double, double2>, subScalarImpl<char3, double, double3>, subScalarImpl<char4, double, double4>}
},
{
{0 /*subScalarImpl<ushort, float, uchar>*/, 0 /*subScalarImpl<ushort2, float, uchar2>*/, 0 /*subScalarImpl<ushort3, float, uchar3>*/, 0 /*subScalarImpl<ushort4, float, uchar4>*/},
{0 /*subScalarImpl<ushort, float, schar>*/, 0 /*subScalarImpl<ushort2, float, char2>*/, 0 /*subScalarImpl<ushort3, float, char3>*/, 0 /*subScalarImpl<ushort4, float, char4>*/},
{subScalarImpl<ushort, float, ushort>, subScalarImpl<ushort2, float, ushort2>, subScalarImpl<ushort3, float, ushort3>, subScalarImpl<ushort4, float, ushort4>},
{subScalarImpl<ushort, float, short>, subScalarImpl<ushort2, float, short2>, subScalarImpl<ushort3, float, short3>, subScalarImpl<ushort4, float, short4>},
{subScalarImpl<ushort, float, int>, subScalarImpl<ushort2, float, int2>, subScalarImpl<ushort3, float, int3>, subScalarImpl<ushort4, float, int4>},
{subScalarImpl<ushort, float, float>, subScalarImpl<ushort2, float, float2>, subScalarImpl<ushort3, float, float3>, subScalarImpl<ushort4, float, float4>},
{subScalarImpl<ushort, double, double>, subScalarImpl<ushort2, double, double2>, subScalarImpl<ushort3, double, double3>, subScalarImpl<ushort4, double, double4>}
},
{
{0 /*subScalarImpl<short, float, uchar>*/, 0 /*subScalarImpl<short2, float, uchar2>*/, 0 /*subScalarImpl<short3, float, uchar3>*/, 0 /*subScalarImpl<short4, float, uchar4>*/},
{0 /*subScalarImpl<short, float, schar>*/, 0 /*subScalarImpl<short2, float, char2>*/, 0 /*subScalarImpl<short3, float, char3>*/, 0 /*subScalarImpl<short4, float, char4>*/},
{subScalarImpl<short, float, ushort>, subScalarImpl<short2, float, ushort2>, subScalarImpl<short3, float, ushort3>, subScalarImpl<short4, float, ushort4>},
{subScalarImpl<short, float, short>, subScalarImpl<short2, float, short2>, subScalarImpl<short3, float, short3>, subScalarImpl<short4, float, short4>},
{subScalarImpl<short, float, int>, subScalarImpl<short2, float, int2>, subScalarImpl<short3, float, int3>, subScalarImpl<short4, float, int4>},
{subScalarImpl<short, float, float>, subScalarImpl<short2, float, float2>, subScalarImpl<short3, float, float3>, subScalarImpl<short4, float, float4>},
{subScalarImpl<short, double, double>, subScalarImpl<short2, double, double2>, subScalarImpl<short3, double, double3>, subScalarImpl<short4, double, double4>}
},
{
{0 /*subScalarImpl<int, float, uchar>*/, 0 /*subScalarImpl<int2, float, uchar2>*/, 0 /*subScalarImpl<int3, float, uchar3>*/, 0 /*subScalarImpl<int4, float, uchar4>*/},
{0 /*subScalarImpl<int, float, schar>*/, 0 /*subScalarImpl<int2, float, char2>*/, 0 /*subScalarImpl<int3, float, char3>*/, 0 /*subScalarImpl<int4, float, char4>*/},
{0 /*subScalarImpl<int, float, ushort>*/, 0 /*subScalarImpl<int2, float, ushort2>*/, 0 /*subScalarImpl<int3, float, ushort3>*/, 0 /*subScalarImpl<int4, float, ushort4>*/},
{0 /*subScalarImpl<int, float, short>*/, 0 /*subScalarImpl<int2, float, short2>*/, 0 /*subScalarImpl<int3, float, short3>*/, 0 /*subScalarImpl<int4, float, short4>*/},
{subScalarImpl<int, float, int>, subScalarImpl<int2, float, int2>, subScalarImpl<int3, float, int3>, subScalarImpl<int4, float, int4>},
{subScalarImpl<int, float, float>, subScalarImpl<int2, float, float2>, subScalarImpl<int3, float, float3>, subScalarImpl<int4, float, float4>},
{subScalarImpl<int, double, double>, subScalarImpl<int2, double, double2>, subScalarImpl<int3, double, double3>, subScalarImpl<int4, double, double4>}
},
{
{0 /*subScalarImpl<float, float, uchar>*/, 0 /*subScalarImpl<float2, float, uchar2>*/, 0 /*subScalarImpl<float3, float, uchar3>*/, 0 /*subScalarImpl<float4, float, uchar4>*/},
{0 /*subScalarImpl<float, float, schar>*/, 0 /*subScalarImpl<float2, float, char2>*/, 0 /*subScalarImpl<float3, float, char3>*/, 0 /*subScalarImpl<float4, float, char4>*/},
{0 /*subScalarImpl<float, float, ushort>*/, 0 /*subScalarImpl<float2, float, ushort2>*/, 0 /*subScalarImpl<float3, float, ushort3>*/, 0 /*subScalarImpl<float4, float, ushort4>*/},
{0 /*subScalarImpl<float, float, short>*/, 0 /*subScalarImpl<float2, float, short2>*/, 0 /*subScalarImpl<float3, float, short3>*/, 0 /*subScalarImpl<float4, float, short4>*/},
{0 /*subScalarImpl<float, float, int>*/, 0 /*subScalarImpl<float2, float, int2>*/, 0 /*subScalarImpl<float3, float, int3>*/, 0 /*subScalarImpl<float4, float, int4>*/},
{subScalarImpl<float, float, float>, subScalarImpl<float2, float, float2>, subScalarImpl<float3, float, float3>, subScalarImpl<float4, float, float4>},
{subScalarImpl<float, double, double>, subScalarImpl<float2, double, double2>, subScalarImpl<float3, double, double3>, subScalarImpl<float4, double, double4>}
},
{
{0 /*subScalarImpl<double, double, uchar>*/, 0 /*subScalarImpl<double2, double, uchar2>*/, 0 /*subScalarImpl<double3, double, uchar3>*/, 0 /*subScalarImpl<double4, double, uchar4>*/},
{0 /*subScalarImpl<double, double, schar>*/, 0 /*subScalarImpl<double2, double, char2>*/, 0 /*subScalarImpl<double3, double, char3>*/, 0 /*subScalarImpl<double4, double, char4>*/},
{0 /*subScalarImpl<double, double, ushort>*/, 0 /*subScalarImpl<double2, double, ushort2>*/, 0 /*subScalarImpl<double3, double, ushort3>*/, 0 /*subScalarImpl<double4, double, ushort4>*/},
{0 /*subScalarImpl<double, double, short>*/, 0 /*subScalarImpl<double2, double, short2>*/, 0 /*subScalarImpl<double3, double, short3>*/, 0 /*subScalarImpl<double4, double, short4>*/},
{0 /*subScalarImpl<double, double, int>*/, 0 /*subScalarImpl<double2, double, int2>*/, 0 /*subScalarImpl<double3, double, int3>*/, 0 /*subScalarImpl<double4, double, int4>*/},
{0 /*subScalarImpl<double, double, float>*/, 0 /*subScalarImpl<double2, double, float2>*/, 0 /*subScalarImpl<double3, double, float3>*/, 0 /*subScalarImpl<double4, double, float4>*/},
{subScalarImpl<double, double, double>, subScalarImpl<double2, double, double2>, subScalarImpl<double3, double, double3>, subScalarImpl<double4, double, double4>}
}
};
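// funcs is indexed as [source depth][destination depth][channels - 1] over the seven
// OpenCV depths (CV_8U .. CV_64F) and up to four channels; the zero entries mark
// depth combinations that are not compiled in, and selecting one below triggers
// CV_Error(StsUnsupportedFormat).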
const int sdepth = src.depth();
const int ddepth = dst.depth();
const int cn = src.channels();
CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F && cn <= 4 );
const func_t func = funcs[sdepth][ddepth][cn - 1];
if (!func)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, val, inv, dst, mask, stream);
}
#endif
#include <algorithm>
#include <cstdio>
#include <limits>
#include <numeric>
#include <random>
#include <vector>
#include "k2/csrc/nbest.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
namespace k2 {
TEST(AlgorithmsTest, TestSuffixArray) {
ContextPtr cpu = GetCpuContext();
for (int i = 0; i < 100; i++) {
int array_len = RandInt(1, 50), // 1 is min, due to termination symbol.
max_symbol = RandInt(10, 500);
Array1<int32_t> array(cpu, array_len + 3);
int32_t *array_data = array.Data();
for (int i = 0; i + 1 < array_len; i++)
array_data[i] = RandInt(1, max_symbol - 1);  // the termination symbol must be
                                             // larger than all others, so don't
                                             // allow max_symbol here
array_data[array_len - 1] = max_symbol; // Termination symbol
for (int i = array_len; i < array_len + 3; i++)
array_data[i] = 0;
// suffix_array is logically of length array_len; the extra element is there to
// verify that CreateSuffixArray doesn't write past the end.
Array1<int32_t> suffix_array(cpu, array_len + 1);
int32_t *suffix_array_data = suffix_array.Data();
suffix_array_data[array_len] = -10; // should not be changed.
CreateSuffixArray(array_data, array_len,
max_symbol, suffix_array_data);
assert(suffix_array_data[array_len] == -10); // should be unchanged.
Array1<int32_t> seen_indexes(cpu, array_len, 0);
int32_t *seen_indexes_data = seen_indexes.Data();
for (int32_t i = 0; i < array_len; i++)
seen_indexes_data[suffix_array_data[i]] = 1;
for (int32_t i = 0; i < array_len; i++)
assert(seen_indexes_data[i] == 1); // make sure all integers seen.
for (int32_t i = 0; i + 1 < array_len; i++) {
int32_t *suffix_a = array_data + suffix_array_data[i],
*suffix_b = array_data + suffix_array_data[i + 1];
// checking that each suffix is lexicographically less than the next one.
// None are identical, because the terminating zero is always in different
// positions.
while (true) {
if (*suffix_a < *suffix_b)
break; // correct order
assert(!(*suffix_a > *suffix_b)); // order is wrong!
// past array end without correct comparison order.
assert(!(suffix_a > array_data + array_len ||
suffix_b > array_data + array_len));
suffix_a++;
suffix_b++;
}
}
}
}
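// Illustrative example of the conventions these tests rely on (hypothetical data):
// for array = {1, 2, 1, 2, 3}, where 3 is the termination symbol (largest value, last
// position), the suffixes sorted lexicographically start at positions 0, 2, 1, 3, 4,
// so suffix_array = {0, 2, 1, 3, 4}. The LCP array stores, at position i, the length
// of the longest common prefix of the suffixes at suffix_array[i - 1] and
// suffix_array[i] (with lcp[0] = 0), giving lcp = {0, 2, 0, 1, 0} here.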
TEST(AlgorithmsTest, TestCreateLcpArray) {
ContextPtr cpu = GetCpuContext();
for (int i = 0; i < 100; i++) {
int array_len = RandInt(1, 50), // at least 1 due to termination symbol
max_symbol = RandInt(2, 5);
Array1<int32_t> array(cpu, array_len + 3);
int32_t *array_data = array.Data();
for (int i = 0; i + 1 < array_len; i++)
array_data[i] = RandInt(1, max_symbol - 1);
array_data[array_len - 1] = max_symbol; // Termination symbol
for (int i = array_len; i < array_len + 3; i++)
array_data[i] = 0;
Array1<int32_t> suffix_array(cpu, array_len);
int32_t *suffix_array_data = suffix_array.Data();
CreateSuffixArray(array_data, array_len,
max_symbol, suffix_array_data);
Array1<int32_t> lcp(cpu, array_len);
int32_t *lcp_data = lcp.Data();
CreateLcpArray(array_data, suffix_array_data, array_len,
lcp_data);
if (array_len > 0)
assert(lcp_data[0] == 0);
for (int32_t i = 1; i < array_len; i++) {
int32_t lcp = lcp_data[i],
prev_pos = suffix_array_data[i - 1],
this_pos = suffix_array_data[i];
for (int32_t j = 0; j < lcp; j++)
assert(array_data[prev_pos + j] == array_data[this_pos + j]);
assert(array_data[prev_pos + lcp] != array_data[this_pos + lcp]);
}
}
}
TEST(AlgorithmsTest, TestCreateLcpIntervalArray) {
ContextPtr cpu = GetCpuContext();
for (int i = 0; i < 100; i++) {
int array_len = RandInt(1, 50), // at least 1 due to termination symbol
max_symbol = RandInt(3, 5);
Array1<int32_t> array(cpu, array_len + 3);
int32_t *array_data = array.Data();
for (int i = 0; i + 1 < array_len; i++)
array_data[i] = RandInt(1, max_symbol - 1);
array_data[array_len - 1] = max_symbol; // Termination symbol
for (int i = array_len; i < array_len + 3; i++)
array_data[i] = 0;
Array1<int32_t> suffix_array(cpu, array_len);
int32_t *suffix_array_data = suffix_array.Data();
CreateSuffixArray(array_data, array_len,
max_symbol, suffix_array_data);
Array1<int32_t> lcp(cpu, array_len);
int32_t *lcp_data = lcp.Data();
CreateLcpArray(array_data, suffix_array_data, array_len,
lcp_data);
Array1<LcpInterval<int32_t> > lcp_intervals;
Array1<int32_t> leaf_parent_intervals;
CreateLcpIntervalArray(GetCpuContext(),
array_len, lcp_data,
&lcp_intervals,
&leaf_parent_intervals);
LcpInterval<int32_t> *lcp_intervals_data = lcp_intervals.Data();
int32_t *leaf_parent_intervals_data = leaf_parent_intervals.Data();
int32_t num_intervals = lcp_intervals.Dim();
for (int32_t i = 0; i < array_len; i++) {
int32_t lcp_interval = leaf_parent_intervals_data[i];
assert(lcp_interval >= 0 && lcp_interval < num_intervals);
assert(i >= lcp_intervals_data[lcp_interval].lb &&
i <= lcp_intervals_data[lcp_interval].rb);
// the lcp value / height
int32_t lcp = lcp_intervals_data[lcp_interval].lcp;
for (int32_t j = 0; j < num_intervals; j++) {
// The interval that i is a member of should be the tightest enclosing
// interval, this loop checks that.
if (lcp_intervals_data[j].lcp >= lcp && j != lcp_interval) {
assert(!(i >= lcp_intervals_data[j].lb &&
i <= lcp_intervals_data[j].rb));
}
}
}
for (int32_t i = 0; i < num_intervals; i++) {
LcpInterval<int32_t> interval = lcp_intervals_data[i];
if (!(interval.lb == 0 && interval.rb + 1 == array_len &&
interval.parent == -1)) {
assert(interval.parent > i);
LcpInterval<int32_t> parent = lcp_intervals_data[interval.parent];
assert(interval.lb >= parent.lb &&
interval.rb <= parent.rb &&
interval.lcp > parent.lcp);
}
// Now check the basic requirements/definition of lcp interval...
assert(interval.lb >= 0 &&
(interval.rb > interval.lb || array_len == 1) &&
interval.rb < array_len);
assert(lcp_data[interval.lb] < interval.lcp ||
(interval.lb == 0 && interval.lcp == 0));
assert(interval.rb == array_len - 1 ||
lcp_data[interval.rb + 1] < interval.lcp);
if (array_len != 1) {
int32_t min_lcp = 1000000;
for (int32_t j = interval.lb + 1; j <= interval.rb; ++j)
if (lcp_data[j] < min_lcp)
min_lcp = lcp_data[j];
assert(min_lcp == interval.lcp); // Check lcp value is correct. This
// test does not work if array_len ==
// 1 so we skip it in that case.
}
}
}
}
TEST(AlgorithmsTest, TestFindTightestNonemptyIntervals) {
ContextPtr cpu = GetCpuContext();
for (int i = 0; i < 100; i++) {
int array_len = RandInt(1, 50), // at least 1 due to termination symbol
max_symbol = RandInt(3, 5);
Array1<int32_t> array(cpu, array_len + 3),
counts(cpu, array_len);
int32_t *array_data = array.Data();
for (int i = 0; i + 1 < array_len; i++)
array_data[i] = RandInt(1, max_symbol - 1);
array_data[array_len - 1] = max_symbol; // Termination symbol
for (int i = array_len; i < array_len + 3; i++)
array_data[i] = 0;
int32_t *counts_data = counts.Data();
for (int i = 0; i < array_len; i++)
counts_data[i] = RandInt(0, 1);
Array1<int32_t> suffix_array_plusone(cpu, array_len + 1, 0),
suffix_array = suffix_array_plusone.Range(0, array_len);
int32_t *suffix_array_data = suffix_array.Data();
CreateSuffixArray(array_data, array_len,
max_symbol, suffix_array_data);
Array1<int32_t> lcp(cpu, array_len);
int32_t *lcp_data = lcp.Data();
CreateLcpArray(array_data, suffix_array_data, array_len,
lcp_data);
Array1<LcpInterval<int32_t> > lcp_intervals;
Array1<int32_t> leaf_parent_intervals; // dim will be seq_len
CreateLcpIntervalArray(GetCpuContext(),
array_len, lcp_data,
&lcp_intervals,
&leaf_parent_intervals);
// we get one extra don't-care element at the end of `counts_reordered`,
// which is required by ExclusiveSum().
Array1<int32_t> counts_reordered = counts[suffix_array_plusone],
counts_reordered_sum(cpu, array_len + 1);
ExclusiveSum(counts_reordered, &counts_reordered_sum);
Array1<int32_t> leaf_parent_intervals_mod(leaf_parent_intervals.Clone());
FindTightestNonemptyIntervals(array_len,
&lcp_intervals,
&counts_reordered_sum,
&leaf_parent_intervals_mod);
LcpInterval<int32_t> *lcp_intervals_data = lcp_intervals.Data();
int32_t *leaf_parent_intervals_data = leaf_parent_intervals.Data(),
*leaf_parent_intervals_mod_data = leaf_parent_intervals_mod.Data();
int32_t num_intervals = lcp_intervals.Dim();
for (int32_t i = 0; i < array_len; i++) {
int32_t lcp_interval = leaf_parent_intervals_data[i],
nonempty_lcp_interval = leaf_parent_intervals_mod_data[i];
assert(lcp_interval >= 0 && lcp_interval < num_intervals);
assert(nonempty_lcp_interval >= 0 &&
nonempty_lcp_interval < num_intervals);
if (counts_reordered_sum[array_len] == 0) {
// If the total count is zero, everything should go to the top of the
// tree, but we won't otherwise test this.
assert(nonempty_lcp_interval == num_intervals - 1);
} else {
int32_t lcp = lcp_intervals_data[lcp_interval].lcp;
K2_CHECK_EQ((lcp_interval == nonempty_lcp_interval),
(counts_reordered_sum[lcp_intervals_data[lcp_interval].lb] != // NOLINT
counts_reordered_sum[lcp_intervals_data[lcp_interval].rb + 1])); // NOLINT
K2_CHECK(i >= lcp_intervals_data[nonempty_lcp_interval].lb &&
i <= lcp_intervals_data[nonempty_lcp_interval].rb);
for (int32_t j = 0; j < num_intervals; j++) {
// nonempty_lcp_interval should be the tightest enclosing
// interval that has nonzero count, this loop checks that.
if (lcp_intervals_data[j].lcp >= lcp && j != nonempty_lcp_interval) {
// Check that this is not a tighter enclosing interval than
// nonempty_lcp_interval, with nonzero count, that encloses i.
K2_CHECK(!(i >= lcp_intervals_data[j].lb &&
i <= lcp_intervals_data[j].rb &&
counts_reordered_sum[lcp_intervals_data[j].lb] !=
counts_reordered_sum[lcp_intervals_data[j].rb + 1]));
}
}
}
}
}
}
TEST(AlgorithmTest, TestGetBestMatchingStatsEmpty) {
Ragged<int32_t> tokens(GetCpuContext(), "[ [ [ ] ] ]");
Array1<float> scores(GetCpuContext(), "[ ]");
Array1<int32_t> counts(GetCpuContext(), "[ ]");
Array1<float> mean, var;
Array1<int32_t> counts_out, ngram_order;
int32_t eos = 8,
min_token = 1,
max_token = 8,
max_order = 2;
GetBestMatchingStats(tokens, scores, counts, eos, min_token, max_token,
max_order, &mean, &var, &counts_out, &ngram_order);
K2_CHECK_EQ(mean.Dim(), 0);
K2_CHECK_EQ(var.Dim(), 0);
K2_CHECK_EQ(counts_out.Dim(), 0);
K2_CHECK_EQ(ngram_order.Dim(), 0);
}
TEST(AlgorithmTest, TestGetBestMatchingStatsSingle) {
// There are 20 tokens, index with [0, 20)
// keys' positions are [0, 10), queries positions are [10, 20)
// The best matching positions(include the token itself) are as follows
// index 0 : (0, 5, 10) with lcp "84", we add eos(8)
// index 1 : (1, 16,) with lcp "6"
// index 2 : (2, 17,) with lcp "76"
// index 3 : (3, 18,) with lcp "671"
// index 4 : (4, 19,) with lcp "5718"
// index 5 : (5, 10,) with lcp "7184"
// index 6 : (6, 11,) with lcp "43"
// index 7 : (2, 7, 17,) with lcp "7"
// index 8 : (3, 8, 18,) with lcp "71"
// index 9 : (4, 9, 19,) with lcp "718"
// index 10 : (5, 10,) with lcp "7184"
// index 11 : (6, 11,) with lcp "43"
// index 12 : (12,) with no matching
// index 13 : (3, 8, 13, 18,) with lcp "1"
// index 14 : (4, 9, 14, 19,) with lcp "18"
// index 15 : (15,) with no matching
// index 16 : (1, 16,) with lcp "6"
// index 17 : (2, 17,) with lcp "67"
// index 18 : (3, 18,) with lcp "671"
// index 19 : (4, 19,) with lcp "6718"
Ragged<int32_t> tokens(GetCpuContext(), "[ [ 4 6 7 1 8 ] [ 4 3 7 1 8 ] "
" [ 4 3 2 1 8 ] [ 5 6 7 1 8 ] ]");
Array1<float> scores(GetCpuContext(), "[ 1 2 3 4 5 6 7 8 9 10 "
" 0 0 0 0 0 0 0 0 0 0 ]");
Array1<int32_t> counts(GetCpuContext(), "[ 1 1 1 1 1 1 1 1 1 1 "
" 0 0 0 0 0 0 0 0 0 0 ]");
Array1<float> mean, var;
Array1<int32_t> counts_out, ngram_order;
int32_t eos = 8,
min_token = 1,
max_token = 8,
max_order = 2;
GetBestMatchingStats(tokens, scores, counts, eos, min_token, max_token,
max_order, &mean, &var, &counts_out, &ngram_order);
Array1<float> mean_ref(GetCpuContext(), "[ 3.5 2 3 4 5 6 7 5.5 6.5 7.5 "
" 6 7 5.5 6.5 7.5 5.5 2 3 4 5 ]");
Array1<float> var_ref(GetCpuContext(), "[ 6.25 0 0 0 0 0 0 6.25 6.25 6.25 "
" 0 0 8.25 6.25 6.25 8.25 0 0 0 0 ]");
Array1<int32_t> counts_out_ref(GetCpuContext(), "[ 2 1 1 1 1 1 1 2 2 2 "
" 1 1 0 2 2 0 1 1 1 1 ]");
Array1<int32_t> ngram_order_ref(GetCpuContext(), "[ 2 1 2 2 2 2 2 1 2 2 "
" 2 2 0 1 2 0 1 2 2 2 ]");
K2_CHECK(Equal(mean, mean_ref));
K2_CHECK(Equal(var, var_ref));
K2_CHECK(Equal(counts_out, counts_out_ref));
K2_CHECK(Equal(ngram_order, ngram_order_ref));
}
TEST(AlgorithmTest, TestGetBestMatchingStatsSpecial) {
Ragged<int32_t> tokens(GetCpuContext(), "[ [ 4 6 7 1 8 ] [ 4 3 7 1 8 ] "
" [ 4 3 2 1 8 ] [ 5 6 7 1 8 ] ]");
Array1<float> scores(GetCpuContext(), "[ 0 0 0 0 0 0 0 0 0 0 "
" 0 0 0 0 0 0 0 0 0 0 ]");
Array1<int32_t> counts(GetCpuContext(), "[ 0 0 0 0 0 0 0 0 0 0 "
" 0 0 0 0 0 0 0 0 0 0 ]");
Array1<float> mean, var;
Array1<int32_t> counts_out, ngram_order;
int32_t eos = 8,
min_token = 1,
max_token = 8,
max_order = 2;
GetBestMatchingStats(tokens, scores, counts, eos, min_token, max_token,
max_order, &mean, &var, &counts_out, &ngram_order);
Array1<float> mean_ref(GetCpuContext(), "[ 0 0 0 0 0 0 0 0 0 0 "
" 0 0 0 0 0 0 0 0 0 0 ]");
Array1<float> var_ref(GetCpuContext(), "[ 0 0 0 0 0 0 0 0 0 0 "
" 0 0 0 0 0 0 0 0 0 0 ]");
Array1<int32_t> counts_out_ref(GetCpuContext(), "[ 0 0 0 0 0 0 0 0 0 0 "
" 0 0 0 0 0 0 0 0 0 0 ]");
Array1<int32_t> ngram_order_ref(GetCpuContext(), "[ 0 0 0 0 0 0 0 0 0 0 "
" 0 0 0 0 0 0 0 0 0 0 ]");
K2_CHECK(Equal(mean, mean_ref));
K2_CHECK(Equal(var, var_ref));
K2_CHECK(Equal(counts_out, counts_out_ref));
K2_CHECK(Equal(ngram_order, ngram_order_ref));
}
TEST(AlgorithmTest, TestGetBestMatchingStatsSingleMulti) {
Ragged<int32_t> tokens(GetCpuContext(), "[ [ [ 4 6 7 1 8 ] [ 4 3 7 1 8 ] "
" [ 4 3 2 1 8 ] [ 5 6 7 1 8 ] ] "
" [ [ 5 1 4 8 ] [ 5 1 2 8 ] "
" [ 5 3 4 8 ] ] ]");
Array1<float> scores(GetCpuContext(), "[ 1 2 3 4 5 6 7 8 9 10 "
" 0 0 0 0 0 0 0 0 0 0 "
" 1 2 3 4 5 7 8 6 0 0 0 0 ]");
Array1<int32_t> counts(GetCpuContext(), "[ 1 1 1 1 1 1 1 1 1 1 "
" 0 0 0 0 0 0 0 0 0 0 "
" 1 1 1 1 1 1 1 1 0 0 0 0 ]");
Array1<float> mean, var;
Array1<int32_t> counts_out, ngram_order;
int32_t eos = 8,
min_token = 0,
max_token = 10,
max_order = 5;
GetBestMatchingStats(tokens, scores, counts, eos, min_token, max_token,
max_order, &mean, &var, &counts_out, &ngram_order);
Array1<float> mean_ref(GetCpuContext(), "[ 3.5 2 3 4 5 6 7 5.5 6.5 7.5 "
" 6 7 5.5 6.5 7.5 5.5 2 3 4 5 "
" 3 4.5 3 4 3 4.5 4.5 5 "
" 3 4.5 3 4 ]");
Array1<float> var_ref(GetCpuContext(), "[ 6.25 0 0 0 0 0 0 6.25 6.25 6.25 "
" 0 0 8.25 6.25 6.25 8.25 0 0 0 0 "
" 4 6.25 0 0 4 6.25 5.25 1 "
" 4 5.25 0 0 ]");
Array1<int32_t> counts_out_ref(GetCpuContext(), "[ 2 1 1 1 1 1 1 2 2 2 "
" 1 1 0 2 2 0 1 1 1 1 "
" 2 2 1 1 2 2 0 2 "
" 2 0 1 1 ]");
Array1<int32_t> ngram_order_ref(GetCpuContext(), "[ 5 1 2 3 4 5 5 1 2 3 "
" 5 5 0 1 2 0 1 2 3 4 "
" 5 5 1 2 5 5 0 1 "
" 5 0 1 2 ]");
K2_CHECK(Equal(mean, mean_ref));
K2_CHECK(Equal(var, var_ref));
K2_CHECK(Equal(counts_out, counts_out_ref));
K2_CHECK(Equal(ngram_order, ngram_order_ref));
}
} // namespace k2
using at::Half;
using at::Tensor;
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#ifndef CAFFE_COMMON_CUH_
#define CAFFE_COMMON_CUH_
#include <cuda.h>
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else  // fallback: emulate double-precision atomicAdd with atomicCAS (performance loss)
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
if (val == 0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
#endif
static __inline__ __device__ at::Half atomicAdd(at::Half *address,
at::Half val) {
unsigned int *aligned =
(unsigned int *)((size_t)address - ((size_t)address & 2));
unsigned int old = *aligned;
unsigned int assumed;
unsigned short old_as_us;
do {
assumed = old;
old_as_us =
(unsigned short)((size_t)address & 2 ? old >> 16 : old & 0xffff);
#if __CUDACC_VER_MAJOR__ >= 9
half sum =
__float2half_rn(__half2float(__ushort_as_half(old_as_us)) + float(val));
unsigned short sum_as_us = __half_as_ushort(sum);
#else
unsigned short sum_as_us =
__float2half_rn(__half2float(old_as_us) + float(val));
#endif
unsigned int sum_as_ui = (size_t)address & 2
? (sum_as_us << 16) | (old & 0xffff)
: (old & 0xffff0000) | sum_as_us;
old = atomicCAS(aligned, assumed, sum_as_ui);
} while (assumed != old);
//__half_raw raw = {old_as_us};
// return at::Half(raw);
return at::Half({__ushort_as_half(old_as_us)});
};
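// The at::Half overload above emulates a 16-bit atomic add with a 32-bit atomicCAS:
// it locates the aligned 32-bit word containing the half, extracts the target 16 bits,
// performs the addition in float, splices the new halfword back into the word and
// publishes it with atomicCAS, retrying until no other thread has modified the word
// in between.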
/*** Forward ***/
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(
const scalar_t *bottom_data, const int height, const int width, scalar_t y,
scalar_t x, const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (scalar_t)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (scalar_t)x_low;
} else {
x_high = x_low + 1;
}
scalar_t ly = y - y_low;
scalar_t lx = x - x_low;
scalar_t hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
scalar_t v1 = bottom_data[y_low * width + x_low];
scalar_t v2 = bottom_data[y_low * width + x_high];
scalar_t v3 = bottom_data[y_high * width + x_low];
scalar_t v4 = bottom_data[y_high * width + x_high];
scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
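// The four weights are the usual bilinear factors w1 = (1 - ly)(1 - lx),
// w2 = (1 - ly)lx, w3 = ly(1 - lx), w4 = ly * lx, which sum to 1. Illustrative
// numbers: y = 1.25, x = 2.5 gives y_low = 1, x_low = 2, ly = 0.25, lx = 0.5 and
// weights {0.375, 0.375, 0.125, 0.125}.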
template <typename scalar_t>
__global__ void
PSROIAlignForward(const int nthreads, const scalar_t *bottom_data,
const scalar_t spatial_scale, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const scalar_t *bottom_rois,
const int output_dim, const int group_size,
const int sampling_ratio, scalar_t *top_data,
int *mapping_channel, const int shape) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
// liqq 2016/09/25
// bottom_rois += n * shape;
int roi_batch_ind = (int)bottom_rois[n * shape + 0];
scalar_t roi_start_w =
static_cast<scalar_t>(bottom_rois[n * shape + 1]) * spatial_scale;
scalar_t roi_start_h =
static_cast<scalar_t>(bottom_rois[n * shape + 2]) * spatial_scale;
scalar_t roi_end_w =
static_cast<scalar_t>(bottom_rois[n * shape + 3] + 1.) * spatial_scale;
scalar_t roi_end_h =
static_cast<scalar_t>(bottom_rois[n * shape + 4] + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
scalar_t bin_size_h = roi_height / static_cast<scalar_t>(pooled_height);
scalar_t bin_size_w = roi_width / static_cast<scalar_t>(pooled_width);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
const scalar_t *offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// use max pooling
scalar_t maxval = -1E+10;
int maxidx = -1;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const scalar_t y =
roi_start_h + ph * bin_size_h +
(iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const scalar_t x = roi_start_w + pw * bin_size_w +
(ix + .5f) * bin_size_w / roi_bin_grid_w;
scalar_t val = bilinear_interpolate(offset_bottom_data, height, width,
y, x, index);
int bottom_index = iy * roi_bin_grid_w + ix;
if (val > maxval) {
maxval = val;
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
mapping_channel[index] = maxidx;
}
}
int PSROIAlignForwardLaucher(Tensor bottom_data, const float spatial_scale,
const int num_rois, const int output_dim,
const int size_rois, const int height,
const int width, const int channels,
const int pooled_height, const int pooled_width,
const float sampling_ratio, Tensor bottom_rois,
Tensor top_data, Tensor mapping_channel) {
const int kThreadsPerBlock = 1024;
int output_size = num_rois * pooled_height * pooled_width * output_dim;
cudaError_t err;
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "%s#%d: cudaCheckError() failed : %s\n", __FILE__, __LINE__,
cudaGetErrorString(err));
exit(-1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.scalar_type(), "psroi_align_forward_cuda", ([&] {
PSROIAlignForward<scalar_t>
<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock>>>(
output_size, bottom_data.data_ptr<scalar_t>(), spatial_scale,
channels, height, width, pooled_height, pooled_width,
bottom_rois.data_ptr<scalar_t>(), output_dim, pooled_height,
sampling_ratio, top_data.data_ptr<scalar_t>(),
mapping_channel.data_ptr<int>(), size_rois);
}));
// pooled_height == pooled_width == group_size
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "%s#%d: cudaCheckError() failed : %s\n", __FILE__, __LINE__,
cudaGetErrorString(err));
exit(-1);
}
return 1;
}
/*** Backward ***/
template <typename scalar_t>
inline __device__ scalar_t gpu_atomic_add(scalar_t val, scalar_t *address);
template <typename scalar_t>
inline __device__ scalar_t gpu_atomic_add(scalar_t val, scalar_t *address) {
return atomicAdd(address, val);
}
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(
const int height, const int width, scalar_t y, scalar_t x, scalar_t &w1,
scalar_t &w2, scalar_t &w3, scalar_t &w4, int &x_low, int &x_high,
int &y_low, int &y_high, const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (scalar_t)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (scalar_t)x_low;
} else {
x_high = x_low + 1;
}
scalar_t ly = y - y_low;
scalar_t lx = x - x_low;
scalar_t hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
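// Summarizing the weights above in LaTeX (a restatement of this routine, in
// our own notation): with l_y = y - y_low and l_x = x - x_low,
//
//   w_1 = (1 - l_y)(1 - l_x), \quad w_2 = (1 - l_y)\, l_x, \quad
//   w_3 = l_y (1 - l_x), \quad w_4 = l_y\, l_x,
//
// so the interpolated value is v = \sum_k w_k v_k and the gradient routed back
// to each corner pixel v_k is \partial v / \partial v_k = w_k, which is how
// the backward kernel below distributes top_diff_this_bin.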
template <typename scalar_t>
__global__ void PSROIAlignBackward(
const int nthreads, const scalar_t *top_diff, const int *mapping_channel,
const scalar_t spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int output_dim, const int group_size, const int sampling_ratio,
scalar_t *bottom_diff, const scalar_t *bottom_rois, const int shape) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
// liqq 2016/09/25
// bottom_rois += n * shape;
    // Do not use rounding; this implementation detail is critical
int roi_batch_ind = (int)bottom_rois[n * shape + 0];
scalar_t roi_start_w =
static_cast<scalar_t>(bottom_rois[n * shape + 1]) * spatial_scale;
scalar_t roi_start_h =
static_cast<scalar_t>(bottom_rois[n * shape + 2]) * spatial_scale;
scalar_t roi_end_w =
static_cast<scalar_t>(bottom_rois[n * shape + 3] + 1.) * spatial_scale;
scalar_t roi_end_h =
static_cast<scalar_t>(bottom_rois[n * shape + 4] + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
scalar_t bin_size_h = roi_height / static_cast<scalar_t>(pooled_height);
scalar_t bin_size_w = roi_width / static_cast<scalar_t>(pooled_width);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
scalar_t *offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * output_dim + ctop) * pooled_height * pooled_width;
scalar_t top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int maxidx = mapping_channel[top_offset + ph * pooled_width + pw];
int iy = maxidx / roi_bin_grid_w;
int ix = maxidx % roi_bin_grid_w;
scalar_t y = roi_start_h + ph * bin_size_h +
static_cast<float>(iy + .5f) * bin_size_h /
static_cast<float>(roi_bin_grid_h); // e.g. 0.5, 1.5
scalar_t x = roi_start_w + pw * bin_size_w +
static_cast<float>(ix + .5f) * bin_size_w /
static_cast<float>(roi_bin_grid_w);
scalar_t w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
// bilinear_interpolation_gradient
bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low,
x_high, y_low, y_high, index);
scalar_t g1 = top_diff_this_bin * w1;
scalar_t g2 = top_diff_this_bin * w2;
scalar_t g3 = top_diff_this_bin * w3;
scalar_t g4 = top_diff_this_bin * w4;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add<scalar_t>(g1, offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add<scalar_t>(g2, offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add<scalar_t>(g3, offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add<scalar_t>(g4,
offset_bottom_diff + y_high * width + x_high);
}
}
}
int PSROIAlignBackwardLaucher(Tensor top_diff, const float spatial_scale,
const int batch_size, const int num_rois,
const int output_dim, const int size_rois,
const int height, const int width,
const int channels, const int pooled_height,
const int pooled_width,
const float sampling_ratio, Tensor bottom_rois,
Tensor bottom_diff, Tensor mapping_channel) {
const int kThreadsPerBlock = 1024;
// int output_size = batch_size * height * width * output_dim;
int output_size = output_dim * pooled_height * pooled_width * num_rois;
cudaError_t err;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
top_diff.scalar_type(), "psroi_align_backward_cuda", ([&] {
PSROIAlignBackward<scalar_t>
<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock>>>(output_size, top_diff.data_ptr<scalar_t>(),
mapping_channel.data_ptr<int>(), spatial_scale,
channels, height, width, pooled_height,
pooled_width, output_dim, pooled_height,
sampling_ratio, bottom_diff.data_ptr<scalar_t>(),
bottom_rois.data_ptr<scalar_t>(), size_rois);
}));
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "%s#%d: cudaCheckError() failed : %s\n", __FILE__, __LINE__,
cudaGetErrorString(err));
exit(-1);
}
return 1;
}
using namespace gpu_treeshap; // NOLINT
class ParameterisedModelTest
: public ::testing::TestWithParam<
std::tuple<size_t, size_t, size_t, size_t, size_t>> {
protected:
ParameterisedModelTest() {
size_t max_depth, num_paths;
std::tie(num_rows, num_features, num_groups, max_depth, num_paths) =
GetParam();
model = GenerateEnsembleModel(num_groups, max_depth, num_features,
num_paths, 78);
test_data = TestDataset(num_rows, num_features, 22);
margin = Predict(model, test_data, num_groups);
X = test_data.GetDeviceWrapper();
phis.resize(X.NumRows() * (X.NumCols() + 1) * (X.NumCols() + 1) *
num_groups);
}
std::vector<PathElement<XgboostSplitCondition>> model;
TestDataset test_data;
DenseDatasetWrapper X;
std::vector<float> margin;
thrust::device_vector<float> phis;
size_t num_groups;
size_t num_rows;
size_t num_features;
};
TEST_P(ParameterisedModelTest, ShapSum) {
GPUTreeShap(X, model.begin(), model.end(), num_groups, phis.begin(),
phis.end());
thrust::host_vector<float> result(phis);
std::vector<float> tmp(result.begin(), result.end());
std::vector<float> sum(num_rows * num_groups);
for (auto i = 0ull; i < num_rows; i++) {
for (auto j = 0ull; j < num_features + 1; j++) {
for (auto group = 0ull; group < num_groups; group++) {
size_t result_index = IndexPhi(i, num_groups, group, num_features, j);
sum[i * num_groups + group] += result[result_index];
}
}
}
for (auto i = 0ull; i < sum.size(); i++) {
ASSERT_NEAR(sum[i], margin[i], 1e-3);
}
}
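// The property exercised above is SHAP "local accuracy": for each row x_i and
// output group g the attributions, including the bias column at index
// num_features, reconstruct the raw margin. In LaTeX (our notation, with
// M = num_features):
//
//   \sum_{j=0}^{M} \phi_{i,g,j} = f_g(x_i),
//
// which is why the loop sums over num_features + 1 columns before comparing
// against margin.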
TEST_P(ParameterisedModelTest, ShapInteractionsSum) {
thrust::device_vector<float> phis_interactions(
X.NumRows() * (X.NumCols() + 1) * (X.NumCols() + 1) * num_groups);
GPUTreeShap(X, model.begin(), model.end(), num_groups, phis.begin(),
phis.end());
GPUTreeShapInteractions(X, model.begin(), model.end(), num_groups,
phis_interactions.begin(), phis_interactions.end());
thrust::host_vector<float> interactions_result(phis_interactions);
std::vector<float> sum(phis.size());
for (auto row_idx = 0ull; row_idx < num_rows; row_idx++) {
for (auto group = 0ull; group < num_groups; group++) {
for (auto i = 0ull; i < num_features + 1; i++) {
for (auto j = 0ull; j < num_features + 1; j++) {
size_t result_index = IndexPhiInteractions(row_idx, num_groups, group,
num_features, i, j);
sum[IndexPhi(row_idx, num_groups, group, num_features, i)] +=
interactions_result[result_index];
}
}
}
}
thrust::host_vector<float> phis_host(phis);
for (auto i = 0ull; i < sum.size(); i++) {
ASSERT_NEAR(sum[i], phis_host[i], 1e-3);
}
}
TEST_P(ParameterisedModelTest, ShapTaylorInteractionsSum) {
GPUTreeShapTaylorInteractions(X, model.begin(), model.end(), num_groups,
phis.begin(), phis.end());
thrust::host_vector<float> interactions_result(phis);
std::vector<float> sum(margin.size());
for (auto row_idx = 0ull; row_idx < num_rows; row_idx++) {
for (auto group = 0ull; group < num_groups; group++) {
for (auto i = 0ull; i < num_features + 1; i++) {
for (auto j = 0ull; j < num_features + 1; j++) {
size_t result_index = IndexPhiInteractions(row_idx, num_groups, group,
num_features, i, j);
sum[row_idx * num_groups + group] +=
interactions_result[result_index];
}
}
}
}
for (auto i = 0ull; i < sum.size(); i++) {
ASSERT_NEAR(sum[i], margin[i], 1e-3);
}
}
TEST_P(ParameterisedModelTest, ShapSumInterventional) {
auto r_test_data = TestDataset(400, num_features, 10);
auto R = r_test_data.GetDeviceWrapper();
GPUTreeShapInterventional(X, R, model.begin(), model.end(), num_groups,
phis.begin(), phis.end());
thrust::host_vector<float> result(phis);
std::vector<float> tmp(result.begin(), result.end());
std::vector<float> sum(num_rows * num_groups);
for (auto i = 0ull; i < num_rows; i++) {
for (auto j = 0ull; j < num_features + 1; j++) {
for (auto group = 0ull; group < num_groups; group++) {
size_t result_index = IndexPhi(i, num_groups, group, num_features, j);
sum[i * num_groups + group] += result[result_index];
}
}
}
for (auto i = 0ull; i < sum.size(); i++) {
ASSERT_NEAR(sum[i], margin[i], 1e-3);
}
}
std::string PrintTestName(
const testing::TestParamInfo<ParameterisedModelTest::ParamType>& info) {
std::string name = "nrow" + std::to_string(std::get<0>(info.param)) + "_";
name += "nfeat" + std::to_string(std::get<1>(info.param)) + "_";
name += "ngroup" + std::to_string(std::get<2>(info.param)) + "_";
name += "mdepth" + std::to_string(std::get<3>(info.param)) + "_";
name += "npaths" + std::to_string(std::get<4>(info.param));
return name;
}
// Generate a bunch of random models and check that the SHAP results sum up to
// the predictions
size_t test_num_rows[] = {1, 10, 100, 1000};
size_t test_num_features[] = {1, 5, 8, 31};
size_t test_num_groups[] = {1, 5};
size_t test_max_depth[] = {1, 8, 20};
size_t test_num_paths[] = {1, 10};
INSTANTIATE_TEST_CASE_P(ShapInstantiation, ParameterisedModelTest,
testing::Combine(testing::ValuesIn(test_num_rows),
testing::ValuesIn(test_num_features),
testing::ValuesIn(test_num_groups),
testing::ValuesIn(test_max_depth),
testing::ValuesIn(test_num_paths)),
PrintTestName);
#define EXPECT_THROW_CONTAINS_MESSAGE(stmt, etype, whatstring) \
EXPECT_THROW(try { stmt; } catch (const etype& ex) { \
EXPECT_NE(std::string(ex.what()).find(whatstring), std::string::npos); \
throw; \
}, \
etype)
class APITest : public ::testing::Test {
protected:
APITest() {
const float inf = std::numeric_limits<float>::infinity();
model = {
{0, -1, 0, {-inf, inf, false}, 1.0f, 2.0f},
{0, 0, 0, {0.5f, inf, false}, 0.25f, 2.0f},
{0, 1, 0, {0.5f, inf, false}, 0.5f, 2.0f},
{0, 2, 0, {0.5f, inf, false}, 0.6f, 2.0f},
{0, 3, 0, {0.5f, inf, false}, 1.0f, 2.0f},
};
data = std::vector<float>({1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f});
X = DenseDatasetWrapper(data.data().get(), 2, 4);
phis.resize((X.NumRows() * (X.NumCols() + 1) * (X.NumCols() + 1)));
}
template <typename ExceptionT>
void ExpectAPIThrow(std::string message) {
EXPECT_THROW_CONTAINS_MESSAGE(
GPUTreeShap(X, model.begin(), model.end(), 1, phis.begin(), phis.end()),
ExceptionT, message);
EXPECT_THROW_CONTAINS_MESSAGE(
GPUTreeShapInteractions(X, model.begin(), model.end(), 1, phis.begin(),
phis.end()),
ExceptionT, message);
EXPECT_THROW_CONTAINS_MESSAGE(
GPUTreeShapTaylorInteractions(X, model.begin(), model.end(), 1,
phis.begin(), phis.end()),
ExceptionT, message);
}
thrust::device_vector<float> data;
std::vector<PathElement<XgboostSplitCondition>> model;
DenseDatasetWrapper X;
thrust::device_vector<float> phis;
};
TEST_F(APITest, PathTooLong) {
model.resize(33);
model[0] = {0, -1, 0, {0, 0, 0}, 0, 0};
for (size_t i = 1; i < model.size(); i++) {
model[i] = {0, static_cast<int64_t>(i), 0, {0, 0, 0}, 0, 0};
}
ExpectAPIThrow<std::invalid_argument>("Tree depth must be <= 32");
}
TEST_F(APITest, PathVIncorrect) {
model = {{0, -1, 0, {0.0f, 0.0f, false}, 0.0, 1.0f},
{0, 0, 0, {0.0f, 0.0f, false}, 0.0f, 0.5f}};
ExpectAPIThrow<std::invalid_argument>(
"Leaf value v should be the same across a single path");
}
TEST_F(APITest, PhisIncorrectLength) {
phis.resize(1);
ExpectAPIThrow<std::invalid_argument>("phis_out must be at least of size");
}
// Test a simple tree and compare output to xgb shap values
// 0:[f0<0.5] yes=1,no=2,missing=1,gain=1.63333321,cover=5
// 1:leaf=-1,cover=2
// 2:[f1<0.5] yes=3,no=4,missing=3,gain=2.04166675,cover=3
// 3:leaf=-1,cover=1
// 4:[f2<0.5] yes=5,no=6,missing=5,gain=0.125,cover=2
// 5:leaf=1,cover=1
// 6:leaf=0.5,cover=1
TEST(GPUTreeShap, BasicPaths) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, inf, false}, 1.0f, 0.5f},
{0, 0, 0, {0.5f, inf, false}, 0.6f, 0.5f},
{0, 1, 0, {0.5f, inf, false}, 2.0f / 3, 0.5f},
{0, 2, 0, {0.5f, inf, false}, 0.5f, 0.5f},
{1, -1, 0, {-inf, 0.0f, false}, 1.0f, 1.0f},
{1, 0, 0, {0.5f, inf, false}, 0.6f, 1.0f},
{1, 1, 0, {0.5f, inf, false}, 2.0f / 3, 1.0f},
{1, 2, 0, {-inf, 0.5f, false}, 0.5f, 1.0f},
{2, -1, 0, {-inf, 0.0f, false}, 1.0f, -1},
{2, 0, 0, {0.5f, inf, false}, 0.6f, -1.0f},
{2, 1, 0, {-inf, 0.5f, false}, 1.0f / 3, -1.0f},
{3, -1, 0, {-inf, 0.0f, false}, 1.0f, -1.0f},
{3, 0, 0, {-inf, 0.5f, false}, 0.4f, -1.0f}};
thrust::device_vector<float> data =
std::vector<float>({1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f});
DenseDatasetWrapper X(data.data().get(), 2, 3);
size_t num_trees = 1;
thrust::device_vector<float> phis(X.NumRows() * (X.NumCols() + 1));
GPUTreeShap(X, path.begin(), path.end(), 1, phis.begin(), phis.end());
thrust::host_vector<float> result(phis);
// First instance
EXPECT_NEAR(result[0], 0.6277778f * num_trees, 1e-5);
EXPECT_NEAR(result[1], 0.5027776f * num_trees, 1e-5);
EXPECT_NEAR(result[2], 0.1694444f * num_trees, 1e-5);
EXPECT_NEAR(result[3], -0.3f * num_trees, 1e-5);
// Second instance
EXPECT_NEAR(result[4], 0.24444449f * num_trees, 1e-5);
EXPECT_NEAR(result[5], -1.005555f * num_trees, 1e-5);
EXPECT_NEAR(result[6], 0.0611111f * num_trees, 1e-5);
EXPECT_NEAR(result[7], -0.3f * num_trees, 1e-5);
}
TEST(GPUTreeShap, BasicPathsInteractions) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, inf, false}, 1.0f, 0.5f},
{0, 0, 0, {0.5f, inf, false}, 0.6f, 0.5f},
{0, 1, 0, {0.5f, inf, false}, 2.0f / 3, 0.5f},
{0, 2, 0, {0.5f, inf, false}, 0.5f, 0.5f},
{1, -1, 0, {-inf, 0.0f, false}, 1.0f, 1.0f},
{1, 0, 0, {0.5f, inf, false}, 0.6f, 1.0f},
{1, 1, 0, {0.5f, inf, false}, 2.0f / 3, 1.0f},
{1, 2, 0, {-inf, 0.5f, false}, 0.5f, 1.0f},
{2, -1, 0, {-inf, 0.0f, false}, 1.0f, -1},
{2, 0, 0, {0.5f, inf, false}, 0.6f, -1.0f},
{2, 1, 0, {-inf, 0.5f, false}, 1.0f / 3, -1.0f},
{3, -1, 0, {-inf, 0.0f, false}, 1.0f, -1.0f},
{3, 0, 0, {-inf, 0.5f, false}, 0.4f, -1.0f}};
thrust::device_vector<float> data =
std::vector<float>({1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f});
DenseDatasetWrapper X(data.data().get(), 2, 3);
thrust::device_vector<float> phis(X.NumRows() * (X.NumCols() + 1) *
(X.NumCols() + 1));
GPUTreeShapInteractions(X, path.begin(), path.end(), 1, phis.begin(),
phis.end());
std::vector<float> result(phis.begin(), phis.end());
std::vector<float> expected_result = {
0.46111116, 0.125, 0.04166666, 0., 0.125,
0.34444442, 0.03333333, 0., 0.04166666, 0.03333335,
0.09444444, 0., 0., 0., 0.,
-0.3, 0.47222224, 0.1083333, -0.04166666, 0.,
0.10833332, 0.35555553, -0.03333333, 0., -0.04166666,
-0.03333332, -0.09444447, 0., 0., 0.,
0., -0.3};
for (auto i = 0ull; i < result.size(); i++) {
EXPECT_NEAR(result[i], expected_result[i], 1e-5);
}
}
// Test a tree with features occurring multiple times in a path
TEST(GPUTreeShap, BasicPathsWithDuplicates) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, 0.0f, false}, 1.0f, 3.0f},
{0, 0, 0, {0.5f, inf, false}, 2.0f / 3, 3.0f},
{0, 0, 0, {1.5f, inf, false}, 0.5f, 3.0f},
{0, 0, 0, {2.5f, inf, false}, 0.5f, 3.0f},
{1, -1, 0, {-inf, 0.0f, false}, 1.0f, 2.0f},
{1, 0, 0, {0.5f, inf, false}, 2.0f / 3.0f, 2.0f},
{1, 0, 0, {1.5f, inf, false}, 0.5f, 2.0f},
{1, 0, 0, {-inf, 2.5f, false}, 0.5f, 2.0f},
{2, -1, 0, {-inf, 0.0f, false}, 1.0f, 1.0f},
{2, 0, 0, {0.5f, inf, false}, 2.0f / 3.0f, 1.0f},
{2, 0, 0, {-inf, 1.5f, false}, 0.5f, 1.0f},
{3, -1, 0, {-inf, 0.0f, false}, 1.0f, -1.0f},
{3, 0, 0, {-inf, 0.5f, false}, 1.0f / 3, -1.0f}};
thrust::device_vector<float> data = std::vector<float>({2.0f});
DenseDatasetWrapper X(data.data().get(), 1, 1);
size_t num_trees = 1;
thrust::device_vector<float> phis(X.NumRows() * (X.NumCols() + 1));
GPUTreeShap(X, path.begin(), path.end(), 1, phis.begin(), phis.end());
thrust::host_vector<float> result(phis);
// First instance
EXPECT_FLOAT_EQ(result[0], 1.1666666f * num_trees);
EXPECT_FLOAT_EQ(result[1], 0.83333337f * num_trees);
}
__device__ bool FloatApproximatelyEqual(float a, float b) {
const float kEps = 1e-5;
return fabs(a - b) < kEps;
}
// Expose pweight for testing
class TestGroupPath : public detail::GroupPath {
public:
__device__ TestGroupPath(const detail::ContiguousGroup& g,
float zero_fraction, float one_fraction)
: detail::GroupPath(g, zero_fraction, one_fraction) {}
using detail::GroupPath::pweight_;
using detail::GroupPath::unique_depth_;
};
template <typename DatasetT, typename SplitConditionT>
__global__ void TestExtendKernel(
DatasetT X, size_t num_path_elements,
const PathElement<SplitConditionT>* path_elements) {
cooperative_groups::thread_block block =
cooperative_groups::this_thread_block();
auto group =
cooperative_groups::tiled_partition<32, cooperative_groups::thread_block>(
block);
bool thread_active = threadIdx.x < num_path_elements;
uint32_t mask = __ballot_sync(FULL_MASK, thread_active);
if (!thread_active) return;
// Test first training instance
cooperative_groups::coalesced_group active_group =
cooperative_groups::coalesced_threads();
PathElement<SplitConditionT> e = path_elements[active_group.thread_rank()];
float one_fraction =
e.split_condition.EvaluateSplit(X.GetElement(0, e.feature_idx));
float zero_fraction = e.zero_fraction;
auto labelled_group = detail::active_labeled_partition(mask, 0);
TestGroupPath path(labelled_group, zero_fraction, one_fraction);
path.Extend();
assert(path.unique_depth_ == 1);
if (active_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 0.3f));
} else if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path.pweight_, 0.5f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
path.Extend();
assert(path.unique_depth_ == 2);
if (active_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 0.133333f));
} else if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path.pweight_, 0.21111f));
} else if (active_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(path.pweight_, 0.33333f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
path.Extend();
assert(path.unique_depth_ == 3);
if (active_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 0.05f));
} else if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path.pweight_, 0.086111f));
} else if (active_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(path.pweight_, 0.147222f));
} else if (active_group.thread_rank() == 3) {
assert(FloatApproximatelyEqual(path.pweight_, 0.25f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
float unwound_sum = path.UnwoundPathSum();
if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(unwound_sum, 0.63888f));
} else if (active_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(unwound_sum, 0.61666f));
} else if (active_group.thread_rank() == 3) {
assert(FloatApproximatelyEqual(unwound_sum, 0.67777f));
} else if (active_group.thread_rank() > 3) {
assert(FloatApproximatelyEqual(unwound_sum, 0.0f));
}
// Test second training instance
one_fraction =
e.split_condition.EvaluateSplit(X.GetElement(1, e.feature_idx));
TestGroupPath path2(labelled_group, zero_fraction, one_fraction);
path2.Extend();
assert(path2.unique_depth_ == 1);
if (active_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.3f));
} else if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.5f));
} else {
assert(FloatApproximatelyEqual(path2.pweight_, 0.0f));
}
path2.Extend();
assert(path2.unique_depth_ == 2);
if (active_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.133333f));
} else if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.11111f));
} else if (active_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.0f));
} else {
assert(FloatApproximatelyEqual(path2.pweight_, 0.0f));
}
path2.Extend();
assert(path2.unique_depth_ == 3);
if (active_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.05f));
} else if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.06111f));
} else if (active_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.05555f));
} else if (active_group.thread_rank() == 3) {
assert(FloatApproximatelyEqual(path2.pweight_, 0.0f));
} else {
assert(FloatApproximatelyEqual(path2.pweight_, 0.0f));
}
unwound_sum = path2.UnwoundPathSum();
if (active_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(unwound_sum, 0.22222f));
} else if (active_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(unwound_sum, 0.61666f));
} else if (active_group.thread_rank() == 3) {
assert(FloatApproximatelyEqual(unwound_sum, 0.244444f));
} else if (active_group.thread_rank() > 3) {
assert(FloatApproximatelyEqual(unwound_sum, 0.0f));
}
}
TEST(GPUTreeShap, Extend) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, 0.0f, false}, 1.0f, 1.0f},
{0, 0, 0, {0.5f, inf, false}, 3.0f / 5, 1.0f},
{0, 1, 0, {0.5f, inf, false}, 2.0f / 3, 1.0f},
{0, 2, 0, {-inf, 0.5f, false}, 1.0f / 2, 1.0f}};
thrust::device_vector<PathElement<XgboostSplitCondition>> device_path(path);
thrust::device_vector<float> data =
std::vector<float>({1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f});
DenseDatasetWrapper X(data.data().get(), 2, 3);
  TestExtendKernel<<<1, 32>>>(X, 4, device_path.data().get());
  EXPECT_EQ(cudaDeviceSynchronize(), 0);
}
template <typename DatasetT, typename SplitConditionT>
__global__ void TestExtendMultipleKernel(
DatasetT X, size_t n_first, size_t n_second,
const PathElement<SplitConditionT>* path_elements) {
cooperative_groups::thread_block block =
cooperative_groups::this_thread_block();
auto warp =
cooperative_groups::tiled_partition<32, cooperative_groups::thread_block>(
block);
bool thread_active = threadIdx.x < n_first + n_second;
uint32_t mask = __ballot_sync(FULL_MASK, thread_active);
if (!thread_active) return;
cooperative_groups::coalesced_group active_group =
cooperative_groups::coalesced_threads();
int label = warp.thread_rank() >= n_first;
auto labeled_group = detail::active_labeled_partition(mask, label);
PathElement<SplitConditionT> e = path_elements[warp.thread_rank()];
// Test first training instance
float one_fraction =
e.split_condition.EvaluateSplit(X.GetElement(0, e.feature_idx));
float zero_fraction = e.zero_fraction;
TestGroupPath path(labeled_group, zero_fraction, one_fraction);
assert(path.unique_depth_ == 0);
if (labeled_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 1.0f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
path.Extend();
assert(path.unique_depth_ == 1);
if (labeled_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 0.3f));
} else if (labeled_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path.pweight_, 0.5f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
path.Extend();
assert(path.unique_depth_ == 2);
if (labeled_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 0.133333f));
} else if (labeled_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path.pweight_, 0.21111f));
} else if (labeled_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(path.pweight_, 0.33333f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
// Extend the first group only
if (label == 0) {
path.Extend();
assert(path.unique_depth_ == 3);
if (labeled_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 0.05f));
} else if (labeled_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path.pweight_, 0.086111f));
} else if (labeled_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(path.pweight_, 0.147222f));
} else if (labeled_group.thread_rank() == 3) {
assert(FloatApproximatelyEqual(path.pweight_, 0.25f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
} else {
assert(path.unique_depth_ == 2);
if (labeled_group.thread_rank() == 0) {
assert(FloatApproximatelyEqual(path.pweight_, 0.133333f));
} else if (labeled_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(path.pweight_, 0.21111f));
} else if (labeled_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(path.pweight_, 0.33333f));
} else {
assert(FloatApproximatelyEqual(path.pweight_, 0.0f));
}
}
if (label == 0) {
float unwound_sum = path.UnwoundPathSum();
if (labeled_group.thread_rank() == 1) {
assert(FloatApproximatelyEqual(unwound_sum, 0.63888f));
} else if (labeled_group.thread_rank() == 2) {
assert(FloatApproximatelyEqual(unwound_sum, 0.61666f));
} else if (labeled_group.thread_rank() == 3) {
assert(FloatApproximatelyEqual(unwound_sum, 0.67777f));
} else if (labeled_group.thread_rank() > 3) {
assert(FloatApproximatelyEqual(unwound_sum, 0.0f));
}
}
}
TEST(GPUTreeShap, ExtendMultiplePaths) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, 0.0f, false}, 1.0f, 1.0f},
{0, 0, 0, {0.5f, inf, false}, 3.0f / 5, 1.0f},
{0, 1, 0, {0.5f, inf, false}, 2.0f / 3, 1.0f},
{0, 2, 0, {-inf, 0.5f, false}, 1.0f / 2, 1.0f}};
// Add the first three elements again
path.emplace_back(path[0]);
path.emplace_back(path[1]);
path.emplace_back(path[2]);
thrust::device_vector<PathElement<XgboostSplitCondition>> device_path(path);
thrust::device_vector<float> data =
std::vector<float>({1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f});
DenseDatasetWrapper X(data.data().get(), 2, 3);
  TestExtendMultipleKernel<<<1, 32>>>(X, 4, 3, device_path.data().get());
  EXPECT_EQ(cudaDeviceSynchronize(), 0);
}
__global__ void TestActiveLabeledPartition() {
cooperative_groups::thread_block block =
cooperative_groups::this_thread_block();
auto warp =
cooperative_groups::tiled_partition<32, cooperative_groups::thread_block>(
block);
int label = warp.thread_rank() < 5 ? 3 : 6;
auto labelled_partition = detail::active_labeled_partition(FULL_MASK, label);
if (label == 3) {
assert(labelled_partition.size() == 5);
assert(labelled_partition.thread_rank() == warp.thread_rank());
} else if (label == 6) {
assert(labelled_partition.size() == 32 - 5);
assert(labelled_partition.thread_rank() == warp.thread_rank() - 5);
}
bool odd = warp.thread_rank() % 2 == 1;
uint32_t odd_mask = __ballot_sync(FULL_MASK, odd);
uint32_t even_mask = __ballot_sync(FULL_MASK, !odd);
if (odd) {
auto labelled_partition2 =
detail::active_labeled_partition(odd_mask, label);
if (label == 3) {
assert(labelled_partition2.size() == 2);
assert(labelled_partition2.thread_rank() == warp.thread_rank() / 2);
} else if (label == 6) {
assert(labelled_partition2.size() == 14);
assert(labelled_partition2.thread_rank() == (warp.thread_rank() / 2) - 2);
}
} else {
auto labelled_partition2 =
detail::active_labeled_partition(even_mask, label);
if (label == 3) {
assert(labelled_partition2.size() == 3);
assert(labelled_partition2.thread_rank() == warp.thread_rank() / 2);
} else if (label == 6) {
assert(labelled_partition2.size() == 13);
assert(labelled_partition2.thread_rank() == (warp.thread_rank() / 2) - 3);
}
}
}
TEST(GPUTreeShap, ActiveLabeledPartition) {
TestActiveLabeledPartition<<<1, 32>>>();
EXPECT_EQ(cudaDeviceSynchronize(), 0);
}
TEST(GPUTreeShap, BFDBinPacking) {
thrust::device_vector<int> counts(3);
counts[0] = 2;
counts[1] = 2;
counts[2] = 1;
auto bin_packing = detail::BFDBinPacking(counts, 3);
EXPECT_EQ(bin_packing[0], 0u);
EXPECT_EQ(bin_packing[1], 1u);
EXPECT_EQ(bin_packing[2], 0u);
counts.clear();
counts.resize(12);
counts[0] = 3;
counts[1] = 3;
counts[2] = 3;
counts[3] = 3;
counts[4] = 3;
counts[5] = 3;
counts[6] = 2;
counts[7] = 2;
counts[8] = 2;
counts[9] = 2;
counts[10] = 2;
counts[11] = 2;
bin_packing = detail::BFDBinPacking(counts, 10);
EXPECT_EQ(bin_packing[0], 0u);
EXPECT_EQ(bin_packing[1], 0u);
EXPECT_EQ(bin_packing[2], 0u);
EXPECT_EQ(bin_packing[3], 1u);
EXPECT_EQ(bin_packing[4], 1u);
EXPECT_EQ(bin_packing[5], 1u);
EXPECT_EQ(bin_packing[6], 2u);
EXPECT_EQ(bin_packing[7], 2u);
EXPECT_EQ(bin_packing[8], 2u);
EXPECT_EQ(bin_packing[9], 2u);
EXPECT_EQ(bin_packing[10], 2u);
EXPECT_EQ(bin_packing[11], 3u);
}
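// For reference, best-fit decreasing (BFD) sorts the items by size, largest
// first, and places each item into the fullest open bin that still has room,
// opening a new bin otherwise; the expected bin ids in the test above follow
// from that rule. The sketch below is a plain host-side illustration under
// those assumptions, not the detail::BFDBinPacking implementation; the
// includes are repeated here only to keep the sketch self-contained.
#include <algorithm>
#include <numeric>
#include <vector>

std::vector<size_t> BfdSketch(const std::vector<int>& counts,
                              int bin_capacity) {
  std::vector<size_t> order(counts.size());
  std::iota(order.begin(), order.end(), 0);
  // Largest items first; stable so ties keep input order, as in the test.
  std::stable_sort(order.begin(), order.end(),
                   [&](size_t a, size_t b) { return counts[a] > counts[b]; });
  std::vector<int> bin_used;  // space consumed in each open bin
  std::vector<size_t> assignment(counts.size());
  for (size_t idx : order) {
    int best_bin = -1;
    for (size_t b = 0; b < bin_used.size(); ++b) {
      if (bin_used[b] + counts[idx] <= bin_capacity &&
          (best_bin < 0 || bin_used[b] > bin_used[best_bin])) {
        best_bin = static_cast<int>(b);  // fullest bin that still fits
      }
    }
    if (best_bin < 0) {  // nothing fits: open a new bin
      bin_used.push_back(0);
      best_bin = static_cast<int>(bin_used.size()) - 1;
    }
    bin_used[best_bin] += counts[idx];
    assignment[idx] = static_cast<size_t>(best_bin);
  }
  return assignment;
}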
TEST(GPUTreeShap, NFBinPacking) {
thrust::device_vector<int> counts(4);
counts[0] = 3;
counts[1] = 3;
counts[2] = 1;
counts[3] = 2;
auto bin_packing = detail::NFBinPacking(counts, 5);
EXPECT_EQ(bin_packing[0], 0u);
EXPECT_EQ(bin_packing[1], 1u);
EXPECT_EQ(bin_packing[2], 1u);
EXPECT_EQ(bin_packing[3], 2u);
}
TEST(GPUTreeShap, FFDBinPacking) {
thrust::device_vector<int> counts(5);
counts[0] = 3;
counts[1] = 2;
counts[2] = 3;
counts[3] = 4;
counts[4] = 1;
auto bin_packing = detail::FFDBinPacking(counts, 5);
EXPECT_EQ(bin_packing[0], 1u);
EXPECT_EQ(bin_packing[1], 1u);
EXPECT_EQ(bin_packing[2], 2u);
EXPECT_EQ(bin_packing[3], 0u);
EXPECT_EQ(bin_packing[4], 0u);
}
__global__ void TestContiguousGroup() {
int label = threadIdx.x > 2 && threadIdx.x < 6 ? 1 : threadIdx.x >= 6 ? 2 : 0;
auto group = detail::active_labeled_partition(FULL_MASK, label);
if (label == 1) {
assert(group.size() == 3);
assert(group.thread_rank() == threadIdx.x - 3);
int up = group.shfl_up(threadIdx.x, 1);
if (group.thread_rank() > 0) {
assert(up == threadIdx.x - 1);
}
assert(group.shfl(threadIdx.x, 2) == 5);
}
}
TEST(GPUTreeShap, ContiguousGroup) {
TestContiguousGroup<<<1, 32>>>();
EXPECT_EQ(cudaDeviceSynchronize(), 0);
}
class DeterminismTest : public ::testing::Test {
protected:
DeterminismTest() {
size_t num_rows = 100;
size_t num_features = 100;
num_groups = 1;
size_t max_depth = 10;
size_t num_paths = 1000;
samples = 100;
model = GenerateEnsembleModel(num_groups, max_depth, num_features,
num_paths, 78);
test_data = TestDataset(num_rows, num_features, 22, 1e-15);
X = test_data.GetDeviceWrapper();
reference_phis.resize(X.NumRows() * (X.NumCols() + 1) * (X.NumCols() + 1) *
num_groups);
}
std::vector<PathElement<XgboostSplitCondition>> model;
TestDataset test_data;
DenseDatasetWrapper X;
size_t samples;
size_t num_groups;
thrust::device_vector<float> reference_phis;
};
TEST_F(DeterminismTest, GPUTreeShap) {
GPUTreeShap(X, model.begin(), model.end(), num_groups, reference_phis.begin(),
reference_phis.end());
for (auto i = 0ull; i < samples; i++) {
thrust::device_vector<float> phis(reference_phis.size());
GPUTreeShap(X, model.begin(), model.end(), num_groups, phis.begin(),
phis.end());
ASSERT_TRUE(thrust::equal(reference_phis.begin(), reference_phis.end(),
phis.begin()));
}
}
TEST_F(DeterminismTest, GPUTreeShapInteractions) {
GPUTreeShapInteractions(X, model.begin(), model.end(), num_groups,
reference_phis.begin(), reference_phis.end());
for (auto i = 0ull; i < samples; i++) {
thrust::device_vector<float> phis(reference_phis.size());
GPUTreeShapInteractions(X, model.begin(), model.end(), num_groups,
phis.begin(), phis.end());
ASSERT_TRUE(thrust::equal(reference_phis.begin(), reference_phis.end(),
phis.begin()));
}
}
TEST_F(DeterminismTest, GPUTreeShapTaylorInteractions) {
GPUTreeShapTaylorInteractions(X, model.begin(), model.end(), num_groups,
reference_phis.begin(), reference_phis.end());
for (auto i = 0ull; i < samples; i++) {
thrust::device_vector<float> phis(reference_phis.size());
GPUTreeShapTaylorInteractions(X, model.begin(), model.end(), num_groups,
phis.begin(), phis.end());
ASSERT_TRUE(thrust::equal(reference_phis.begin(), reference_phis.end(),
phis.begin()));
}
}
// Example from page 10 section 4.1
// Dhamdhere, Kedar, Ashish Agarwal, and Mukund Sundararajan. "The Shapley
// Taylor Interaction Index." arXiv preprint arXiv:1902.05622 (2019).
TEST(GPUTreeShap, TaylorInteractionsPaperExample) {
const float inf = std::numeric_limits<float>::infinity();
float c = 3.0f;
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, inf, false}, 1.0f, 1.0f},
{0, 0, 0, {0.5f, inf, false}, 0.0f, 1.0f},
{1, -1, 0, {-inf, inf, false}, 1.0f, 1.0f},
{1, 1, 0, {0.5f, inf, false}, 0.0f, 1.0f},
{2, -1, 0, {-inf, inf, false}, 1.0f, 1.0f},
{2, 2, 0, {0.5f, inf, false}, 0.0f, 1.0f},
{3, -1, 0, {-inf, inf, false}, 1.0f, c},
{3, 0, 0, {0.5f, inf, false}, 0.0f, c},
{3, 1, 0, {0.5f, inf, false}, 0.0f, c},
{3, 2, 0, {0.5f, inf, false}, 0.0f, c},
};
thrust::device_vector<float> data = std::vector<float>({1.0f, 1.0f, 1.0f});
DenseDatasetWrapper X(data.data().get(), 1, 3);
thrust::device_vector<float> interaction_phis(
X.NumRows() * (X.NumCols() + 1) * (X.NumCols() + 1));
GPUTreeShapTaylorInteractions(X, path.begin(), path.end(), 1,
interaction_phis.begin(),
interaction_phis.end());
std::vector<float> interactions_result(interaction_phis.begin(),
interaction_phis.end());
std::vector<float> expected_result = {1.0, 0.5, 0.5, 0.0, 0.5, 1.0, 0.5, 0.0,
0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0};
ASSERT_EQ(interaction_phis, expected_result);
}
TEST(GPUTreeShap, TaylorInteractionsBasic) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, inf, false}, 1.0f, 2.0f},
{0, 0, 0, {0.5f, inf, false}, 0.25f, 2.0f},
{0, 1, 0, {0.5f, inf, false}, 0.5f, 2.0f},
{0, 2, 0, {0.5f, inf, false}, 0.6f, 2.0f},
{0, 3, 0, {0.5f, inf, false}, 1.0f, 2.0f},
};
thrust::device_vector<float> data =
std::vector<float>({1.0f, 1.0f, 1.0f, 1.0f});
DenseDatasetWrapper X(data.data().get(), 1, 4);
thrust::device_vector<float> interaction_phis(
X.NumRows() * (X.NumCols() + 1) * (X.NumCols() + 1));
GPUTreeShapTaylorInteractions(X, path.begin(), path.end(), 1,
interaction_phis.begin(),
interaction_phis.end());
thrust::host_vector<float> interactions_result(interaction_phis);
float sum =
std::accumulate(interaction_phis.begin(), interaction_phis.end(), 0.0f);
ASSERT_FLOAT_EQ(sum, 2.0f);
}
TEST(GPUTreeShap, GetWCoefficients) {
EXPECT_DOUBLE_EQ(detail::W(0, 1), 1.0);
EXPECT_DOUBLE_EQ(detail::W(0, 2), 0.5);
EXPECT_DOUBLE_EQ(detail::W(1, 2), 0.5);
EXPECT_DOUBLE_EQ(detail::W(0, 3), 2.0 / 6);
EXPECT_DOUBLE_EQ(detail::W(1, 3), 1.0 / 6);
EXPECT_DOUBLE_EQ(detail::W(2, 3), 2.0 / 6);
EXPECT_DOUBLE_EQ(detail::W(0, 4), 6.0 / 24);
EXPECT_DOUBLE_EQ(detail::W(1, 4), 2.0 / 24);
EXPECT_DOUBLE_EQ(detail::W(2, 4), 2.0 / 24);
EXPECT_DOUBLE_EQ(detail::W(3, 4), 6.0 / 24);
}
TEST(GPUTreeShap, InterventionalBasic) {
const float inf = std::numeric_limits<float>::infinity();
std::vector<PathElement<XgboostSplitCondition>> path{
{0, -1, 0, {-inf, inf, false}, 1.0f, 8.0f},
{0, 0, 0, {5.0f, inf, false}, 0.0f, 8.0f},
{0, 1, 0, {5.0f, inf, false}, 0.0f, 8.0f},
{0, 0, 0, {5.0f, inf, false}, 0.0f, 8.0f},
{1, -1, 0, {-inf, inf, false}, 1.0f, 6.0f},
{1, 0, 0, {5.0f, inf, false}, 0.0f, 6.0f},
{1, 1, 0, {-inf, 5.0f, false}, 0.0f, 6.0f},
{1, 2, 0, {-5.0f, inf, false}, 0.0f, 6.0f},
{2, -1, 0, {-inf, inf, false}, 1.0f, 5.0f},
{2, 0, 0, {5.0f, inf, false}, 0.0f, 5.0f},
{2, 1, 0, {-inf, 5.0f, false}, 0.0f, 5.0f},
{2, 2, 0, {-inf, -5.0f, false}, 0.0f, 5.0f},
};
thrust::device_vector<float> X_data =
std::vector<float>({10.0f, 0.0f, 10.0f});
thrust::device_vector<float> R_data =
std::vector<float>({10.0f, 10.0f, -10.0f, 10.0f, 10.0f, 10.0f});
DenseDatasetWrapper X(X_data.data().get(), 1, 3);
DenseDatasetWrapper R(R_data.data().get(), 2, 3);
thrust::device_vector<float> phis(X.NumRows() * (X.NumCols() + 1));
GPUTreeShapInterventional(X, R, path.begin(), path.end(), 1,
phis.begin(), phis.end());
std::vector<float> result(phis.begin(), phis.end());
ASSERT_FLOAT_EQ(result[0], 0.0f);
ASSERT_FLOAT_EQ(result[1], -2.25f);
ASSERT_FLOAT_EQ(result[2], 0.25f);
ASSERT_FLOAT_EQ(result[3], 8.0f);
}
#include <amgx_types/util.h>
#include <solvers/block_common_solver.h>
#include <amgx_cublas.h>
//TODO remove synchronization from this module by moving host operations to the device
namespace amgx
{
template< class T_Config>
GMRES_Solver<T_Config>::GMRES_Solver( AMG_Config &cfg, const std::string &cfg_scope ) :
Solver<T_Config>( cfg, cfg_scope ), m_preconditioner(0), no_preconditioner(true)
{
std::string solverName, new_scope, tmp_scope;
cfg.getParameter<std::string>( "preconditioner", solverName, cfg_scope, new_scope );
if (solverName.compare("NOSOLVER") == 0)
{
no_preconditioner = true;
m_preconditioner = NULL;
}
else
{
no_preconditioner = false;
m_preconditioner = SolverFactory<T_Config>::allocate( cfg, cfg_scope, "preconditioner" );
}
m_R = cfg.AMG_Config::getParameter<int>("gmres_n_restart", cfg_scope);
m_krylov_size = min( this->m_max_iters, m_R );
if ( this->m_norm_type != L2 )
{
FatalError("GMRES only works with L2 norm. Other norms would require extra computations. ", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
m_H.resize( m_krylov_size + 1, m_krylov_size );
m_s.resize( m_krylov_size + 1 );
m_cs.resize( m_krylov_size );
m_sn.resize( m_krylov_size );
m_V_vectors.resize( m_krylov_size + 1 );
}
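// A quick accounting of the buffers sized above, in our own notation: for a
// restart length m = m_krylov_size, the Hessenberg matrix \bar{H}_m has size
// (m + 1) \times m, the rotated right-hand side s has m + 1 entries, the
// Givens cosines and sines have m entries each, and the Arnoldi basis V needs
// m + 1 vectors of length N (those are resized to N later, in solver_setup).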
template<class T_Config>
GMRES_Solver<T_Config>::~GMRES_Solver()
{
if (!no_preconditioner) { delete m_preconditioner; }
}
template<class T_Config>
void
GMRES_Solver<T_Config>::printSolverParameters() const
{
std::cout << "gmres_n_restart=" << this->m_R << std::endl;
if (!no_preconditioner)
{
std::cout << "preconditioner: " << this->m_preconditioner->getName() << " with scope name: " << this->m_preconditioner->getScope() << std::endl;
}
}
template<class T_Config>
void
GMRES_Solver<T_Config>::solver_setup(bool reuse_matrix_structure)
{
// Setup the solver
ViewType oldView = this->m_A->currentView();
this->m_A->setViewExterior();
if ( this->m_A->get_block_dimy() != 1 && !this->m_use_scalar_norm )
{
FatalError( "GMRES solver only works on block matrix if configuration parameter use_scalar_norm=1", AMGX_ERR_NOT_SUPPORTED_TARGET );
}
if (!no_preconditioner) { m_preconditioner->setup( *this->m_A, reuse_matrix_structure ); }
// Make sure vectors already have decent sizes.
assert( m_V_vectors.size() >= m_krylov_size + 1 );
// The number of elements in temporary vectors.
const int N = static_cast<int>( this->m_A->get_num_cols() * this->m_A->get_block_dimy() );
// Allocate memory needed for iterating.
for ( int i = 0 ; i <= m_krylov_size ; ++i )
{
m_V_vectors[i].resize(N);
}
m_Z_vector.resize(N);
for ( int i = 0 ; i <= m_krylov_size ; ++i )
{
m_V_vectors[i].set_block_dimy(this->m_A->get_block_dimy());
m_V_vectors[i].set_block_dimx(1);
m_V_vectors[i].dirtybit = 1;
m_V_vectors[i].delayed_send = 1;
m_V_vectors[i].tag = this->tag * 100 + i;
}
m_Z_vector.set_block_dimy(this->m_A->get_block_dimy());
m_Z_vector.set_block_dimx(1);
m_Z_vector.dirtybit = 1;
m_Z_vector.delayed_send = 1;
m_Z_vector.tag = this->tag * 100;
this->m_A->setView(oldView);
}
template <typename ValueType, bool IsComplex>
struct GeneratePlaneRotation;
template <typename ValueType>
struct GeneratePlaneRotation<ValueType, false>
{
static __host__ void generate( ValueType &dx, ValueType &dy, ValueType &cs, ValueType &sn, ValueType &rhsx, ValueType &rhsy )
{
ValueType tmp;
        if (dy == ValueType(0.0)) // no rotation needed when the subdiagonal entry is zero
{
cs = 1.0;
sn = 0.0;
}
else if (abs(dy) > abs(dx))
{
tmp = dx / dy;
sn = ValueType(1.0) / sqrt(ValueType(1.0) + tmp * tmp);
cs = tmp * sn;
}
else
{
tmp = dy / dx;
cs = ValueType(1.0) / sqrt(ValueType(1.0) + tmp * tmp);
sn = tmp * cs;
}
tmp = cs * rhsx;
rhsy = -sn * rhsx;
rhsx = tmp;
}
};
template <typename ValueType>
struct GeneratePlaneRotation<ValueType, true>
{
static __host__ void generate( ValueType &dx, ValueType &dy, ValueType &cs, ValueType &sn, ValueType &rhsx, ValueType &rhsy )
{
typedef typename types::PODTypes<ValueType>::type PodTypeB;
ValueType tmp;
PodTypeB adx = types::util<ValueType>::abs(dx);
PodTypeB ady = types::util<ValueType>::abs(dy);
if (isCloseToZero(dx + dy))
{
cs = types::util<ValueType>::get_one();
sn = types::util<ValueType>::get_zero();
}
else if (ady > adx)
{
adx = adx / ady;
sn = types::util<ValueType>::get_one() / sqrt(PodTypeB(1.0) + adx * adx);
cs = sn * adx;
}
else
{
ady = ady / adx;
cs = types::util<ValueType>::get_one() / sqrt(PodTypeB(1.0) + ady * ady);
sn = cs * ady;
}
tmp = cs * rhsx;
rhsy = types::util<ValueType>::invert(types::util<ValueType>::conjugate(sn)) * rhsx; //-conjugate(sin)
rhsx = tmp;
}
};
template <typename ValueType>
static __host__ void PlaneRotation( cusp::array2d<ValueType, cusp::host_memory, cusp::column_major> &H,
cusp::array1d<ValueType, cusp::host_memory> &cs,
cusp::array1d<ValueType, cusp::host_memory> &sn,
cusp::array1d<ValueType, cusp::host_memory> &s,
int i)
{
ValueType temp;
for (int k = 0; k < i; k++)
{
temp = cs[k] * H(k, i) + sn[k] * H(k + 1, i);
H(k + 1, i) = cs[k] * H(k + 1, i) - types::util<ValueType>::conjugate(sn[k]) * H(k, i);
H(k, i) = temp;
}
GeneratePlaneRotation<ValueType, types::util<ValueType>::is_complex>::generate(H(i, i), H(i + 1, i), cs[i], sn[i], s[i], s[i + 1]);
H(i, i) = cs[i] * H(i, i) + sn[i] * H(i + 1, i);
H(i + 1, i) = types::util<ValueType>::get_zero();
}
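// For reference, this is the standard Givens rotation written in our own
// notation: with a = H(i, i) and b = H(i + 1, i), the generated pair satisfies
//
//   cs = \frac{a}{\sqrt{a^2 + b^2}}, \qquad sn = \frac{b}{\sqrt{a^2 + b^2}},
//   \qquad
//   \begin{pmatrix} cs & sn \\ -\overline{sn} & cs \end{pmatrix}
//   \begin{pmatrix} a \\ b \end{pmatrix}
//   = \begin{pmatrix} \sqrt{a^2 + b^2} \\ 0 \end{pmatrix},
//
// which zeroes the subdiagonal entry of the Hessenberg column (sn is
// conjugated in the complex specialization). Applying the same rotation to the
// right-hand side vector s means |s[i + 1]| tracks the current residual norm,
// which is what the convergence check in solve_iteration reads.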
template<class T_Config>
void
GMRES_Solver<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{}
template<class T_Config>
bool
GMRES_Solver<T_Config>::solve_one_iteration( VVector &b, VVector &x )
{
ViewType oldView = this->m_A->currentView();
this->m_A->setViewExterior();
Operator<T_Config> &A = *this->m_A;
int offset, size;
A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size);
// compute initial residual
A.apply( x, m_V_vectors[0]); // V(0) = A*x
axpy( b, m_V_vectors[0], types::util<ValueTypeB>::get_minus_one(), offset, size); // V(0) = V(0) - b
PodTypeB beta = get_norm(A, m_V_vectors[0], L2); // beta = norm(V(0))
Cublas::scal( size, PodTypeB(-1.0 / beta), m_V_vectors[0].raw() + offset, 1 ); // V(0) = -V(0)/beta //
cusp::blas::fill( m_s, types::util<ValueTypeB>::get_zero() );
m_s[0] = types::util<ValueTypeB>::get_one() * beta;
// Run one iteration of preconditioner with zero initial guess
if (no_preconditioner)
{
copy(m_V_vectors[0], m_Z_vector, offset, size);
}
else
{
m_V_vectors[0].delayed_send = 1;
m_Z_vector.delayed_send = 1;
m_preconditioner->solve( m_V_vectors[0], m_Z_vector, true );
m_V_vectors[0].delayed_send = 1;
m_Z_vector.delayed_send = 1;
}
A.apply(m_Z_vector, m_V_vectors[1]);
// Modified Gram-Schmidt
// H(k,i) = <V(i+1),V(k)> //
m_H(0, 0) = dot(A, m_V_vectors[1], m_V_vectors[0]);
// V(i+1) -= H(k, i) * V(k) //
axpy( m_V_vectors[0], m_V_vectors[1], types::util<ValueTypeB>::invert(m_H(0, 0)), offset, size );
m_H(1, 0) = types::util<ValueTypeB>::get_one() * get_norm(A, m_V_vectors[1], L2);
PlaneRotation( m_H, m_cs, m_sn, m_s, 0 );
if ( this->m_monitor_convergence )
{
this->m_nrm[0] = types::util<ValueTypeB>::abs( m_s[1] );
}
m_s[0] = m_s[0] / m_H(0, 0);
// Update the solution
axpy( m_Z_vector, x, m_s[0], offset, size );
this->m_A->setView(oldView);
return !this->m_monitor_convergence || this->converged();
}
template<class T_Config>
bool
GMRES_Solver<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
Operator<T_Config> &A = *this->m_A;
ViewType oldView = this->m_A->currentView();
this->m_A->setViewExterior();
int offset, size;
A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size);
if ( this->m_max_iters == 1 )
{
return solve_one_iteration( b, x );
}
bool done = false;
int i = this->m_curr_iter % m_R; //current iteration within restart
if (i == 0)
{
// compute initial residual
A.apply(x, m_V_vectors[0]); // V(0) = A*x
axpy( b, m_V_vectors[0], types::util<ValueTypeB>::get_minus_one(), offset, size ); // V(0) = V(0) - b
PodTypeB beta = get_norm(A, m_V_vectors[0], L2); // beta = norm(V(0))
if ( Base::m_monitor_convergence )
{
this->m_nrm[0] = beta;
if ( this->converged() )
{
return true;
}
}
Cublas::scal( size, PodTypeB(-1.0 / beta), m_V_vectors[0].raw() + offset, 1 ); // V(0) = -V(0)/beta //
cusp::blas::fill( m_s, types::util<ValueTypeB>::get_zero() );
m_s[0] = types::util<ValueTypeB>::get_one() * beta;
}
// Run one iteration of preconditioner with zero initial guess
if (no_preconditioner)
{
copy(m_V_vectors[i], m_Z_vector, offset, size);
}
else
{
m_V_vectors[i].delayed_send = 1;
m_Z_vector.delayed_send = 1;
m_preconditioner->solve( m_V_vectors[i], m_Z_vector, true );
m_V_vectors[i].delayed_send = 1;
m_Z_vector.delayed_send = 1;
}
A.apply(m_Z_vector, m_V_vectors[i + 1]);
// Modified Gram-Schmidt
for ( int k = 0; k <= i; ++k )
{
// H(k,i) = <V(i+1),V(k)> //
m_H(k, i) = dot(A, m_V_vectors[i + 1], m_V_vectors[k]);
// V(i+1) -= H(k, i) * V(k) //
axpy( m_V_vectors[k], m_V_vectors[i + 1], types::util<ValueTypeB>::invert(m_H(k, i)), offset, size );
}
m_H(i + 1, i) = types::util<ValueTypeB>::get_one() * get_norm(A, m_V_vectors[i + 1], L2);
scal( m_V_vectors[i + 1], types::util<ValueTypeB>::get_one() / m_H(i + 1, i), offset, size );
PlaneRotation( m_H, m_cs, m_sn, m_s, i );
// Check for convergence
// abs(s[i+1]) = L2 norm of residual
if ( Base::m_monitor_convergence )
{
this->m_nrm[0] = types::util<ValueTypeB>::abs( m_s[i + 1] );
done = this->converged();
}
// If reached restart limit or last iteration or if converged, compute x vector
if ( i == (m_R - 1) || this->is_last_iter() || done )
{
// Solve upper triangular system in place
for (int j = i; j >= 0; j--)
{
m_s[j] = m_s[j] / m_H(j, j);
//S(0:j) = s(0:j) - s[j] H(0:j,j)
for (int k = j - 1; k >= 0; k--)
{
m_s[k] = m_s[k] - (m_H(k, j) * m_s[j]);
}
}
        // Accumulate sum_j V_j * s_j into m_Z_vector
thrust::fill(m_Z_vector.begin(), m_Z_vector.end(), types::util<ValueTypeB>::get_zero());
cudaCheckError();
for (int j = 0; j <= i; j++)
{
axpy( m_V_vectors[j], m_Z_vector, m_s[j], offset, size );
}
        // Call the preconditioner to get M^-1*(sum_j V_j*s_j), store in m_V_vectors[0]
if (no_preconditioner)
{
copy( m_Z_vector, m_V_vectors[0], offset, size);
}
else
{
m_V_vectors[0].delayed_send = 1;
m_Z_vector.delayed_send = 1;
m_preconditioner->solve( m_Z_vector, m_V_vectors[0], true );
m_V_vectors[0].delayed_send = 1;
m_Z_vector.delayed_send = 1;
}
// Update the solution
// Add to x0
axpy( m_V_vectors[0], x, types::util<ValueTypeB>::get_one(), offset, size );
}
this->m_A->setView(oldView);
return !Base::m_monitor_convergence || done;
}
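// To summarize the update performed at a restart boundary above, in our own
// notation for standard right-preconditioned GMRES: after the Givens
// rotations, \bar{H}_m has been reduced to an upper-triangular R and the
// rotated right-hand side to g, so the small least-squares problem
//
//   y = \arg\min_y \, \| \beta e_1 - \bar{H}_m y \|_2
//
// is solved by the in-place back-substitution R y = g on m_s, and the iterate
// is advanced as x \leftarrow x + M^{-1} V_m y, where V_m collects the Arnoldi
// vectors and M^{-1} is the preconditioner applied to their combination.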
template<class T_Config>
void
GMRES_Solver<T_Config>::solve_finalize( VVector &b, VVector &x )
{}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class GMRES_Solver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
#include <cstdio>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
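// A minimal usage sketch of the two helpers above; the kernel and function
// names here are illustrative, not part of this file. CUDA_KERNEL_LOOP expands
// to a grid-stride loop, so the launch can cap the grid with GET_BLOCKS and
// still cover any n.
template <typename DType>
__global__ void scale_example_kernel(const int n, const DType alpha,
                                     DType *data) {
  CUDA_KERNEL_LOOP(i, n) {
    data[i] *= alpha;  // each thread strides over the index range [0, n)
  }
}

template <typename DType>
void scale_example(cudaStream_t stream, const int n, const DType alpha,
                   DType *data) {
  scale_example_kernel<DType>
      <<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, stream>>>(n, alpha, data);
}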
template <typename DType>
__device__ DType deformable_im2col_bilinear(const DType *bottom_data,
const int data_width,
const int height, const int width,
DType h, DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (DType)h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (DType)w_low;
} else {
w_high = w_low + 1;
}
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = bottom_data[h_low * data_width + w_low];
DType v2 = bottom_data[h_low * data_width + w_high];
DType v3 = bottom_data[h_high * data_width + w_low];
DType v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const int h, const int w, const int height,
const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
// empty
return 0;
}
argmax_h = max(argmax_h, (DType)0.0f);
argmax_w = max(argmax_w, (DType)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename DType>
__device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w,
const int height, const int width,
const DType *im_data,
const int data_width, const int bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
// empty
return 0;
}
if (argmax_h < 0)
argmax_h = 0;
if (argmax_w < 0)
argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_low * data_width + argmax_w_low];
weight += -1 * (argmax_w - argmax_w_low) *
im_data[argmax_h_low * data_width + argmax_w_high];
weight += (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_w - argmax_w_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_low];
weight += (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_high];
weight += -1 * (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
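// In LaTeX (our notation, restating the two branches above): writing
// l_h = argmax_h - argmax_h_low, l_w = argmax_w - argmax_w_low and v_1 .. v_4
// for the four neighbouring pixels, the bilinear sample is
// v = (1 - l_h)(1 - l_w) v_1 + (1 - l_h) l_w v_2 + l_h (1 - l_w) v_3 +
// l_h l_w v_4, and the returned coordinate gradients are
//
//   \frac{\partial v}{\partial h} = -(1 - l_w) v_1 - l_w v_2
//                                   + (1 - l_w) v_3 + l_w v_4 \quad (bp\_dir = 0),
//
//   \frac{\partial v}{\partial w} = -(1 - l_h) v_1 + (1 - l_h) v_2
//                                   - l_h v_3 + l_h v_4 \quad (bp\_dir = 1),
//
// which is what the deformable_col2im_coord kernel below accumulates into
// grad_offset.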
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(
const int n, const DType *data_im, const DType *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int height_col,
const int width_col, DType *data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType *data_col_ptr =
data_col + (c_col * height_col + h_col) * width_col + w_col;
const DType *data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const DType *data_offset_ptr = data_offset + deformable_group_index * 2 *
kernel_h * kernel_w *
height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const DType map_h = i * dilation_h + offset_h;
const DType map_w = j * dilation_w + offset_w;
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width, cur_height,
cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename DType>
void deformable_im2col(cudaStream_t stream, const DType *data_im,
const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, DType *data_col) {
  // We are going to launch channels * height_col * width_col threads, one per
  // (channel, output location) pair; each thread fills the corresponding
  // kernel_h * kernel_w entries of the column buffer.
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
int channel_per_deformable_group = channels / deformable_group;
// Launch
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0,
stream>>>(
num_kernels, data_im, data_offset, height, width, ksize_h, ksize_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, height_col, width_col, data_col);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
// TODO(BZ) panic
}
}
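// A minimal host-side sketch of driving deformable_im2col with raw device
// buffers; the sizes, names, and hyper-parameters below are our assumptions
// for illustration, not part of this file (in practice the pointers come from
// the framework's tensors).
void deformable_im2col_example(cudaStream_t stream, const float *d_im,
                               const float *d_offset, int channels, int height,
                               int width) {
  const int ksize = 3, pad = 1, stride = 1, dilation = 1, deformable_group = 1;
  // Output spatial size, matching the formula used inside deformable_im2col.
  const int height_col =
      (height + 2 * pad - (dilation * (ksize - 1) + 1)) / stride + 1;
  const int width_col =
      (width + 2 * pad - (dilation * (ksize - 1) + 1)) / stride + 1;
  // Column buffer: one row per (channel, kernel position), one column per
  // output location. d_offset is assumed to already hold
  // deformable_group * 2 * ksize * ksize * height_col * width_col offsets.
  const size_t col_elems =
      static_cast<size_t>(channels) * ksize * ksize * height_col * width_col;
  float *d_col = nullptr;
  cudaMalloc(&d_col, col_elems * sizeof(float));
  deformable_im2col(stream, d_im, d_offset, channels, height, width, ksize,
                    ksize, pad, pad, stride, stride, dilation, dilation,
                    deformable_group, d_col);
  // The column buffer would normally feed a GEMM with the convolution weights;
  // it is freed here only because this is a size/usage illustration.
  cudaFree(d_col);
}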
template void deformable_im2col<float>(
cudaStream_t stream, const float *data_im, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int deformable_group, float *data_col);
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(
const int n, const DType *data_col, const DType *data_offset,
const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int height_col,
const int width_col, DType *grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col) % kernel_w;
const int i = (index / width_col / height_col / kernel_w) % kernel_h;
const int c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType *data_offset_ptr = data_offset + deformable_group_index * 2 *
kernel_h * kernel_w *
height_col * width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
(c * height + cur_h + dy) * width + cur_w + dx;
DType weight =
get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy,
cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename DType>
void deformable_col2im(cudaStream_t stream, const DType *data_col,
const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, DType *grad_im) {
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col;
int channel_per_deformable_group = channels / deformable_group;
  // Each thread handles one column-buffer entry (num_kernels =
  // channels * ksize_h * ksize_w * height_col * width_col) and scatters its
  // contribution into grad_im with atomicAdd inside the kernel.
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0,
stream>>>(
num_kernels, data_col, data_offset, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, height_col, width_col, grad_im);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
// TODO(BZ) panic
}
}
template void deformable_col2im<float>(
cudaStream_t stream, const float *data_col, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int deformable_group, float *grad_im);
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(
const int n, const DType *data_col, const DType *data_im,
const DType *data_offset, const int channels, const int height,
const int width, const int kernel_h, const int kernel_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int height_col,
const int width_col, DType *grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
DType val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = index / width_col / height_col;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr = data_col + deformable_group_index *
channel_per_deformable_group *
width_col * height_col;
const DType *data_im_ptr =
data_im + deformable_group_index * channel_per_deformable_group /
kernel_h / kernel_w * height * width;
const DType *data_offset_ptr = data_offset + deformable_group_index * 2 *
kernel_h * kernel_w *
height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos = ((col_c * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col) % kernel_w;
int i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const DType weight = get_coordinate_weight(
inv_h, inv_w, height, width, data_im_ptr + cnt * height * width,
width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
template <typename DType>
void deformable_col2im_coord(cudaStream_t stream, const DType *data_col,
const DType *data_im, const DType *data_offset,
const int channels, const int height,
const int width, const int ksize_h,
const int ksize_w, const int pad_h,
const int pad_w, const int stride_h,
const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group,
DType *grad_offset) {
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels =
height_col * width_col * 2 * ksize_h * ksize_w * deformable_group;
int channel_per_deformable_group =
channels * ksize_h * ksize_w / deformable_group;
  // One thread per offset element (num_kernels =
  // height_col * width_col * 2 * ksize_h * ksize_w * deformable_group); each
  // thread sums its contributions over the column channels it influences and
  // writes grad_offset[index] directly, so no atomics are needed.
deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_col, data_im, data_offset, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
dilation_w, channel_per_deformable_group, height_col, width_col,
grad_offset);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
// TODO(BZ) panic
}
}
template void
deformable_col2im_coord(cudaStream_t stream, const float *data_col,
const float *data_im, const float *data_offset,
const int channels, const int height, const int width,
const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float *grad_offset);
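// Illustrative host-side sketch (not part of the original source): how a caller
// might size the column buffer before invoking deformable_im2col. The helper
// names conv_out_size and col_buffer_elems are hypothetical; the formula simply
// mirrors the height_col/width_col computation used in the launchers above.
inline int conv_out_size(int in, int pad, int dilation, int ksize, int stride) {
  return (in + 2 * pad - (dilation * (ksize - 1) + 1)) / stride + 1;
}
inline long long col_buffer_elems(int channels, int height, int width,
                                  int ksize_h, int ksize_w, int pad_h, int pad_w,
                                  int stride_h, int stride_w,
                                  int dilation_h, int dilation_w) {
  int height_col = conv_out_size(height, pad_h, dilation_h, ksize_h, stride_h);
  int width_col = conv_out_size(width, pad_w, dilation_w, ksize_w, stride_w);
  // data_col holds one value per (input channel, kernel tap, output pixel).
  return (long long)channels * ksize_h * ksize_w * (long long)height_col * width_col;
}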
|
the_stack
|
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <sstream>
#include <string>
#include <utility>
#include <omp.h>
#ifdef HAVE_CUB
#include <cub/block/block_reduce.cuh>
#endif // HAVE_CUB
#ifdef USE_NVTX
#include <nvToolsExt.h>
const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff,
0x0000ffff, 0x00ff0000, 0x00ffffff};
const int num_colors = sizeof(colors) / sizeof(uint32_t);
#define PUSH_RANGE(name, cid) \
{ \
int color_id = cid; \
color_id = color_id % num_colors; \
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name, cid)
#define POP_RANGE
#endif
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \
"with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \
}
typedef float real;
constexpr real tol = 1.0e-8;
const real PI = 2.0 * std::asin(1.0);
__global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a,
const real pi, const int nx, const int ny) {
for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < ny; iy += blockDim.x * gridDim.x) {
const real y0 = sin(2.0 * pi * iy / (ny - 1));
a[iy * nx + 0] = y0;
a[iy * nx + (nx - 1)] = y0;
a_new[iy * nx + 0] = y0;
a_new[iy * nx + (nx - 1)] = y0;
}
}
template <int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a,
real* __restrict__ const l2_norm, const int iy_start,
const int iy_end, const int nx) {
#ifdef HAVE_CUB
typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
#endif // HAVE_CUB
const int iy = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
real local_l2_norm = 0.0;
if (iy < iy_end) {
if (ix >= 1 && ix < (nx - 1)) {
const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] +
a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]);
a_new[iy * nx + ix] = new_val;
// apply boundary conditions
if (iy_start == iy) {
a_new[iy_end * nx + ix] = new_val;
}
if ((iy_end - 1) == iy) {
a_new[(iy_start - 1) * nx + ix] = new_val;
}
real residue = new_val - a[iy * nx + ix];
local_l2_norm = residue * residue;
}
}
#ifdef HAVE_CUB
real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm);
#else
atomicAdd(l2_norm, local_l2_norm);
#endif // HAVE_CUB
}
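// Added commentary: the kernel above performs one Jacobi sweep of the 2D Laplace
// stencil, a_new[iy][ix] = 0.25 * (a[iy][ix-1] + a[iy][ix+1] + a[iy-1][ix] + a[iy+1][ix]),
// accumulates the squared residue per thread and (when CUB is available) reduces
// it per block so that only one atomicAdd per block touches the global l2_norm.
// The iy_start/iy_end branches copy the first and last interior rows into the
// opposite boundary rows, giving a periodic boundary in y.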
double noopt(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck,
const bool print);
template <typename T>
T get_argval(char** begin, char** end, const std::string& arg, const T default_val) {
T argval = default_val;
char** itr = std::find(begin, end, arg);
if (itr != end && ++itr != end) {
std::istringstream inbuf(*itr);
inbuf >> argval;
}
return argval;
}
bool get_arg(char** begin, char** end, const std::string& arg) {
char** itr = std::find(begin, end, arg);
if (itr != end) {
return true;
}
return false;
}
struct l2_norm_buf {
cudaEvent_t copy_done;
real* d;
real* h;
};
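// Added commentary: the norm computation below is double buffered. Two
// l2_norm_buf instances alternate between iterations (the curr/prev indices in
// the main loop): while the compute stream works on the current iteration, the
// previous iteration's device scalar is copied to pinned host memory on
// copy_l2_norm_stream and later reset on reset_l2_norm_stream, with CUDA events
// ordering the three streams.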
int main(int argc, char* argv[]) {
const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000);
const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1);
const int nx = get_argval<int>(argv, argv + argc, "-nx", 16384);
const int ny = get_argval<int>(argv, argv + argc, "-ny", 16384);
const bool csv = get_arg(argv, argv + argc, "-csv");
if (nccheck != 1) {
fprintf(stderr, "Only nccheck = 1 is supported\n");
return -1;
}
real* a;
real* a_new;
cudaStream_t compute_stream;
cudaStream_t copy_l2_norm_stream;
cudaStream_t reset_l2_norm_stream;
cudaEvent_t compute_done;
cudaEvent_t reset_l2_norm_done[2];
real l2_norms[2];
l2_norm_buf l2_norm_bufs[2];
int iy_start = 1;
int iy_end = (ny - 1);
CUDA_RT_CALL(cudaSetDevice(0));
CUDA_RT_CALL(cudaFree(0));
CUDA_RT_CALL(cudaMalloc(&a, nx * ny * sizeof(real)));
CUDA_RT_CALL(cudaMalloc(&a_new, nx * ny * sizeof(real)));
CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(real)));
CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(real)));
    // Set Dirichlet boundary conditions on the left and right border
initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, nx, ny);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaDeviceSynchronize());
CUDA_RT_CALL(cudaStreamCreate(&compute_stream));
    CUDA_RT_CALL(cudaStreamCreate(&copy_l2_norm_stream));
CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream));
CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done, cudaEventDisableTiming));
CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[0], cudaEventDisableTiming));
CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[1], cudaEventDisableTiming));
for (int i = 0; i < 2; ++i) {
CUDA_RT_CALL(cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming));
CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real)));
CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real)));
CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real)));
(*l2_norm_bufs[i].h) = 1.0;
}
CUDA_RT_CALL(cudaDeviceSynchronize());
if (!csv)
printf(
"Jacobi relaxation: %d iterations on %d x %d mesh with norm check "
"every %d iterations\n",
iter_max, ny, nx, nccheck);
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 32;
dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + dim_block_y - 1) / dim_block_y, 1);
int iter = 0;
for (int i = 0; i < 2; ++i) {
l2_norms[i] = 0.0;
}
double start = omp_get_wtime();
PUSH_RANGE("Jacobi solve", 0)
bool l2_norm_greater_than_tol = true;
while (l2_norm_greater_than_tol && iter < iter_max) {
// on new iteration: old current vars are now previous vars, old
// previous vars are no longer needed
int prev = iter % 2;
int curr = (iter + 1) % 2;
// wait for memset from old previous iteration to complete
CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0));
jacobi_kernel<dim_block_x, dim_block_y>
<<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>(
a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaEventRecord(compute_done, compute_stream));
// perform L2 norm calculation
if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) {
CUDA_RT_CALL(cudaStreamWaitEvent(copy_l2_norm_stream, compute_done, 0));
CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real),
cudaMemcpyDeviceToHost, copy_l2_norm_stream));
CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, copy_l2_norm_stream));
// make sure D2H copy is complete before using the data for
// calculation
CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done));
l2_norms[prev] = *(l2_norm_bufs[prev].h);
l2_norms[prev] = std::sqrt(l2_norms[prev]);
l2_norm_greater_than_tol = (l2_norms[prev] > tol);
if (!csv && (iter % 100) == 0) {
printf("%5d, %0.6f\n", iter, l2_norms[prev]);
}
// reset everything for next iteration
l2_norms[prev] = 0.0;
*(l2_norm_bufs[prev].h) = 0.0;
CUDA_RT_CALL(
cudaMemsetAsync(l2_norm_bufs[prev].d, 0, sizeof(real), reset_l2_norm_stream));
CUDA_RT_CALL(cudaEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream));
}
std::swap(a_new, a);
iter++;
}
CUDA_RT_CALL(cudaDeviceSynchronize());
POP_RANGE
double stop = omp_get_wtime();
if (csv) {
printf("single_gpu, %d, %d, %d, %d, %f\n", nx, ny, iter_max, nccheck, (stop - start));
} else {
printf("%dx%d: 1 GPU: %8.4f s\n", ny, nx, (stop - start));
}
for (int i = 0; i < 2; ++i) {
CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h));
CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d));
CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done));
}
CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1]));
CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0]));
CUDA_RT_CALL(cudaEventDestroy(compute_done));
CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream));
CUDA_RT_CALL(cudaStreamDestroy(copy_l2_norm_stream));
CUDA_RT_CALL(cudaStreamDestroy(compute_stream));
CUDA_RT_CALL(cudaFree(a_new));
CUDA_RT_CALL(cudaFree(a));
return 0;
}
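// A plausible build line for this single-GPU sample (an assumption, not taken
// from the source tree; the file name jacobi.cu is hypothetical):
//   nvcc -O3 -Xcompiler -fopenmp -DHAVE_CUB -o jacobi jacobi.cu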
|
the_stack
|
namespace amgx
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename T >
__global__ void split_l_and_u(int n, const T *lu, int lda, T *l, T *u )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= n || j >= n)
{
return;
}
T l_ij = i == j ? T(1) : T(0), u_ij = T(0);
T lu_ij = lu[i * lda + j];
if (i <= j)
{
u_ij = lu_ij;
}
else
{
l_ij = lu_ij;
}
l[i * lda + j] = l_ij;
u[i * lda + j] = u_ij;
}
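// Added commentary: the kernel above splits a packed LU factor stored as
// lu[i * lda + j]. Entries with i <= j go to U (diagonal included), the remaining
// entries go to L, and L is given a unit diagonal, so multiplying L by U should
// reproduce the original matrix; l_times_u() below checks exactly that.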
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN(DenseLUSolverTest_Base);
std::string base_keywords()
{
return "dense_lu";
}
template< typename Matrix >
void make_identity(Matrix &A)
{
typedef typename Matrix::TConfig Matrix_config;
typedef typename Matrix_config::template setMemSpace<AMGX_host>::Type Config_h;
typedef typename Config_h::template setVecPrec<AMGX_vecInt>::Type IVector_config_h;
typedef Vector<Config_h> FVector_h;
typedef Vector<IVector_config_h> IVector_h;
const int num_rows = A.get_num_rows();
IVector_h row_offsets(num_rows + 1), col_indices(num_rows);
for ( int i = 0 ; i < num_rows ; ++i )
{
row_offsets[i] = i;
col_indices[i] = i;
}
row_offsets.back() = num_rows;
FVector_h values(num_rows, 1.0);
A.row_offsets.copy(row_offsets);
A.col_indices.copy(col_indices);
    A.values.copy(values);
}
template< typename Matrix_h, typename Matrix_data >
void csr_to_dense(const Matrix_h &A_h, Matrix_data *dense_A_h, int lda)
{
for ( int i = 0 ; i < A_h.get_num_rows() ; ++i )
for ( int j = A_h.row_offsets[i] ; j < A_h.row_offsets[i + 1] ; ++j )
{
dense_A_h[i * lda + A_h.col_indices[j]] = A_h.values[j];
}
}
template< typename Matrix_data >
void l_times_u(int n, Matrix_data *lu_d, int lda)
{
Matrix_data *l_d, *u_d;
cudaMalloc((void **) &l_d, n * lda * sizeof(Matrix_data));
UNITTEST_ASSERT_EQUAL(cudaGetLastError(), cudaSuccess);
cudaMalloc((void **) &u_d, n * lda * sizeof(Matrix_data));
UNITTEST_ASSERT_EQUAL(cudaGetLastError(), cudaSuccess);
// Split LU.
dim3 block_dim(16, 16);
dim3 grid_dim((n + block_dim.x - 1) / block_dim.x, (n + block_dim.y - 1) / block_dim.y);
split_l_and_u <<< grid_dim, block_dim>>>(n, lu_d, lda, l_d, u_d);
cudaError_t status = cudaDeviceSynchronize();
UNITTEST_ASSERT_EQUAL(status, cudaSuccess);
// LxU = LU.
Matrix_data one(1), zero(0);
cublasHandle_t cublas_handle;
cublasStatus_t cublas_status;
cublas_status = cublasCreate(&cublas_handle);
UNITTEST_ASSERT_EQUAL(cublas_status, CUBLAS_STATUS_SUCCESS);
cublas_status = cublasGemm(cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n, n, n,
&one, l_d, lda,
u_d, lda,
&zero, lu_d, lda);
UNITTEST_ASSERT_EQUAL(cublas_status, CUBLAS_STATUS_SUCCESS);
cublas_status = cublasDestroy(cublas_handle);
UNITTEST_ASSERT_EQUAL(cublas_status, CUBLAS_STATUS_SUCCESS);
cudaFree(l_d);
UNITTEST_ASSERT_EQUAL(cudaGetLastError(), cudaSuccess);
cudaFree(u_d);
UNITTEST_ASSERT_EQUAL(cudaGetLastError(), cudaSuccess);
}
template< typename Matrix, typename Matrix_data >
void check_lu_product(const Matrix &A_h, int n, Matrix_data *lu_d, int lda)
{
// Compute LxU.
l_times_u(n, lu_d, lda);
// Copy LxU to the host.
Matrix_data *lu_h = new Matrix_data[n * lda];
cudaMemcpy(lu_h, lu_d, n * lda * sizeof(Matrix_data), cudaMemcpyDeviceToHost);
UNITTEST_ASSERT_EQUAL(cudaGetLastError(), cudaSuccess);
// Make sure LxU equals A.
Matrix_data *dense_A_h = new Matrix_data[n * lda];
std::memset(dense_A_h, 0x0, n * lda * sizeof(Matrix_data));
csr_to_dense(A_h, dense_A_h, lda);
// Compare the matrices.
for ( int i = 0 ; i < n ; ++i )
for ( int j = 0 ; j < n ; ++j )
{
UNITTEST_ASSERT_EQUAL(lu_h[i * lda + j], dense_A_h[i * lda + j]);
}
delete[] dense_A_h;
delete[] lu_h;
}
DECLARE_UNITTEST_END(DenseLUSolverTest_Base);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(DenseLUSolverTest_Factorization_Id_32, DenseLUSolverTest_Base<T_Config>);
void run()
{
// Make sure the solver produces A=LU where A is the identity matrix.
typedef typename T_Config::template setMemSpace<AMGX_host>::Type Config_h;
typedef Vector<Config_h> FVector_h;
typedef typename TConfig::MatPrec Matrix_data;
const int N = 32;
Matrix<T_Config> A(N, N, N, CSR);
A.set_initialized(0);
this->make_identity(A);
A.set_initialized(1);
AMG_Config cfg;
dense_lu_solver::DenseLUSolver<T_Config> solver(cfg, "", NULL);
solver.setup(A, false);
FVector_h m(N * N);
cudaMemcpy(m.raw(), solver.get_dense_A(), N * N * sizeof(Matrix_data), cudaMemcpyDeviceToHost);
UNITTEST_ASSERT_EQUAL(cudaGetLastError(), cudaSuccess);
for ( int i = 0 ; i < N ; ++i )
for ( int j = 0 ; j < N ; ++j )
{
UNITTEST_ASSERT_EQUAL(m[i * solver.get_lda() + j], i == j ? Matrix_data(1) : Matrix_data(0));
}
}
DECLARE_UNITTEST_END(DenseLUSolverTest_Factorization_Id_32)
DenseLUSolverTest_Factorization_Id_32<TemplateMode<AMGX_mode_dDDI>::Type> DenseLUSolverTest_Factorization_Id_32_dDDI;
DenseLUSolverTest_Factorization_Id_32<TemplateMode<AMGX_mode_dFFI>::Type> DenseLUSolverTest_Factorization_Id_32_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(DenseLUSolverTest_Factorization_Id_256, DenseLUSolverTest_Base<T_Config>);
void run()
{
// Make sure the solver produces A=LU where A is the identity matrix.
typedef typename T_Config::template setMemSpace<AMGX_host>::Type Config_h;
typedef Vector<Config_h> FVector_h;
typedef typename TConfig::MatPrec Matrix_data;
const int N = 256;
Matrix<T_Config> A(N, N, N, CSR);
A.set_initialized(0);
this->make_identity(A);
A.set_initialized(1);
AMG_Config cfg;
dense_lu_solver::DenseLUSolver<T_Config> solver(cfg, "", NULL);
solver.setup(A, false);
FVector_h m(N * N);
cudaMemcpy(m.raw(), solver.get_dense_A(), N * N * sizeof(Matrix_data), cudaMemcpyDeviceToHost);
UNITTEST_ASSERT_EQUAL(cudaGetLastError(), cudaSuccess);
for ( int i = 0 ; i < N ; ++i )
for ( int j = 0 ; j < N ; ++j )
{
UNITTEST_ASSERT_EQUAL(m[i * solver.get_lda() + j], i == j ? Matrix_data(1) : Matrix_data(0));
}
}
DECLARE_UNITTEST_END(DenseLUSolverTest_Factorization_Id_256)
DenseLUSolverTest_Factorization_Id_256<TemplateMode<AMGX_mode_dDDI>::Type> DenseLUSolverTest_Factorization_Id_256_dDDI;
DenseLUSolverTest_Factorization_Id_256<TemplateMode<AMGX_mode_dFFI>::Type> DenseLUSolverTest_Factorization_Id_256_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(DenseLUSolverTest_Solve_Poisson3D, DenseLUSolverTest_Base<T_Config>);
void run()
{
typedef typename T_Config::MatPrec Matrix_data;
typedef typename T_Config::template setMemSpace<AMGX_device>::Type Config_d;
typedef typename T_Config::template setMemSpace<AMGX_host> ::Type Config_h;
typedef Matrix<Config_d> Matrix_d;
typedef Vector<Config_d> Vector_d;
typedef Matrix<Config_h> Matrix_h;
typedef Vector<Config_h> Vector_h;
Matrix_h A_h;
Vector_h x_h, b_h;
A_h.set_initialized(0);
generatePoissonForTest(A_h, 1, 0, 27, 16, 16, 16);
A_h.set_initialized(1);
AMG_Config cfg;
cfg.parseParameterString("monitor_residual=1");
Matrix_d A_d(A_h);
dense_lu_solver::DenseLUSolver<T_Config> solver(cfg, "default", NULL);
solver.setup(A_d, false);
const int n = A_h.get_num_rows();
Vector_d b_d(n), x_d(n), r_d(n);
thrust::fill(b_d.begin(), b_d.end(), Matrix_data(1));
solver.solve(b_d, x_d, false);
solver.compute_residual(b_d, x_d, r_d);
Vector_h resid_nrm(1);
solver.compute_norm(r_d, resid_nrm);
Vector_h rhs_nrm(1);
solver.compute_norm(b_d, rhs_nrm);
double relError = resid_nrm[0] / rhs_nrm[0];
if ( T_Config::matPrec == AMGX_matDouble )
{
UNITTEST_ASSERT_EQUAL_TOL(resid_nrm[0], 0.0, 1.0e-12);
}
else
{
UNITTEST_ASSERT_EQUAL_TOL(resid_nrm[0], 0.0f, 1.0e-6f);
}
}
DECLARE_UNITTEST_END(DenseLUSolverTest_Solve_Poisson3D)
DenseLUSolverTest_Solve_Poisson3D<TemplateMode<AMGX_mode_dDDI>::Type> DenseLUSolverTest_Solve_Poisson3D_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(DenseLUSolverTest_Solve_Id_32, DenseLUSolverTest_Base<T_Config>);
void run()
{
typedef typename T_Config::template setMemSpace<AMGX_host>::Type Config_h;
typedef Vector<Config_h> FVector_h;
typedef typename TConfig::MatPrec Matrix_data;
const int N = 32;
Matrix<T_Config> A(N, N, N, CSR);
A.set_initialized(0);
this->make_identity(A);
A.set_initialized(1);
AMG_Config cfg;
dense_lu_solver::DenseLUSolver<T_Config> solver(cfg, "", NULL);
solver.setup(A, false);
FVector_h b_h(N);
for ( int i = 0 ; i < N ; ++i )
{
b_h[i] = Matrix_data(rand()) / RAND_MAX;
}
Vector<T_Config> b(b_h), x(N), r(N);
solver.solve(b, x, false);
UNITTEST_ASSERT_EQUAL(x, b);
}
DECLARE_UNITTEST_END(DenseLUSolverTest_Solve_Id_32)
DenseLUSolverTest_Solve_Id_32<TemplateMode<AMGX_mode_dDDI>::Type> DenseLUSolverTest_Solve_Id_32_dDDI;
DenseLUSolverTest_Solve_Id_32<TemplateMode<AMGX_mode_dFFI>::Type> DenseLUSolverTest_Solve_Id_32_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(DenseLUSolverTest_Solve_Id_256, DenseLUSolverTest_Base<T_Config>);
void run()
{
typedef typename T_Config::template setMemSpace<AMGX_host>::Type Config_h;
typedef Vector<Config_h> FVector_h;
typedef typename TConfig::MatPrec Matrix_data;
const int N = 256;
Matrix<T_Config> A(N, N, N, CSR);
A.set_initialized(0);
this->make_identity(A);
A.set_initialized(1);
AMG_Config cfg;
dense_lu_solver::DenseLUSolver<T_Config> solver(cfg, "", NULL);
solver.setup(A, false);
FVector_h b_h(N);
for ( int i = 0 ; i < N ; ++i )
{
b_h[i] = Matrix_data(rand()) / RAND_MAX;
}
Vector<T_Config> b(b_h), x(N);
solver.solve(b, x, false);
UNITTEST_ASSERT_EQUAL(x, b);
}
DECLARE_UNITTEST_END(DenseLUSolverTest_Solve_Id_256)
DenseLUSolverTest_Solve_Id_256<TemplateMode<AMGX_mode_dDDI>::Type> DenseLUSolverTest_Solve_Id_256_dDDI;
DenseLUSolverTest_Solve_Id_256<TemplateMode<AMGX_mode_dFFI>::Type> DenseLUSolverTest_Solve_Id_256_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace amgx
|
the_stack
|
#include "antialias.h"
//------------------------------------------------------------------------
// Helpers.
#define F32_MAX (3.402823466e+38f)
static __forceinline__ __device__ bool same_sign(float a, float b) { return (__float_as_int(a) ^ __float_as_int(b)) >= 0; }
static __forceinline__ __device__ bool rational_gt(float n0, float n1, float d0, float d1) { return (n0*d1 > n1*d0) == same_sign(d0, d1); }
static __forceinline__ __device__ int max_idx3(float n0, float n1, float n2, float d0, float d1, float d2)
{
bool g10 = rational_gt(n1, n0, d1, d0);
bool g20 = rational_gt(n2, n0, d2, d0);
bool g21 = rational_gt(n2, n1, d2, d1);
if (g20 && g21) return 2;
if (g10) return 1;
return 0;
}
//------------------------------------------------------------------------
// Format of antialiasing work items stored in work buffer. Usually accessed directly as int4.
struct AAWorkItem
{
enum
{
EDGE_MASK = 3, // Edge index in lowest bits.
FLAG_DOWN_BIT = 2, // Down instead of right.
FLAG_TRI1_BIT = 3, // Edge is from other pixel's triangle.
};
int px, py; // Pixel x, y.
unsigned int pz_flags; // High 16 bits = pixel z, low 16 bits = edge index and flags.
float alpha; // Antialiasing alpha value. Zero if no AA.
};
//------------------------------------------------------------------------
// Hash functions. Adapted from public-domain code at http://www.burtleburtle.net/bob/hash/doobs.html
#define JENKINS_MAGIC (0x9e3779b9u)
static __device__ __forceinline__ void jenkins_mix(unsigned int& a, unsigned int& b, unsigned int& c)
{
a -= b; a -= c; a ^= (c>>13);
b -= c; b -= a; b ^= (a<<8);
c -= a; c -= b; c ^= (b>>13);
a -= b; a -= c; a ^= (c>>12);
b -= c; b -= a; b ^= (a<<16);
c -= a; c -= b; c ^= (b>>5);
a -= b; a -= c; a ^= (c>>3);
b -= c; b -= a; b ^= (a<<10);
c -= a; c -= b; c ^= (b>>15);
}
// Helper class for hash index iteration. Implements simple odd-skip linear probing with a key-dependent skip.
class HashIndex
{
public:
__device__ __forceinline__ HashIndex(const AntialiasKernelParams& p, uint64_t key)
{
m_mask = p.allocTriangles * AA_HASH_ELEMENTS_PER_TRIANGLE - 1;
m_idx = (uint32_t)(key & 0xffffffffu);
m_skip = (uint32_t)(key >> 32);
uint32_t dummy = JENKINS_MAGIC;
jenkins_mix(m_idx, m_skip, dummy);
m_idx &= m_mask;
m_skip &= m_mask;
m_skip |= 1;
}
__device__ __forceinline__ int get(void) const { return m_idx; }
__device__ __forceinline__ void next(void) { m_idx = (m_idx + m_skip) & m_mask; }
private:
uint32_t m_idx, m_skip, m_mask;
};
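// Added reasoning note: the table size is assumed to be a power of two (the
// "& m_mask" arithmetic relies on it), so forcing the skip to be odd makes it
// coprime with the table size and the probe sequence idx, idx+skip, idx+2*skip, ...
// visits every slot before repeating.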
static __device__ __forceinline__ void hash_insert(const AntialiasKernelParams& p, uint64_t key, int v)
{
HashIndex idx(p, key);
while(1)
{
uint64_t prev = atomicCAS((unsigned long long*)&p.evHash[idx.get()], 0, (unsigned long long)key);
if (prev == 0 || prev == key)
break;
idx.next();
}
int* q = (int*)&p.evHash[idx.get()];
int a = atomicCAS(q+2, 0, v);
if (a != 0 && a != v)
atomicCAS(q+3, 0, v);
}
static __device__ __forceinline__ int2 hash_find(const AntialiasKernelParams& p, uint64_t key)
{
HashIndex idx(p, key);
while(1)
{
uint4 entry = p.evHash[idx.get()];
uint64_t k = ((uint64_t)entry.x) | (((uint64_t)entry.y) << 32);
if (k == key || k == 0)
return make_int2((int)entry.z, (int)entry.w);
idx.next();
}
}
static __device__ __forceinline__ void evhash_insert_vertex(const AntialiasKernelParams& p, int va, int vb, int vn)
{
if (va == vb)
return;
uint64_t v0 = (uint32_t)min(va, vb) + 1; // canonical vertex order
uint64_t v1 = (uint32_t)max(va, vb) + 1;
uint64_t vk = v0 | (v1 << 32); // hash key
hash_insert(p, vk, vn + 1);
}
static __forceinline__ __device__ int evhash_find_vertex(const AntialiasKernelParams& p, int va, int vb, int vr)
{
if (va == vb)
return -1;
uint64_t v0 = (uint32_t)min(va, vb) + 1; // canonical vertex order
uint64_t v1 = (uint32_t)max(va, vb) + 1;
uint64_t vk = v0 | (v1 << 32); // hash key
int2 vn = hash_find(p, vk) - 1;
if (vn.x == vr) return vn.y;
if (vn.y == vr) return vn.x;
return -1;
}
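// Added commentary on the edge hash above: an undirected edge (va, vb) is keyed
// by packing the smaller 1-based vertex index into the low 32 bits and the larger
// one into the high 32 bits. Each hash entry stores up to two "opposite" vertices
// (entry.z, entry.w); evhash_find_vertex(va, vb, vr) returns the opposite vertex
// of the neighboring triangle across edge (va, vb) that is not vr, or -1 if no
// such vertex exists (i.e. the edge is open and therefore a silhouette candidate).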
//------------------------------------------------------------------------
// Mesh analysis kernel.
__global__ void AntialiasFwdMeshKernel(const AntialiasKernelParams p)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= p.numTriangles)
return;
int v0 = p.tri[idx * 3 + 0];
int v1 = p.tri[idx * 3 + 1];
int v2 = p.tri[idx * 3 + 2];
if (v0 < 0 || v0 >= p.numVertices ||
v1 < 0 || v1 >= p.numVertices ||
v2 < 0 || v2 >= p.numVertices)
return;
if (v0 == v1 || v1 == v2 || v2 == v0)
return;
evhash_insert_vertex(p, v1, v2, v0);
evhash_insert_vertex(p, v2, v0, v1);
evhash_insert_vertex(p, v0, v1, v2);
}
//------------------------------------------------------------------------
// Discontinuity finder kernel.
__global__ void AntialiasFwdDiscontinuityKernel(const AntialiasKernelParams p)
{
// Calculate pixel position.
int px = blockIdx.x * AA_DISCONTINUITY_KERNEL_BLOCK_WIDTH + threadIdx.x;
int py = blockIdx.y * AA_DISCONTINUITY_KERNEL_BLOCK_HEIGHT + threadIdx.y;
int pz = blockIdx.z;
if (px >= p.width || py >= p.height || pz >= p.n)
return;
// Pointer to our TriIdx and fetch.
int pidx0 = ((px + p.width * (py + p.height * pz)) << 2) + 3;
float tri0 = p.rasterOut[pidx0];
// Look right, clamp at edge.
int pidx1 = pidx0;
if (px < p.width - 1)
pidx1 += 4;
float tri1 = p.rasterOut[pidx1];
// Look down, clamp at edge.
int pidx2 = pidx0;
if (py < p.height - 1)
pidx2 += p.width << 2;
float tri2 = p.rasterOut[pidx2];
// Determine amount of work.
int count = 0;
if (tri1 != tri0) count = 1;
if (tri2 != tri0) count += 1;
if (!count)
return; // Exit warp.
// Coalesce work counter update to once per CTA.
__shared__ int s_temp;
s_temp = 0;
__syncthreads();
int idx = atomicAdd(&s_temp, count);
__syncthreads();
if (idx == 0)
{
int base = atomicAdd(&p.workBuffer[0].x, s_temp);
s_temp = base + 1; // don't clobber the counters in first slot.
}
__syncthreads();
idx += s_temp;
// Write to memory.
if (tri1 != tri0) p.workBuffer[idx++] = make_int4(px, py, (pz << 16), 0);
if (tri2 != tri0) p.workBuffer[idx] = make_int4(px, py, (pz << 16) + (1 << AAWorkItem::FLAG_DOWN_BIT), 0);
}
//------------------------------------------------------------------------
// Forward analysis kernel.
__global__ void AntialiasFwdAnalysisKernel(const AntialiasKernelParams p)
{
__shared__ int s_base;
int workCount = p.workBuffer[0].x;
for(;;)
{
// Persistent threads work fetcher.
__syncthreads();
if (threadIdx.x == 0)
s_base = atomicAdd(&p.workBuffer[0].y, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK);
__syncthreads();
int thread_idx = s_base + threadIdx.x;
if (thread_idx >= workCount)
return;
int4* pItem = p.workBuffer + thread_idx + 1;
int4 item = *pItem;
int px = item.x;
int py = item.y;
int pz = (int)(((unsigned int)item.z) >> 16);
int d = (item.z >> AAWorkItem::FLAG_DOWN_BIT) & 1;
int pixel0 = px + p.width * (py + p.height * pz);
int pixel1 = pixel0 + (d ? p.width : 1);
float2 zt0 = ((float2*)p.rasterOut)[(pixel0 << 1) + 1];
float2 zt1 = ((float2*)p.rasterOut)[(pixel1 << 1) + 1];
int tri0 = (int)zt0.y - 1;
int tri1 = (int)zt1.y - 1;
// Select triangle based on background / depth.
int tri = (tri0 >= 0) ? tri0 : tri1;
if (tri0 >= 0 && tri1 >= 0)
tri = (zt0.x < zt1.x) ? tri0 : tri1;
if (tri == tri1)
{
        // Calculate with respect to the neighbor pixel if that triangle was chosen.
px += 1 - d;
py += d;
}
// Bail out if triangle index is corrupt.
if (tri < 0 || tri >= p.numTriangles)
continue;
// Fetch vertex indices.
int vi0 = p.tri[tri * 3 + 0];
int vi1 = p.tri[tri * 3 + 1];
int vi2 = p.tri[tri * 3 + 2];
// Bail out if vertex indices are corrupt.
if (vi0 < 0 || vi0 >= p.numVertices ||
vi1 < 0 || vi1 >= p.numVertices ||
vi2 < 0 || vi2 >= p.numVertices)
continue;
// Fetch opposite vertex indices. Use vertex itself (always silhouette) if no opposite vertex exists.
int op0 = evhash_find_vertex(p, vi2, vi1, vi0);
int op1 = evhash_find_vertex(p, vi0, vi2, vi1);
int op2 = evhash_find_vertex(p, vi1, vi0, vi2);
// Instance mode: Adjust vertex indices based on minibatch index.
if (p.instance_mode)
{
int vbase = pz * p.numVertices;
vi0 += vbase;
vi1 += vbase;
vi2 += vbase;
if (op0 >= 0) op0 += vbase;
if (op1 >= 0) op1 += vbase;
if (op2 >= 0) op2 += vbase;
}
// Fetch vertex positions.
float4 p0 = ((float4*)p.pos)[vi0];
float4 p1 = ((float4*)p.pos)[vi1];
float4 p2 = ((float4*)p.pos)[vi2];
float4 o0 = (op0 < 0) ? p0 : ((float4*)p.pos)[op0];
float4 o1 = (op1 < 0) ? p1 : ((float4*)p.pos)[op1];
float4 o2 = (op2 < 0) ? p2 : ((float4*)p.pos)[op2];
// Project vertices to pixel space.
float w0 = 1.f / p0.w;
float w1 = 1.f / p1.w;
float w2 = 1.f / p2.w;
float ow0 = 1.f / o0.w;
float ow1 = 1.f / o1.w;
float ow2 = 1.f / o2.w;
float fx = (float)px + .5f - p.xh;
float fy = (float)py + .5f - p.yh;
float x0 = p0.x * w0 * p.xh - fx;
float y0 = p0.y * w0 * p.yh - fy;
float x1 = p1.x * w1 * p.xh - fx;
float y1 = p1.y * w1 * p.yh - fy;
float x2 = p2.x * w2 * p.xh - fx;
float y2 = p2.y * w2 * p.yh - fy;
float ox0 = o0.x * ow0 * p.xh - fx;
float oy0 = o0.y * ow0 * p.yh - fy;
float ox1 = o1.x * ow1 * p.xh - fx;
float oy1 = o1.y * ow1 * p.yh - fy;
float ox2 = o2.x * ow2 * p.xh - fx;
float oy2 = o2.y * ow2 * p.yh - fy;
// Signs to kill non-silhouette edges.
float bb = (x1-x0)*(y2-y0) - (x2-x0)*(y1-y0); // Triangle itself.
float a0 = (x1-ox0)*(y2-oy0) - (x2-ox0)*(y1-oy0); // Wings.
float a1 = (x2-ox1)*(y0-oy1) - (x0-ox1)*(y2-oy1);
float a2 = (x0-ox2)*(y1-oy2) - (x1-ox2)*(y0-oy2);
// If no matching signs anywhere, skip the rest.
if (same_sign(a0, bb) || same_sign(a1, bb) || same_sign(a2, bb))
{
// XY flip for horizontal edges.
if (d)
{
swap(x0, y0);
swap(x1, y1);
swap(x2, y2);
}
float dx0 = x2 - x1;
float dx1 = x0 - x2;
float dx2 = x1 - x0;
float dy0 = y2 - y1;
float dy1 = y0 - y2;
float dy2 = y1 - y0;
// Check if an edge crosses between us and the neighbor pixel.
float dc = -F32_MAX;
float ds = (tri == tri0) ? 1.f : -1.f;
float d0 = ds * (x1*dy0 - y1*dx0);
float d1 = ds * (x2*dy1 - y2*dx1);
float d2 = ds * (x0*dy2 - y0*dx2);
if (same_sign(y1, y2)) d0 = -F32_MAX, dy0 = 1.f;
if (same_sign(y2, y0)) d1 = -F32_MAX, dy1 = 1.f;
if (same_sign(y0, y1)) d2 = -F32_MAX, dy2 = 1.f;
int di = max_idx3(d0, d1, d2, dy0, dy1, dy2);
if (di == 0 && same_sign(a0, bb) && fabsf(dy0) >= fabsf(dx0)) dc = d0 / dy0;
if (di == 1 && same_sign(a1, bb) && fabsf(dy1) >= fabsf(dx1)) dc = d1 / dy1;
if (di == 2 && same_sign(a2, bb) && fabsf(dy2) >= fabsf(dx2)) dc = d2 / dy2;
float eps = .0625f; // Expect no more than 1/16 pixel inaccuracy.
// Adjust output image if a suitable edge was found.
if (dc > -eps && dc < 1.f + eps)
{
dc = fminf(fmaxf(dc, 0.f), 1.f);
float alpha = ds * (.5f - dc);
const float* pColor0 = p.color + pixel0 * p.channels;
const float* pColor1 = p.color + pixel1 * p.channels;
float* pOutput = p.output + (alpha > 0.f ? pixel0 : pixel1) * p.channels;
for (int i=0; i < p.channels; i++)
atomicAdd(&pOutput[i], alpha * (pColor1[i] - pColor0[i]));
// Rewrite the work item's flags and alpha. Keep original px, py.
unsigned int flags = pz << 16;
flags |= di;
flags |= d << AAWorkItem::FLAG_DOWN_BIT;
flags |= (__float_as_uint(ds) >> 31) << AAWorkItem::FLAG_TRI1_BIT;
((int2*)pItem)[1] = make_int2(flags, __float_as_int(alpha));
}
}
}
}
//------------------------------------------------------------------------
// Gradient kernel.
__global__ void AntialiasGradKernel(const AntialiasKernelParams p)
{
// Temporary space for coalesced atomics.
CA_DECLARE_TEMP(AA_GRAD_KERNEL_THREADS_PER_BLOCK);
__shared__ int s_base; // Work counter communication across entire CTA.
int workCount = p.workBuffer[0].x;
for(;;)
{
// Persistent threads work fetcher.
__syncthreads();
if (threadIdx.x == 0)
s_base = atomicAdd(&p.workBuffer[0].y, AA_GRAD_KERNEL_THREADS_PER_BLOCK);
__syncthreads();
int thread_idx = s_base + threadIdx.x;
if (thread_idx >= workCount)
return;
// Read work item filled out by forward kernel.
int4 item = p.workBuffer[thread_idx + 1];
unsigned int amask = __ballot_sync(0xffffffffu, item.w);
if (item.w == 0)
continue; // No effect.
// Unpack work item and replicate setup from forward analysis kernel.
int px = item.x;
int py = item.y;
int pz = (int)(((unsigned int)item.z) >> 16);
int d = (item.z >> AAWorkItem::FLAG_DOWN_BIT) & 1;
float alpha = __int_as_float(item.w);
int tri1 = (item.z >> AAWorkItem::FLAG_TRI1_BIT) & 1;
int di = item.z & AAWorkItem::EDGE_MASK;
float ds = __int_as_float(__float_as_int(1.0) | (tri1 << 31));
int pixel0 = px + p.width * (py + p.height * pz);
int pixel1 = pixel0 + (d ? p.width : 1);
int tri = (int)p.rasterOut[((tri1 ? pixel1 : pixel0) << 2) + 3] - 1;
if (tri1)
{
px += 1 - d;
py += d;
}
// Bail out if triangle index is corrupt.
bool triFail = (tri < 0 || tri >= p.numTriangles);
amask = __ballot_sync(amask, !triFail);
if (triFail)
continue;
// Outgoing color gradients.
float* pGrad0 = p.gradColor + pixel0 * p.channels;
float* pGrad1 = p.gradColor + pixel1 * p.channels;
// Incoming color gradients.
const float* pDy = p.dy + (alpha > 0.f ? pixel0 : pixel1) * p.channels;
// Position gradient weight based on colors and incoming gradients.
float dd = 0.f;
const float* pColor0 = p.color + pixel0 * p.channels;
const float* pColor1 = p.color + pixel1 * p.channels;
// Loop over channels and accumulate.
for (int i=0; i < p.channels; i++)
{
float dy = pDy[i];
if (dy != 0.f)
{
// Update position gradient weight.
dd += dy * (pColor1[i] - pColor0[i]);
// Update color gradients. No coalescing because all have different targets.
float v = alpha * dy;
atomicAdd(&pGrad0[i], -v);
atomicAdd(&pGrad1[i], v);
}
}
// If position weight is zero, skip the rest.
bool noGrad = (dd == 0.f);
amask = __ballot_sync(amask, !noGrad);
if (noGrad)
continue;
// Fetch vertex indices of the active edge and their positions.
int i1 = (di < 2) ? (di + 1) : 0;
int i2 = (i1 < 2) ? (i1 + 1) : 0;
int vi1 = p.tri[3 * tri + i1];
int vi2 = p.tri[3 * tri + i2];
// Bail out if vertex indices are corrupt.
bool vtxFail = (vi1 < 0 || vi1 >= p.numVertices || vi2 < 0 || vi2 >= p.numVertices);
amask = __ballot_sync(amask, !vtxFail);
if (vtxFail)
continue;
// Instance mode: Adjust vertex indices based on minibatch index.
if (p.instance_mode)
{
vi1 += pz * p.numVertices;
vi2 += pz * p.numVertices;
}
// Fetch vertex positions.
float4 p1 = ((float4*)p.pos)[vi1];
float4 p2 = ((float4*)p.pos)[vi2];
// Project vertices to pixel space.
float pxh = p.xh;
float pyh = p.yh;
float fx = (float)px + .5f - pxh;
float fy = (float)py + .5f - pyh;
// XY flip for horizontal edges.
if (d)
{
swap(p1.x, p1.y);
swap(p2.x, p2.y);
swap(pxh, pyh);
swap(fx, fy);
}
// Gradient calculation setup.
float w1 = 1.f / p1.w;
float w2 = 1.f / p2.w;
float x1 = p1.x * w1 * pxh - fx;
float y1 = p1.y * w1 * pyh - fy;
float x2 = p2.x * w2 * pxh - fx;
float y2 = p2.y * w2 * pyh - fy;
float dx = x2 - x1;
float dy = y2 - y1;
float db = x1*dy - y1*dx;
// Compute inverse delta-y with epsilon.
float ep = copysignf(1e-3f, dy); // ~1/1000 pixel.
float iy = 1.f / (dy + ep);
// Compute position gradients.
float dby = db * iy;
float iw1 = -w1 * iy * dd;
float iw2 = w2 * iy * dd;
float gp1x = iw1 * pxh * y2;
float gp2x = iw2 * pxh * y1;
float gp1y = iw1 * pyh * (dby - x2);
float gp2y = iw2 * pyh * (dby - x1);
float gp1w = -(p1.x * gp1x + p1.y * gp1y) * w1;
float gp2w = -(p2.x * gp2x + p2.y * gp2y) * w2;
// XY flip the gradients.
if (d)
{
swap(gp1x, gp1y);
swap(gp2x, gp2y);
}
// Kill position gradients if alpha was saturated.
if (fabsf(alpha) >= 0.5f)
{
gp1x = gp1y = gp1w = 0.f;
gp2x = gp2y = gp2w = 0.f;
}
// Initialize coalesced atomics. Match both triangle ID and edge index.
// Also note that some threads may be inactive.
CA_SET_GROUP_MASK(tri ^ (di << 30), amask);
// Accumulate gradients.
caAtomicAdd3_xyw(p.gradPos + 4 * vi1, gp1x, gp1y, gp1w);
caAtomicAdd3_xyw(p.gradPos + 4 * vi2, gp2x, gp2y, gp2w);
}
}
//------------------------------------------------------------------------
|
the_stack
|
#pragma once
#include <cuda_runtime.h>
#include "libvis/cuda/cuda_buffer.cuh"
#include "libvis/cuda/cuda_matrix.cuh"
#include "libvis/cuda/cuda_unprojection_lookup.cuh"
#include "libvis/cuda/pixel_corner_projector.cuh"
#include "libvis/cuda/patch_match_stereo_samples.cuh"
#include "libvis/cuda/patch_match_stereo_util.cuh"
#include "libvis/libvis.h"
namespace vis {
constexpr float kMinInvDepth = 1e-5f; // TODO: Make parameter
__forceinline__ __device__ float SampleAtProjectedPosition(
const float x, const float y, const float z,
const PixelCornerProjector_& projector,
const CUDABuffer_<u8>& mask,
const CUDAMatrix3x4& stereo_tr_reference,
cudaTextureObject_t stereo_texture) {
float3 pnxy = stereo_tr_reference * make_float3(x, y, z);
if (pnxy.z <= 0.f) {
return CUDART_NAN_F;
}
const float2 pxy = projector.Project(pnxy);
if (pxy.x < 0.5f ||
pxy.y < 0.5f ||
pxy.x >= projector.width - 0.5f ||
pxy.y >= projector.height - 0.5f ||
(mask.address() && mask(pxy.y, pxy.x) == 0)) {
return CUDART_NAN_F;
} else {
return 255.0f * tex2D<float>(stereo_texture, pxy.x, pxy.y);
}
}
__forceinline__ __device__ float CalculatePlaneDepth2(
float d, const float2& normal_xy, float normal_z,
float query_x, float query_y) {
return d / (query_x * normal_xy.x + query_y * normal_xy.y + normal_z);
}
__forceinline__ __device__ float CalculatePlaneInvDepth2(
float d, const float2& normal_xy, float normal_z,
float query_x, float query_y) {
return (query_x * normal_xy.x + query_y * normal_xy.y + normal_z) / d;
}
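// Added derivation note: for a plane n . p = d and a viewing ray
// p = depth * (query_x, query_y, 1) through the camera center, substitution gives
// depth * (query_x * n.x + query_y * n.y + n.z) = d, which yields the two
// expressions above: depth = d / (...) and inv_depth = (...) / d.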
// __forceinline__ __device__ float ComputeCostsSSD(
// int x, int y,
// const float2& normal_xy,
// const float inv_depth,
// const float context_radius,
// const CUDAUnprojectionLookup2D_& unprojector,
// const CUDABuffer_<u8>& reference_image,
// const CUDAMatrix3x4& stereo_tr_reference,
// const PixelCornerProjector_& projector,
// const CUDABuffer_<u8>& mask,
// cudaTextureObject_t stereo_image) {
// if (inv_depth < kMinInvDepth) {
// return CUDART_NAN_F;
// }
//
// const float normal_z =
// -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y);
// const float depth = 1.f / inv_depth;
// const float2 center_nxy =
// unprojector.UnprojectPoint(x, y);
// const float plane_d =
// (center_nxy.x * depth) * normal_xy.x +
// (center_nxy.y * depth) * normal_xy.y + depth * normal_z;
//
// float cost = 0;
//
// #pragma unroll
// for (int dy = -context_radius; dy <= context_radius; ++ dy) {
// #pragma unroll
// for (int dx = -context_radius; dx <= context_radius; ++ dx) {
// float2 nxy = unprojector.UnprojectPoint(x + dx, y + dy);
// float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y);
// nxy.x *= plane_depth;
// nxy.y *= plane_depth;
//
// float sample =
// SampleAtProjectedPosition(nxy.x, nxy.y, plane_depth,
// projector,
// mask,
// stereo_tr_reference,
// stereo_image);
//
// const float diff = sample - reference_image(y + dy, x + dx);
// cost += diff * diff;
// }
// }
//
// return cost;
// }
// Computes 0.5f * (1 - ZNCC), so that the result can be used
// as a cost value with range [0; 1].
__forceinline__ __device__ float ComputeZNCCBasedCost(
const int num_samples,
const float sum_a,
const float squared_sum_a,
const float sum_b,
const float squared_sum_b,
const float product_sum) {
const float normalizer = 1.0f / num_samples;
const float numerator =
product_sum - normalizer * (sum_a * sum_b);
const float denominator_reference =
squared_sum_a - normalizer * sum_a * sum_a;
const float denominator_other =
squared_sum_b - normalizer * sum_b * sum_b;
// NOTE: Using a threshold on homogeneous patches is required here since
// otherwise the optimum might be a noisy value in a homogeneous area.
constexpr float kHomogeneousThreshold = 0.1f;
if (denominator_reference < kHomogeneousThreshold ||
denominator_other < kHomogeneousThreshold) {
return 1.0f;
} else {
return 0.5f * (1.0f - numerator *
rsqrtf(denominator_reference * denominator_other));
}
}
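// Added commentary: ZNCC is evaluated from running sums. With N samples,
//   cov(a, b) = product_sum - (sum_a * sum_b) / N
//   var(a)    = squared_sum_a - sum_a * sum_a / N   (and likewise for b)
//   ZNCC      = cov(a, b) / sqrt(var(a) * var(b))
// and the returned cost is 0.5 * (1 - ZNCC) in [0, 1], forced to 1.0 when either
// variance falls below kHomogeneousThreshold to avoid locking onto noise in
// homogeneous patches.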
__forceinline__ __device__ float ComputeCostsZNCC(
int x, int y,
const float2& normal_xy,
const float inv_depth,
const float context_radius,
const CUDAUnprojectionLookup2D_& unprojector,
const CUDABuffer_<u8>& reference_image,
cudaTextureObject_t reference_texture,
const CUDAMatrix3x4& stereo_tr_reference,
const PixelCornerProjector_& projector,
const CUDABuffer_<u8>& mask,
cudaTextureObject_t stereo_image) {
if (inv_depth < kMinInvDepth) {
return CUDART_NAN_F;
}
const float normal_z =
-sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y);
const float depth = 1.f / inv_depth;
const float2 center_nxy =
unprojector.UnprojectPoint(x, y);
const float plane_d =
(center_nxy.x * depth) * normal_xy.x +
(center_nxy.y * depth) * normal_xy.y + depth * normal_z;
float sum_a = 0;
float squared_sum_a = 0;
float sum_b = 0;
float squared_sum_b = 0;
float product_sum = 0;
for (int sample = 0; sample < kNumSamples; ++ sample) {
float dx = context_radius * kSamplesCUDA[sample][0];
float dy = context_radius * kSamplesCUDA[sample][1];
float2 nxy = unprojector.UnprojectPoint(x + dx, y + dy); // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers).
float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y);
nxy.x *= plane_depth;
nxy.y *= plane_depth;
float stereo_value =
SampleAtProjectedPosition(nxy.x, nxy.y, plane_depth,
projector,
mask,
stereo_tr_reference,
stereo_image);
sum_a += stereo_value;
squared_sum_a += stereo_value * stereo_value;
float reference_value = 255.f * tex2D<float>(reference_texture, x + dx + 0.5f, y + dy + 0.5f);
sum_b += reference_value;
squared_sum_b += reference_value * reference_value;
product_sum += stereo_value * reference_value;
}
return ComputeZNCCBasedCost(
kNumSamples, sum_a, squared_sum_a, sum_b, squared_sum_b, product_sum);
}
// __forceinline__ __device__ float ComputeCostsCensus(
// int x, int y,
// const float2& normal_xy,
// const float inv_depth,
// const float context_radius,
// const CUDAUnprojectionLookup2D_& unprojector,
// const CUDABuffer_<u8>& reference_image,
// const CUDAMatrix3x4& stereo_tr_reference,
// const PixelCornerProjector_& projector,
// cudaTextureObject_t stereo_image) {
// if (inv_depth < kMinInvDepth) {
// return CUDART_NAN_F;
// }
//
// const float normal_z =
// -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y);
// const float depth = 1.f / inv_depth;
// const float2 center_nxy =
// unprojector.UnprojectPoint(x, y);
// const float plane_d =
// (center_nxy.x * depth) * normal_xy.x +
// (center_nxy.y * depth) * normal_xy.y + depth * normal_z;
//
// float stereo_center_value =
// SampleAtProjectedPosition(center_nxy.x * depth, center_nxy.y * depth, depth,
// projector,
// mask,
// stereo_tr_reference,
// stereo_image);
// u8 reference_center_value = reference_image(y, x);
//
// float cost = 0;
//
// constexpr int kSpreadFactor = 2; // TODO: Make parameter
//
// #pragma unroll
// for (int dy = -kSpreadFactor * context_radius; dy <= kSpreadFactor * context_radius; dy += kSpreadFactor) {
// #pragma unroll
// for (int dx = -kSpreadFactor * context_radius; dx <= kSpreadFactor * context_radius; dx += kSpreadFactor) {
// if (dx == 0 && dy == 0) {
// continue;
// }
// if (x + dx < 0 ||
// y + dy < 0 ||
// x + dx >= reference_image.width() ||
// y + dy >= reference_image.height()) {
// continue;
// }
//
// float2 nxy = unprojector.UnprojectPoint(x + dx, y + dy);
// float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y);
// nxy.x *= plane_depth;
// nxy.y *= plane_depth;
//
// float stereo_value =
// SampleAtProjectedPosition(nxy.x, nxy.y, plane_depth,
// projector,
// mask,
// stereo_tr_reference,
// stereo_image);
// if (::isnan(stereo_value)) {
// return CUDART_NAN_F;
// }
// int stereo_bit = stereo_value > stereo_center_value;
//
// u8 reference_value = reference_image(y + dy, x + dx);
// int reference_bit = reference_value > reference_center_value;
//
// cost += stereo_bit != reference_bit;
// }
// }
//
// return cost;
// }
__forceinline__ __device__ float ComputeCosts(
int x, int y,
const float2& normal_xy,
const float inv_depth,
int context_radius,
const CUDAUnprojectionLookup2D_& unprojector,
const CUDABuffer_<u8>& reference_image,
cudaTextureObject_t reference_texture,
const CUDAMatrix3x4& stereo_tr_reference,
const PixelCornerProjector_& stereo_camera,
const CUDABuffer_<u8>& mask,
cudaTextureObject_t stereo_image,
int match_metric,
float second_best_min_distance_factor,
const CUDABuffer_<float>& best_inv_depth_map) {
if (second_best_min_distance_factor > 0) {
// Reject estimates which are too close to the best inv depth.
float best_inv_depth = best_inv_depth_map(y, x);
float factor = best_inv_depth / inv_depth;
if (factor < 1) {
factor = 1 / factor;
}
if (factor < second_best_min_distance_factor) {
return CUDART_NAN_F;
}
}
// TODO: Commented out for higher compile speed (and since only ZNCC is consistent with outlier filtering etc.)
// if (match_metric == kPatchMatchStereo_MatchMetric_SSD) {
// return ComputeCostsSSD(
// x, y, normal_xy, inv_depth, context_radius, unprojector, reference_image,
// stereo_tr_reference, stereo_camera, stereo_image);
// } else if (match_metric == kPatchMatchStereo_MatchMetric_ZNCC) {
return ComputeCostsZNCC(
x, y, normal_xy, inv_depth, context_radius, unprojector, reference_image, reference_texture,
stereo_tr_reference, stereo_camera, mask, stereo_image);
// } else { // if (match_metric == kPatchMatchStereo_MatchMetric_Census) {
// return ComputeCostsCensus(
// x, y, normal_xy, inv_depth, context_radius, unprojector, reference_image,
// stereo_tr_reference, stereo_camera, stereo_image);
// }
}
__forceinline__ __device__ float ComputeCosts(
int x, int y,
const float2& normal_xy,
const float inv_depth,
const StereoParametersSingleCUDA& p,
int match_metric,
float second_best_min_distance_factor,
const CUDABuffer_<float>& best_inv_depth_map) {
return ComputeCosts(
x, y,
normal_xy,
inv_depth,
p.context_radius,
p.reference_unprojection_lookup,
p.reference_image,
p.reference_texture,
p.stereo_tr_reference,
p.stereo_camera,
p.mask,
p.stereo_image,
match_metric,
second_best_min_distance_factor,
best_inv_depth_map);
}
/// Multi-stereo-image variant: sums ComputeCosts() for the current and proposed
/// plane states over all stereo images and returns true if the proposal is cheaper.
__forceinline__ __device__ bool IsCostOfProposedChangeLower(
int x, int y,
const float2& normal_xy,
const float inv_depth,
const float2& proposed_normal_xy,
const float proposed_inv_depth,
const StereoParametersMultiCUDA& p,
int match_metric,
float second_best_min_distance_factor,
const CUDABuffer_<float>& best_inv_depth_map) {
float old_cost_sum = 0;
float new_cost_sum = 0;
// TODO: Cache the unprojected points for both states to use them for all stereo images?
for (int s = 0; s < p.num_stereo_images; ++ s) {
float old_cost = ComputeCosts(
x, y,
normal_xy,
inv_depth,
p.context_radius,
p.reference_unprojection_lookup,
p.reference_image,
p.reference_texture,
p.stereo_tr_reference[s],
p.stereo_camera,
p.mask,
p.stereo_images[s],
match_metric,
second_best_min_distance_factor,
best_inv_depth_map);
if (::isnan(old_cost)) {
continue;
}
float new_cost = ComputeCosts(
x, y,
proposed_normal_xy,
proposed_inv_depth,
p.context_radius,
p.reference_unprojection_lookup,
p.reference_image,
p.reference_texture,
p.stereo_tr_reference[s],
p.stereo_camera,
p.mask,
p.stereo_images[s],
match_metric,
second_best_min_distance_factor,
best_inv_depth_map);
if (::isnan(new_cost)) {
continue;
}
old_cost_sum += old_cost;
new_cost_sum += new_cost;
}
if (old_cost_sum == 0 && new_cost_sum == 0) {
// No cost was valid for both states --> no info to base decision on
return false;
} else {
return new_cost_sum < old_cost_sum;
}
}
}
|
the_stack
|
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/reverse.h>
#include <thrust/reduce.h>
#include <thrust/merge.h>
#include <thrust/fill.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#define BLOCKDIM 32
__global__ void __copyToInds(float *A, float *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A[i];
}
}
__global__ void __copyToIndsX(float *A, float *B, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[i] = A[i];
}
}
int copyToInds(float *A, float *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
if (I == NULL) {
__copyToIndsX<<<griddims,nthreads>>>(A, B, len);
} else {
__copyToInds<<<griddims,nthreads>>>(A, B, I, len);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyToIndsLong(long long *A, long long *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A[i];
}
}
__global__ void __copyToIndsLongX(long long *A, long long *B, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[i] = A[i];
}
}
int copyToIndsLong(long long *A, long long *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
if (I == NULL) {
__copyToIndsLongX<<<griddims,nthreads>>>(A, B, len);
} else {
__copyToIndsLong<<<griddims,nthreads>>>(A, B, I, len);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds(float A, float *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A;
}
}
__global__ void __fillToIndsX(float A, float *B, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[i] = A;
}
}
int fillToInds(float A, float *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
if (I == NULL) {
__fillToIndsX<<<griddims,nthreads>>>(A, B, len);
} else {
__fillToInds<<<griddims,nthreads>>>(A, B, I, len);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToIndsLong(long long A, long long *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[I[i]] = A;
}
}
__global__ void __fillToIndsLongX(long long A, long long *B, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[i] = A;
}
}
int fillToIndsLong(long long A, long long *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
if (I == NULL) {
__fillToIndsLongX<<<griddims,nthreads>>>(A, B, len);
} else {
__fillToIndsLong<<<griddims,nthreads>>>(A, B, I, len);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
template<typename T>
__global__ void __copyFromInds(T *A, T *B, int *I, long long len) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
long long i;
for (i = tid; i < len; i += step) {
B[i] = A[I[i]];
}
}
int copyFromInds(float *A, float *B, int *I, long long len) {
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyFromInds<<<griddims,nthreads>>>(A, B, I, len);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B[I,J] = A
// indexed copy: version with one block per column
#define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyToInds2D##DFNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A[i + iblock * lda]; \
} \
} \
}
COPYTOINDS2DA(nn,I[i],J[iblock],float)
COPYTOINDS2DA(xn,i,J[iblock],float)
COPYTOINDS2DA(nx,I[i],iblock,float)
COPYTOINDS2DA(xx,i,iblock,float)
COPYTOINDS2DA(nnl,I[i],J[iblock],long long)
COPYTOINDS2DA(xnl,i,J[iblock],long long)
COPYTOINDS2DA(nxl,I[i],iblock,long long)
COPYTOINDS2DA(xxl,i,iblock,long long)
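// For reference (added): COPYTOINDS2DA(nn, I[i], J[iblock], float) expands to a
// kernel equivalent to the following, with one CUDA block per destination column:
//   __global__ void __copyToInds2Dnn(float *A, int lda, float *B, int ldb,
//                                    int *I, int nrows, int *J, int ncols) {
//     int iblock = blockIdx.x + blockIdx.y * gridDim.x;
//     if (iblock < ncols) {
//       int icol = J[iblock];
//       for (int i = threadIdx.x; i < nrows; i += blockDim.x)
//         B[I[i] + icol * ldb] = A[i + iblock * lda];
//     }
//   }
// The n/x letters in the suffix encode whether the row (I) and column (J) index
// arrays are used (n) or replaced by the identity mapping (x).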
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyToInds2DB##DFNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \
} \
}
COPYTOINDS2DB(nn,I[irow],J[icol],float)
COPYTOINDS2DB(xn,irow,J[icol],float)
COPYTOINDS2DB(nx,I[irow],icol,float)
COPYTOINDS2DB(xx,irow,icol,float)
COPYTOINDS2DB(nnl,I[irow],J[icol],long long)
COPYTOINDS2DB(xnl,irow,J[icol],long long)
COPYTOINDS2DB(nxl,I[irow],icol,long long)
COPYTOINDS2DB(xxl,irow,icol,long long)
// Implement B[I,J] = A
int copyToInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyToInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyToInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
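// Hedged usage sketch (not part of the original library): scatter a 3x2 column-major block A
// into rows I and columns J of an 8x8 matrix B via copyToInds2D, i.e. B[I,J] = A. All names
// below are illustrative; error checking and data initialization are omitted for brevity.
#ifdef INDEXED_COPY_USAGE_SKETCH
static void copyToInds2D_usage_sketch() {
  const int nrows = 3, ncols = 2, lda = 3, ldb = 8;
  int hI[3] = {0, 2, 5};                       // target rows in B
  int hJ[2] = {1, 4};                          // target columns in B
  float *dA; float *dB; int *dI; int *dJ;
  cudaMalloc(&dA, lda * ncols * sizeof(float));
  cudaMalloc(&dB, ldb * 8 * sizeof(float));
  cudaMalloc(&dI, nrows * sizeof(int));
  cudaMalloc(&dJ, ncols * sizeof(int));
  cudaMemcpy(dI, hI, sizeof(hI), cudaMemcpyHostToDevice);
  cudaMemcpy(dJ, hJ, sizeof(hJ), cudaMemcpyHostToDevice);
  copyToInds2D(dA, lda, dB, ldb, dI, nrows, dJ, ncols); // returns a cudaError_t-valued int
  cudaFree(dA); cudaFree(dB); cudaFree(dI); cudaFree(dJ);
}
#endif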
__global__ void __copyToInds3D(float *A, int lda, int rda, float *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int k = tid / (nrows * ncols);
int tidrem = tid - k * (nrows * ncols);
int kstep = step / (nrows * ncols);
int steprem = step - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk;
for (id = tid; id < nrows * ncols * nk; id += step) {
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
}
}
int copyToInds3D(float *A, int lda, int rda, float *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int len = nrows * ncols * nk;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyToInds3D<<<griddims,nthreads>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyToInds3DLong(long long *A, int lda, int rda, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int k = tid / (nrows * ncols);
int tidrem = tid - k * (nrows * ncols);
int kstep = step / (nrows * ncols);
int steprem = step - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk;
for (id = tid; id < nrows * ncols * nk; id += step) {
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
}
}
int copyToInds3DLong(long long *A, int lda, int rda, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int len = nrows * ncols * nk;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyToInds3DLong<<<griddims,nthreads>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds3D(float A, float *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int k = tid / (nrows * ncols);
int tidrem = tid - k * (nrows * ncols);
int kstep = step / (nrows * ncols);
int steprem = step - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk;
for (id = tid; id < nrows * ncols * nk; id += step) {
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A;
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
}
}
int fillToInds3D(float A, float *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int len = nrows * ncols * nk;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__fillToInds3D<<<griddims,nthreads>>>(A, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds3DLong(long long A, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int k = tid / (nrows * ncols);
int tidrem = tid - k * (nrows * ncols);
int kstep = step / (nrows * ncols);
int steprem = step - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk;
for (id = tid; id < nrows * ncols * nk; id += step) {
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * mapk)] = A;
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
}
}
int fillToInds3DLong(long long A, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int len = nrows * ncols * nk;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__fillToInds3DLong<<<griddims,nthreads>>>(A, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyToInds4D(float *A, int lda, int rda, int tda, float *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int l = tid / (nrows * ncols * nk);
int tidrem = tid - l * (nrows * ncols * nk);
int lstep = step / (nrows * ncols * nk);
int steprem = step - lstep * (nrows * ncols * nk);
int k = tidrem / (nrows * ncols);
tidrem = tidrem - k * (nrows * ncols);
int kstep = steprem / (nrows * ncols);
steprem = steprem - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk, mapl;
for (id = tid; id < nrows * ncols * nk * nl; id += step) {
mapl = l;
if (L != NULL) mapl = L[l];
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
if (k >= nk) {k -= nk; l++;}
l += lstep;
}
}
int copyToInds4D(float *A, int lda, int rda, int tda, float *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int len = nrows * ncols * nk * nl;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyToInds4D<<<griddims,nthreads>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyToInds4DLong(long long *A, int lda, int rda, int tda, long long *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int l = tid / (nrows * ncols * nk);
int tidrem = tid - l * (nrows * ncols * nk);
int lstep = step / (nrows * ncols * nk);
int steprem = step - lstep * (nrows * ncols * nk);
int k = tidrem / (nrows * ncols);
tidrem = tidrem - k * (nrows * ncols);
int kstep = steprem / (nrows * ncols);
steprem = steprem - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk, mapl;
for (id = tid; id < nrows * ncols * nk * nl; id += step) {
mapl = l;
if (L != NULL) mapl = L[l];
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
if (k >= nk) {k -= nk; l++;}
l += lstep;
}
}
int copyToInds4DLong(long long *A, int lda, int rda, int tda, long long *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int len = nrows * ncols * nk * nl;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyToInds4DLong<<<griddims,nthreads>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds4D(float A, float *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int l = tid / (nrows * ncols * nk);
int tidrem = tid - l * (nrows * ncols * nk);
int lstep = step / (nrows * ncols * nk);
int steprem = step - lstep * (nrows * ncols * nk);
int k = tidrem / (nrows * ncols);
tidrem = tidrem - k * (nrows * ncols);
int kstep = steprem / (nrows * ncols);
steprem = steprem - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk, mapl;
for (id = tid; id < nrows * ncols * nk * nl; id += step) {
mapl = l;
if (L != NULL) mapl = L[l];
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A;
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
if (k >= nk) {k -= nk; l++;}
l += lstep;
}
}
int fillToInds4D(float A, float *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int len = nrows * ncols * nk * nl;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__fillToInds4D<<<griddims,nthreads>>>(A, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __fillToInds4DLong(long long A, long long *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int l = tid / (nrows * ncols * nk);
int tidrem = tid - l * (nrows * ncols * nk);
int lstep = step / (nrows * ncols * nk);
int steprem = step - lstep * (nrows * ncols * nk);
int k = tidrem / (nrows * ncols);
tidrem = tidrem - k * (nrows * ncols);
int kstep = steprem / (nrows * ncols);
steprem = steprem - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk, mapl;
for (id = tid; id < nrows * ncols * nk * nl; id += step) {
mapl = l;
if (L != NULL) mapl = L[l];
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A;
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
if (k >= nk) {k -= nk; l++;}
l += lstep;
}
}
int fillToInds4DLong(long long A, long long *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int len = nrows * ncols * nk * nl;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__fillToInds4DLong<<<griddims,nthreads>>>(A, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int copyToInds2DLong(long long *A, int lda, long long *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyToInds2Dxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2Dnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2Dnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyToInds2DBxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyToInds2DBnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyToInds2DBnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B = A[I,J]
// indexed copy: version with one block per column
#define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyFromInds2D##FNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[i + iblock * ldb] = A[IEXPR + icol * lda]; \
} \
} \
}
COPYFROMINDS2DA(nn,I[i],J[iblock],float)
COPYFROMINDS2DA(xn,i,J[iblock],float)
COPYFROMINDS2DA(nx,I[i],iblock,float)
COPYFROMINDS2DA(xx,i,iblock,float)
COPYFROMINDS2DA(nnl,I[i],J[iblock],long long)
COPYFROMINDS2DA(xnl,i,J[iblock],long long)
COPYFROMINDS2DA(nxl,I[i],iblock,long long)
COPYFROMINDS2DA(xxl,i,iblock,long long)
// Implement B = A[I,J]
// indexed copy: version with one thread per element
#define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __copyFromInds2DB##FNAME(ETYPE *A, int lda, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \
} \
}
COPYFROMINDS2DB(nn,I[irow],J[icol],float)
COPYFROMINDS2DB(xn,irow,J[icol],float)
COPYFROMINDS2DB(nx,I[irow],icol,float)
COPYFROMINDS2DB(xx,irow,icol,float)
COPYFROMINDS2DB(nnl,I[irow],J[icol],long long)
COPYFROMINDS2DB(xnl,irow,J[icol],long long)
COPYFROMINDS2DB(nxl,I[irow],icol,long long)
COPYFROMINDS2DB(xxl,irow,icol,long long)
// Implement B = A[I,J]
int copyFromInds2D(float *A, int lda, float *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyFromInds3D(float *A, int lda, int rda, float *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int k = tid / (nrows * ncols);
int tidrem = tid - k * (nrows * ncols);
int kstep = step / (nrows * ncols);
int steprem = step - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk;
for (id = tid; id < nrows * ncols * nk; id += step) {
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
}
}
int copyFromInds3D(float *A, int lda, int rda, float *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int len = nrows * ncols * nk;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyFromInds3D<<<griddims,nthreads>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyFromInds3DLong(long long *A, int lda, int rda, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int k = tid / (nrows * ncols);
int tidrem = tid - k * (nrows * ncols);
int kstep = step / (nrows * ncols);
int steprem = step - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk;
for (id = tid; id < nrows * ncols * nk; id += step) {
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
}
}
int copyFromInds3DLong(long long *A, int lda, int rda, long long *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) {
int len = nrows * ncols * nk;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyFromInds3DLong<<<griddims,nthreads>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyFromInds4D(float *A, int lda, int rda, int tda, float *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int l = tid / (nrows * ncols * nk);
int tidrem = tid - l * (nrows * ncols * nk);
int lstep = step / (nrows * ncols * nk);
int steprem = step - lstep * (nrows * ncols * nk);
int k = tidrem / (nrows * ncols);
tidrem = tidrem - k * (nrows * ncols);
int kstep = steprem / (nrows * ncols);
steprem = steprem - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk, mapl;
for (id = tid; id < nrows * ncols * nk * nl; id += step) {
mapl = l;
if (L != NULL) mapl = L[l];
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[i + ldb * (j + rdb * (k + tdb * l))] = A[mapi + lda * (mapj + rda * (mapk + tda * mapl))];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
if (k >= nk) {k -= nk; l++;}
l += lstep;
}
}
int copyFromInds4D(float *A, int lda, int rda, int tda, float *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int len = nrows * ncols * nk * nl;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyFromInds4D<<<griddims,nthreads>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __copyFromInds4DLong(long long *A, int lda, int rda, int tda, long long *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int l = tid / (nrows * ncols * nk);
int tidrem = tid - l * (nrows * ncols * nk);
int lstep = step / (nrows * ncols * nk);
int steprem = step - lstep * (nrows * ncols * nk);
int k = tidrem / (nrows * ncols);
tidrem = tidrem - k * (nrows * ncols);
int kstep = steprem / (nrows * ncols);
steprem = steprem - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk, mapl;
for (id = tid; id < nrows * ncols * nk * nl; id += step) {
mapl = l;
if (L != NULL) mapl = L[l];
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[i + ldb * (j + rdb * (k + tdb * l))] = A[mapi + lda * (mapj + rda * (mapk + tda * mapl))];
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
if (k >= nk) {k -= nk; l++;}
l += lstep;
}
}
int copyFromInds4DLong(long long *A, int lda, int rda, int tda, long long *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int len = nrows * ncols * nk * nl;
int nthreads;
dim3 griddims;
setsizesTrim(len, &griddims, &nthreads);
__copyFromInds4DLong<<<griddims,nthreads>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int copyFromInds2DLong(long long *A, int lda, long long *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2Dxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2Dnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2Dnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__copyFromInds2DBxxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBxnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__copyFromInds2DBnxl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
} else {
__copyFromInds2DBnnl<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
// Implement B[I,J] = c
// indexed copy: version with one block per column
#define FILLTOINDS2DA(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __fillToInds2D##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int iblock = blockIdx.x + blockIdx.y * gridDim.x; \
if (iblock < ncols) { \
int icol = JEXPR; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
B[IEXPR + icol * ldb] = A; \
} \
} \
}
FILLTOINDS2DA(nn,I[i],J[iblock],float)
FILLTOINDS2DA(xn,i,J[iblock],float)
FILLTOINDS2DA(nx,I[i],iblock,float)
FILLTOINDS2DA(xx,i,iblock,float)
FILLTOINDS2DA(nnl,I[i],J[iblock],long long)
FILLTOINDS2DA(xnl,i,J[iblock],long long)
FILLTOINDS2DA(nxl,I[i],iblock,long long)
FILLTOINDS2DA(xxl,i,iblock,long long)
// Implement B[I,J] = A
// indexed copy: version with one thread per element
#define FILLTOINDS2DB(DFNAME,IEXPR,JEXPR,ETYPE) \
__global__ void __fillToInds2DB##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \
int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \
if (indx < nrows * ncols) { \
int irow = indx % nrows; \
int icol = indx / nrows; \
B[IEXPR + JEXPR * ldb] = A; \
} \
}
FILLTOINDS2DB(nn,I[irow],J[icol],float)
FILLTOINDS2DB(xn,irow,J[icol],float)
FILLTOINDS2DB(nx,I[irow],icol,float)
FILLTOINDS2DB(xx,irow,icol,float)
FILLTOINDS2DB(nnl,I[irow],J[icol],long long)
FILLTOINDS2DB(xnl,irow,J[icol],long long)
FILLTOINDS2DB(nxl,I[irow],icol,long long)
FILLTOINDS2DB(xxl,irow,icol,long long)
int fillToInds2D(float A, float *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__fillToInds2Dxx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2Dxn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__fillToInds2Dnx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2Dnn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__fillToInds2DBxx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2DBxn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__fillToInds2DBnx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2DBnn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int fillToInds2DLong(long long A, long long *B, int ldb, int *I, int nrows, int *J, int ncols) {
int len = nrows * ncols;
int nthreads = max(32, min(1024, nrows));
int nblocks = min(ncols, (len-1)/nthreads + 1);
dim3 griddims;
griddims.x = 1;
griddims.y = 1;
griddims.z = 1;
if (nblocks < 65536) {
griddims.x = nblocks;
} else {
int vs = (int)sqrt((float)nblocks);
griddims.x = vs;
griddims.y = (nblocks-1)/vs + 1;
}
if (nblocks == ncols) {
if (I == NULL) {
if (J == NULL) {
__fillToInds2Dxxl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2Dxnl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__fillToInds2Dnxl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2Dnnl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
}
} else {
if (I == NULL) {
if (J == NULL) {
__fillToInds2DBxxl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2DBxnl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
} else {
if (J == NULL) {
__fillToInds2DBnxl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
} else {
__fillToInds2DBnnl<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols);
}
}
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
const int INBLOCK = 4;
// Copy and transpose selected columns of the input matrix into the output matrix. nrows refers to the
// input matrix (and is therefore the number of columns of the output). ncols is the length of the iptrs
// array, which becomes the number of rows of the output matrix. iptrs specifies which columns of the
// input array to copy. outstride is the stride of the output matrix.
__global__ void __icopy_transpose(int *iptrs, float *in, float *out, int outstride, int nrows, int ncols) {
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
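// The +1 column of padding in the shared-memory tile avoids bank conflicts when the tile
// is read back along the other dimension during the transposed store.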
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int icopy_transpose(int *iptrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__icopy_transpose<<<griddims,blockdims>>>(iptrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;}
return 0;
}
// copy and transpose the input matrix into columns of the output matrix. nrows, ncols refer to output matrix
__global__ void __ocopy_transpose(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb];
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_add(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]);
}
}
__syncthreads();
}
}
}
__global__ void __ocopy_transpose_min(int *optrs, float *in, float *out, int instride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride];
}
}
__syncthreads();
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb]));
}
}
__syncthreads();
}
}
}
int ocopy_transpose_add(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_add<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_add"); return err;}
return 0;
}
int ocopy_transpose(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;}
return 0;
}
int ocopy_transpose_min(int *optrs, float *in, float *out, int stride, int nrows, int ncols) {
const dim3 griddims(20,256,1);
const dim3 blockdims(BLOCKDIM,INBLOCK,1);
cudaError_t err;
__ocopy_transpose_min<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose_min"); return err;}
return 0;
}
__global__ void __transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
__syncthreads();
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
__syncthreads();
}
}
}
int transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
int gridx = min(32, 1+(nrows-1)/256);
int gridy = min(32, 1+(ncols-1)/256);
const dim3 griddims(gridx, gridy, 1);
const dim3 blockdims(BLOCKDIM,16,1);
cudaError_t err;
int dev = -1;
cudaGetDevice(&dev);
__transpose<<<griddims,blockdims>>>(in, instride, out, outstride, nrows, ncols);
cudaStreamSynchronize(SYNC_STREAM);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix", dev, nrows, ncols);
return err;
}
return 0;
}
__global__ void __intToFloat(int *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __doubleToFloat(double *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __longToFloat(long long *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __floatToLong(float *A, long long *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
}
__global__ void __floatToInt(float *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
__global__ void __floatToDouble(float *A, double *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (double)(A[i]);
}
}
__global__ void __longToInt(long long *A, int *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (int)(A[i]);
}
}
__global__ void __intToLong(int *A, long long *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (long long)(A[i]);
}
}
int intToFloat(int *A, float *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__intToFloat<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int doubleToFloat(double *A, float *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__doubleToFloat<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int longToFloat(long long *A, float *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__longToFloat<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int floatToLong(float *A, long long *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__floatToLong<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int floatToInt(float *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__floatToInt<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int floatToDouble(float *A, double *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__floatToDouble<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int longToInt(long long *A, int *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__longToInt<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int intToLong(int *A, long long *B, int N) {
int nthreads;
dim3 griddims;
setsizesLean(N, &griddims, &nthreads);
__intToLong<<<griddims,nthreads>>>(A, B, N);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) {
int i, row, col;
float v;
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (i = id; i < nnz; i += blockDim.x * gridDim.x) {
v = data[i];
row = ir[i];
col = ic[i];
od[row + col * nrows] = v;
}
}
int full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz) {
int nblocks = min(32, 1+(nnz-1)/32);
int nthreads = min(1+(nnz-1)/nblocks, 1024);
__full<<<nblocks,nthreads>>>(ir, ic, data, od, nrows, ncols, nnz);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __initSeq(int *A, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = i % nrows;
}
}
__global__ void __initSeq2(int *A, int nrows, int ncols) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = i / nrows;
}
}
int initSeq(int *A, int nrows, int ncols, int dorows) {
int nthreads;
dim3 griddims;
setsizesLean(nrows*ncols, &griddims, &nthreads);
if (dorows) {
__initSeq<<<griddims,nthreads>>>(A, nrows, ncols);
} else {
__initSeq2<<<griddims,nthreads>>>(A, nrows, ncols);
}
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __set_val(float *A, float val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
__global__ void __set_lval(long long *A, long long val, int length) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) {
A[i] = val;
}
}
int set_val(float *A, float val, int length) {
int nthreads;
dim3 griddims;
setsizesLean(length, &griddims, &nthreads);
__set_val<<<griddims,nthreads>>>(A, val, length);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
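// set_ival reuses the float kernel to fill the buffer with an arbitrary 32-bit pattern:
// the int argument is reinterpreted bit-for-bit as a float, with no numeric conversion.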
int set_ival(float *A, int val, int length) {
int nthreads;
dim3 griddims;
setsizesLean(length, &griddims, &nthreads);
__set_val<<<griddims,nthreads>>>(A, *((float *)&val), length);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
int set_lval(long long *A, long long val, int length) {
int nthreads;
dim3 griddims;
setsizesLean(length, &griddims, &nthreads);
__set_lval<<<griddims,nthreads>>>(A, val, length);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
template <typename T>
__global__ void __kron(T *A, T *B, T *C, int nrA, int ncA, int nrB, int ncB, int nr, int nc) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
int r, rA, rB, c, cA, cB, i;
long long len = ((long long)nr) * nc;
for (i = tid; i < len; i += step) {
c = i / nr;
r = i - c * nr;
rA = r / nrB;
rB = r - rA * nrB;
cA = c / ncB;
cB = c - cA * ncB;
C[i] = A[rA + cA * nrA] * B[rB + cB * nrB];
}
}
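// Indexing note for the kernel above (column-major storage): element (r, c) of C = A ⊗ B is
// A(r / nrB, c / ncB) * B(r % nrB, c % ncB). For example, with 2x2 inputs, C[1] (row 1,
// column 0) equals A(0,0) * B(1,0).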
template <typename T>
int kron(T *A, T *B, T *C, int nrA, int ncA, int nrB, int ncB) {
int nr = nrA * nrB;
int nc = ncA * ncB;
long long len = ((long long)nr) * nc;
int nthreads;
dim3 griddims;
setsizesLean(len, &griddims, &nthreads);
__kron<<<griddims,nthreads>>>(A, B, C, nrA, ncA, nrB, ncB, nr, nc);
cudaStreamSynchronize(SYNC_STREAM);
cudaError_t err = cudaGetLastError();
return err;
}
template int kron<float>(float *A, float *B, float *C, int nrA, int ncA, int nrB, int ncB);
template int kron<int>(int *A, int *B, int *C, int nrA, int ncA, int nrB, int ncB);
// InscribedCircle
// Computes the inscribed circles of a closed curve
#include "InscribedCircle.h"
#include <iostream>
#include <fstream>
#include <cmath>
#include <algorithm>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macros: IN_LABEL and OUT_LABEL
// Label values marking points inside and outside the curve.
#define IN_LABEL 255
#define OUT_LABEL 0
// Kernel: _setCloseAreaKer (sets the region enclosed by a closed curve to white)
// This kernel uses the classic ray-casting (even-odd) rule to decide where a point lies
// relative to a closed curve: cast a ray from the point; an odd number of intersections
// with the curve means the point is inside, an even number (zero counts as even) means it
// is outside. The kernel uses this rule to set the interior of the closed curve to white,
// and also counts the number of enclosed points for later processing.
static __global__ void // kernel functions have no return value
_setCloseAreaKer(
CurveCuda curve, // input curve
ImageCuda maskimg, // output mask image
int *count // number of points enclosed by the closed curve
);
// Global function: compare (comparison function for two int values)
// Used as the comparison function pointer for quicksort over the feature values.
int compare(const void * a, const void * b)
{
return *(int *)b - *(int *)a;
// return *(float *)b > *(float *)a ? 1:-1;
}
// Kernel: _setCloseAreaKer (sets the region enclosed by a closed curve to white)
static __global__ void _setCloseAreaKer(CurveCuda curve, ImageCuda maskimg,
int *count)
{
// Compute the index of the current thread
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
// Return if the current thread falls outside the input image
if (xidx >= maskimg.imgMeta.width || yidx >= maskimg.imgMeta.height)
return;
// Register variables
int downcount = 0; // number of intersections between the downward ray and the curve
int length = curve.crvMeta.curveLength; // number of points on the curve
int outpitch = maskimg.pitchBytes; // pitch of the output mask image
// First mark every point as lying outside the curve
maskimg.imgMeta.imgData[yidx * outpitch + xidx] = OUT_LABEL;
int flag = 0; // tracks whether we are inside a collinear (tangential) run
// Walk the curve and accumulate the counters defined above
for (int i = 0; i < length; i++) {
int x = curve.crvMeta.crvData[2 * i];
int y = curve.crvMeta.crvData[2 * i + 1];
// position of the next point on the curve
int j = (i + 1) % length;
int x2 = curve.crvMeta.crvData[2 * j];
// position of the previous point on the curve
int k = (i - 1 + length) % length;
int x3 = curve.crvMeta.crvData[2 * k];
// the i-th curve point lies in the same column as the current point
if (x == xidx) {
if (y == yidx) {
//// The current point lies on the curve; optionally treat curve points as interior points
// maskimg.imgMeta.imgData[yidx * outpitch+ xidx] = IN_LABEL;
return;
}
// the intersection lies below the current point
if (y > yidx) {
// If the next curve point is also on the ray, avoid double counting and set flag to mark
// the start of a collinear run. If the next point is not on the ray, use flag to decide
// whether a collinear run just ended or this is a single-point intersection: for a single
// point, check whether it is a protruding (tangent) vertex; for the end of a run, check
// whether the curve stays on the same side of the run. If neither case applies, increment
// the counter.
if (x2 == xidx) {
if (flag == 0)
flag = x3 - x;
} else {
if (flag == 0) {
if ((x3 - x) * (x2 - x) <= 0)
downcount++;
} else {
if (flag * (x2 - x) < 0)
downcount++;
flag = 0;
}
}
}
}
}
// An odd number of intersections means the point is inside the curve
if (downcount % 2 == 1) {
maskimg.imgMeta.imgData[yidx * outpitch + xidx] = IN_LABEL;
atomicAdd(count, 1);
}
}
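// A host-side reference of the even-odd rule, added here only as an illustrative sketch for
// sanity checking. Unlike the kernel above, which walks a rasterized point list and handles
// collinear runs explicitly, this variant treats consecutive curve points as polygon edges
// and counts downward crossings of the vertical line through (px, py). All names below are
// hypothetical and not part of the original implementation.
static bool pointInClosedCurveHost(const int *crvData, int length, int px, int py)
{
    int crossings = 0;
    for (int i = 0; i < length; i++) {
        int j = (i + 1) % length;
        int x1 = crvData[2 * i],  y1 = crvData[2 * i + 1];
        int x2 = crvData[2 * j],  y2 = crvData[2 * j + 1];
        // Count the edge if it straddles the vertical line x == px and the
        // intersection lies below the query point.
        if ((x1 <= px) != (x2 <= px)) {
            double t  = (double)(px - x1) / (double)(x2 - x1);
            double yi = y1 + t * (y2 - y1);
            if (yi > py) crossings++;
        }
    }
    return (crossings & 1) == 1; // odd number of crossings => inside
}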
// Kernel: _calInsCirRadiusKer() (for every interior point, computes the distance to the nearest curve point)
static __global__ void _calInsCirRadiusKer(CurveCuda curve, ImageCuda maskimg,
int *dev_inscirDist, int *dev_inscirX, int *dev_inscirY, int *dev_num)
{
// Compute the index of the current thread
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
// Return if the current thread falls outside the input image
if (xidx >= maskimg.imgMeta.width || yidx >= maskimg.imgMeta.height)
return;
// Get the pitch of the mask image
int pitch = maskimg.pitchBytes;
int width = maskimg.imgMeta.width;
int index = yidx * width + xidx;
// Number of points on the curve
int length = curve.crvMeta.curveLength;
if (maskimg.imgMeta.imgData[yidx * pitch+ xidx] == OUT_LABEL) {
dev_inscirDist[index] = 0;
} else {
int min = 0x7fffffff; // effectively INT_MAX; squared distances can exceed 65535 on large images
int dist;
for (int i = 0; i < length; i++) {
int x = curve.crvMeta.crvData[2 * i];
int y = curve.crvMeta.crvData[2 * i + 1];
dist = (xidx - x) * (xidx - x) + (yidx - y) * (yidx - y);
if (dist < min)
min = dist;
}
dev_inscirDist[index] = (int)sqrtf(min);
}
}
__host__ void getH_inscirDist(int *tmp_inscirDist, int width, int height,
int *h_inscirDist, int *h_inscirX, int *h_inscirY)
{
int size = 0;
int tmp;
int i, j;
for (i = 0; i < height; i++) {
for(j = 0; j < width; j++) {
tmp = *(tmp_inscirDist + i * width + j);
if (tmp) {
*(h_inscirDist + size) = tmp;
*(h_inscirX + size) = j;
*(h_inscirY + size) = i;
size++;
}
}
}
}
__host__ void swap(int *h_inscirDist, int *h_inscirX, int *h_inscirY,
int index, int max_index)
{
// Guard against index == max_index: the XOR swap below would zero the entries otherwise.
if (index == max_index) return;
*(h_inscirDist + index) = *(h_inscirDist + index) ^ *(h_inscirDist + max_index);
*(h_inscirDist + max_index) = *(h_inscirDist + index) ^ *(h_inscirDist + max_index);
*(h_inscirDist + index) = *(h_inscirDist + index) ^ *(h_inscirDist + max_index);
*(h_inscirX + index) = *(h_inscirX + index) ^ *(h_inscirX + max_index);
*(h_inscirX + max_index) = *(h_inscirX + index) ^ *(h_inscirX + max_index);
*(h_inscirX + index) = *(h_inscirX + index) ^ *(h_inscirX + max_index);
*(h_inscirY + index) = *(h_inscirY + index) ^ *(h_inscirY + max_index);
*(h_inscirY + max_index) = *(h_inscirY + index) ^ *(h_inscirY + max_index);
*(h_inscirY + index) = *(h_inscirY + index) ^ *(h_inscirY + max_index);
}
__host__ void setFlag(bool *flag, int *inscirX, int *inscirY,
int cnum, int index, int disTh)
{
int x = inscirX[index];
int y = inscirY[index];
//int max = 0;
int length = disTh * disTh;
//int flagnum = 0;
for(int i = index + 1; i < cnum; i++) {
if (flag[i]) {
int dis = (inscirX[i] - x) * (inscirX[i] - x) +
(inscirY[i] - y) * (inscirY[i] - y);
//if(max<dis) max = dis;
if (dis < length) {
flag[i] = false;
//flagnum++;
}
}
}
}
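// getInscirDist performs a greedy selection on the host: each round picks the remaining
// candidate center with the largest distance to the curve (the largest inscribed radius),
// records it, and then suppresses every remaining candidate whose center lies within disTh
// pixels of the chosen one (setFlag). It stops after num circles or when no candidates are
// left, returning the number actually selected through count.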
__host__ void getInscirDist(int *inscirDist, int *inscirX, int *inscirY,
int num, int disTh, int &count,int *h_inscirDist,
int *h_inscirX, int *h_inscirY, int cnum)
{
int max = 0;
int max_index = 0;
int tmp;
int i, j;
bool *flag = new bool[cnum];
memset(flag, true, cnum * sizeof (bool));
//cout<<"getInscirDist num disTh: "<<num<<" "<<disTh<<endl;
for(i = 0; i < num; i++) {
max = 0;
max_index = 0;
bool in = false;
for (j = i; j < cnum; j++) {
tmp = *(h_inscirDist + j);
if (!flag[j]) continue;
if (tmp > max) {
max = tmp;
max_index = j;
in = true;
}
}
if (!in) {
count = i;
break;
}
swap(h_inscirDist, h_inscirX, h_inscirY, i, max_index);
*(inscirDist + i) = *(h_inscirDist + i);
*(inscirX + i) = *(h_inscirX + i);
*(inscirY + i) = *(h_inscirY + i);
flag[i] = false;
setFlag(flag, h_inscirX, h_inscirY, cnum, i, disTh);
}
if(i == num) {
count = num;
}
delete [] flag;
}
// Host member method: inscribedCircle (maximal inscribed circles of a curve)
__host__ int InscribedCircle::inscribedCircle(Curve *curve, int width,
int height, int &count, int *inscirDist, int *inscirX, int *inscirY)
{
// Check whether the input curve, the radius array, or the center coordinate arrays are null
if (curve == NULL || inscirDist == NULL ||
inscirX == NULL || inscirY == NULL)
return NULL_POINTER;
// Check that the input parameters contain valid data
if (curve->curveLength <= 0 || width <= 0 || height <= 0)
return INVALID_DATA;
// Check whether the input curve is closed; return an error if it is not
//if (!curve->closed)
// return INVALID_DATA;
// Local variables for error codes.
int errcode;
cudaError_t cuerrcode;
// Copy the curve into device memory
errcode = CurveBasicOp::copyToCurrentDevice(curve);
if (errcode != NO_ERROR)
return errcode;
// Get the CurveCuda pointer
CurveCuda *curvecud = CURVE_CUDA(curve);
// Device-side pointer used to allocate several buffers with a single allocation
void *temp_dev = NULL;
// Pointer to the temporary mask image
Image *maskimg = NULL;
// Allocate the temporary mask image on the device
errcode = ImageBasicOp::newImage(&maskimg);
if (errcode != NO_ERROR)
return errcode;
errcode = ImageBasicOp::makeAtCurrentDevice(maskimg, width, height);
if (errcode != NO_ERROR) {
//
return errcode;
}
// Get the ImageCuda pointer
ImageCuda *maskimgcud = IMAGE_CUDA(maskimg);
size_t datasize = width * height;
// Allocate device memory for temp_dev
cuerrcode = cudaMalloc((void**)&temp_dev, (datasize * 3 + 1) * sizeof (int));
if (cuerrcode != cudaSuccess) {
//
return CUDA_ERROR;
}
// Carve the device pointers out of the single temp_dev allocation
int *dev_inscirDist = (int *)temp_dev;
int *dev_inscirX = (int *)(dev_inscirDist + datasize);
int *dev_inscirY = (int *)(dev_inscirX + datasize);
int *dev_count = (int *)(dev_inscirY + datasize);
cudaMemset(dev_count, 0, sizeof (int));
// Compute the thread-block dimensions and the grid dimensions for the kernel launches.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (width + blocksize.x - 1) / blocksize.x;
gridsize.y = (height + blocksize.y - 1) / blocksize.y;
// Launch the kernel that sets the interior of the closed curve to white and counts the enclosed points
_setCloseAreaKer<<<gridsize, blocksize>>>(
*curvecud, *maskimgcud, dev_count);
// Return the error code if the CUDA call failed
if (cudaGetLastError() != cudaSuccess) {
//FREE_CURVE_TOPOLOGY;
return CUDA_ERROR;
}
// Launch the kernel that computes, for every interior point, the distance to the nearest curve point
_calInsCirRadiusKer<<<gridsize, blocksize>>>(*curvecud, *maskimgcud,
dev_inscirDist, dev_inscirX, dev_inscirY, dev_count);
// Return the error code if the CUDA call failed
if (cudaGetLastError() != cudaSuccess) {
//FREE_CURVE_TOPOLOGY;
return CUDA_ERROR;
}
int cnum;
cudaMemcpy(&cnum, dev_count, sizeof (int), cudaMemcpyDeviceToHost);
int *h_inscirDist = new int [cnum];
int *h_inscirX = new int [cnum];
int *h_inscirY = new int [cnum];
int *tmp_inscirDist = new int [datasize];
cudaMemcpy(tmp_inscirDist, dev_inscirDist, sizeof (int) * datasize,
cudaMemcpyDeviceToHost);
getH_inscirDist(tmp_inscirDist, width, height,
h_inscirDist, h_inscirX, h_inscirY);
getInscirDist(inscirDist, inscirX, inscirY, this->num, this->disTh, count,
h_inscirDist, h_inscirX, h_inscirY, cnum);
// for(int i = 0; i < count; i++) {
// cout<<"["<<h_inscirDist[i]<<",("<<h_inscirX[i]<<","<<h_inscirY[i]<<")]";
// }
// cout<<endl;
for(int i = 0; i < count; i++) {
cout<<"["<<inscirDist[i]<<",("<<inscirX[i]<<","<<inscirY[i]<<")]";
}
//sort(tmp,tmp+datasize,greater<int>());
//for(int i=0;i<m;i++) {
// if(i%50==0) cout<<endl;
// cout<<inscirDist[i]<< " ";
//}
//memcpy(t1, inscirDist, cnum * sizeof (int));
// memset(t1+cnum,0,(n - cnum) * sizeof (int));
// int *t2 = new int [n];
// SortArray sort(1024,n/1024,1,true);
// sort.shearSort(t1,t2);
cout<<endl<<"cnum count: "<<cnum<<" "<<count<<endl;
//cout<<width<<" "<<height<<endl;
cudaFree(temp_dev);
delete [] h_inscirDist;
delete [] h_inscirX;
delete [] h_inscirY;
delete [] tmp_inscirDist;
return NO_ERROR;
}
#include <cmath>
#include "core/context_cuda.h"
#include "utils/cuda_device.h"
#include "utils/math_functions.h"
#include "utils/cast.h"
namespace dragon {
namespace math {
/******************** Level-0 ********************/
template <typename T>
__global__ void _Set(const int n, const T alpha, T* x) {
CUDA_KERNEL_LOOP(idx, n) {
x[idx] = alpha;
}
}
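// CUDA_KERNEL_LOOP (used above and throughout this file) is the framework's grid-stride loop
// helper; in Caffe-style code bases it is typically defined along the lines of
//   for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x)
// so each kernel covers all n elements regardless of the launch configuration.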
template <> void Set<float, CUDAContext>(const int n,
const float alpha,
float* x) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(x, 0, sizeof(float) * n));
return;
}
_Set<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, alpha, x);
}
template <> void Set<int, CUDAContext>(const int n,
const int alpha,
int* x) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(x, 0, sizeof(int) * n));
return;
}
_Set<int> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, alpha, x);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _SetHalf2(const int n, const half2 alpha, half2* x) {
CUDA_KERNEL_LOOP(idx, n) {
x[idx] = alpha;
}
}
template <> void Set<float16, CUDAContext>(const int n,
const float16 alpha,
float16* x) {
if (n % 2 == 0) {
_SetHalf2<half2> << <GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
dragon_cast<half2, float16>(alpha),
reinterpret_cast<half2*>(x));
} else {
_Set<float16> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, alpha, x);
}
}
#endif
template <> void RandomUniform<uint32_t, CUDAContext>(const int n,
const float low,
const float high,
uint32_t* x) {
// note that we ignore low / high:
// curand can only generate uniformly over the full [0, UINT32_MAX] range
CURAND_CHECK(curandGenerate(curand_generator(), x, n));
}
template <> void RandomUniform<float16, CUDAContext>(const int n,
const float low,
const float high,
float16* x) {
NOT_IMPLEMENTED;
}
template <> void RandomNormal<float, CUDAContext>(const int n,
const float mu,
const float sigma,
float* x) {
CURAND_CHECK(curandGenerateNormal(curand_generator(), x, n, mu, sigma));
}
template <> void RandomNormal<float16, CUDAContext>(const int n,
const float mu,
const float sigma,
float16* x) {
NOT_IMPLEMENTED;
}
template <> void RandomBernoulli<float, CUDAContext>(const int n,
const float p,
unsigned int* x) {
// curand cannot generate a Bernoulli distribution directly;
// we recommend implementing it within the specific use case, e.g. Dropout
NOT_IMPLEMENTED;
}
/******************** Level-1 ********************/
template <typename T>
__global__ void _Add(const int n, const T* a, const T *b, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = a[idx] + b[idx];
}
}
template <> void Add<float, CUDAContext>(int n,
const float* a,
const float* b,
float *y) {
_Add<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, a, b, y);
}
template <typename T>
__global__ void _Sub(const int n, const T* a, const T *b, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = a[idx] - b[idx];
}
}
template <> void Sub<float, CUDAContext>(int n,
const float* a,
const float* b,
float *y) {
_Sub<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, a, b, y);
}
template <typename T>
__global__ void _Mul(const int n, const T* a, const T* b, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = a[idx] * b[idx];
}
}
template <> void Mul<float, CUDAContext>(int n,
const float* a,
const float* b,
float* y) {
_Mul<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, a, b, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _MulHalf(const int n, const half* a, const half* b, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul(a[idx], b[idx]);
#endif
}
}
template <typename T>
__global__ void _MulHalf2(const int n, const half2* a, const half2* b, half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul2(a[idx], b[idx]);
#endif
}
}
template <> void Mul<float16, CUDAContext>(int n,
const float16* a,
const float16* b,
float16* y) {
if (n % 2 == 0)
_MulHalf2<half2> << <GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
reinterpret_cast<const half2*>(a),
reinterpret_cast<const half2*>(b),
reinterpret_cast<half2*>(y));
else _MulHalf<half> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
reinterpret_cast<const half*>(a),
reinterpret_cast<const half*>(b),
reinterpret_cast<half*>(y));
}
#endif
template <typename T>
__global__ void _Div(const int n, const T* a, const T* b, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = a[idx] / b[idx];
}
}
template <> void Div<float, CUDAContext>(int n,
const float* a,
const float* b,
float* y) {
_Div<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, a, b, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _DivHalf(const int n, const half* a, const half* b, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hdiv(a[idx], b[idx]);
#endif
}
}
template <> void Div<float16, CUDAContext>(int n,
const float16* a,
const float16* b,
float16* y) {
_DivHalf<half> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
reinterpret_cast<const half*>(a),
reinterpret_cast<const half*>(b),
reinterpret_cast<half*>(y));
}
#endif
template <typename T>
__global__ void _Clip(const int n, const T low, const T high, T* x) {
CUDA_KERNEL_LOOP(idx, n) {
x[idx] = x[idx] > high ? high : x[idx];
x[idx] = x[idx] < low ? low : x[idx];
}
}
template <> void Clip<float, CUDAContext>(const int n,
const float low,
const float high,
float* x) {
_Clip<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, low, high, x);
}
template <typename T>
__global__ void _Exp(const int n, const T* a, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = std::exp(a[idx]);
}
}
template <> void Exp<float, CUDAContext>(int n, const float* x, float *y) {
_Exp<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, x, y);
}
template <typename T>
__global__ void _Log(const int n, const T* a, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = std::log(a[idx]);
}
}
template <> void Log<float, CUDAContext>(int n, const float* x, float *y) {
_Log<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, x, y);
}
template <typename T>
__global__ void _Square(const int n, const T* x, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = x[idx] * x[idx];
}
}
template <> void Square<float, CUDAContext>(int n,
const float* x,
float* y) {
_Square<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, x, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _SquareHalf(const int n, const half* x, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul(x[idx], x[idx]);
#endif
}
}
template <typename T>
__global__ void _SquareHalf2(const int n, const half2* x, half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul2(x[idx], x[idx]);
#endif
}
}
template <> void Square<float16, CUDAContext>(int n,
const float16* x,
float16* y) {
if (n % 2 == 0)
_SquareHalf2<half2> << < GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
else _SquareHalf<half> << < GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
CUDA_POST_KERNEL_CHECK;
}
#endif
template <typename T>
__global__ void _Sqrt(const int n, const T* x, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = std::sqrt(x[idx]);
}
}
template <> void Sqrt<float, CUDAContext>(int n,
const float* x,
float* y) {
_Sqrt<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, x, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _SqrtHalf(const int n, const half* x, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = hsqrt(x[idx]);
#endif
}
}
template <typename T>
__global__ void _SqrtHalf2(const int n, const half2* x, half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = h2sqrt(x[idx]);
#endif
}
}
template <> void Sqrt<float16, CUDAContext>(int n,
const float16* x,
float16* y) {
if (n % 2 == 0)
_SqrtHalf2<half2> << < GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
else _SqrtHalf<half> << < GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
CUDA_POST_KERNEL_CHECK;
}
#endif
template <typename T>
__global__ void _Pow(const int n, const T alpha, const T* a, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = std::pow(a[idx], alpha);
}
}
template <> void Pow<float, CUDAContext>(int n,
const float alpha,
const float* x,
float* y) {
_Pow<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, alpha, x, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _PowHalf(const int n, const float alpha, const half* a, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul(a[idx], a[idx]);
#endif
}
}
template <typename T>
__global__ void _PowHalf2(const int n, const float alpha, const half2* a, half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul2(a[idx], a[idx]);
#endif
}
}
template <> void Pow<float16, CUDAContext>(int n,
const float alpha,
const float16* x,
float16* y) {
CHECK(alpha == float(2)) << "fp16 only supports the power of 2";
if (n % 2 == 0)
_PowHalf2<half2> << < GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
alpha,
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
else _PowHalf<half> << < GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
alpha,
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
CUDA_POST_KERNEL_CHECK;
}
#endif
template <typename T>
__global__ void _Inv(const int n, const float numerator, const T* x, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = numerator / x[idx];
}
}
template <> void Inv<float, CUDAContext>(const int n,
const float numerator,
const float* x,
float* y) {
_Inv<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, numerator, x, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _InvHalf(const int n, const half numerator, const half* x, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul(hrcp(x[idx]), numerator);
#endif
}
}
template <typename T>
__global__ void _InvHalf2(const int n, const half2 numerator, const half2* x, half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul2(h2rcp(x[idx]), numerator);
#endif
}
}
template <> void Inv<float16, CUDAContext>(const int n,
const float numerator,
const float16* x,
float16* y) {
if (n % 2 == 0) {
_InvHalf2<half2> << < GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
dragon_cast<half2, float>(numerator),
reinterpret_cast<const half2*>(x),
reinterpret_cast<half2*>(y));
}
else {
_InvHalf<half> << < GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
dragon_cast<half, float>(numerator),
reinterpret_cast<const half*>(x),
reinterpret_cast<half*>(y));
}
CUDA_POST_KERNEL_CHECK;
}
#endif
/******************** Level-2 ********************/
template <> void Scal<float, CUDAContext>(const int n, const float alpha, float* y) {
CUBLAS_CHECK(cublasSscal_v2(cublas_handle(),
n,
&alpha,
y, 1));
}
template <> void Scal<float16, CUDAContext>(const int n, const float alpha, float16* y) {
CUBLAS_CHECK(cublasScalEx(cublas_handle(),
n,
&alpha, CUDA_R_32F,
y, CUDA_R_16F, 1,
CUDA_R_32F));
}
template <> void Scale<float, CUDAContext>(const int n,
const float alpha,
const float* x,
float* y) {
CUBLAS_CHECK(cublasScopy_v2(cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal_v2(cublas_handle(), n, &alpha, y, 1));
}
template <> void Scale<float16, CUDAContext>(const int n,
const float alpha,
const float16* x,
float16* y) {
CUDAContext ctx;
ctx.Copy<float16, CUDAContext, CUDAContext>(n, y, x);
Scal<float16, CUDAContext>(n, alpha, y);
}
template <> float StridedDot<float, CUDAContext>(const int n,
const float* a,
const int incx,
const float* b,
const int incy) {
float result;
CUBLAS_CHECK(cublasSdot_v2(cublas_handle(),
n,
a, incx,
b, incy,
&result));
return result;
}
template <> float Dot<float, CUDAContext>(int n, const float* a, const float* b) {
return StridedDot<float, CUDAContext>(n, a, 1, b, 1);
}
template <> float Dot<float16, CUDAContext>(int n, const float16* a, const float16* b) {
float16 result;
CUBLAS_CHECK(cublasDotEx(cublas_handle(),
n,
a, CUDA_R_16F, 1,
b, CUDA_R_16F, 1,
&result, CUDA_R_16F,
CUDA_R_32F));
return dragon_cast<float, float16>(result);
}
template <> float ASum<float, CUDAContext>(const int n, const float* x) {
float result;
CUBLAS_CHECK(cublasSasum_v2(cublas_handle(), n, x, 1, &result));
return result;
}
template <typename T>
__global__ void _AddScalar(const int n, T alpha, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] += alpha;
}
}
template <> void AddScalar<float, CUDAContext>(const int n, const float alpha, float* y) {
_AddScalar<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, alpha, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _AddScalarHalf(const int n, half alpha, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hadd(y[idx], alpha);
#endif
}
}
template <typename T>
__global__ void _AddScalarHalf2(const int n, half2 alpha, half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hadd2(y[idx], alpha);
#endif
}
}
template <> void AddScalar<float16, CUDAContext>(const int n, const float alpha, float16* y) {
if (n % 2 == 0) {
_AddScalarHalf2<half2> << <GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
dragon_cast<half2, float>(alpha),
reinterpret_cast<half2*>(y));
} else {
_AddScalarHalf<half> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
dragon_cast<half, float>(alpha),
reinterpret_cast<half*>(y));
}
CUDA_POST_KERNEL_CHECK;
}
#endif
template <typename T>
__global__ void _MulScalar(const int n, T alpha, T* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] *= alpha;
}
}
template <> void MulScalar<float, CUDAContext>(const int n, const float alpha, float* y) {
_MulScalar<float> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n, alpha, y);
}
#ifdef WITH_CUDA_FP16
template <typename T>
__global__ void _MulScalarHalf(const int n, half alpha, half* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul(y[idx], alpha);
#endif
}
}
template <typename T>
__global__ void _MulScalarHalf2(const int n, half2 alpha, half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
#if __CUDA_ARCH__ >= 530
y[idx] = __hmul2(y[idx], alpha);
#endif
}
}
template <> void MulScalar<float16, CUDAContext>(const int n, const float alpha, float16* y) {
if (n % 2 == 0) {
_MulScalarHalf2<half2> << <GET_BLOCKS(n / 2), CUDA_NUM_THREADS >> >(n / 2,
dragon_cast<half2, float>(alpha),
reinterpret_cast<half2*>(y));
} else {
_MulScalarHalf<half> << <GET_BLOCKS(n), CUDA_NUM_THREADS >> >(n,
dragon_cast<half, float>(alpha),
reinterpret_cast<half*>(y));
}
CUDA_POST_KERNEL_CHECK;
}
#endif
template <> void Axpy<float, CUDAContext>(const int n,
float alpha,
const float* x,
float* y) {
CUBLAS_CHECK(cublasSaxpy_v2(cublas_handle(),
n,
&alpha, x, 1,
y, 1));
}
template <> void Axpy<float16, CUDAContext>(const int n,
float alpha,
const float16* x,
float16* y) {
CUBLAS_CHECK(cublasAxpyEx(cublas_handle(),
n,
&alpha, CUDA_R_32F,
x, CUDA_R_16F, 1,
y, CUDA_R_16F, 1,
CUDA_R_32F));
}
template <> void Axpby<float, CUDAContext>(const int n,
float alpha,
const float* x,
float beta,
float* y) {
Scal<float, CUDAContext>(n, beta, y);
Axpy<float, CUDAContext>(n, alpha, x, y);
}
template <> void Axpby<float16, CUDAContext>(const int n,
float alpha,
const float16* x,
float beta,
float16* y) {
Scal<float16, CUDAContext>(n, beta, y);
Axpy<float16, CUDAContext>(n, alpha, x, y);
}
/******************** Level-3 ********************/
template <> void RandomUniform<float, CUDAContext>(const int n,
const float low,
const float high,
float* x) {
CURAND_CHECK(curandGenerateUniform(curand_generator(), x, n));
float range = high - low;
if (range != float(1)) Scal<float, CUDAContext>(n, range, x);
if (low != float(0)) AddScalar<float, CUDAContext>(n, low, x);
}
template <> void Gemm<float, CUDAContext>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float *C,
TensorProto_DataType math_type) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA = (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm_v2(cublas_handle(),
cuTransB, cuTransA,
N, M, K,
&alpha,
B, ldb,
A, lda,
&beta,
C, N));
}
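// Note on the argument order above: cuBLAS assumes column-major storage while
// these tensors are row-major. Computing C^T = B^T * A^T in column-major terms
// yields the desired row-major C = A * B, which is why B and A (and their
// transpose flags) are swapped and N is used as the leading dimension of C.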
#ifdef WITH_CUDA_FP16
template <> void Gemm<float16, CUDAContext>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16 *C,
TensorProto_DataType math_type) {
int lda = (transA == CblasNoTrans) ? K : M;
int ldb = (transB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA = (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(cublas_handle(),
cuTransB, cuTransA,
N, M, K,
&alpha,
B, CUDA_R_16F, ldb,
A, CUDA_R_16F, lda,
&beta,
C, CUDA_R_16F, N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// materialize the half-precision scalars first instead of taking
// the address of a temporary
const half alpha_half = dragon_cast<half, float>(alpha);
const half beta_half = dragon_cast<half, float>(beta);
CUBLAS_CHECK(cublasHgemm(cublas_handle(),
cuTransB, cuTransA,
N, M, K,
&alpha_half,
reinterpret_cast<const half*>(B), ldb,
reinterpret_cast<const half*>(A), lda,
&beta_half,
reinterpret_cast<half*>(C), N));
} else {
LOG(FATAL) << "Unsupported math type";
}
}
#endif
template <> void Gemv<float, CUDAContext>(const CBLAS_TRANSPOSE transA,
const int M, const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
TensorProto_DataType math_type) {
cublasOperation_t cuTransA = (transA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv_v2(cublas_handle(),
cuTransA,
N, M,
&alpha,
A, N,
x, 1,
&beta,
y, 1));
}
#ifdef WITH_CUDA_FP16
template <> void Gemv<float16, CUDAContext>(const CBLAS_TRANSPOSE transA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
TensorProto_DataType math_type) {
cublasOperation_t cuTransA = (transA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
int m = (cuTransA == CUBLAS_OP_N) ? N : M;
int k = (cuTransA == CUBLAS_OP_N) ? M : N;
int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
int LDC = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(cublas_handle(),
cuTransA, CUBLAS_OP_N,
m, 1, k,
&alpha,
A, CUDA_R_16F, LDA,
x, CUDA_R_16F, k,
&beta,
y, CUDA_R_16F, LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
const half alpha_half = dragon_cast<half, float>(alpha);
const half beta_half = dragon_cast<half, float>(beta);
CUBLAS_CHECK(cublasHgemm(cublas_handle(),
cuTransA, CUBLAS_OP_N,
m, 1, k,
&alpha_half,
reinterpret_cast<const half*>(A), LDA,
reinterpret_cast<const half*>(x), k,
&beta_half,
reinterpret_cast<half*>(y), LDC));
} else {
LOG(FATAL) << "Unsupported math type";
}
}
#endif
} // namespace math
} // namespace dragon
#endif // WITH_CUDA
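// Illustrative usage sketch (not part of the original source), assuming device
// buffers d_x and d_y of length n have already been allocated:
//   dragon::math::Set<float, dragon::CUDAContext>(n, 0.f, d_y);
//   dragon::math::RandomUniform<float, dragon::CUDAContext>(n, -1.f, 1.f, d_x);
//   dragon::math::Axpby<float, dragon::CUDAContext>(n, 2.f, d_x, 0.5f, d_y);  // y = 2*x + 0.5*y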
// threads per block
#define THREADS1 256 // must be a power of 2
#define THREADS2 256
#define THREADS3 256
#define THREADS4 256
#define THREADS5 256
#define THREADS6 256
// block count = factor * #SMs
#define FACTOR1 2
#define FACTOR2 2
#define FACTOR3 1 // must all be resident at the same time
#define FACTOR4 1 // must all be resident at the same time
#define FACTOR5 2
#define FACTOR6 2
#define WARPSIZE 32
#define MAXDEPTH 32
//
// compute center and radius
//
__global__
void BoundingBoxKernel(
const int nnodesd,
const int nbodiesd,
int* const __restrict__ startd,
int* const __restrict__ childd,
float4* const __restrict__ posMassd,
float3* const __restrict__ maxd,
float3* const __restrict__ mind,
float* const __restrict__ radiusd,
int* const __restrict__ bottomd,
int* const __restrict__ stepd,
unsigned int* const __restrict__ blkcntd)
{
int i, j, k;
float val;
float3 min, max;
__shared__ float sminx[THREADS1],
smaxx[THREADS1],
sminy[THREADS1],
smaxy[THREADS1],
sminz[THREADS1],
smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
const float4 p0 = posMassd[0];
min.x = max.x = p0.x;
min.y = max.y = p0.y;
min.z = max.z = p0.z;
// scan all bodies
i = threadIdx.x;
int inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
const float4 p = posMassd[j];
val = p.x;
min.x = fminf(min.x, val);
max.x = fmaxf(max.x, val);
val = p.y;
min.y = fminf(min.y, val);
max.y = fmaxf(max.y, val);
val = p.z;
min.z = fminf(min.z, val);
max.z = fmaxf(max.z, val);
}
// reduction in shared memory
sminx[i] = min.x;
smaxx[i] = max.x;
sminy[i] = min.y;
smaxy[i] = max.y;
sminz[i] = min.z;
smaxz[i] = max.z;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = min.x = fminf(min.x, sminx[k]);
smaxx[i] = max.x = fmaxf(max.x, smaxx[k]);
sminy[i] = min.y = fminf(min.y, sminy[k]);
smaxy[i] = max.y = fmaxf(max.y, smaxy[k]);
sminz[i] = min.z = fminf(min.z, sminz[k]);
smaxz[i] = max.z = fmaxf(max.z, smaxz[k]);
}
}
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
mind[k] = min;
maxd[k] = max;
__threadfence();
inc = gridDim.x - 1;
if (inc == atomicInc(blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
float3 minp = mind[j];
float3 maxp = maxd[j];
min.x = fminf(min.x, minp.x);
max.x = fmaxf(max.x, maxp.x);
min.y = fminf(min.y, minp.y);
max.y = fmaxf(max.y, maxp.y);
min.z = fminf(min.z, minp.z);
max.z = fmaxf(max.z, maxp.z);
}
// compute radius
val = fmaxf(max.x - min.x, max.y - min.y);
*radiusd = fmaxf(val, max.z - min.z) * 0.5f;
// create root node
k = nnodesd;
*bottomd = k;
startd[k] = 0;
float4 p;
p.x = (min.x + max.x) * 0.5f;
p.y = (min.y + max.y) * 0.5f;
p.z = (min.z + max.z) * 0.5f;
p.w = -1.0f;
posMassd[k] = p;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
(*stepd)++;
}
}
}
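// The kernel above reduces in two levels: each block reduces its bodies in shared
// memory and writes a per-block min/max, then the last block to finish (detected
// via atomicInc on blkcntd, which also wraps the counter back to 0 once it reaches
// gridDim.x - 1) combines the per-block results, computes the root radius and
// root cell, and increments the step counter.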
//
// build tree
//
__global__
void ClearKernel1(const int nnodesd, const int nbodiesd, int* const __restrict__ childd)
{
int top = 8 * nnodesd;
int bottom = 8 * nbodiesd;
int inc = blockDim.x * gridDim.x;
int k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < top) {
childd[k] = -1;
k += inc;
}
}
__global__
void TreeBuildingKernel(
const int nnodesd,
const int nbodiesd,
volatile int* const __restrict__ childd,
const float4* const __restrict__ posMassd,
const float* const __restrict radiusd,
int* const __restrict bottomd
)
{
int i, j, depth, skip, inc;
float x, y, z, r;
float dx, dy, dz;
int ch, n, cell, locked, patch;
float radius;
// cache root data
radius = *radiusd * 0.5f;
const float4 root = posMassd[nnodesd];
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
const float4 p = posMassd[i];
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
n = nnodesd;
depth = 1;
r = radius;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (root.x < p.x) {j = 1; dx = r;}
if (root.y < p.y) {j |= 2; dy = r;}
if (root.z < p.z) {j |= 4; dz = r;}
x = root.x + dx;
y = root.y + dy;
z = root.z + dz;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (x < p.x) {j = 1; dx = r;}
if (y < p.y) {j |= 2; dy = r;}
if (z < p.z) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == -1) {
if (ch == atomicCAS((int*)&childd[locked], ch, i)) { // if null, just insert the new body
i += inc; // move on to next body
skip = 1;
}
} else { // there already is a body at this position
if (ch == atomicCAS((int*)&childd[locked], ch, -2)) { // try to lock
patch = -1;
const float4 chp = posMassd[ch];
// create new cell(s) and insert the old and new bodies
do {
depth++;
cell = atomicSub(bottomd, 1) - 1;
if (patch != -1) {
childd[n*8+j] = cell;
}
patch = max(patch, cell);
j = 0;
if (x < chp.x) j = 1;
if (y < chp.y) j |= 2;
if (z < chp.z) j |= 4;
childd[cell*8+j] = ch;
n = cell;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
if (x < p.x) {j = 1; dx = r;}
if (y < p.y) {j |= 2; dy = r;}
if (z < p.z) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
i += inc; // move on to next body
skip = 2;
}
}
}
__syncthreads(); // optional barrier for performance
if (skip == 2) {
childd[locked] = patch;
}
}
}
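// Child-pointer encoding used by TreeBuildingKernel: values >= nbodiesd are cell
// indices, values in [0, nbodiesd) are body indices, -1 marks an empty slot, and
// -2 marks a slot locked while a thread splits a leaf. A thread that wins the
// atomicCAS builds the new subtree privately and only then publishes it by
// writing 'patch' into the locked slot, so other threads never observe a
// partially built subtree.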
__global__
void ClearKernel2(
const int nnodesd,
int* const __restrict__ startd,
float4* const __restrict__ posMassd,
int* const __restrict__ bottomd)
{
int k, inc, bottom;
bottom = *bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < nnodesd) {
posMassd[k].w = -1.0f;
startd[k] = -1;
k += inc;
}
}
//
// compute center of mass
//
__global__
void SummarizationKernel(
const int nnodesd,
const int nbodiesd,
volatile int* const __restrict__ countd,
const int* const __restrict__ childd,
volatile float4* const __restrict__ posMassd, // will cause hanging for 2048 bodies without volatile
int* const __restrict bottomd)
{
__shared__ int child[THREADS3 * 8];
__shared__ float mass[THREADS3 * 8];
int i, j, ch, cnt;
float cm, px, py, pz, m;
int bottom = *bottomd;
int inc = blockDim.x * gridDim.x;
int k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
int restart = k;
for (j = 0; j < 3; j++) { // wait-free pre-passes
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (posMassd[k].w < 0.0f) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch >= nbodiesd) && ((mass[i*THREADS3+threadIdx.x] = posMassd[ch].w) < 0.0f)) {
break;
}
}
if (i == 8) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
const float chx = posMassd[ch].x;
const float chy = posMassd[ch].y;
const float chz = posMassd[ch].z;
const float chw = posMassd[ch].w;
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = chw;
cnt++;
}
// add child's contribution
cm += m;
px += chx * m;
py += chy * m;
pz += chz * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posMassd[k].x = px * m;
posMassd[k].y = py * m;
posMassd[k].z = pz * m;
posMassd[k].w = cm;
}
}
k += inc; // move on to next cell
}
k = restart;
}
j = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (posMassd[k].w >= 0.0f) {
k += inc;
} else {
if (j == 0) {
j = 8;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch < nbodiesd) || ((mass[i*THREADS3+threadIdx.x] = posMassd[ch].w) >= 0.0f)) {
j--;
}
}
} else {
j = 8;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if ((ch < nbodiesd) || (mass[i*THREADS3+threadIdx.x] >= 0.0f) || ((mass[i*THREADS3+threadIdx.x] = posMassd[ch].w) >= 0.0f)) {
j--;
}
}
}
if (j == 0) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
// four reads due to missing copy constructor for "volatile float4"
const float chx = posMassd[ch].x;
const float chy = posMassd[ch].y;
const float chz = posMassd[ch].z;
const float chw = posMassd[ch].w;
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = chw;
cnt++;
}
// add child's contribution
cm += m;
px += chx * m;
py += chy * m;
pz += chz * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
// four writes due to missing copy constructor for "volatile float4"
posMassd[k].x = px * m;
posMassd[k].y = py * m;
posMassd[k].z = pz * m;
posMassd[k].w = cm;
k += inc;
}
}
}
}
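// SummarizationKernel works bottom-up: a cell's center of mass can be computed
// only once all of its children have a non-negative mass. The three "wait-free
// pre-passes" resolve whatever happens to be ready without spinning; the final
// loop then polls the cached children until the remaining cells become ready.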
//
// sort bodies
//
__global__
void SortKernel(
const int nnodesd,
const int nbodiesd,
int* const __restrict__ sortd,
const int* const __restrict__ countd,
volatile int* const __restrict__ startd,
int* const __restrict__ childd,
int* const __restrict__ bottomd)
{
int i, j;
int bottom = *bottomd;
int dec = blockDim.x * gridDim.x;
int k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
int start = startd[k];
if (start >= 0) {
j = 0;
for (i = 0; i < 8; i++) {
int ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
j++;
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
}
k -= dec; // move on to next cell
}
__syncthreads(); // optional barrier for performance
}
}
//
// compute force
//
__global__
void ForceCalculationKernel(
const int nnodesd,
const int nbodiesd,
const float dthfd,
const float itolsqd,
const float epssqd,
const int* const __restrict__ sortd,
const int* const __restrict__ childd,
const float4* const __restrict__ posMassd,
float2* const __restrict__ veld,
float4* const __restrict__ accVeld,
const float* const __restrict__ radiusd,
const int* const __restrict__ stepd)
{
int i, j, k, n, depth, base, sbase, diff, pd, nd;
float ax, ay, az, dx, dy, dz, tmp;
__shared__ int pos[THREADS5], node[THREADS5];
__shared__ float dq[THREADS5];
if (0 == threadIdx.x) {
tmp = *radiusd * 2;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < MAXDEPTH; i++) {
dq[i] = dq[i - 1] * 0.25f;
dq[i - 1] += epssqd;
}
dq[i - 1] += epssqd;
}
__syncthreads();
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
const float4 pi = posMassd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
pos[j] = 0;
node[j] = nnodesd * 8;
}
do {
// stack is not empty
pd = pos[depth];
nd = node[depth];
while (pd < 8) {
// node on top of stack has more children to process
n = childd[nd + pd]; // load child pointer
pd++;
if (n >= 0) {
const float4 pn = posMassd[n];
dx = pn.x - pi.x;
dy = pn.y - pi.y;
dz = pn.z - pi.z;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all_sync(0xffffffff, tmp >= dq[depth])) {
// check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = pn.w * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
if (sbase == threadIdx.x) {
pos[depth] = pd;
node[depth] = nd;
}
depth++;
pd = 0;
nd = n * 8;
}
} else {
pd = 8; // early out because all remaining children are also zero
}
}
depth--; // done with this level
} while (depth >= j);
float4 acc = accVeld[i];
if (*stepd > 0) {
// update velocity
float2 v = veld[i];
v.x += (ax - acc.x) * dthfd;
v.y += (ay - acc.y) * dthfd;
acc.w += (az - acc.z) * dthfd;
veld[i] = v;
}
// save computed acceleration
acc.x = ax;
acc.y = ay;
acc.z = az;
accVeld[i] = acc;
}
}
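// Opening criterion used above: dq[d] is the squared cell diameter at tree depth
// d divided by the squared opening angle (itolsqd), plus softening. A cell is
// treated as a single pseudo-body when every thread in the warp agrees
// (__all_sync) that its squared distance is at least dq[depth]; otherwise the
// cell is opened and its children are pushed onto the per-warp stack.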
//
// advance bodies
//
__global__
void IntegrationKernel(
const int nbodiesd,
const float dtimed,
const float dthfd,
float4* const __restrict__ posMass,
float2* const __restrict__ veld,
float4* const __restrict__ accVeld)
{
int i, inc;
float dvelx, dvely, dvelz;
float velhx, velhy, velhz;
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
float4 acc = accVeld[i];
dvelx = acc.x * dthfd;
dvely = acc.y * dthfd;
dvelz = acc.z * dthfd;
float2 v = veld[i];
velhx = v.x + dvelx;
velhy = v.y + dvely;
velhz = acc.w + dvelz;
float4 p = posMass[i];
p.x += velhx * dtimed;
p.y += velhy * dtimed;
p.z += velhz * dtimed;
posMass[i] = p;
v.x = velhx + dvelx;
v.y = velhy + dvely;
acc.w = velhz + dvelz;
veld[i] = v;
accVeld[i] = acc;
}
}
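// IntegrationKernel performs a leapfrog-style update: velh = v + a * dthf (half
// kick), positions drift a full dtime at velh, and v = velh + a * dthf completes
// the step. The (ax - acc.x) * dthfd terms in ForceCalculationKernel then correct
// the velocity once the new acceleration is known (skipped when *stepd == 0, i.e.
// on the first step).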
__global__
void InitializationKernel(int *step, unsigned int *blkcnt)
{
*step = -1;
*blkcnt = 0;
}
// random number generator (https://github.com/staceyson/splash2/blob/master/codes/apps/barnes/util.C)
static int randx = 7;
static double drnd()
{
const int lastrand = randx;
randx = (1103515245L * randx + 12345) & 0x7FFFFFFF;
return (double)lastrand / 2147483648.0;
}
int main(int argc, char* argv[])
{
// perform some checks
printf("ECL-BH v4.5\n");
printf("Copyright (c) 2010-2020 Texas State University\n");
if (argc != 3) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps\n");
exit(-1);
}
const int nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
int timesteps = atoi(argv[2]);
if (timesteps < 0) {
fprintf(stderr, "the number of steps should be positive: %d\n", timesteps);
exit(-1);
}
int i;
int nnodes, step;
double runtime;
float dtime, dthf, epssq, itolsq;
float4 *accVel;
float2 *vel;
int *d_sort, *d_child, *d_count, *d_start;
int *d_step, *d_bottom;
unsigned int *d_blkcnt;
float *d_radius;
float4 *d_accVel;
float2 *d_vel;
float3 *d_max, *d_min;
float4 *d_posMass;
float4 *posMass;
double rsc, vsc, r, v, x, y, z, sq, scale;
// the number of thread blocks may be adjusted for higher performance
const int blocks = 24;
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
dtime = 0.025f;
dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
printf("configuration: %d bodies, %d time steps\n", nbodies, timesteps);
// allocate host memory
accVel = (float4*)malloc(sizeof(float4) * nbodies);
if (accVel == NULL) fprintf(stderr, "cannot allocate accVel\n");
vel = (float2*)malloc(sizeof(float2) * nbodies);
if (vel == NULL) fprintf(stderr, "cannot allocate vel\n");
posMass = (float4*)malloc(sizeof(float4) * nbodies);
if (posMass == NULL) fprintf(stderr, "cannot allocate posMass\n");
// initialize host memory (https://github.com/staceyson/splash2/blob/master/codes/apps/barnes/code.C)
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
float4 p;
p.w = 1.f / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1.0);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
p.x = x * scale;
p.y = y * scale;
p.z = z * scale;
posMass[i] = p;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
float2 v;
v.x = x * scale;
v.y = y * scale;
accVel[i].w = z * scale;
vel[i] = v;
}
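// The loop above samples initial conditions from a Plummer model, following the
// SPLASH-2 barnes generator referenced above: radii invert the cumulative mass
// profile, directions come from rejection sampling inside the unit sphere, speeds
// from rejection sampling of the velocity distribution, and every body gets the
// same mass 1/nbodies.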
// allocate device memory
if (cudaSuccess != cudaMalloc((void **)&d_child, sizeof(int) * (nnodes+1) * 8))
fprintf(stderr, "could not allocate d_child\n");
if (cudaSuccess != cudaMalloc((void **)&d_vel, sizeof(float2) * (nnodes+1)))
fprintf(stderr, "could not allocate d_vel\n");
if (cudaSuccess != cudaMalloc((void **)&d_accVel, sizeof(float4) * (nnodes+1)))
fprintf(stderr, "could not allocate d_accVel\n");
if (cudaSuccess != cudaMalloc((void **)&d_count, sizeof(int) * (nnodes+1)))
fprintf(stderr, "could not allocate d_count\n");
if (cudaSuccess != cudaMalloc((void **)&d_start, sizeof(int) * (nnodes+1)))
fprintf(stderr, "could not allocate d_start\n");
if (cudaSuccess != cudaMalloc((void **)&d_sort, sizeof(int) * (nnodes+1)))
fprintf(stderr, "could not allocate d_sort\n");
if (cudaSuccess != cudaMalloc((void **)&d_posMass, sizeof(float4) * (nnodes+1)))
fprintf(stderr, "could not allocate d_posMass\n");
if (cudaSuccess != cudaMalloc((void **)&d_max, sizeof(float3) * blocks * FACTOR1))
fprintf(stderr, "could not allocate d_max\n");
if (cudaSuccess != cudaMalloc((void **)&d_min, sizeof(float3) * blocks * FACTOR1))
fprintf(stderr, "could not allocate d_min\n");
if (cudaSuccess != cudaMalloc((void **)&d_step, sizeof(int)))
fprintf(stderr, "could not allocate d_step\n");
if (cudaSuccess != cudaMalloc((void **)&d_bottom, sizeof(int)))
fprintf(stderr, "could not allocate d_bottom\n");
if (cudaSuccess != cudaMalloc((void **)&d_blkcnt, sizeof(unsigned int)))
fprintf(stderr, "could not allocate d_blkcnt\n");
if (cudaSuccess != cudaMalloc((void **)&d_radius, sizeof(float)))
fprintf(stderr, "could not allocate d_radius\n");
if (cudaSuccess != cudaMemcpy(d_accVel, accVel, sizeof(float4) * nbodies, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of vel to device failed\n");
if (cudaSuccess != cudaMemcpy(d_vel, vel, sizeof(float2) * nbodies, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of vel to device failed\n");
if (cudaSuccess != cudaMemcpy(d_posMass, posMass, sizeof(float4) * nbodies, cudaMemcpyHostToDevice))
fprintf(stderr, "copying of posMass to device failed\n");
cudaDeviceSynchronize();
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
// run time steps (launch kernels on the device)
InitializationKernel<<<1, 1>>>(d_step, d_blkcnt);
for (step = 0; step < timesteps; step++) {
BoundingBoxKernel<<<blocks * FACTOR1, THREADS1>>>(
nnodes, nbodies, d_start, d_child, d_posMass, d_max, d_min,
d_radius, d_bottom, d_step, d_blkcnt );
ClearKernel1<<<blocks * 1, 256>>>(nnodes, nbodies, d_child);
TreeBuildingKernel<<<blocks * FACTOR2, THREADS2>>>(
nnodes, nbodies, d_child, d_posMass, d_radius, d_bottom);
ClearKernel2<<<blocks * 1, 256>>>(nnodes, d_start, d_posMass, d_bottom);
SummarizationKernel<<<blocks * FACTOR3, THREADS3>>>(
nnodes, nbodies, d_count, d_child, d_posMass, d_bottom);
SortKernel<<<blocks * FACTOR4, THREADS4>>>(
nnodes, nbodies, d_sort, d_count, d_start, d_child, d_bottom);
ForceCalculationKernel<<<blocks * FACTOR5, THREADS5>>>(
nnodes, nbodies, dthf, itolsq, epssq, d_sort, d_child, d_posMass,
d_vel, d_accVel, d_radius, d_step);
IntegrationKernel<<<blocks * FACTOR6, THREADS6>>>(
nbodies, dtime, dthf, d_posMass, d_vel, d_accVel);
}
cudaDeviceSynchronize();
gettimeofday(&endtime, NULL);
runtime = (endtime.tv_sec + endtime.tv_usec/1000000.0 -
starttime.tv_sec - starttime.tv_usec/1000000.0);
printf("Kernel execution time: %.4lf s\n", runtime);
// transfer the final results back to the host
if (cudaSuccess != cudaMemcpy(accVel, d_accVel, sizeof(float4) * nbodies, cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of accVel from device failed\n");
if (cudaSuccess != cudaMemcpy(vel, d_vel, sizeof(float2) * nbodies, cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of vel from device failed\n");
if (cudaSuccess != cudaMemcpy(posMass, d_posMass, sizeof(float4) * nbodies, cudaMemcpyDeviceToHost))
fprintf(stderr, "copying of posMass from device failed\n");
#ifdef DUMP
// print output for verification
for (i = 0; i < nbodies; i++) {
printf("%d: %.2e %.2e %.2e\n", i, posMass[i].x, posMass[i].y, posMass[i].z);
printf("%d: %.2e %.2e %.2e %.2e\n", i, accVel[i].x, accVel[i].y, accVel[i].z, accVel[i].w);
printf("%d: %.2e %.2e\n", i, vel[i].x, vel[i].y);
}
#endif
free(vel);
free(accVel);
free(posMass);
cudaFree(d_child);
cudaFree(d_vel);
cudaFree(d_accVel);
cudaFree(d_count);
cudaFree(d_start);
cudaFree(d_sort);
cudaFree(d_posMass);
cudaFree(d_max);
cudaFree(d_min);
cudaFree(d_step);
cudaFree(d_blkcnt);
cudaFree(d_bottom);
cudaFree(d_radius);
return 0;
}
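// Hypothetical build/run example (not from the original distribution); the two
// positional arguments are number_of_bodies and number_of_timesteps as parsed in
// main() above. The file name is assumed:
//   nvcc -O3 -arch=sm_70 ecl_bh.cu -o ecl_bh
//   ./ecl_bh 100000 10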
namespace caffe {
///////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void forward_affine(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
//int div = channels_ * output_H_ * output_W_;
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 6 * n;
Dtype x = x_target * theta[offset] + y_target * theta[offset + 1] + theta[offset + 2];
Dtype y = x_target * theta[offset + 3] + y_target * theta[offset + 4] + theta[offset + 5];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
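// Sampling scheme shared by forward_affine and the other forward_* kernels below:
// the target pixel (w, h) is mapped to normalized coordinates in [-1, 1],
// transformed by theta, rescaled to source-image coordinates (x, y), and the
// output value is the bilinear interpolation
//   V = sum_{hh, ww} U[hh][ww] * (1 - |x - ww|) * (1 - |y - hh|)
// taken over the (at most four) integer neighbors floor/ceil of x and y.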
template <typename Dtype>
__global__ void forward_translation(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out,
const float theta_1_1, const float theta_2_2//, const Dtype* fill_value_
) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 2 * n;
Dtype x = theta_1_1 * x_target + theta[offset];
Dtype y = theta_2_2 * y_target + theta[offset + 1];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_translation_scaling(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 4 * n;
Dtype x = x_target * theta[offset] + theta[offset + 1];
Dtype y = y_target * theta[offset + 2] + theta[offset + 3];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_projective(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 8 * n;
Dtype z = 1 / (x_target * theta[offset + 6] + y_target * theta[offset + 7] + 1);
Dtype x = x_target * theta[offset] + y_target * theta[offset + 1] + theta[offset + 2];
Dtype y = x_target * theta[offset + 3] + y_target * theta[offset + 4] + theta[offset + 5];
/*offset = (n * map_size + h * output_W_ + w) * 3;
source_data[offset] = (x *= z);
source_data[offset + 1] = (y *= z);
source_data[offset + 2] = z;*/
x = (x * z + (Dtype) 1.) * (width_ - 1) / 2;
y = (y * z + (Dtype) 1.) * (height_ - 1) / 2;
offset = (n * map_size + h * output_W_ + w) * 3;
source_data[offset] = x;
source_data[offset + 1] = y;
source_data[offset + 2] = z;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_grid(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* out) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = theta[offset];
Dtype y = theta[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
offset = (n * channels_ + c) * height_ * width_;
Dtype tmp = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}
}
}
template <typename Dtype>
__global__ void forward_similarity(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 4 * n;
// 0: alpha
// 1: scaling
// 2: tx
// 3: ty
Dtype ct = cos(theta[offset]), st = sin(theta[offset]);
Dtype x = theta[offset + 1] * (x_target * ct - y_target * st) + theta[offset + 2];
Dtype y = theta[offset + 1] * (x_target * st + y_target * ct) + theta[offset + 3];
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
}
}
template <typename Dtype>
__global__ void forward_similarity_plus(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype)w / (output_W_ - 1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype)h / (output_H_ - 1) * 2 - (Dtype)1.;
int offset = 5 * n;
// 0: alpha
// 1: scaling_x
// 2: scaling_y
// 3: tx
// 4: ty
Dtype ct = cos(theta[offset]), st = sin(theta[offset]);
Dtype sx = theta[offset + 1], sy = theta[offset + 2];
Dtype x = sx * x_target * ct - sy * y_target * st + theta[offset + 3];
Dtype y = sx * x_target * st + sy * y_target * ct + theta[offset + 4];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* theta_data = bottom[1]->gpu_data();
const int count = num_ * map_size_;
if (t_type_ == 4) {
forward_grid<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, top_data);
CUDA_POST_KERNEL_CHECK;
return;
}
Dtype* source_data = source_.mutable_gpu_data();
switch (t_type_) {
case 0:
// affine
forward_affine<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
case 1:
// translation
forward_translation<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data,
this->layer_param_.st_param().theta_1_1(), this->layer_param_.st_param().theta_2_2());// , fill_value_.gpu_data());
break;
case 2:
// translation + scaling
forward_translation_scaling<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);// , fill_value_.gpu_data());
break;
case 3:
// projective
forward_projective<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);// , fill_value_.gpu_data());
break;
case 5:
// similarity
forward_similarity<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
case 6:
// similarity+
forward_similarity_plus<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
}
CUDA_POST_KERNEL_CHECK;
}
///////////////////////////////////////////////////////////////////
__device__ inline void atomic_add(float * address, float val) {
atomicAdd(address, val);
}
__device__ inline void atomic_add(double * address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
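// The double overload above emulates atomicAdd with a compare-and-swap loop over
// the value's bit pattern reinterpreted as unsigned long long; it is needed on
// devices without a native double-precision atomicAdd (compute capability < 6.0),
// and the integer comparison avoids spinning forever when the value is NaN.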
// compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta}
// compute sum_{i} d{V_i} / d{U_nm}
template <typename Dtype>
__global__ void backward_affine(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
if (data_diff) {
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype buffer = top_diff[(n * channels_ + c) * map_size + h * output_W_ + w];
// offset in the input image U
offset = ((n * channels_ + c) * height_ + hh) * width_ + ww;
atomic_add(data_diff + offset, buffer * dx * dy);
buffer *= data[offset];
dv_dx += buffer * dy * sign_x;
dv_dy += buffer * dx * sign_y;
}
}
}
}else {
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 6 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx * y_target;
theta_diff_cache[n + map_size*2] = dv_dx;
theta_diff_cache[n + map_size*3] = dv_dy * x_target;
theta_diff_cache[n + map_size*4] = dv_dy * y_target;
theta_diff_cache[n + map_size*5] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_translation(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
n = n * 2 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx;
theta_diff_cache[n + map_size] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_translation_scaling(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 4 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx;
theta_diff_cache[n + map_size*2] = dv_dy * y_target;
theta_diff_cache[n + map_size*3] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_projective(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
//const Dtype width_const = (Dtype)2 / (Dtype)(width_ - 1);
//const Dtype height_const = (Dtype)2 / (Dtype)(height_ - 1);
const Dtype width_const = (Dtype)(width_ - 1) / 2;
const Dtype height_const = (Dtype)(height_ - 1) / 2;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 3;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
Dtype z = source_data[offset + 2];
//Dtype x = (x0 + (Dtype) 1.) * (width_ - 1) / 2;
//Dtype y = (y0 + (Dtype) 1.) * (height_ - 1) / 2;
Dtype x0 = x - width_const, y0 = y - height_const;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
Dtype tmp_source_z = 0;
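// tmp_source_z accumulates the extra chain-rule term from the homogeneous divide (x_s = X/Z, y_s = Y/Z):
// the gradient w.r.t. the third row of theta is proportional to -(x0 * dL/dx + y0 * dL/dy), scaled below
// by z, which appears to hold the reciprocal of the homogeneous coordinate cached by the forward pass.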
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
Dtype dv_dx_i = u * dy * sign_x;
Dtype dv_dy_i = u * dx * sign_y;
dv_dx += dv_dx_i;
dv_dy += dv_dy_i;
tmp_source_z -= dv_dx_i * x0 + dv_dy_i * y0;
}
}
}
dv_dx *= width_const * z;
dv_dy *= height_const * z;
tmp_source_z *= z;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 8 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx * y_target;
theta_diff_cache[n + map_size*2] = dv_dx;
theta_diff_cache[n + map_size*3] = dv_dy * x_target;
theta_diff_cache[n + map_size*4] = dv_dy * y_target;
theta_diff_cache[n + map_size*5] = dv_dy;
theta_diff_cache[n + map_size*6] = tmp_source_z * x_target;
theta_diff_cache[n + map_size*7] = tmp_source_z * y_target;
}
}
template <typename Dtype>
__global__ void backward_grid(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* theta_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = theta_data[offset];
Dtype y = theta_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
theta_diff[offset] = dv_dx;
theta_diff[offset + 1] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_similarity(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff, const Dtype* theta_data,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x0 = source_data[offset];
Dtype y0 = source_data[offset + 1];
Dtype x = (x0 + (Dtype) 1.) / 2 * (width_ - 1);
Dtype y = (y0 + (Dtype) 1.) / 2 * (height_ - 1);
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
//Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
//Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
offset = 4 * n;
n = offset * map_size + h * output_W_ + w;
Dtype s = 1 / theta_data[offset + 1];
x0 -= theta_data[offset + 2];
y0 -= theta_data[offset + 3];
//theta_diff_cache[n] = dv_dx * (ty - y) + dv_dy * (x - tx); // alpha
//theta_diff_cache[n + map_size] = dv_dx * 1/s * (tx - x) + dv_dy * 1/s * (y - ty); // scaling
theta_diff_cache[n] = dv_dx * (-y0) + dv_dy * (x0); // alpha
theta_diff_cache[n + map_size] = s * (dv_dx * (x0) + dv_dy * (y0)); // scaling
theta_diff_cache[n + map_size * 2] = dv_dx; // tx
theta_diff_cache[n + map_size * 3] = dv_dy; // ty
}
}
template <typename Dtype>
__global__ void backward_similarity_plus(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff, const Dtype* theta_data,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
offset = 5 * n;
n = offset * map_size + h * output_W_ + w;
Dtype ct = cos(theta_data[offset]), st = sin(theta_data[offset]);
//Dtype sx = 1 / theta_data[offset + 1], sy = 1 / theta_data[offset + 2];
x -= theta_data[offset + 3];
y -= theta_data[offset + 4];
//theta_diff_cache[n] = dv_dx * (ty - y) + dv_dy * (x - tx); // alpha
//theta_diff_cache[n + map_size] = dv_dx * 1/s * (tx - x) + dv_dy * 1/s * (y - ty); // scaling
theta_diff_cache[n] = dv_dx * (-y) + dv_dy * (x); // alpha
theta_diff_cache[n + map_size] = (dv_dx * ct - dv_dy * st) * x_target; // scaling x
theta_diff_cache[n + map_size * 2] = (-dv_dx * st + dv_dy * ct) * y_target; // scaling y
theta_diff_cache[n + map_size * 3] = dv_dx; // tx
theta_diff_cache[n + map_size * 4] = dv_dy; // ty
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* data_diff = 0;
Dtype* theta_diff = bottom[1]->mutable_gpu_diff();
int count = num_ * map_size_;
if (t_type_ == 4) {
const Dtype* theta_data = bottom[1]->gpu_data();
backward_grid<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_,
height_, width_, output_H_, output_W_,
bottom_data, theta_data, top_diff, // input
data_diff, theta_diff // output
);
CUDA_POST_KERNEL_CHECK;
return;
}
Dtype* theta_diff_cache = theta_diff_cache_.mutable_gpu_data();
const Dtype* source_data = source_.gpu_data();
if (propagate_down[0]) {
data_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff);
}
//caffe_gpu_set<Dtype>(bottom[1]->count(), 0, theta_diff); // UNNECESSARY
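// theta_diff_cache holds per-pixel partial derivatives laid out as (num_ * K) rows of map_size_
// columns, where K is the number of transform parameters (t_type_ 0: affine, K=6; 1: translation,
// K=2; 2: translation+scaling, K=4; 3: projective, K=8; 5: similarity, K=4; 6: similarity+, K=5).
// The caffe_gpu_gemm calls below reduce each row over map_size_ by multiplying with theta_diff_op_
// (presumably a length-map_size_ vector of ones), producing one gradient entry per parameter.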
switch (t_type_) {
case 0:
// affine
// compute gradient with respect to theta
backward_affine<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_,
height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, // input
data_diff, theta_diff_cache // output
);
// aggregate gradient for theta
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 6, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 1:
// translation
backward_translation<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 2, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 2:
// translation + scaling
backward_translation_scaling<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 4, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 3:
// projective
backward_projective<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 8, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 5:
// similarity
backward_similarity<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, bottom[1]->gpu_data(),
data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 4, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 6:
// similarity+
backward_similarity_plus<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, bottom[1]->gpu_data(),
data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 5, 1, map_size_, // similarity+ caches 5 parameter rows per sample
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
|
the_stack
|
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include "buffalo/cuda/utils.cuh"
#include "buffalo/cuda/als/als.hpp"
namespace cuda_als{
using std::invalid_argument;
using namespace cuda_buffalo;
__global__ void least_squares_cg_kernel(const int dim, const int vdim,
const int rows, const int op_rows,
float* P, const float* Q, const float* FF, float* loss_nume, float* loss_deno,
const int start_x, const int next_x,
const int64_t* indptr, const int* keys, const float* vals,
const float alpha, const float reg, const bool adaptive_reg, const float cg_tolerance,
const int num_cg_max_iters, const bool compute_loss,
const float eps, const bool axis){
extern __shared__ float shared_memory[];
float* Ap = &shared_memory[0];
float* r = &shared_memory[vdim];
float* p = &shared_memory[2*vdim];
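// Each block solves the implicit-ALS normal equations for one row of P by conjugate gradient:
//   (FF + sum_j alpha*v_j*q_j*q_j^T + reg*I) p_row = sum_j (1 + alpha*v_j) * q_j,
// where FF = Q^T Q is precomputed on the device. The shared vectors Ap, r and p each hold vdim
// floats and thread t owns coordinate t, so the vector updates below are element-wise per thread;
// dot() (from buffalo/cuda/utils.cuh) is assumed to perform a block-wide reduction.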
int64_t shift = start_x == 0? 0: indptr[start_x - 1];
for (int row=blockIdx.x + start_x; row<next_x; row+=gridDim.x){
float* _P = &P[row*vdim];
// assume that shifted index can be represented by size_t
size_t beg = row == 0? 0: indptr[row - 1] - shift;
size_t end = indptr[row] - shift;
if (beg == end) {
_P[threadIdx.x] = 0;
continue;
}
// set adaptive regularization coefficient
float ada_reg = adaptive_reg? (end - beg): 1.0;
ada_reg *= reg;
float tmp = 0.0;
// not necessary to compute vdim times
for (int d=0; d<dim; ++d)
tmp -= _P[d] * FF[d * vdim + threadIdx.x];
Ap[threadIdx.x] = -tmp;
// compute loss on negative samples (only item side)
if (compute_loss and axis){
float _dot = dot(_P, Ap);
if (threadIdx.x == 0){
loss_nume[blockIdx.x] += _dot;
loss_deno[blockIdx.x] += op_rows;
}
}
tmp -= _P[threadIdx.x] * ada_reg;
for (size_t idx=beg; idx<end; ++idx){
const float* _Q = &Q[keys[idx] * vdim];
const float v = vals[idx];
float _dot = dot(_P, _Q);
// compute loss on positive samples (only item side)
if (compute_loss and axis and threadIdx.x == 0){
loss_nume[blockIdx.x] -= _dot * _dot;
loss_nume[blockIdx.x] += (1.0 + v * alpha) * (_dot - 1) * (_dot - 1);
loss_deno[blockIdx.x] += v * alpha;
}
tmp += (1 + alpha * v * (1 - _dot)) * _Q[threadIdx.x];
}
p[threadIdx.x] = r[threadIdx.x] = tmp;
float rsold = dot(r, r);
// early stopping
if (rsold < cg_tolerance){
// compute loss on regularization (both user and item side)
if (compute_loss){
float _dot = dot(_P, _P);
if (threadIdx.x == 0)
loss_nume[blockIdx.x] += _dot * ada_reg;
}
continue;
}
// iterate cg
for (int it=0; it<num_cg_max_iters; ++it){
Ap[threadIdx.x] = ada_reg * p[threadIdx.x];
for (int d=0; d<dim; ++d){
Ap[threadIdx.x] += p[d] * FF[d * vdim + threadIdx.x];
}
for (size_t idx=beg; idx<end; ++idx){
const float* _Q = &Q[keys[idx] * vdim];
const float v = vals[idx];
float _dot = dot(p, _Q);
Ap[threadIdx.x] += v * alpha * _dot * _Q[threadIdx.x];
}
float alpha = rsold / (dot(p, Ap) + eps);
_P[threadIdx.x] += alpha * p[threadIdx.x];
r[threadIdx.x] -= alpha * Ap[threadIdx.x];
float rsnew = dot(r, r);
if (rsnew < cg_tolerance) break;
p[threadIdx.x] = r[threadIdx.x] + (rsnew / (rsold + eps)) * p[threadIdx.x];
rsold = rsnew;
__syncthreads();
}
// compute loss on regularization (both user and item side)
if (compute_loss){
float _dot = dot(_P, _P);
if (threadIdx.x == 0)
loss_nume[blockIdx.x] += _dot * ada_reg;
}
if (isnan(rsold)){
if (threadIdx.x == 0)
printf("Warning NaN detected in row %d of %d\n", row, rows);
_P[threadIdx.x] = 0.0;
}
}
}
CuALS::CuALS(){
logger_ = BuffaloLogger().get_logger();
opt_setted_ = false, initialized_ = false, ph_setted_ = false;
CHECK_CUDA(cudaGetDevice(&devId_));
cudaDeviceProp prop;
CHECK_CUDA(cudaGetDeviceProperties(&prop, devId_));
mp_cnt_ = prop.multiProcessorCount;
int major = prop.major;
int minor = prop.minor;
cores_ = -1;
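// map compute capability (major.minor) to CUDA cores per SM; cores_ is only used to size the
// kernel launch (block_cnt_ in init()), so the fallback of 128 per SM below is a heuristic.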
switch (major){
case 2: // Fermi
if (minor == 1) cores_ = mp_cnt_ * 48;
else cores_ = mp_cnt_ * 32;
break;
case 3: // Kepler
cores_ = mp_cnt_ * 192;
break;
case 5: // Maxwell
cores_ = mp_cnt_ * 128;
break;
case 6: // Pascal
if (minor == 1) cores_ = mp_cnt_ * 128;
else if (minor == 0) cores_ = mp_cnt_ * 64;
else INFO0("Unknown device type");
break;
case 7: // Volta
if (minor == 0) cores_ = mp_cnt_ * 64;
else INFO0("Unknown device type");
break;
default:
INFO0("Unknown device type");
break;
}
if (cores_ == -1) cores_ = mp_cnt_ * 128;
INFO("cuda device info, major: {}, minor: {}, multi processors: {}, cores: {}",
major, minor, mp_cnt_, cores_);
}
CuALS::~CuALS(){
// destructor
CHECK_CUBLAS(cublasDestroy(blas_handle_));
_release_utility();
_release_embedding();
_release_placeholder();
}
void CuALS::_release_utility(){
// free memory of utility variables
if (opt_setted_){
CHECK_CUDA(cudaFree(devFF_)); devFF_ = nullptr;
if (compute_loss_){
free(hostLossNume_);
free(hostLossDeno_);
CHECK_CUDA(cudaFree(devLossNume_));
CHECK_CUDA(cudaFree(devLossDeno_));
}
}
opt_setted_ = false;
}
void CuALS::_release_embedding(){
// free memory of embedding matrix
if (initialized_){
CHECK_CUDA(cudaFree(devP_));
CHECK_CUDA(cudaFree(devQ_));
devP_ = nullptr, devQ_ = nullptr;
hostP_ = nullptr, hostQ_ = nullptr;
}
initialized_ = false;
}
void CuALS::_release_placeholder(){
// free memory of placeholders
if (ph_setted_){
CHECK_CUDA(cudaFree(lindptr_));
CHECK_CUDA(cudaFree(rindptr_));
CHECK_CUDA(cudaFree(keys_));
CHECK_CUDA(cudaFree(vals_));
}
ph_setted_ = false;
}
bool CuALS::parse_option(std::string opt_path, Json& j){
std::ifstream in(opt_path.c_str());
if (not in.is_open()) {
return false;
}
std::string str((std::istreambuf_iterator<char>(in)),
std::istreambuf_iterator<char>());
std::string err_cmt;
auto _j = Json::parse(str, err_cmt);
if (not err_cmt.empty()) {
return false;
}
j = _j;
return true;
}
bool CuALS::init(std::string opt_path){
// parse options
bool ok = parse_option(opt_path, opt_);
if (ok){
// if already set, free memory
_release_utility();
// set options
compute_loss_ = opt_["compute_loss_on_training"].bool_value();
adaptive_reg_ = opt_["adaptive_reg"].bool_value();
dim_ = opt_["d"].int_value();
num_cg_max_iters_ = opt_["num_cg_max_iters"].int_value();
alpha_ = opt_["alpha"].number_value();
reg_u_ = opt_["reg_u"].number_value();
reg_i_ = opt_["reg_i"].number_value();
cg_tolerance_ = opt_["cg_tolerance"].number_value();
eps_ = opt_["eps"].number_value();
// virtual dimension
vdim_ = (dim_ / WARP_SIZE) * WARP_SIZE;
if (dim_ % WARP_SIZE > 0) vdim_ += WARP_SIZE;
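// round dim_ up to a multiple of WARP_SIZE so every CG coordinate maps to a thread of a full warp,
// e.g. d=100 with WARP_SIZE=32 gives vdim_=128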
CHECK_CUDA(cudaMalloc(&devFF_, sizeof(float)*vdim_*vdim_));
CHECK_CUBLAS(cublasCreate(&blas_handle_));
block_cnt_ = opt_["hyper_threads"].int_value() * (cores_ / vdim_);
if (compute_loss_){
hostLossNume_ = (float*) malloc(sizeof(float)*block_cnt_);
hostLossDeno_ = (float*) malloc(sizeof(float)*block_cnt_);
CHECK_CUDA(cudaMalloc(&devLossNume_, sizeof(float)*block_cnt_));
CHECK_CUDA(cudaMalloc(&devLossDeno_, sizeof(float)*block_cnt_));
}
opt_setted_ = true;
}
return ok;
}
void CuALS::initialize_model(
float* P, int P_rows,
float* Q, int Q_rows)
{
// if already set, free memory
_release_embedding();
// initialize parameters and send to gpu memory
hostP_ = P;
hostQ_ = Q;
P_rows_ = P_rows;
Q_rows_ = Q_rows;
CHECK_CUDA(cudaMalloc(&devP_, sizeof(float)*P_rows_*vdim_));
CHECK_CUDA(cudaMemcpy(devP_, hostP_, sizeof(float)*P_rows_*vdim_,
cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMalloc(&devQ_, sizeof(float)*Q_rows_*vdim_));
CHECK_CUDA(cudaMemcpy(devQ_, hostQ_, sizeof(float)*Q_rows_*vdim_,
cudaMemcpyHostToDevice));
CHECK_CUDA(cudaDeviceSynchronize());
initialized_ = true;
}
void CuALS::set_placeholder(int64_t* lindptr, int64_t* rindptr, size_t batch_size)
{
// if already set, free memory
_release_placeholder();
CHECK_CUDA(cudaMalloc(&lindptr_, sizeof(int64_t)*(P_rows_)));
CHECK_CUDA(cudaMalloc(&rindptr_, sizeof(int64_t)*(Q_rows_)));
CHECK_CUDA(cudaMemcpy(lindptr_, lindptr, sizeof(int64_t)*(P_rows_),
cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(rindptr_, rindptr, sizeof(int64_t)*(Q_rows_),
cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMalloc(&keys_, sizeof(int)*batch_size));
CHECK_CUDA(cudaMalloc(&vals_, sizeof(float)*batch_size));
batch_size_ = batch_size;
ph_setted_ = true;
}
void CuALS::precompute(int axis){
// precompute FF using cublas
int op_rows = axis == 0? Q_rows_: P_rows_;
float* opF = axis == 0? devQ_: devP_;
float alpha = 1.0, beta = 0.0;
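// FF = sum over the fixed side's rows of q*q^T (the vdim_ x vdim_ Gram matrix); with opF stored
// row-major as op_rows x vdim_, the column-major GEMM below computes opF^T * opF directly.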
CHECK_CUBLAS(cublasSgemm(blas_handle_, CUBLAS_OP_N, CUBLAS_OP_T,
vdim_, vdim_, op_rows, &alpha,
opF, vdim_, opF, vdim_, &beta, devFF_, vdim_));
CHECK_CUDA(cudaDeviceSynchronize());
}
void CuALS::_synchronize(int start_x, int next_x, int axis, bool device_to_host){
// synchronize parameters between cpu memory and gpu memory
float* devF = axis == 0? devP_: devQ_;
float* hostF = axis == 0? hostP_: hostQ_;
int size = next_x - start_x;
if (device_to_host){
CHECK_CUDA(cudaMemcpy(hostF + (start_x * vdim_), devF + (start_x * vdim_),
sizeof(float)*size*vdim_,
cudaMemcpyDeviceToHost));
} else{
CHECK_CUDA(cudaMemcpy(devF + (start_x * vdim_), hostF + (start_x * vdim_),
sizeof(float)*size*vdim_,
cudaMemcpyHostToDevice));
}
CHECK_CUDA(cudaDeviceSynchronize());
}
int CuALS::get_vdim(){
return vdim_;
}
std::pair<double, double> CuALS::partial_update(int start_x,
int next_x,
int64_t* indptr,
int* keys,
float* vals,
int axis){
int thread_cnt = vdim_;
size_t shared_memory_size = sizeof(float) * (3 * vdim_);
int rows = axis == 0? P_rows_: Q_rows_;
int op_rows = axis == 0? Q_rows_: P_rows_;
float* P = axis == 0? devP_: devQ_;
float* Q = axis == 0? devQ_: devP_;
float reg = axis == 0? reg_u_: reg_i_;
int64_t* _indptr = axis == 0? lindptr_: rindptr_;
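// indptr follows a one-shifted CSR convention: row r owns keys/vals[indptr[r-1] .. indptr[r]),
// with an implicit indptr[-1] == 0. Only the slice for rows [start_x, next_x) is copied below,
// which is why the kernel subtracts the same shift from its per-row offsets.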
// copy data to gpu memory
size_t beg = start_x == 0? 0: indptr[start_x - 1];
size_t end = indptr[next_x - 1];
CHECK_CUDA(cudaMemcpy(keys_, keys, sizeof(int)*(end-beg),
cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(vals_, vals, sizeof(float)*(end-beg),
cudaMemcpyHostToDevice));
// set zeros for measuring losses
if (compute_loss_){
for (size_t i=0; i<block_cnt_; ++i){
hostLossNume_[i] = 0;
hostLossDeno_[i] = 0;
}
CHECK_CUDA(cudaMemcpy(devLossNume_, hostLossNume_, sizeof(float)*block_cnt_,
cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(devLossDeno_, hostLossDeno_, sizeof(float)*block_cnt_,
cudaMemcpyHostToDevice));
}
CHECK_CUDA(cudaDeviceSynchronize());
// compute least square
least_squares_cg_kernel<<<block_cnt_, thread_cnt, shared_memory_size>>>(
dim_, vdim_, rows, op_rows, P, Q, devFF_, devLossNume_, devLossDeno_,
start_x, next_x, _indptr, keys_, vals_, alpha_, reg, adaptive_reg_,
cg_tolerance_, num_cg_max_iters_, compute_loss_, eps_, axis);
CHECK_CUDA(cudaDeviceSynchronize());
// accumulate losses
double loss_nume = 0, loss_deno = 0;
if (compute_loss_){
CHECK_CUDA(cudaMemcpy(hostLossNume_, devLossNume_, sizeof(float)*block_cnt_,
cudaMemcpyDeviceToHost));
CHECK_CUDA(cudaMemcpy(hostLossDeno_, devLossDeno_, sizeof(float)*block_cnt_,
cudaMemcpyDeviceToHost));
CHECK_CUDA(cudaDeviceSynchronize());
for (size_t i=0; i<block_cnt_; ++i){
loss_nume += hostLossNume_[i];
loss_deno += hostLossDeno_[i];
}
}
_synchronize(start_x, next_x, axis, true);
return std::make_pair(loss_nume, loss_deno);
}
} // namespace cuda_als
|
the_stack
|
#include <cstdint>
#include <cstdio>
#include "histogram_16_64_256.hu"
namespace LightGBM {
// atomic add for float number in local memory
inline __device__ void atomic_local_add_f(acc_type *addr, const acc_type val) {
atomicAdd(addr, static_cast<acc_type>(val));
}
// histogram16 stuff
#ifdef ENABLE_ALL_FEATURES
#ifdef IGNORE_INDICES
#define KERNEL_NAME histogram16_fulldata
#else // IGNORE_INDICES
#define KERNEL_NAME histogram16
#endif // IGNORE_INDICES
#else // ENABLE_ALL_FEATURES
#error "ENABLE_ALL_FEATURES should always be 1"
#define KERNEL_NAME histogram16
#endif // ENABLE_ALL_FEATURES
#define NUM_BINS 16
#define LOCAL_MEM_SIZE ((sizeof(unsigned int) + 2 * sizeof(acc_type)) * NUM_BINS)
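// per bin we keep one gradient and one hessian accumulator (acc_type each) plus one unsigned int counter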
// this function will be called by histogram16
// we have one sub-histogram of one feature in local memory, and need to read others
inline void __device__ within_kernel_reduction16x4(const acc_type* __restrict__ feature_sub_hist,
const unsigned int skip_id,
const unsigned int old_val_cont_bin0,
const uint16_t num_sub_hist,
acc_type* __restrict__ output_buf,
acc_type* __restrict__ local_hist,
const size_t power_feature_workgroups) {
const uint16_t ltid = threadIdx.x;
acc_type grad_bin = local_hist[ltid * 2];
acc_type hess_bin = local_hist[ltid * 2 + 1];
unsigned int* __restrict__ local_cnt = reinterpret_cast<unsigned int *>(local_hist + 2 * NUM_BINS);
unsigned int cont_bin;
if (power_feature_workgroups != 0) {
cont_bin = ltid ? local_cnt[ltid] : old_val_cont_bin0;
} else {
cont_bin = local_cnt[ltid];
}
uint16_t i;
if (power_feature_workgroups != 0) {
// add all sub-histograms for feature
const acc_type* __restrict__ p = feature_sub_hist + ltid;
for (i = 0; i < skip_id; ++i) {
grad_bin += *p; p += NUM_BINS;
hess_bin += *p; p += NUM_BINS;
cont_bin += as_acc_int_type(*p); p += NUM_BINS;
}
// skip the counters we already have
p += 3 * NUM_BINS;
for (i = i + 1; i < num_sub_hist; ++i) {
grad_bin += *p; p += NUM_BINS;
hess_bin += *p; p += NUM_BINS;
cont_bin += as_acc_int_type(*p); p += NUM_BINS;
}
}
__syncthreads();
output_buf[ltid * 2 + 0] = grad_bin;
output_buf[ltid * 2 + 1] = hess_bin;
}
#if USE_CONSTANT_BUF == 1
__kernel void KERNEL_NAME(__global const uchar* restrict feature_data_base,
__constant const uchar* restrict feature_masks __attribute__((max_constant_size(65536))),
const data_size_t feature_size,
__constant const data_size_t* restrict data_indices __attribute__((max_constant_size(65536))),
const data_size_t num_data,
__constant const score_t* restrict ordered_gradients __attribute__((max_constant_size(65536))),
#if CONST_HESSIAN == 0
__constant const score_t* restrict ordered_hessians __attribute__((max_constant_size(65536))),
#else
const score_t const_hessian,
#endif
char* __restrict__ output_buf,
volatile int * sync_counters,
acc_type* __restrict__ hist_buf_base,
const size_t power_feature_workgroups) {
#else
__global__ void KERNEL_NAME(const uchar* feature_data_base,
const uchar* __restrict__ feature_masks,
const data_size_t feature_size,
const data_size_t* data_indices,
const data_size_t num_data,
const score_t* ordered_gradients,
#if CONST_HESSIAN == 0
const score_t* ordered_hessians,
#else
const score_t const_hessian,
#endif
char* __restrict__ output_buf,
volatile int * sync_counters,
acc_type* __restrict__ hist_buf_base,
const size_t power_feature_workgroups) {
#endif
// allocate the local memory array aligned with float2, to guarantee correct alignment on NVIDIA platforms
// otherwise a "Misaligned Address" exception may occur
__shared__ float2 shared_array[LOCAL_MEM_SIZE/sizeof(float2)];
const unsigned int gtid = blockIdx.x * blockDim.x + threadIdx.x;
const uint16_t ltid = threadIdx.x;
const uint16_t lsize = NUM_BINS; // get_local_size(0);
const uint16_t group_id = blockIdx.x;
// local memory per workgroup is LOCAL_MEM_SIZE bytes
// clear local memory
unsigned int *ptr = reinterpret_cast<unsigned int *>(shared_array);
for (int i = ltid; i < LOCAL_MEM_SIZE/sizeof(unsigned int); i += lsize) {
ptr[i] = 0;
}
__syncthreads();
// gradient/hessian histograms
// assume this buffer starts at a 32 * 4 = 128-byte boundary
// total size: 2 * NUM_BINS * sizeof(acc_type)
// organization: each feature/grad/hessian is at a different bank,
// as independent of the feature value as possible
acc_type *gh_hist = reinterpret_cast<acc_type *>(shared_array);
// counter histogram
// total size: NUM_BINS * sizeof(unsigned int)
unsigned int *cnt_hist = reinterpret_cast<unsigned int *>(gh_hist + 2 * NUM_BINS);
// odd threads (1, 3, ...) compute histograms for hessians first
// even thread (0, 2, ...) compute histograms for gradients first
// etc.
uchar is_hessian_first = ltid & 1;
uint16_t feature_id = group_id >> power_feature_workgroups;
// each group of 2^POWER_FEATURE_WORKGROUPS workgroups processes one feature (compile-time constant)
// feature_size is the number of examples per feature
const uchar *feature_data = feature_data_base + feature_id * feature_size;
// number of threads that process this feature4
const unsigned int subglobal_size = lsize * (1 << power_feature_workgroups);
// equivalent thread ID in this subgroup for this feature4
const unsigned int subglobal_tid = gtid - feature_id * subglobal_size;
data_size_t ind;
data_size_t ind_next;
#ifdef IGNORE_INDICES
ind = subglobal_tid;
#else
ind = data_indices[subglobal_tid];
#endif
// extract feature mask, when a byte is set to 0, that feature is disabled
uchar feature_mask = feature_masks[feature_id];
// exit if the feature is masked
if (!feature_mask) {
return;
} else {
feature_mask = feature_mask - 1; // feature_mask is used to get the feature (1: 4-bit feature, 0: 8-bit feature)
}
// STAGE 1: read feature data, and gradient and hessian
// first half of the threads read feature data from global memory
// We will prefetch data into the "next" variable at the beginning of each iteration
uchar feature;
uchar feature_next;
uint16_t bin;
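// after the "feature_mask - 1" adjustment above, feature_mask == 1 means 4-bit packed features
// (two samples per byte: byte index ind >> 1, nibble selected by ind & 1) and 0 means 8-bit features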
feature = feature_data[ind >> feature_mask];
if (feature_mask) {
feature = (feature >> ((ind & 1) << 2)) & 0xf;
}
bin = feature;
acc_type grad_bin = 0.0f, hess_bin = 0.0f;
acc_type *addr_bin;
// store gradient and hessian
score_t grad, hess;
score_t grad_next, hess_next;
grad = ordered_gradients[ind];
#if CONST_HESSIAN == 0
hess = ordered_hessians[ind];
#endif
// there are 2^POWER_FEATURE_WORKGROUPS workgroups processing each feature4
for (unsigned int i = subglobal_tid; i < num_data; i += subglobal_size) {
// prefetch the next iteration variables
// we don't need a boundary check because we have made the buffer large enough
int i_next = i + subglobal_size;
#ifdef IGNORE_INDICES
// we need to check the bounds here
ind_next = i_next < num_data ? i_next : i;
#else
ind_next = data_indices[i_next];
#endif
grad_next = ordered_gradients[ind_next];
#if CONST_HESSIAN == 0
hess_next = ordered_hessians[ind_next];
#endif
// STAGE 2: accumulate gradient and hessian
if (bin != feature) {
addr_bin = gh_hist + bin * 2 + is_hessian_first;
#if CONST_HESSIAN == 0
acc_type acc_bin = is_hessian_first ? hess_bin : grad_bin;
atomic_local_add_f(addr_bin, acc_bin);
addr_bin = addr_bin + 1 - 2 * is_hessian_first;
acc_bin = is_hessian_first ? grad_bin : hess_bin;
atomic_local_add_f(addr_bin, acc_bin);
#elif CONST_HESSIAN == 1
atomic_local_add_f(addr_bin, grad_bin);
#endif
bin = feature;
grad_bin = grad;
hess_bin = hess;
} else {
grad_bin += grad;
hess_bin += hess;
}
// prefetch the next iteration variables
feature_next = feature_data[ind_next >> feature_mask];
// STAGE 3: accumulate counter
atomicAdd(cnt_hist + feature, 1);
// STAGE 4: update next stat
grad = grad_next;
hess = hess_next;
if (!feature_mask) {
feature = feature_next;
} else {
feature = (feature_next >> ((ind_next & 1) << 2)) & 0xf;
}
}
addr_bin = gh_hist + bin * 2 + is_hessian_first;
#if CONST_HESSIAN == 0
acc_type acc_bin = is_hessian_first ? hess_bin : grad_bin;
atomic_local_add_f(addr_bin, acc_bin);
addr_bin = addr_bin + 1 - 2 * is_hessian_first;
acc_bin = is_hessian_first ? grad_bin : hess_bin;
atomic_local_add_f(addr_bin, acc_bin);
#elif CONST_HESSIAN == 1
atomic_local_add_f(addr_bin, grad_bin);
#endif
__syncthreads();
#if CONST_HESSIAN == 1
// make a final reduction
gh_hist[ltid * 2] += gh_hist[ltid * 2 + 1];
gh_hist[ltid * 2 + 1] = const_hessian * cnt_hist[ltid]; // counter move to this position
__syncthreads();
#endif
#if POWER_FEATURE_WORKGROUPS != 0
acc_type *__restrict__ output = reinterpret_cast<acc_type *>(output_buf) + group_id * 3 * NUM_BINS;
// write gradients and hessians
acc_type *__restrict__ ptr_f = output;
for (uint16_t i = ltid; i < 2 * NUM_BINS; i += lsize) {
// even threads read gradients, odd threads read hessians
acc_type value = gh_hist[i];
ptr_f[(i & 1) * NUM_BINS + (i >> 1)] = value;
}
// write counts
acc_int_type *__restrict__ ptr_i = reinterpret_cast<acc_int_type *>(output + 2 * NUM_BINS);
for (uint16_t i = ltid; i < NUM_BINS; i += lsize) {
unsigned int value = cnt_hist[i];
ptr_i[i] = value;
}
__syncthreads();
__threadfence();
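// __threadfence() makes this workgroup's sub-histogram visible to the whole device before the
// counter below is bumped; the workgroup that observes counter == 2^power_feature_workgroups - 1
// is the last one for this feature and performs the cross-workgroup reduction.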
unsigned int * counter_val = cnt_hist;
// backup the old value
unsigned int old_val = *counter_val;
if (ltid == 0) {
// all workgroups processing the same feature add this counter
*counter_val = atomicAdd(const_cast<int*>(sync_counters + feature_id), 1);
}
// make sure everyone in this workgroup is here
__syncthreads();
// everyone in this workgroup: if we are the last workgroup, then do reduction!
if (*counter_val == (1 << power_feature_workgroups) - 1) {
if (ltid == 0) {
sync_counters[feature_id] = 0;
}
#else
}
// only 1 work group, no need to increase counter
// the reduction will become a simple copy
{
unsigned int old_val; // dummy
#endif
// locate our feature's block in output memory
unsigned int output_offset = (feature_id << power_feature_workgroups);
acc_type const * __restrict__ feature_subhists =
reinterpret_cast<acc_type *>(output_buf) + output_offset * 3 * NUM_BINS;
// skip reading the data already in local memory
unsigned int skip_id = group_id - output_offset;
// locate output histogram location for this feature4
acc_type *__restrict__ hist_buf = hist_buf_base + feature_id * 2 * NUM_BINS;
within_kernel_reduction16x4(feature_subhists, skip_id, old_val, 1 << power_feature_workgroups, hist_buf, reinterpret_cast<acc_type *>(shared_array), power_feature_workgroups);
}
}
// end of histogram16 stuff
// histogram64 stuff
#undef KERNEL_NAME
#undef NUM_BINS
#undef LOCAL_MEM_SIZE
#ifdef ENABLE_ALL_FEATURES
#ifdef IGNORE_INDICES
#define KERNEL_NAME histogram64_fulldata
#else // IGNORE_INDICES
#define KERNEL_NAME histogram64 // seems like ENABLE_ALL_FEATURES is set to 1 in the header if it's disabled
// #define KERNEL_NAME histogram64_allfeats
#endif // IGNORE_INDICES
#else // ENABLE_ALL_FEATURES
#error "ENABLE_ALL_FEATURES should always be 1"
#define KERNEL_NAME histogram64
#endif // ENABLE_ALL_FEATURES
#define NUM_BINS 64
#define LOCAL_MEM_SIZE ((sizeof(unsigned int) + 2 * sizeof(acc_type)) * NUM_BINS)
// this function will be called by histogram64
// we have one sub-histogram of one feature in local memory, and need to read others
inline void __device__ within_kernel_reduction64x4(const acc_type* __restrict__ feature_sub_hist,
const unsigned int skip_id,
const unsigned int old_val_cont_bin0,
const uint16_t num_sub_hist,
acc_type* __restrict__ output_buf,
acc_type* __restrict__ local_hist,
const size_t power_feature_workgroups) {
const uint16_t ltid = threadIdx.x;
acc_type grad_bin = local_hist[ltid * 2];
acc_type hess_bin = local_hist[ltid * 2 + 1];
unsigned int* __restrict__ local_cnt = reinterpret_cast<unsigned int *>(local_hist + 2 * NUM_BINS);
unsigned int cont_bin;
if (power_feature_workgroups != 0) {
cont_bin = ltid ? local_cnt[ltid] : old_val_cont_bin0;
} else {
cont_bin = local_cnt[ltid];
}
uint16_t i;
if (power_feature_workgroups != 0) {
// add all sub-histograms for feature
const acc_type* __restrict__ p = feature_sub_hist + ltid;
for (i = 0; i < skip_id; ++i) {
grad_bin += *p; p += NUM_BINS;
hess_bin += *p; p += NUM_BINS;
cont_bin += as_acc_int_type(*p); p += NUM_BINS;
}
// skip the counters we already have
p += 3 * NUM_BINS;
for (i = i + 1; i < num_sub_hist; ++i) {
grad_bin += *p; p += NUM_BINS;
hess_bin += *p; p += NUM_BINS;
cont_bin += as_acc_int_type(*p); p += NUM_BINS;
}
}
__syncthreads();
output_buf[ltid * 2 + 0] = grad_bin;
output_buf[ltid * 2 + 1] = hess_bin;
}
#if USE_CONSTANT_BUF == 1
__kernel void KERNEL_NAME(__global const uchar* restrict feature_data_base,
__constant const uchar* restrict feature_masks __attribute__((max_constant_size(65536))),
const data_size_t feature_size,
__constant const data_size_t* restrict data_indices __attribute__((max_constant_size(65536))),
const data_size_t num_data,
__constant const score_t* restrict ordered_gradients __attribute__((max_constant_size(65536))),
#if CONST_HESSIAN == 0
__constant const score_t* restrict ordered_hessians __attribute__((max_constant_size(65536))),
#else
const score_t const_hessian,
#endif
char* __restrict__ output_buf,
volatile int * sync_counters,
acc_type* __restrict__ hist_buf_base,
const size_t power_feature_workgroups) {
#else
__global__ void KERNEL_NAME(const uchar* feature_data_base,
const uchar* __restrict__ feature_masks,
const data_size_t feature_size,
const data_size_t* data_indices,
const data_size_t num_data,
const score_t* ordered_gradients,
#if CONST_HESSIAN == 0
const score_t* ordered_hessians,
#else
const score_t const_hessian,
#endif
char* __restrict__ output_buf,
volatile int * sync_counters,
acc_type* __restrict__ hist_buf_base,
const size_t power_feature_workgroups) {
#endif
// allocate the local memory array aligned with float2, to guarantee correct alignment on NVIDIA platforms
// otherwise a "Misaligned Address" exception may occur
__shared__ float2 shared_array[LOCAL_MEM_SIZE/sizeof(float2)];
const unsigned int gtid = blockIdx.x * blockDim.x + threadIdx.x;
const uint16_t ltid = threadIdx.x;
const uint16_t lsize = NUM_BINS; // get_local_size(0);
const uint16_t group_id = blockIdx.x;
// local memory per workgroup is LOCAL_MEM_SIZE bytes
// clear local memory
unsigned int *ptr = reinterpret_cast<unsigned int *>(shared_array);
for (int i = ltid; i < LOCAL_MEM_SIZE/sizeof(unsigned int); i += lsize) {
ptr[i] = 0;
}
__syncthreads();
// gradient/hessian histograms
// assume this buffer starts at a 32 * 4 = 128-byte boundary
// total size: 2 * NUM_BINS * sizeof(acc_type)
// organization: each feature/grad/hessian is at a different bank,
// as independent of the feature value as possible
acc_type *gh_hist = reinterpret_cast<acc_type *>(shared_array);
// counter histogram
// total size: NUM_BINS * sizeof(unsigned int)
unsigned int *cnt_hist = reinterpret_cast<unsigned int *>(gh_hist + 2 * NUM_BINS);
// odd threads (1, 3, ...) compute histograms for hessians first
// even thread (0, 2, ...) compute histograms for gradients first
// etc.
uchar is_hessian_first = ltid & 1;
uint16_t feature_id = group_id >> power_feature_workgroups;
// each group of 2^POWER_FEATURE_WORKGROUPS workgroups processes one feature (compile-time constant)
// feature_size is the number of examples per feature
const uchar *feature_data = feature_data_base + feature_id * feature_size;
// number of threads that process this feature4
const unsigned int subglobal_size = lsize * (1 << power_feature_workgroups);
// equivalent thread ID in this subgroup for this feature4
const unsigned int subglobal_tid = gtid - feature_id * subglobal_size;
data_size_t ind;
data_size_t ind_next;
#ifdef IGNORE_INDICES
ind = subglobal_tid;
#else
ind = data_indices[subglobal_tid];
#endif
// extract feature mask, when a byte is set to 0, that feature is disabled
uchar feature_mask = feature_masks[feature_id];
// exit if the feature is masked
if (!feature_mask) {
return;
} else {
feature_mask = feature_mask - 1; // feature_mask is used to get the feature (1: 4-bit feature, 0: 8-bit feature)
}
// STAGE 1: read feature data, and gradient and hessian
// first half of the threads read feature data from global memory
// We will prefetch data into the "next" variable at the beginning of each iteration
uchar feature;
uchar feature_next;
uint16_t bin;
feature = feature_data[ind >> feature_mask];
if (feature_mask) {
feature = (feature >> ((ind & 1) << 2)) & 0xf;
}
bin = feature;
acc_type grad_bin = 0.0f, hess_bin = 0.0f;
acc_type *addr_bin;
// store gradient and hessian
score_t grad, hess;
score_t grad_next, hess_next;
grad = ordered_gradients[ind];
#if CONST_HESSIAN == 0
hess = ordered_hessians[ind];
#endif
// there are 2^POWER_FEATURE_WORKGROUPS workgroups processing each feature4
for (unsigned int i = subglobal_tid; i < num_data; i += subglobal_size) {
// prefetch the next iteration variables
// we don't need a boundary check because we have made the buffer large enough
int i_next = i + subglobal_size;
#ifdef IGNORE_INDICES
// we need to check the bounds here
ind_next = i_next < num_data ? i_next : i;
#else
ind_next = data_indices[i_next];
#endif
grad_next = ordered_gradients[ind_next];
#if CONST_HESSIAN == 0
hess_next = ordered_hessians[ind_next];
#endif
// STAGE 2: accumulate gradient and hessian
if (bin != feature) {
addr_bin = gh_hist + bin * 2 + is_hessian_first;
#if CONST_HESSIAN == 0
acc_type acc_bin = is_hessian_first ? hess_bin : grad_bin;
atomic_local_add_f(addr_bin, acc_bin);
addr_bin = addr_bin + 1 - 2 * is_hessian_first;
acc_bin = is_hessian_first ? grad_bin : hess_bin;
atomic_local_add_f(addr_bin, acc_bin);
#elif CONST_HESSIAN == 1
atomic_local_add_f(addr_bin, grad_bin);
#endif
bin = feature;
grad_bin = grad;
hess_bin = hess;
} else {
grad_bin += grad;
hess_bin += hess;
}
// prefetch the next iteration variables
feature_next = feature_data[ind_next >> feature_mask];
// STAGE 3: accumulate counter
atomicAdd(cnt_hist + feature, 1);
// STAGE 4: update next stat
grad = grad_next;
hess = hess_next;
if (!feature_mask) {
feature = feature_next;
} else {
feature = (feature_next >> ((ind_next & 1) << 2)) & 0xf;
}
}
addr_bin = gh_hist + bin * 2 + is_hessian_first;
#if CONST_HESSIAN == 0
acc_type acc_bin = is_hessian_first ? hess_bin : grad_bin;
atomic_local_add_f(addr_bin, acc_bin);
addr_bin = addr_bin + 1 - 2 * is_hessian_first;
acc_bin = is_hessian_first ? grad_bin : hess_bin;
atomic_local_add_f(addr_bin, acc_bin);
#elif CONST_HESSIAN == 1
atomic_local_add_f(addr_bin, grad_bin);
#endif
__syncthreads();
#if CONST_HESSIAN == 1
// make a final reduction
gh_hist[ltid * 2] += gh_hist[ltid * 2 + 1];
gh_hist[ltid * 2 + 1] = const_hessian * cnt_hist[ltid]; // counter move to this position
__syncthreads();
#endif
#if POWER_FEATURE_WORKGROUPS != 0
acc_type *__restrict__ output = reinterpret_cast<acc_type *>(output_buf) + group_id * 3 * NUM_BINS;
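// each workgroup's sub-histogram occupies 3 * NUM_BINS entries of output_buf, laid out as
// NUM_BINS gradients, then NUM_BINS hessians, then NUM_BINS counts; the index expression
// below de-interleaves the shared-memory (grad, hess) pairs into that layout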
// write gradients and hessians
acc_type *__restrict__ ptr_f = output;
for (uint16_t i = ltid; i < 2 * NUM_BINS; i += lsize) {
// even threads read gradients, odd threads read hessians
acc_type value = gh_hist[i];
ptr_f[(i & 1) * NUM_BINS + (i >> 1)] = value;
}
// write counts
acc_int_type *__restrict__ ptr_i = reinterpret_cast<acc_int_type *>(output + 2 * NUM_BINS);
for (uint16_t i = ltid; i < NUM_BINS; i += lsize) {
unsigned int value = cnt_hist[i];
ptr_i[i] = value;
}
__syncthreads();
__threadfence();
unsigned int * counter_val = cnt_hist;
// backup the old value
unsigned int old_val = *counter_val;
if (ltid == 0) {
// all workgroups processing the same feature add this counter
*counter_val = atomicAdd(const_cast<int*>(sync_counters + feature_id), 1);
}
// make sure everyone in this workgroup is here
__syncthreads();
// everyone in this workgroup: if we are the last workgroup, then do reduction!
if (*counter_val == (1 << power_feature_workgroups) - 1) {
if (ltid == 0) {
sync_counters[feature_id] = 0;
}
#else
}
// only 1 work group, no need to increase counter
// the reduction will become a simple copy
{
unsigned int old_val; // dummy
#endif
// locate our feature's block in output memory
unsigned int output_offset = (feature_id << power_feature_workgroups);
acc_type const * __restrict__ feature_subhists =
reinterpret_cast<acc_type *>(output_buf) + output_offset * 3 * NUM_BINS;
// skip reading the data already in local memory
unsigned int skip_id = group_id - output_offset;
// locate output histogram location for this feature4
acc_type *__restrict__ hist_buf = hist_buf_base + feature_id * 2 * NUM_BINS;
within_kernel_reduction64x4(feature_subhists, skip_id, old_val, 1 << power_feature_workgroups, hist_buf, reinterpret_cast<acc_type *>(shared_array), power_feature_workgroups);
}
}
// end of histogram64 stuff
// histogram256 stuff
#undef KERNEL_NAME
#undef NUM_BINS
#undef LOCAL_MEM_SIZE
#ifdef ENABLE_ALL_FEATURES
#ifdef IGNORE_INDICES
#define KERNEL_NAME histogram256_fulldata
#else // IGNORE_INDICES
#define KERNEL_NAME histogram256 // seems like ENABLE_ALL_FEATURES is set to 1 in the header if it's disabled
// #define KERNEL_NAME histogram256_allfeats
#endif // IGNORE_INDICES
#else // ENABLE_ALL_FEATURES
#error "ENABLE_ALL_FEATURES should always be 1"
#define KERNEL_NAME histogram256
#endif // ENABLE_ALL_FEATURES
#define NUM_BINS 256
#define LOCAL_MEM_SIZE ((sizeof(unsigned int) + 2 * sizeof(acc_type)) * NUM_BINS)
// this function will be called by histogram256
// we have one sub-histogram of one feature in local memory, and need to read others
inline void __device__ within_kernel_reduction256x4(const acc_type* __restrict__ feature_sub_hist,
const unsigned int skip_id,
const unsigned int old_val_cont_bin0,
const uint16_t num_sub_hist,
acc_type* __restrict__ output_buf,
acc_type* __restrict__ local_hist,
const size_t power_feature_workgroups) {
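// feature_sub_hist points at the 2^power_feature_workgroups sub-histograms for this feature, each
// stored as NUM_BINS gradients, NUM_BINS hessians and NUM_BINS counts; the sub-histogram at skip_id
// is skipped because its values are already in local_hist (this workgroup's shared memory).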
const uint16_t ltid = threadIdx.x;
acc_type grad_bin = local_hist[ltid * 2];
acc_type hess_bin = local_hist[ltid * 2 + 1];
unsigned int* __restrict__ local_cnt = reinterpret_cast<unsigned int *>(local_hist + 2 * NUM_BINS);
unsigned int cont_bin;
if (power_feature_workgroups != 0) {
cont_bin = ltid ? local_cnt[ltid] : old_val_cont_bin0;
} else {
cont_bin = local_cnt[ltid];
}
uint16_t i;
if (power_feature_workgroups != 0) {
// add all sub-histograms for feature
const acc_type* __restrict__ p = feature_sub_hist + ltid;
for (i = 0; i < skip_id; ++i) {
grad_bin += *p; p += NUM_BINS;
hess_bin += *p; p += NUM_BINS;
cont_bin += as_acc_int_type(*p); p += NUM_BINS;
}
// skip the counters we already have
p += 3 * NUM_BINS;
for (i = i + 1; i < num_sub_hist; ++i) {
grad_bin += *p; p += NUM_BINS;
hess_bin += *p; p += NUM_BINS;
cont_bin += as_acc_int_type(*p); p += NUM_BINS;
}
}
__syncthreads();
output_buf[ltid * 2 + 0] = grad_bin;
output_buf[ltid * 2 + 1] = hess_bin;
}
#if USE_CONSTANT_BUF == 1
__kernel void KERNEL_NAME(__global const uchar* restrict feature_data_base,
__constant const uchar* restrict feature_masks __attribute__((max_constant_size(65536))),
const data_size_t feature_size,
__constant const data_size_t* restrict data_indices __attribute__((max_constant_size(65536))),
const data_size_t num_data,
__constant const score_t* restrict ordered_gradients __attribute__((max_constant_size(65536))),
#if CONST_HESSIAN == 0
__constant const score_t* restrict ordered_hessians __attribute__((max_constant_size(65536))),
#else
const score_t const_hessian,
#endif
char* __restrict__ output_buf,
volatile int * sync_counters,
acc_type* __restrict__ hist_buf_base,
const size_t power_feature_workgroups) {
#else
__global__ void KERNEL_NAME(const uchar* feature_data_base,
const uchar* __restrict__ feature_masks,
const data_size_t feature_size,
const data_size_t* data_indices,
const data_size_t num_data,
const score_t* ordered_gradients,
#if CONST_HESSIAN == 0
const score_t* ordered_hessians,
#else
const score_t const_hessian,
#endif
char* __restrict__ output_buf,
volatile int * sync_counters,
acc_type* __restrict__ hist_buf_base,
const size_t power_feature_workgroups) {
#endif
// allocate the local memory array aligned with float2, to guarantee correct alignment on NVIDIA platforms
// otherwise a "Misaligned Address" exception may occur
__shared__ float2 shared_array[LOCAL_MEM_SIZE/sizeof(float2)];
const unsigned int gtid = blockIdx.x * blockDim.x + threadIdx.x;
const uint16_t ltid = threadIdx.x;
const uint16_t lsize = NUM_BINS; // get_local_size(0);
const uint16_t group_id = blockIdx.x;
// local memory per workgroup is 3 KB
// clear local memory
unsigned int *ptr = reinterpret_cast<unsigned int *>(shared_array);
for (int i = ltid; i < LOCAL_MEM_SIZE/sizeof(unsigned int); i += lsize) {
ptr[i] = 0;
}
__syncthreads();
// gradient/hessian histograms
// assume this buffer starts at a 32 * 4 = 128-byte boundary
// total size: 2 * 256 * sizeof(acc_type) = 2 KB when acc_type is float
// organization: each feature/grad/hessian is at a different bank,
// as independent of the feature value as possible
acc_type *gh_hist = reinterpret_cast<acc_type *>(shared_array);
// counter histogram
// total size: 256 * size_of(unsigned int) = 1 KB
unsigned int *cnt_hist = reinterpret_cast<unsigned int *>(gh_hist + 2 * NUM_BINS);
// odd threads (1, 3, ...) compute histograms for hessians first
// even thread (0, 2, ...) compute histograms for gradients first
// etc.
uchar is_hessian_first = ltid & 1;
uint16_t feature_id = group_id >> power_feature_workgroups;
// each group of 2^POWER_FEATURE_WORKGROUPS workgroups processes one feature (compile-time constant)
// feature_size is the number of examples per feature
const uchar *feature_data = feature_data_base + feature_id * feature_size;
// number of threads that process this feature4
const unsigned int subglobal_size = lsize * (1 << power_feature_workgroups);
// equivalent thread ID in this subgroup for this feature4
const unsigned int subglobal_tid = gtid - feature_id * subglobal_size;
data_size_t ind;
data_size_t ind_next;
#ifdef IGNORE_INDICES
ind = subglobal_tid;
#else
ind = data_indices[subglobal_tid];
#endif
// extract feature mask, when a byte is set to 0, that feature is disabled
uchar feature_mask = feature_masks[feature_id];
// exit if the feature is masked
if (!feature_mask) {
return;
} else {
feature_mask = feature_mask - 1; // feature_mask is used to get the feature (1: 4-bit feature, 0: 8-bit feature)
}
// STAGE 1: read feature data, and gradient and hessian
// first half of the threads read feature data from global memory
// We will prefetch data into the "next" variable at the beginning of each iteration
uchar feature;
uchar feature_next;
uint16_t bin;
feature = feature_data[ind >> feature_mask];
if (feature_mask) {
feature = (feature >> ((ind & 1) << 2)) & 0xf;
}
bin = feature;
acc_type grad_bin = 0.0f, hess_bin = 0.0f;
acc_type *addr_bin;
// store gradient and hessian
score_t grad, hess;
score_t grad_next, hess_next;
grad = ordered_gradients[ind];
#if CONST_HESSIAN == 0
hess = ordered_hessians[ind];
#endif
// there are 2^POWER_FEATURE_WORKGROUPS workgroups processing each feature4
for (unsigned int i = subglobal_tid; i < num_data; i += subglobal_size) {
// prefetch the next iteration variables
// we don't need a boundary check because we have made the buffer large enough
int i_next = i + subglobal_size;
#ifdef IGNORE_INDICES
// we need to check the bounds here
ind_next = i_next < num_data ? i_next : i;
#else
ind_next = data_indices[i_next];
#endif
grad_next = ordered_gradients[ind_next];
#if CONST_HESSIAN == 0
hess_next = ordered_hessians[ind_next];
#endif
// STAGE 2: accumulate gradient and hessian
if (bin != feature) {
addr_bin = gh_hist + bin * 2 + is_hessian_first;
#if CONST_HESSIAN == 0
acc_type acc_bin = is_hessian_first ? hess_bin : grad_bin;
atomic_local_add_f(addr_bin, acc_bin);
addr_bin = addr_bin + 1 - 2 * is_hessian_first;
acc_bin = is_hessian_first ? grad_bin : hess_bin;
atomic_local_add_f(addr_bin, acc_bin);
#elif CONST_HESSIAN == 1
atomic_local_add_f(addr_bin, grad_bin);
#endif
bin = feature;
grad_bin = grad;
hess_bin = hess;
} else {
grad_bin += grad;
hess_bin += hess;
}
// prefetch the next iteration variables
feature_next = feature_data[ind_next >> feature_mask];
// STAGE 3: accumulate counter
atomicAdd(cnt_hist + feature, 1);
// STAGE 4: update next stat
grad = grad_next;
hess = hess_next;
if (!feature_mask) {
feature = feature_next;
} else {
feature = (feature_next >> ((ind_next & 1) << 2)) & 0xf;
}
}
addr_bin = gh_hist + bin * 2 + is_hessian_first;
#if CONST_HESSIAN == 0
acc_type acc_bin = is_hessian_first ? hess_bin : grad_bin;
atomic_local_add_f(addr_bin, acc_bin);
addr_bin = addr_bin + 1 - 2 * is_hessian_first;
acc_bin = is_hessian_first ? grad_bin : hess_bin;
atomic_local_add_f(addr_bin, acc_bin);
#elif CONST_HESSIAN == 1
atomic_local_add_f(addr_bin, grad_bin);
#endif
__syncthreads();
#if CONST_HESSIAN == 1
// make a final reduction
gh_hist[ltid * 2] += gh_hist[ltid * 2 + 1];
gh_hist[ltid * 2 + 1] = const_hessian * cnt_hist[ltid]; // counter move to this position
__syncthreads();
#endif
#if POWER_FEATURE_WORKGROUPS != 0
acc_type *__restrict__ output = reinterpret_cast<acc_type *>(output_buf) + group_id * 3 * NUM_BINS;
// write gradients and hessians
acc_type *__restrict__ ptr_f = output;
for (uint16_t i = ltid; i < 2 * NUM_BINS; i += lsize) {
// even threads read gradients, odd threads read hessians
acc_type value = gh_hist[i];
ptr_f[(i & 1) * NUM_BINS + (i >> 1)] = value;
}
// write counts
acc_int_type *__restrict__ ptr_i = reinterpret_cast<acc_int_type *>(output + 2 * NUM_BINS);
for (uint16_t i = ltid; i < NUM_BINS; i += lsize) {
unsigned int value = cnt_hist[i];
ptr_i[i] = value;
}
__syncthreads();
__threadfence();
unsigned int * counter_val = cnt_hist;
// backup the old value
unsigned int old_val = *counter_val;
if (ltid == 0) {
// all workgroups processing the same feature add this counter
*counter_val = atomicAdd(const_cast<int*>(sync_counters + feature_id), 1);
}
// make sure everyone in this workgroup is here
__syncthreads();
// everyone in this workgroup: if we are the last workgroup, then do reduction!
if (*counter_val == (1 << power_feature_workgroups) - 1) {
if (ltid == 0) {
sync_counters[feature_id] = 0;
}
#else
}
// only 1 work group, no need to increase counter
// the reduction will become a simple copy
{
unsigned int old_val; // dummy
#endif
// locate our feature's block in output memory
unsigned int output_offset = (feature_id << power_feature_workgroups);
acc_type const * __restrict__ feature_subhists =
reinterpret_cast<acc_type *>(output_buf) + output_offset * 3 * NUM_BINS;
// skip reading the data already in local memory
unsigned int skip_id = group_id - output_offset;
// locate output histogram location for this feature4
acc_type *__restrict__ hist_buf = hist_buf_base + feature_id * 2 * NUM_BINS;
within_kernel_reduction256x4(feature_subhists, skip_id, old_val, 1 << power_feature_workgroups, hist_buf, reinterpret_cast<acc_type *>(shared_array), power_feature_workgroups);
}
}
// end of histogram256 stuff
} // namespace LightGBM
|
the_stack
|
//TODO remove synchronization from this module by moving host operations to the device
namespace amgx
{
template <class TConfig>
void KrylovSubspaceBuffer<TConfig>::set_max_dimension(int max_dimension)
{
m_V_vector.resize( max_dimension + 2 );
m_Z_vector.resize( max_dimension + 1 );
//set the pointers to NULL (they will be [re]allocated only if they are NULL)
for (int i = 0; i < max_dimension + 1; i++)
{
m_V_vector[i] = NULL;
m_Z_vector[i] = NULL;
}
m_V_vector[max_dimension + 1] = NULL;
//this looks kind of hacky
this->max_dimension = max_dimension + 1;
}
template <class TConfig>
KrylovSubspaceBuffer<TConfig>::KrylovSubspaceBuffer()
{
this->dimension = -1;
this->max_dimension = 0;
this->N = 0;
}
template <class TConfig>
KrylovSubspaceBuffer<TConfig>::~KrylovSubspaceBuffer()
{
for (int i = 0; i < m_V_vector.size(); i++)
{
delete m_V_vector[i];
}
for (int i = 0; i < m_Z_vector.size(); i++)
{
delete m_Z_vector[i];
}
}
template <class TConfig>
bool KrylovSubspaceBuffer<TConfig>::set_iteration(int m)
{
if ( m > this->iteration + 1 )
{
FatalError("Internal error in set_iteration: It seems like one iteration has not been set", AMGX_ERR_UNKNOWN);
}
//if we haven't reached this iteration yet and haven't reached the max dimension, try to increase dimension and if that fails, tell.
if ( (m > this->iteration) && (this->dimension < this->max_dimension) && (!this->increase_dimension()))
{
return false;
}
this->iteration = m;
return true;
}
template <class TConfig>
int KrylovSubspaceBuffer<TConfig>::get_smallest_m()
{
return max(this->iteration + 1 - this->dimension, 0);
}
template <class TConfig>
bool KrylovSubspaceBuffer<TConfig>::increase_dimension()
{
if ( this->N < 1 )
{
FatalError("N cannot be smaller than 1.", AMGX_ERR_UNKNOWN );
}
if (this->dimension == this->max_dimension )
{
return true;
}
//grow krylov space
//check whether m_Z_vector of the same size has already been allocated
if ((m_Z_vector[dimension] != NULL) && (m_Z_vector[dimension]->size() != N))
{
delete m_Z_vector[dimension];
m_Z_vector[dimension] = NULL;
}
//check whether m_V_vector of the same size has already been allocated
if ((m_V_vector[dimension + 1] != NULL) && (m_V_vector[dimension + 1]->size() != N))
{
delete m_V_vector[dimension + 1];
m_V_vector[dimension + 1] = NULL;
}
//allocate the vector if it has not been allocated (or was not allocated with the same size)
try
{
if (m_Z_vector[dimension] == NULL)
{
m_Z_vector[dimension] = new VVector(N);
}
if (m_V_vector[dimension + 1] == NULL)
{
m_V_vector[dimension + 1] = new VVector(N);
}
}
catch (std::bad_alloc &e)
{
//inform user
std::cout << "WARNING: Cannot allocate next Krylov vector, out of memory. Falling back to DQGMRES" << std::endl;
//clear error from error history
cudaGetLastError();
//reset max dimension
this->max_dimension = this->dimension;
//back out and tell
return false;
}
// init Z and V
m_V_vector[dimension + 1]->set_block_dimy(this->blockdim);
m_V_vector[dimension + 1]->set_block_dimx(1);
m_V_vector[dimension + 1]->dirtybit = 1;
m_V_vector[dimension + 1]->delayed_send = 1;
m_V_vector[dimension + 1]->tag = this->tag * 100 + max_dimension + dimension + 1;
m_Z_vector[dimension]->set_block_dimy(this->blockdim);
m_Z_vector[dimension]->set_block_dimx(1);
m_Z_vector[dimension]->dirtybit = 1;
m_Z_vector[dimension]->delayed_send = 1;
m_Z_vector[dimension]->tag = this->tag * 100 + dimension;
dimension++;
return true;
}
template <class TConfig>
Vector<TConfig> &KrylovSubspaceBuffer<TConfig>::V(int m)
{
if ( m > this->iteration + 1 )
{
FatalError("Try to access unallocated V-vector. You have to set the iteration before accessing this vector", AMGX_ERR_BAD_PARAMETERS );
}
if ( m < this->get_smallest_m() )
{
FatalError("Try to access forgotten V-vector.", AMGX_ERR_BAD_PARAMETERS );
}
return *(this->m_V_vector[m % (this->dimension + 1)]);
}
template <class TConfig>
Vector<TConfig> &KrylovSubspaceBuffer<TConfig>::Z(int m)
{
if ( m > this->iteration )
{
FatalError("Try to access unallocated Z-vector. You have to set the iteration before accessing this vector", AMGX_ERR_BAD_PARAMETERS );
}
if ( m < this->get_smallest_m() )
{
FatalError("Try to access forgotten Z-vector.", AMGX_ERR_BAD_PARAMETERS );
}
return *(this->m_Z_vector[m % this->dimension]);
}
//init the first vector
template <class TConfig>
void KrylovSubspaceBuffer<TConfig>::setup(int N, int blockdim, int tag)
{
this->N = N;
this->blockdim = blockdim;
this->tag = tag;
this->iteration = -1;
this->dimension = 0;
//init V(0)
//check whether m_V_vector of the same size has already been allocated
if ((m_V_vector[0] != NULL) && (m_V_vector[0]->size() != N))
{
delete m_V_vector[0];
m_V_vector[0] = NULL;
}
//allocate the vector if it has not been allocated (or was not allocated with the same size)
if (m_V_vector[0] == NULL)
{
m_V_vector[0] = new VVector(N);
}
m_V_vector[0]->set_block_dimy(this->blockdim);
m_V_vector[0]->set_block_dimx(1);
m_V_vector[0]->dirtybit = 1;
m_V_vector[0]->delayed_send = 1;
m_V_vector[0]->tag = this->tag * 100 + max_dimension + 1;
}
template< class T_Config>
FGMRES_Solver<T_Config>::FGMRES_Solver( AMG_Config &cfg, const std::string &cfg_scope ) :
Solver<T_Config>( cfg, cfg_scope ), m_preconditioner(0)
{
std::string solverName, new_scope, tmp_scope;
cfg.getParameter<std::string>( "preconditioner", solverName, cfg_scope, new_scope );
if (solverName.compare("NOSOLVER") == 0)
{
use_preconditioner = false;
m_preconditioner = NULL;
}
else
{
use_preconditioner = true;
m_preconditioner = SolverFactory<T_Config>::allocate( cfg, cfg_scope, "preconditioner" );
}
m_R = cfg.AMG_Config::getParameter<int>("gmres_n_restart", cfg_scope);
m_krylov_size = min( this->m_max_iters, m_R );
int krylov_param = cfg.AMG_Config::getParameter<int>( "gmres_krylov_dim", cfg_scope );
if ( krylov_param > 0 )
{
m_krylov_size = min( m_krylov_size, krylov_param );
}
//Using L2 norm is ok, however we will do the extra computations
//if( this->m_norm_type != L2 )
// FatalError("FGMRES only works with L2 norm. Other norms would require extra computations. ", AMGX_ERR_NOT_SUPPORTED_TARGET);
m_H.resize( m_R + 1, m_R );
m_s.resize( m_R + 1 );
m_cs.resize( m_R );
m_sn.resize( m_R );
gamma.resize( m_R + 1 );
subspace.set_max_dimension( m_krylov_size );
}
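// A hedged configuration example (parameter names from above, values illustrative):
// with gmres_n_restart = 30, gmres_krylov_dim = 8 and m_max_iters >= 30, the solver keeps
// at most 8 Krylov vectors per restart cycle (m_krylov_size = 8 < m_R = 30), i.e. it runs
// as a truncated, DQGMRES-like method and must update x every iteration; with
// gmres_krylov_dim <= 0 the full restart window of 30 vectors is stored and x is only
// reconstructed at the end of each restart cycle.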
template<class T_Config>
FGMRES_Solver<T_Config>::~FGMRES_Solver()
{
if (use_preconditioner) { delete m_preconditioner; }
}
template<class T_Config>
void
FGMRES_Solver<T_Config>::printSolverParameters() const
{
std::cout << "gmres_n_restart=" << this->m_R << std::endl;
if (use_preconditioner)
{
std::cout << "preconditioner: " << this->m_preconditioner->getName() << " with scope name: " << this->m_preconditioner->getScope() << std::endl;
}
}
template<class T_Config>
void
FGMRES_Solver<T_Config>::solver_setup(bool reuse_matrix_structure)
{
if (use_preconditioner)
{
m_preconditioner->setup( *this->m_A, reuse_matrix_structure );
}
ViewType oldView = this->m_A->currentView();
this->m_A->setViewExterior();
//should we warn the user about the extra computational work?
use_scalar_L2_norm = (this->m_nrm.size() == 1 || this->m_use_scalar_norm) && this->m_norm_type == L2;
subspace.setup(this->m_A->get_num_cols()*this->m_A->get_block_dimy(), this->m_A->get_block_dimy(), this->tag);
residual.tag = (this->tag + 1) * 100 - 2;
if ( this->m_R == 1 || this->m_max_iters == 1 )
{
update_x_every_iteration = true;
update_r_every_iteration = false;
}
else
{
// The update of x is needed only if running the truncated gmres
update_x_every_iteration = (m_krylov_size < m_R);
update_r_every_iteration = (!use_scalar_L2_norm || (m_krylov_size < m_R)) && Base::m_monitor_convergence;
}
this->m_A->setView(oldView);
}
template <typename ValueType>
static __host__ void GeneratePlaneRotation( ValueType &dx, ValueType &dy, ValueType &cs, ValueType &sn )
{
if (dy < ValueType(0.0))
{
cs = 1.0;
sn = 0.0;
}
else if (abs(dy) > abs(dx))
{
ValueType tmp = dx / dy;
sn = ValueType(1.0) / sqrt(ValueType(1.0) + tmp * tmp);
cs = tmp * sn;
}
else
{
ValueType tmp = dy / dx;
cs = ValueType(1.0) / sqrt(ValueType(1.0) + tmp * tmp);
sn = tmp * cs;
}
}
template <typename ValueType>
void PlaneRotation( cusp::array2d<ValueType, cusp::host_memory, cusp::column_major> &H,
cusp::array1d<ValueType, cusp::host_memory> &cs,
cusp::array1d<ValueType, cusp::host_memory> &sn,
cusp::array1d<ValueType, cusp::host_memory> &s,
int i)
{
ValueType temp;
for (int k = 0; k < i; k++)
{
temp = cs[k] * H(k, i) + sn[k] * H(k + 1, i);
H(k + 1, i) = -sn[k] * H(k, i) + cs[k] * H(k + 1, i);
H(k, i) = temp;
}
GeneratePlaneRotation(H(i, i), H(i + 1, i), cs[i], sn[i]);
H(i, i) = cs[i] * H(i, i) + sn[i] * H(i + 1, i);
H(i + 1, i) = 0.0;
temp = cs[i] * s[i];
s[i + 1] = -sn[i] * s[i];
s[i] = temp;
}
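// Added note (not in the original source): GeneratePlaneRotation/PlaneRotation apply a
// Givens rotation chosen so that, for column i of the Hessenberg matrix,
//   [  cs  sn ] [ H(i,i)   ]   [ r ]
//   [ -sn  cs ] [ H(i+1,i) ] = [ 0 ],   with cs*cs + sn*sn = 1,
// keeping the reduced system upper triangular. The same rotation is applied to the
// reduced right-hand side s, which is why |s[m+1]| can later be used directly as the
// current residual norm in solve_iteration.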
template<class T_Config>
void
FGMRES_Solver<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
//init residual, even if we don't plan to use it, we might need it, so make sure we have enough memory to store it now
residual.resize( b.size() );
residual.set_block_dimx( 1 );
residual.set_block_dimy( this->m_A->get_block_dimy() );
residual.dirtybit = 1;
residual.delayed_send = 1;
}
//check for convergence
//all the complicated stuff happens here
template <class TConfig>
bool FGMRES_Solver<TConfig>::checkConvergenceGMRES(bool check_V_0)
{
if ( Base::m_monitor_convergence)
{
//enable blas operations
Operator<TConfig> &A = *this->m_A;
int offset, size;
A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size);
if ( this->use_scalar_L2_norm && !update_r_every_iteration )
{
this->m_nrm[0] = this->beta;
return this->converged();
}
else
{
if ( check_V_0 )
{
get_norm( A, subspace.V(0), A.get_block_dimy(), this->m_norm_type, this->m_nrm );
return this->converged();
}
else
{
if ( !update_r_every_iteration )
{
FatalError("have to compute the residual every iteration to compute a norm other than scalar L2", AMGX_ERR_BAD_PARAMETERS );
}
//compute norm of r
get_norm( A, residual, A.get_block_dimy(), this->m_norm_type, this->m_nrm );
return this->converged();
}
}
}
else
{
return false;
}
}
//Run preconditioned GMRES
template<class T_Config>
bool
FGMRES_Solver<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
Operator<T_Config> &A = *this->m_A;
ViewType oldView = A.currentView();
A.setViewExterior();
int offset, size;
A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size);
bool done = false;
int m = this->m_curr_iter % m_R; //current iteration within restart
if (m == 0)
{
//initialize gmres
subspace.set_iteration(0);
// compute initial residual r0 = b - Ax
axmb( A, x, b, subspace.V(0), offset, size );
// compute initial residual norm
this->beta = get_norm(A, subspace.V(0), L2);
// check for convergence (do we need it? leave it here for now)
if ((this->m_curr_iter == 0) && checkConvergenceGMRES( true ))
{
return true;
}
// normalize initial residual
scal( subspace.V(0), ValueTypeB(1.0 / this->beta), offset, size );
//set reduced system rhs = beta*e1
cusp::blas::fill( m_s, ValueTypeB(0.0) );
m_s[0] = this->beta;
}
//our krylov space is now smaller than m!
//because we haven't updated x before, we have to change the basis formed by Z
if ( !subspace.set_iteration(m) )
{
//we have to start updating x from now on, so prepare Z for that now
if ( !update_x_every_iteration )
{
//TODO: There could be a more efficient way to do this
for (int k = 1; k < m; k++)
{
// This can be written as [Z] * inv(R), where R=Q*H. It is more efficient to do M-M product.
// But this would require the memory to be in a consecutive chunk, which is not what
// the lazy memory allocation provides.
//p_k = [z_k - sum( h_ik*p_i )] / h_kk
for (int i = 0; i < k; i++)
{
axpy( subspace.Z(i), subspace.Z(k), -m_H(i, k), offset, size );
}
scal( subspace.Z(k), ValueTypeB(1.0) / m_H(k, k), offset, size );
// This can be written as dense M-V product, [Z]*gamma[1:m-1]
//x_k = x_k-1 + gamma_k*p_k
axpy( subspace.Z(k), x, gamma[k], offset, size );
}
update_x_every_iteration = true;
}
if ( !update_r_every_iteration && Base::m_monitor_convergence)
{
//compute residual
axmb( A, x, b, residual, offset, size );
update_r_every_iteration = true;
}
subspace.set_iteration(m); //if the allocation failed, we have to set the iteration again
}
// Run one iteration of preconditioner with zero initial guess and v_m as rhs, i.e. solve Az_m=v_m
if (use_preconditioner)
{
m_preconditioner->solve( subspace.V(m), subspace.Z(m), true ); //TODO: check if using zero as initial solution when solving for residual inside subspace is correct
}
else
{
copy(subspace.V(m), subspace.Z(m), offset, size);
}
//obtain v_m+1 := A*z_m
A.apply( subspace.Z(m), subspace.V(m + 1) );
// Modified Gram-Schmidt
for ( int i = subspace.get_smallest_m(); i <= m; i++ )
{
// H(i,m) = <V(i),V(m+1)>
m_H(i, m) = dot(A, subspace.V(i), subspace.V(m + 1));
// V(m+1) -= H(i, m) * V(i)
axpy( subspace.V(i), subspace.V(m + 1), -m_H(i, m), offset, size );
}
//H(m+1,m) = || v_m+1 ||
m_H(m + 1, m) = get_norm(A, subspace.V(m + 1), L2);
//normalize v_m+1
scal( subspace.V(m + 1), ValueTypeB(1.0) / m_H(m + 1, m), offset, size );
this->gamma[m] = m_s[m];
PlaneRotation( m_H, m_cs, m_sn, m_s, m );
if ( update_x_every_iteration )
{
//p_m = [z_m - sum( h_im*p_i )] / h_mm
// This is dense [Z]*[-H(smallest_m:m-1,m); 1] / m_H(m,m)
for (int i = subspace.get_smallest_m(); i < m; i++)
{
axpy( subspace.Z(i), subspace.Z(m), -m_H(i, m), offset, size );
}
scal( subspace.Z(m), ValueTypeB(1.0) / m_H(m, m), offset, size );
//x_m = x_m-1 + gamma_m*pm
axpy( subspace.Z(m), x, m_s[m], offset, size );
}
if ( update_r_every_iteration )
{
// This is the recursion in Christophe's presentation
// r_m = gamma_m+1*( c_m*v_m+1 - s_m*r_m-1/gamma_m )
// r_m = (gamma_m+1*c_m)*v_m+1 + (-gamma_m+1*s_m/gamma_m)*r_m-1)
if ( m == 0 )
{
axpby( subspace.V(1), subspace.V(0), residual, m_s[m + 1]*m_cs[m], ValueTypeB(-1.0 * m_s[m + 1]*m_sn[m]), offset, size );
}
else
{
axpby( subspace.V(m + 1), residual, residual, m_s[m + 1]*m_cs[m], ValueTypeB(-1.0 * m_s[m + 1]*m_sn[m] / gamma[m]), offset, size );
}
}
// Check for convergence
// abs(s[m+1]) = L2 norm of residual
this->beta = abs( m_s[m + 1] );
done = checkConvergenceGMRES( false );
// If reached restart limit or last iteration or if converged, compute x vector
if ( !update_x_every_iteration && (m == m_R - 1 || this->is_last_iter() || done ))
{
// Solve upper triangular system in place
for (int j = m; j >= 0; j--)
{
m_s[j] /= m_H(j, j);
//S(0:j) = s(0:j) - s[j] H(0:j,j)
for (int k = j - 1; k >= 0; k--)
{
m_s[k] -= m_H(k, j) * m_s[j];
}
}
// Update the solution
// This is dense M-V, x += [Z]*m_s
for (int j = 0; j <= m; j++)
{
axpy( subspace.Z(j), x, m_s[j], offset, size );
}
}
A.setView(oldView);
return !Base::m_monitor_convergence || done;
}
template<class T_Config>
void
FGMRES_Solver<T_Config>::solve_finalize( VVector &b, VVector &x )
{
residual.resize(0);
}
/****************************************
* Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class FGMRES_Solver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
#include <cudpp_globals.h>
#include <stdio.h>
#include "cudpp_util.h"
#define IDX (threadIdx.x + (blockIdx.x * blockDim.x))
/**
* @file
* sa_kernel.cuh
*
* @brief CUDPP kernel-level suffix array routines
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name Suffix Array Functions
* @{
*/
typedef unsigned int uint;
typedef unsigned char uchar;
/** @brief Construct the input array
*
* This is the first stage of the SA. It constructs the
* input array from the values of the input char array
* followed by three 0s.
*
*
* @param[in] d_str Input char array to perform the SA on.
* @param[out] d_str_value Output unsigned int array prepared for SA.
* @param[in] str_length The number of elements we are performing the SA on.
*
**/
__global__ void
strConstruct(uchar* d_str,
uint* d_str_value,
size_t str_length)
{
#if (__CUDA_ARCH__ >= 200)
const int STRIDE = gridDim.x * blockDim.x;
#pragma unroll
for(int i = IDX; i < str_length; i += STRIDE)
d_str_value[i] = (uint) d_str[i] + 1;
if (IDX > str_length-1 && IDX < str_length + 3) d_str_value[IDX] = 0;
#endif
}
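/* Illustrative example (added, not part of the original source): for the 3-character
   input "abc" (str_length == 3) the kernel produces
       d_str_value = { 'a'+1, 'b'+1, 'c'+1, 0, 0, 0 },
   i.e. every character value is shifted up by one so that 0 can serve as the padding
   sentinel written into the three trailing slots. */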
/** @brief Reconstruct the output
*
* This is the final stage of the SA. It reconstructs the
* output array by reducing each value by one.
*
* @param[in,out] d_keys_sa Final output of the suffix array which stores the
positions of sorted suffixes.
* @param[in] str_length Size of the array.
*
**/
__global__ void
resultConstruct(uint* d_keys_sa,
size_t str_length)
{
#if (__CUDA_ARCH__ >= 200)
const int STRIDE = gridDim.x * blockDim.x;
#pragma unroll
for(int i = IDX; i < str_length; i += STRIDE)
d_keys_sa[i] = d_keys_sa[i] - 1;
#endif
}
/** @brief Initialize the SA12 triplets
* @param[in] d_str Initial array of character values.
* @param[out] d_keys_uint_12 The keys of right-most char in SA12 triplets.
* @param[out] d_keys_srt_12 SA12 triplets positions.
* @param[in] mod_1 The number of elements whose positions mod3 = 1 (SA1)
* @param[in] tThreads The number of elements whose positions mod3 = 1,2 (SA12)
*
**/
__global__ void
sa12_keys_construct(uint* d_str,
uint* d_keys_uint_12,
uint* d_keys_srt_12,
int mod_1,
size_t tThreads)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX < mod_1)
{
d_keys_srt_12[IDX] = IDX*3+1;
d_keys_uint_12[IDX] = d_str[IDX*3+2];
}
else if(IDX < tThreads)
{
d_keys_srt_12[IDX] = (IDX-mod_1)*3+2;
d_keys_uint_12[IDX] = d_str[(IDX-mod_1)*3+3];
}
#endif
}
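/* Illustrative example (added): with mod_1 == 3 and tThreads == 5 the kernel assigns
       d_keys_srt_12 = { 1, 4, 7, 2, 5 }
   so the first mod_1 entries are the positions congruent to 1 (mod 3) and the remaining
   entries are the positions congruent to 2 (mod 3); the corresponding keys are the
   characters one past each position: d_str[2], d_str[5], d_str[8], d_str[3], d_str[6]. */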
/** @brief Construct SA12 for the second radix sort
* @param[in] d_str Initial array of character values.
* @param[out] d_keys_uint_12 The keys of second char in SA12 triplets.
* @param[in] d_keys_srt_12 SA12 triplets positions.
* @param[in] tThreads The number of elements in SA12.
*
**/
__global__ void
sa12_keys_construct_0(uint* d_str,
uint* d_keys_uint_12,
uint* d_keys_srt_12,
size_t tThreads)
{
#if (__CUDA_ARCH__ >= 200)
if (IDX < tThreads)
d_keys_uint_12[IDX] = d_str[d_keys_srt_12[IDX]];
#endif
}
/** @brief Construct SA12 for the third radix sort
* @param[in] d_str Initial array of character values.
* @param[out] d_keys_uint_12 The keys of third char in SA12 triplets.
* @param[in] d_keys_srt_12 SA12 triplets positions.
* @param[in] tThreads The number of elements in SA12.
*
**/
__global__ void
sa12_keys_construct_1(uint* d_str,
uint* d_keys_uint_12,
uint* d_keys_srt_12,
size_t tThreads)
{
#if (__CUDA_ARCH__ >= 200)
if (IDX < tThreads)
d_keys_uint_12[IDX] = d_str[d_keys_srt_12[IDX]-1];
#endif
}
/** @brief Turn on flags for sorted SA12 triplets
* @param[in] d_str Initial array of character values.
* @param[in] d_keys_srt_12 SA12 triplets positions.
* @param[out] d_flag Marking the sorted triplets.
* @param[out] result 0 if SA12 is not fully sorted.
* @param[in] tThreads The number of elements in SA12.
* @param[in] str_length The number of elements in original string.
*
**/
__global__ void
compute_rank(uint* d_str,
uint* d_keys_srt_12,
uint* d_flag,
bool* result,
size_t tThreads,
int str_length)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX==0) d_flag[IDX]=1;
else if(IDX < tThreads)
{
int i=d_keys_srt_12[IDX], j=d_keys_srt_12[IDX-1];
if(i < str_length+2 && j < str_length+2)
{
if((d_str[i-1]==d_str[j-1]) && (d_str[i]==d_str[j]) &&
(d_str[i+1]==d_str[j+1])) {
d_flag[IDX] = 0; result[0]=0;
} else {
d_flag[IDX] = 1;
}
}
}
#endif
}
/** @brief Construct new array for recursion
* @param[out] d_new_str The new string to be sent to recursion.
* @param[in] d_keys_srt_12 SA12 triplets positions.
* @param[in] d_rank Ranks of SA12 from compute_rank kernel.
* @param[in] mod_1 The number of elements of SA1.
* @param[in] tThreads The number of elements of SA12.
*
**/
__global__ void
new_str_construct(uint* d_new_str,
uint* d_keys_srt_12,
uint* d_rank,
int mod_1,
size_t tThreads)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX<tThreads)
{
uint pos = d_keys_srt_12[IDX];
uint rank = d_rank[IDX];
if(pos%3 == 1) d_new_str[(pos-1)/3] = rank;
else d_new_str[mod_1+(pos-2)/3] = rank;
}
else if(IDX == tThreads || IDX == tThreads+1) d_new_str[IDX]=0;
#endif
}
/** @brief Translate SA12 from recursion
* @param[in,out] d_keys_srt_12 Sorted SA12.
* @param[in] d_isa_12 ISA12.
* @param[in] d_flag Flags to mark SA1.
* @param[in] mod_1 The number of elements in SA1.
* @param[in] tThreads The number of elements in SA12.
*
**/
__global__ void
reconstruct(uint* d_keys_srt_12,
uint* d_isa_12,
uint* d_flag,
int mod_1,
size_t tThreads)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX<tThreads)
{
uint pos=d_keys_srt_12[IDX];
if(pos<tThreads+1){
d_isa_12[pos-1]=IDX+1;
if(pos > mod_1)
{
d_keys_srt_12[IDX] = 3*(pos-mod_1-1)+2;
d_flag[IDX]=0;
}
else
{
d_keys_srt_12[IDX] = 3*(pos-1)+1;
if(pos>1) d_flag[IDX] =1;
else d_flag[IDX]=0;
}
}
}
#endif
}
/** @brief Construct ISA12
* @param[in] d_keys_srt_12 Fully sorted SA12 in global position.
* @param[out] d_isa_12 ISA12 to store the ranks in local position.
* @param[out] d_flag Flags to mark SA1.
* @param[in] mod_1 The number of elements in SA1.
* @param[in] tThreads The number of elements in SA12.
*
**/
__global__ void
isa12_construct(uint* d_keys_srt_12,
uint* d_isa_12,
uint* d_flag,
int mod_1,
size_t tThreads)
{
#if (__CUDA_ARCH__ >= 200)
uint pos;
if(IDX<tThreads)
{
pos = d_keys_srt_12[IDX];
if(pos%3==1) {
pos = (pos-1)/3;
if(d_keys_srt_12[IDX]>3) d_flag[IDX]=1;
else d_flag[IDX]=0;
}
else if(pos%3==2) {
pos = mod_1+ (pos-2)/3;
d_flag[IDX]=0;
}
}
__syncthreads();
if(IDX<tThreads && pos<tThreads) // check IDX first: pos is only initialized when IDX < tThreads
d_isa_12[pos] = IDX+1;
#endif
}
/** @brief Construct SA3 triplets positions
* @param[out] d_keys_srt_3 SA3 generated from SA1.
* @param[in] d_str Original input array.
* @param[in] d_keys_srt_12 Fully sorted SA12.
* @param[in] d_keys_sa Positions of SA1.
* @param[in] tThreads1 The number of elements of SA12.
* @param[in] tThreads2 The number of elements of SA3.
* @param[in] str_length The number of elements in original string.
*
**/
__global__ void
sa3_srt_construct(uint* d_keys_srt_3,
uint* d_str,
uint* d_keys_srt_12,
uint* d_keys_sa,
size_t tThreads1,
size_t tThreads2,
int str_length)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX<tThreads1)
{
uint pos=d_keys_sa[IDX];
if((str_length+1)%3==0)
{
if(IDX == 0) d_keys_srt_3[IDX] = str_length+1;
if(d_keys_srt_12[IDX] > 3 && d_keys_srt_12[IDX] % 3 == 1 &&
pos<tThreads2-1)
d_keys_srt_3[pos+1]=d_keys_srt_12[IDX]-1;
}
else
{
if(d_keys_srt_12[IDX]>3 && d_keys_srt_12[IDX]%3==1 && pos<tThreads2)
d_keys_srt_3[pos]=d_keys_srt_12[IDX]-1;
}
}
#endif
}
/** @brief Construct SA3 triplets keys
* @param[in] d_keys_srt_3 SA3 triplets positions.
* @param[out] d_keys_sa SA3 keys.
* @param[in] d_str Original input string.
* @param[in] tThreads The number of elements in SA12.
* @param[in] str_length The number of elements in original string.
*
**/
__global__ void
sa3_keys_construct(uint* d_keys_srt_3,
uint* d_keys_sa,
uint* d_str,
size_t tThreads,
int str_length)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX<tThreads)
{
if(d_keys_srt_3[IDX] < str_length+4)
d_keys_sa[IDX] = d_str[d_keys_srt_3[IDX]-1];
}
#endif
}
/** @brief Construct SA12 keys in terms of Vector
* @param[in] d_str Original input data stream
* @param[in] d_keys_srt_12 The order of aKeys.
* @param[in] d_isa_12 The ranks in SA12 orders.
* @param[out] d_aKeys SA12 keys in Vectors.
* @param[in] tThreads The number of elements in SA12
* @param[in] mod_1 The number of elements in SA1.
* @param[in] bound The number of elements in SA12 plus SA3.
* @param[in] str_length The number of elements in original string.
*
**/
__global__ void
merge_akeys_construct(uint* d_str,
uint* d_keys_srt_12,
uint* d_isa_12,
Vector* d_aKeys,
size_t tThreads,
int mod_1,
int bound,
int str_length)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX < tThreads)
{
int i = d_keys_srt_12[IDX];
if(i < str_length+3)
{
if(i%3==1)
{
d_aKeys[IDX].a = d_str[i-1];
d_aKeys[IDX].b = (bound-i>0) ? d_isa_12[mod_1+(i-1)/3] : 0;
d_aKeys[IDX].c = 0;
d_aKeys[IDX].d = 1;
}
else if(i%3==2)
{
d_aKeys[IDX].a = d_str[i-1];
d_aKeys[IDX].b = (bound-i>0) ? d_str[i] : 0;
d_aKeys[IDX].c = (bound-i>1) ? d_isa_12[(i-2)/3+1] : 0;
d_aKeys[IDX].d = 0;
}
}
}
#endif
}
/** @brief Construct SA3 keys in Vector
*
* @param[in] d_str Original input data stream.
* @param[in] d_keys_srt_3 The order of bKeys
* @param[in] d_isa_12 ISA12.
* @param[out] d_bKeys SA3 keys in Vectors.
* @param[in] tThreads The number of total threads.
* @param[in] mod_1 The number of elements in SA1.
* @param[in] bound The number of elements in SA12 and SA3.
* @param[in] str_length The number of elements in original str.
*
**/
__global__ void
merge_bkeys_construct(uint* d_str,
uint* d_keys_srt_3,
uint* d_isa_12,
Vector* d_bKeys,
size_t tThreads,
int mod_1,
int bound,
int str_length)
{
#if (__CUDA_ARCH__ >= 200)
if(IDX<tThreads){
int i = d_keys_srt_3[IDX];
if(i < str_length+3){
d_bKeys[IDX].a = d_str[i-1];
d_bKeys[IDX].b = (bound-i>0) ? d_str[i] : 0;
d_bKeys[IDX].c = (bound-i>0) ? d_isa_12[i/3] : 0;
d_bKeys[IDX].d = (bound-i>1) ? d_isa_12[i/3+mod_1] : 0;
}
}
#endif
}
/** @} */ // end suffix array functions
/** @} */ // end cudpp_kernel
template<>
void XlnetLayer<float>::blockRelShiftBd(dim3 &grid, dim3& block){
grid.x=batch_size;
grid.y=head_num;
grid.z=seq_len;
block.x=seq_len*2;
}
template<>
void XlnetLayer<__half>::blockRelShiftBd(dim3 &grid, dim3& block){
int threads=512;
int seq_dim1=threads/seq_len;
int seq_dim2=seq_len/seq_dim1;
grid.x=batch_size;
grid.y=head_num;
grid.z=seq_dim2;
block.x=seq_dim1;
block.y=seq_len;
}
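// Added worked example (not in the original source): the __half path splits a 512-thread
// budget across the sequence dimension. With seq_len == 128: seq_dim1 = 512/128 = 4 and
// seq_dim2 = 128/4 = 32, so the launch is grid(batch_size, head_num, 32) with
// block(4, 128) = 512 threads, versus grid(batch_size, head_num, 128) with
// block(256) = seq_len*2 threads in the float specialization above.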
/********************** Kernel Invocation ************************/
template<typename T>
void XlnetLayer<T>::oneToManyCublasGemm(T * d_A, T* d_B, T* d_C,cublasOperation_t transa, cublasOperation_t transb,
int v_m, int v_n, int v_k,int lda, int strideA,
int ldb,int strideB, int ldc,int strideC, int batch,int algo, cublasFunction method){
int m;
int n;
int k;
switch(method){
case GEMM_STRIDE:
ck(cublasGemmStridedBatchedEx(cublas_handle,
transa, transb,
v_m, v_n, v_k,
&alpha,
d_A, a_type, lda,strideA,
d_B, b_type, ldb,strideB,
&beta,
d_C, c_type, ldc,strideC,
batch,
compute_type,
static_cast<cublasGemmAlgo_t>(algo)));
break;
case GEMM_A_0:
m=v_m;
n=v_n*batch;
k=v_k;
ck(cublasGemmEx(cublas_handle,
transa, transb,
m,n,k,
&alpha,
d_A, a_type, lda,
d_B, b_type, ldb,
&beta,
d_C, c_type, ldc,
compute_type,
static_cast<cublasGemmAlgo_t>(algo)));
break;
case GEMM_B_0:
for(int count=0;count<batch;count++){
ck(cublasGemmEx(cublas_handle,
transa, transb,
v_m,v_n,v_k,
&alpha,
d_A+strideA*count, a_type, lda,
d_B+strideB*count, b_type, ldb,
&beta,
d_C+strideC*count, c_type, ldc,
compute_type,
static_cast<cublasGemmAlgo_t>(algo)));
}
break;
}
}//end func
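// Added summary (not in the original source): oneToManyCublasGemm issues one logical
// batched GEMM in three possible ways, selected by the profiled cublasFunction:
//   GEMM_STRIDE -- a single cublasGemmStridedBatchedEx over all `batch` problems;
//   GEMM_A_0    -- when A is shared across the batch (strideA == 0), the batch is folded
//                  into the n dimension (n = v_n * batch) and one cublasGemmEx is issued;
//   GEMM_B_0    -- a loop of `batch` cublasGemmEx calls using per-problem pointer offsets.
// profileCublasGemmStride below times these variants and records the fastest choice in
// cublas_func[] / cublas_algo[].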
template<typename T>
void XlnetLayer<T>::invokePrepareMatrixes(){
int off0=seq_len*hidden_dim;//seq_len*head_num*size_per_head
int i_off1=hidden_dim;//head_num*size_per_head
int o_off1=seq_len*size_per_head;
int off2=size_per_head;
dim3 grid(seq_len,batch_size);
dim3 block(next_pow2(hidden_dim)/numPerThread<T>());
prepareMatrixes<<<grid, block,0, stream>>>(
q_buf,q_buf_bd,q_buf_ef,k_buf,k_buf_bd,k_buf_ef,
query_buf, key_buf, k_head_r,layer_weight_device.attr_seg_embed,
layer_weight_device.attr_bias_Q_w,layer_weight_device.attr_bias_Q_r,
layer_weight_device.attr_bias_Q_s,
off0,i_off1,o_off1,off2);
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<typename T>
void XlnetLayer<T>::invokeTranspose102(){
dim3 grid_trans_v(batch_size, head_num);
//dim3 block_trans_v(seq_len,2);//float
//dim3 block_trans_v(seq_len);//__half
dim3 block_trans_v(seq_len,2/(numPerThread<T>()));
int toff0=head_num*seq_len*2;
int ti_off1=seq_len*2;
int to_off1=head_num*2;
int toff2=2;
transpose102<<<grid_trans_v, block_trans_v,0,stream>>>(qk_buf_ef_trans, qk_buf_ef,toff0,
ti_off1,to_off1,toff2);
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<typename T>
void XlnetLayer<T>::invokeTranspose201(){
dim3 grid_trans2(batch_size, seq_len);
dim3 block_trans2(seq_len);
int t2_off0=seq_len*seq_len*head_num;
int t2_i_off1=seq_len*head_num;
int t2_o_off1=seq_len*seq_len;
int t2_i_off2=head_num;
int t2_o_off2=seq_len;
transpose201<<<grid_trans2, block_trans2,seq_len*(head_num+1)*sizeof(float),stream>>>
(qk_buf_ef_seg_trans, qk_buf_ef_seg, t2_off0, t2_i_off1,t2_i_off2,t2_o_off1,t2_o_off2);
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<typename T>
void XlnetLayer<T>::invokeRelShiftBd(){
dim3 grid_shift;
dim3 block_shift;
blockRelShiftBd(grid_shift,block_shift);
int off0=head_num*seq_len*seq_len;
int off1=seq_len*seq_len;
relShiftBd<<<grid_shift, block_shift,0, stream>>>(qk_buf_bd_shift, qk_buf_bd,off0,off1,seq_len);
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<>
void XlnetLayer<float>::invokeCalAttnScore(float* attn_mask){
int off0=head_num*seq_len*seq_len;
int off1=seq_len*seq_len;
float p=(1/(pow(size_per_head,0.5)));
int voff0=head_num*seq_len*size_per_head;
int v_o_off1=seq_len*size_per_head;
int voff2=size_per_head;
int v_i_off1=head_num*size_per_head;
dim3 grid_score(batch_size,head_num,seq_len);
dim3 block_score(next_pow2(seq_len));
calAttnScore_valueBuf<<<grid_score, block_score,0, stream>>>(attn_score, qk_buf, qk_buf_bd_shift,
qk_buf_ef_seg_trans,attn_mask, off0, off1,seq_len,p,
value_buf_trans, value_buf,voff0, v_i_off1, v_o_off1, voff2);
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<>
void XlnetLayer<__half>::invokeCalAttnScore(__half* attn_mask){
int off0=head_num*seq_len*seq_len;
int off1=seq_len*seq_len;
float p=1/(pow(size_per_head,0.5));
int voff0=head_num*seq_len*size_per_head;
int v_o_off1=seq_len*size_per_head;
int voff2=size_per_head;
int v_i_off1=head_num*size_per_head;
if(seq_len<=32){
dim3 grid_score(batch_size,head_num,2);
dim3 block_score(seq_len/2*next_pow2(seq_len/2));
calAttnScore_valueBuf_small<<<grid_score, block_score,0, stream>>>(attn_score,
qk_buf, qk_buf_bd_shift, qk_buf_ef_seg_trans,attn_mask,
off0, off1,seq_len,seq_len/2, p,
value_buf_trans, value_buf,voff0, v_i_off1, v_o_off1, voff2);
}else if(seq_len<=64){
dim3 grid_score(batch_size,head_num,seq_len/2);
dim3 block_score(2*next_pow2(seq_len/2));
calAttnScore_valueBuf_small<<<grid_score, block_score,0, stream>>>(attn_score, qk_buf, qk_buf_bd_shift,
qk_buf_ef_seg_trans,attn_mask,
off0, off1,seq_len,2,p,
value_buf_trans, value_buf,voff0, v_i_off1, v_o_off1, voff2);
}else{
dim3 grid_score(batch_size,head_num,seq_len);
dim3 block_score(next_pow2(seq_len/2));
calAttnScore_valueBuf_large<<<grid_score, block_score,0, stream>>>(attn_score,
qk_buf, qk_buf_bd_shift, qk_buf_ef_seg_trans,attn_mask,
off0, off1,seq_len,p,
value_buf_trans, value_buf,voff0, v_i_off1, v_o_off1, voff2);
}
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<typename T>
void XlnetLayer<T>::invokeTranspose102v2(){
dim3 grid_trans_v(batch_size,seq_len);
dim3 block_trans_v(head_num*size_per_head/numPerThread<T>());
int off0=head_num*seq_len*size_per_head;
int i_off1=seq_len*size_per_head;
int o_off1=head_num*size_per_head;
int off2=size_per_head;
transpose102_v2<<<grid_trans_v, block_trans_v,0,stream>>>(attn_vec_trans, attn_vec,off0, i_off1, o_off1, off2);
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<typename T>
void XlnetLayer<T>::invokeLayerNorm()
{
dim3 grid(batch_size*seq_len);
dim3 block(hidden_dim/numPerThread<T>());
assert(block.x <= 1024);
addBias_layerNorm<T><<<grid, block, 0, stream>>>(attn_layernorm,attn_out,to_tensor,
layer_weight_device.attr_layernorm_gamma,layer_weight_device.attr_layernorm_beta,
batch_size*seq_len, hidden_dim, epsilon);
ck(cudaDeviceSynchronize());
ck(cudaGetLastError());
}
template<typename T>
void XlnetLayer<T>::invokeGelu(){
dim3 block(1024/numPerThread<T>());
dim3 grid(batch_size, seq_len);
gelu_bias_loop<<<grid, block, 0, stream>>>(output_fc1, layer_weight_device.attr_fc1_bias, hidden_dim_ff,seq_len);
}
//New LayerNorm
template<typename T>
void XlnetLayer<T>::invokeLayerNormv2()
{
dim3 grid(batch_size*seq_len);
dim3 block(hidden_dim/numPerThread<T>());
assert(block.x <= 1024);
addBias_layerNorm2<T><<<grid, block, 0, stream>>>(output_layernorm, output_fc2,attn_layernorm,
layer_weight_device.attr_fc2_bias,layer_weight_device.attr_ff_gamma,
layer_weight_device.attr_ff_beta,batch_size*seq_len, hidden_dim, epsilon);
}
/********************** Attention ************************/
template<typename T>
T* XlnetLayer<T>::forward(T* to_tensor,T* attn_mask,T* seg_mat,T* attr_k_head_r){
this->to_tensor=to_tensor;
oneToManyCublasGemm(layer_weight_device.attr_kernel_QKV,to_tensor,qkv_buf,
CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim, batch_size*seq_len, hidden_dim,
hidden_dim,hidden_dim*hidden_dim,hidden_dim, 0,
hidden_dim,buf_size,3,cublas_algo[0],(cublasFunction)cublas_func[0]);
ck(cublasGemmEx(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim, seq_len*2,hidden_dim,
&alpha,
layer_weight_device.attr_pos_emb, a_type, hidden_dim,
attr_k_head_r, b_type, hidden_dim,
&beta,
k_head_r, c_type, hidden_dim,
compute_type,
static_cast<cublasGemmAlgo_t>(cublas_algo[2])));
//rel_attn_core: content, position, segment based attention score
invokePrepareMatrixes();
//ac = build_block_multiply_heads(network, bag, q_head_h, k_head_h, i_layer, 'w')
ck(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
seq_len, seq_len, size_per_head,
&alpha,
k_buf, a_type, size_per_head, seq_len * size_per_head,
q_buf, b_type, size_per_head, seq_len * size_per_head,
&beta,
qk_buf, c_type, seq_len, seq_len * seq_len,
batch_size * head_num,
compute_type,
static_cast<cublasGemmAlgo_t>(cublas_algo[1])));
//bd = build_block_multiply_heads(network, bag, q_head_h, k_head_r, i_layer, 'r')
ck(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
seq_len*2, seq_len, size_per_head,
&alpha,
k_buf_bd, a_type, size_per_head, seq_len *2* size_per_head,
q_buf_bd, b_type, size_per_head, seq_len *size_per_head,
&beta,
qk_buf_bd, c_type, seq_len*2, seq_len * seq_len*2,
batch_size * head_num,
compute_type,
static_cast<cublasGemmAlgo_t>(cublas_algo[3])));
//ef = build_block_multiply_heads(network, bag, q_head_h, seg_embed, i_layer, 's')
//ef = tf.einsum('ibnd,snd->ibns', q_head + r_s_bias, seg_embed)
ck(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
2, seq_len, size_per_head,
&alpha,
k_buf_ef, a_type, size_per_head,2*size_per_head,
q_buf_ef, b_type, size_per_head, seq_len *size_per_head,
&beta,
qk_buf_ef, c_type, 2, seq_len*2,
batch_size * head_num,
compute_type,
static_cast<cublasGemmAlgo_t>(cublas_algo[4])));
invokeTranspose102();
ck(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
head_num, seq_len, 2,
&alpha,
qk_buf_ef_trans, a_type, 2, 2* head_num,
seg_mat, b_type, 2, seq_len *2,
&beta,
qk_buf_ef_seg, c_type, head_num, seq_len*head_num,
batch_size * seq_len,
compute_type,
static_cast<cublasGemmAlgo_t>(cublas_algo[5])));
invokeTranspose201();
//shift bd
invokeRelShiftBd();
//attention output,merge attention scores and perform masking
//value_buf_trans=trans102(value_buf)
invokeCalAttnScore(attn_mask);
//attn_vec=value_buf_trans*attn_score
ck(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
size_per_head, seq_len, seq_len,
&alpha,
value_buf_trans, a_type, size_per_head, seq_len* size_per_head,
attn_score, b_type, seq_len, seq_len*seq_len,
&beta,
attn_vec, c_type, size_per_head, seq_len*size_per_head,
batch_size * head_num,
compute_type,
static_cast<cublasGemmAlgo_t>(cublas_algo[6])));
//attn_vec_trans=trans102(attn_vec)
invokeTranspose102v2();
//attn_out=attn_vec_trans* (attr_proj_o)T
oneToManyCublasGemm(layer_weight_device.attr_proj_o, attn_vec_trans, attn_out,
CUBLAS_OP_T, CUBLAS_OP_N,
hidden_dim, seq_len,hidden_dim,
hidden_dim, 0,
hidden_dim, seq_len*hidden_dim,
hidden_dim, seq_len*hidden_dim,
batch_size,cublas_algo[7], (cublasFunction)cublas_func[7]);
invokeLayerNorm();
oneToManyCublasGemm(layer_weight_device.attr_fc1_kernel,attn_layernorm,output_fc1,CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim_ff, seq_len,hidden_dim,hidden_dim_ff, 0,
hidden_dim, seq_len*hidden_dim,hidden_dim_ff, seq_len*hidden_dim_ff,
batch_size,cublas_algo[8],(cublasFunction)cublas_func[8]);
invokeGelu();
oneToManyCublasGemm(layer_weight_device.attr_fc2_kernel, output_fc1,output_fc2,
CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim, seq_len,hidden_dim_ff,hidden_dim, 0,hidden_dim_ff,
seq_len*hidden_dim_ff,hidden_dim, seq_len*hidden_dim,
batch_size,cublas_algo[9],(cublasFunction)cublas_func[9]);
invokeLayerNormv2();
return output_layernorm;
}
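// Added overview (not in the original source), summarizing the data flow of forward():
//   1. QKV projection via oneToManyCublasGemm, plus the positional projection into k_head_r.
//   2. invokePrepareMatrixes builds the bias-added query/key buffers for the three
//      relative-attention terms.
//   3. Three strided-batched GEMMs compute the content (ac), position (bd) and segment
//      (ef) scores; bd is rel-shifted and ef is gathered through seg_mat.
//   4. invokeCalAttnScore merges the three terms with the attention mask and the scaling
//      factor 1/sqrt(size_per_head), then attn_vec = value_buf_trans * attn_score.
//   5. Output projection, add & layernorm, feed-forward (fc1 -> gelu -> fc2) and a second
//      add & layernorm produce output_layernorm.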
/********************** Cublas Related Functions ************************/
template<>
void XlnetLayer<float>::setCublas(){
a_type=CUDA_R_32F;
b_type=CUDA_R_32F;
c_type=CUDA_R_32F;
compute_type=CUDA_R_32F;
start_algo = (int)CUBLAS_GEMM_DEFAULT;
end_algo = (int)CUBLAS_GEMM_ALGO23;
alpha=1.0f;
beta=0.0f;
for(int i=0;i<10;i++){
cublas_algo[i] = -1;
cublas_func[i] =0;
}
}
template<>
void XlnetLayer<__half>::setCublas(){
a_type=CUDA_R_16F;
b_type=CUDA_R_16F;
c_type=CUDA_R_16F;
compute_type=CUDA_R_16F;
start_algo=(int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
end_algo=(int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
cublasSetMathMode(cublas_handle, CUBLAS_TENSOR_OP_MATH);
alpha=__float2half_rn(1.0f);
beta=__float2half_rn(0.0f);
for(int i=0;i<10;i++){
cublas_algo[i] = 99;
cublas_func[i] =0;
}
}
template <typename T>
float XlnetLayer<T>:: profileCublasGemmEx(cublasOperation_t transa, cublasOperation_t transb,
int v_m, int v_n, int v_k,int lda,int ldb, int ldc,int ites, int index){
int fast_algo = 0;
T* d_A;
T* d_B;
T* d_C;
ck(cudaMalloc((void**)&d_A, sizeof(T) * v_m * v_k));
ck(cudaMalloc((void**)&d_B, sizeof(T) * v_k * v_n));
ck(cudaMalloc((void**)&d_C, sizeof(T) * v_m * v_n));
float exec_time = 99999.0f;
int algo = start_algo;
cublasHandle_t cublas_handle_default;
cublasCreate(&cublas_handle_default);
cublasStatus_t status=CUBLAS_STATUS_SUCCESS;
for(; algo <= end_algo; algo++)
{
status=CUBLAS_STATUS_SUCCESS;
CudaTimer timer;
timer.start();
for(int ite = 0; ite < ites; ++ite)
{
status=cublasGemmEx(cublas_handle_default,
transa, transb,
v_m, v_n, v_k,
&alpha,
d_A, a_type, lda,
d_B, b_type, ldb,
&beta,
d_C, c_type, ldc,
compute_type,
static_cast<cublasGemmAlgo_t>(algo));
}//end for ite
float t=timer.stop()/ites; // per-iteration time, consistent with profileCublasGemmStride below
if(status == CUBLAS_STATUS_SUCCESS){
//std::cout<<" Test Passed : "<<algo<<" with Time:"<< t<<" ms"<<std::endl;
if(t<exec_time){
exec_time=t;
fast_algo=algo;
}
}
}//end for algo
std::cout<<"Get Best Cublas Function(cublasGemmEx): "<<fast_algo<<" Time: "<<exec_time<<std::endl;
cublas_algo[index]=fast_algo;
ck(cudaFree(d_A));
ck(cudaFree(d_B));
ck(cudaFree(d_C));
ck(cublasDestroy(cublas_handle_default));
return exec_time;
}
template <typename T>
float XlnetLayer<T>:: profileCublasGemmStride(cublasOperation_t transa, cublasOperation_t transb,
int v_m, int v_n, int v_k,int lda, int strideA,
int ldb,int strideB, int ldc,int strideC, int batch,
int ites, int index){
T* d_A;
T* d_B;
T* d_C;
ck(cudaMalloc((void**)&d_A, sizeof(T) * v_m * v_k*batch));
ck(cudaMalloc((void**)&d_B, sizeof(T) * v_k * v_n*batch));
ck(cudaMalloc((void**)&d_C, sizeof(T) * v_m * v_n*batch));
float exec_time = 99999.0f;
int fast_algo = start_algo;
int algo = start_algo;
int second_fast_algo=0;
float second_exec_time = 99999.0f;
cublasHandle_t cublas_handle_default;
cublasCreate(&cublas_handle_default);
cublasStatus_t status=CUBLAS_STATUS_SUCCESS;
for(; algo <= end_algo; algo++)
{
status=CUBLAS_STATUS_SUCCESS;
CudaTimer timer;
timer.start();
for(int ite = 0; ite < ites; ++ite)
{
status=cublasGemmStridedBatchedEx(cublas_handle_default,
transa, transb,
v_m, v_n, v_k,
&alpha,
d_A, a_type, lda,strideA,
d_B, b_type, ldb,strideB,
&beta,
d_C, c_type, ldc,strideC,
batch,
compute_type,
static_cast<cublasGemmAlgo_t>(algo));
}//end for ite
float t=timer.stop()/ites;
if(status == CUBLAS_STATUS_SUCCESS){
if(t<exec_time){
exec_time=t;
fast_algo=algo;
}
}
}//end for algo
cublas_func[index]=GEMM_STRIDE;
cublas_algo[index]=fast_algo;
cublasFunction f = GEMM_STRIDE; // initialized to avoid a maybe-uninitialized warning; overwritten in the strideA/strideB cases before use
if(strideA==0){
int m=v_m;
int n=v_n*batch;
int k=v_k;
f=GEMM_A_0;
for(algo=start_algo; algo <= end_algo; algo++)
{
status=CUBLAS_STATUS_SUCCESS;
CudaTimer timer;
timer.start();
for(int ite = 0; ite < ites; ++ite)
{
status=cublasGemmEx(cublas_handle_default,
transa, transb,
m, n, k,
&alpha,
d_A, a_type, lda,
d_B, b_type, ldb,
&beta,
d_C, c_type, ldc,
compute_type,
static_cast<cublasGemmAlgo_t>(algo));
}//end for ite
float t=timer.stop()/ites;
if(status == CUBLAS_STATUS_SUCCESS){
if(t<second_exec_time){
second_exec_time=t;
second_fast_algo=algo;
}
}//end if (status == CUBLAS_STATUS_SUCCESS)
}//end for algo
}//end strideA
if(strideB==0){
int m=v_m;
int n=v_n;
int k=v_k;
f=GEMM_B_0;
for(algo=start_algo; algo <= end_algo; algo++)
{
status=CUBLAS_STATUS_SUCCESS;
CudaTimer timer;
timer.start();
for(int ite = 0; ite < ites; ++ite)
{
for(int count=0;count<batch;count++){
status=cublasGemmEx(cublas_handle_default,
transa, transb,
m, n, k,
&alpha,
d_A, a_type, lda,
d_B, b_type, ldb,
&beta,
d_C, c_type, ldc,
compute_type,
static_cast<cublasGemmAlgo_t>(algo));
}
}//end for ite
float t=timer.stop()/ites;
if(status == CUBLAS_STATUS_SUCCESS){
if(t<second_exec_time){
second_exec_time=t;
second_fast_algo=algo;
}
}
}//end for algo
}//end strideB
//Set the best cublas function
if(second_exec_time<exec_time){
std::cout<<"Get Best Cublas Function(cublasGemmEx): "<<second_fast_algo<<" cublasGemmStridedBatchedEx Time: "
<< exec_time<<" cublasGemmEx Time: "<<second_exec_time<<std::endl;
exec_time=second_exec_time;
fast_algo=second_fast_algo;
cublas_func[index]=(int)f;
cublas_algo[index]=fast_algo;
}else{
std::cout<<"Get Best Cublas Function(cublasGemmStridedBatchedEx): "<<second_fast_algo<<" cublasGemmStridedBatchedEx Time: "
<< exec_time<<" cublasGemmEx Time: "<<second_exec_time<<std::endl;
}
ck(cudaFree(d_A));
ck(cudaFree(d_B));
ck(cudaFree(d_C));
ck(cublasDestroy(cublas_handle_default));
return exec_time;
}
template <typename T>
void XlnetLayer<T>:: profileCublasAlgo(){
//int ites=50;
int ites=5;
float cublas_time[10];
cublas_time[0]=profileCublasGemmStride(
CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim, batch_size * seq_len, hidden_dim,
hidden_dim,hidden_dim*hidden_dim,hidden_dim, 0,
hidden_dim,buf_size,3, ites,0);
cublas_time[1]=profileCublasGemmStride(CUBLAS_OP_T, CUBLAS_OP_N,seq_len, seq_len, size_per_head,
size_per_head, seq_len * size_per_head,size_per_head, seq_len * size_per_head,seq_len, seq_len * seq_len,
batch_size * head_num, ites,1);
cublas_time[2]=profileCublasGemmEx(
CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim, seq_len*2,hidden_dim,
hidden_dim, hidden_dim, hidden_dim, ites,2);
cublas_time[3]=profileCublasGemmStride(CUBLAS_OP_T, CUBLAS_OP_N,
seq_len*2, seq_len, size_per_head,
size_per_head, seq_len *2* size_per_head,
size_per_head, seq_len *size_per_head,
seq_len*2, seq_len * seq_len*2,
batch_size * head_num, ites, 3);
cublas_time[4]=profileCublasGemmStride(CUBLAS_OP_T, CUBLAS_OP_N,
2, seq_len, size_per_head,
size_per_head, 2* size_per_head,
size_per_head, seq_len *size_per_head,
2, seq_len*2,
batch_size * head_num,ites,4);
cublas_time[5]=profileCublasGemmStride(
CUBLAS_OP_T, CUBLAS_OP_N,
head_num, seq_len, 2,
2, 2* head_num,
2, seq_len *2,
head_num, seq_len*head_num,
batch_size * seq_len,
ites,5 );
cublas_time[6]=profileCublasGemmStride(CUBLAS_OP_N, CUBLAS_OP_N,
size_per_head, seq_len, seq_len,
size_per_head, seq_len* size_per_head,
seq_len, seq_len*seq_len,
size_per_head, seq_len*size_per_head,
batch_size * head_num,
ites, 6);
cublas_time[7]=profileCublasGemmStride(CUBLAS_OP_T, CUBLAS_OP_N,
hidden_dim, seq_len,hidden_dim,
hidden_dim, 0,
hidden_dim, seq_len*hidden_dim,
hidden_dim, seq_len*hidden_dim,
batch_size,ites,7);
cublas_time[8]=profileCublasGemmStride(CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim_ff, seq_len,hidden_dim,hidden_dim_ff, 0,
hidden_dim, seq_len*hidden_dim,hidden_dim_ff, seq_len*hidden_dim_ff,
batch_size,
ites,8);
cublas_time[9]=profileCublasGemmStride(
CUBLAS_OP_N, CUBLAS_OP_N,
hidden_dim, seq_len,hidden_dim_ff,hidden_dim, 0,
hidden_dim_ff, seq_len*hidden_dim_ff,hidden_dim, seq_len*hidden_dim,
batch_size,ites,9);
std::cout<<"Sequnece length: "<<seq_len<<", Batch size: "<<batch_size<<" Selected cuBLAS method id: ";
for(int i=0;i<10;i++){
std::cout<<cublas_algo[i]<<",";
}
for(int i=0;i<10;i++){
std::cout<<cublas_func[i]<<",";
}
std::cout<<std::endl<<"Running time of each gemm: ";
for(int i=0;i<10;i++){
std::cout<<cublas_time[i]<<",";
}
std::cout<<std::endl;
}
template <typename T>
void XlnetLayer<T>:: recordCublasGemm(){
using namespace std;
ofstream outfile;
outfile.open(gemm_file.c_str(),ios::app);
if (outfile.is_open())
{
cout<<"Write profile result in file "<<gemm_file<<endl;
std::ostringstream ss;
ss<<gpu_id<<" ,"<<seq_len<<" ,"<<batch_size<<" ,";
for(int i=0;i<10;i++){ ss<<cublas_algo[i]<<" ,"; }
for(int i=0;i<10;i++){ ss<<cublas_func[i]<<" ,";
}
std::string s= ss.str();
outfile<<s<<endl;
outfile.close();
}
else
{
std::cout<< "Can not write profile result to "<<gemm_file<<endl;
}
}
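// Added note (not in the original source): each record appended to gemm_file is one
// comma-separated line in the format consumed by setCublasAlgo below, e.g. with
// illustrative values
//   0 ,128 ,8 ,99 ,99 ,99 ,99 ,99 ,99 ,99 ,99 ,99 ,99 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,0 ,0 ,0 ,
// i.e. gpu_id, seq_len, batch_size, the ten cublas_algo entries and the ten cublas_func
// entries.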
template <typename T>
void XlnetLayer<T>:: setCublasAlgo(){
FILE * fd=fopen(gemm_file.c_str(), "r");
int t_seq_len=0;
int t_batch=0;
int t_gpu_id=-1;
int ifFound=0;
if(fd != NULL)
{
while(!feof(fd)){
int res=fscanf(fd, "%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,%d ,",
&t_gpu_id, &t_seq_len, &t_batch,
&cublas_algo[0], &cublas_algo[1], &cublas_algo[2],&cublas_algo[3], &cublas_algo[4], &cublas_algo[5],
&cublas_algo[6], &cublas_algo[7], &cublas_algo[8],&cublas_algo[9],
&cublas_func[0], &cublas_func[1], &cublas_func[2],&cublas_func[3], &cublas_func[4], &cublas_func[5],
&cublas_func[6], &cublas_func[7], &cublas_func[8],&cublas_func[9]);
if(t_seq_len==seq_len&&t_batch==batch_size&&gpu_id==t_gpu_id&&res==FULL_GEMM_LENGTH){
ifFound=1;
break;
}
}
fclose(fd);
}else if(fd == NULL && ifFound == 0){
printf("Can not find the cublas configuration data. Run profiling code to find the best cublas function.\n");
profileCublasAlgo();
recordCublasGemm();
}
}
template <typename T>
void XlnetLayer<T>::copyCublasAlgo(const int* cublas_algo, const int* cublas_func){
memcpy(this->cublas_algo, cublas_algo, NUM_CUBLAS_FUNC*sizeof(int));
memcpy(this->cublas_func, cublas_func, NUM_CUBLAS_FUNC*sizeof(int));
}
/********************** Constructor & Destructor ************************/
template <typename T>
XlnetLayer<T>::XlnetLayer(int batch_size, int seq_len,
int head_num, int size_per_head,int hidden_dim,int hidden_dim_ff,float epsilon,
cudaStream_t stream, cublasHandle_t cublas_handle,
std::string gemm_file,std::string dir,int ifCheck):
batch_size(batch_size),
seq_len(seq_len),
head_num(head_num),
size_per_head(size_per_head),
stream(stream),
cublas_handle(cublas_handle),
epsilon(epsilon),
hidden_dim(hidden_dim),
hidden_dim_ff(hidden_dim_ff),
dir(dir),
ifCheck(ifCheck),
gemm_file(gemm_file),
layer_weight_device(stream,head_num*size_per_head,hidden_dim_ff){
//set buf_size
this->buf_size = batch_size * seq_len*head_num * size_per_head;
this->qk_buf_size = batch_size * seq_len * head_num * seq_len;
//set cublas algorithm
cudaGetDevice(&gpu_id);
setCublas();
setCublasAlgo();
//set device variable
allocDeviceMem();
//Sync
cudaDeviceSynchronize();
ck(cudaGetLastError());
}
template <typename T>
XlnetLayer<T>::XlnetLayer(XlnetLayer<T> const& xlnet_layer):
batch_size(xlnet_layer.batch_size),
seq_len(xlnet_layer.seq_len),
head_num(xlnet_layer.head_num),
size_per_head(xlnet_layer.size_per_head),
stream(xlnet_layer.stream),
cublas_handle( xlnet_layer.cublas_handle),
epsilon(xlnet_layer.epsilon),
hidden_dim(xlnet_layer.hidden_dim),
hidden_dim_ff(xlnet_layer.hidden_dim_ff),
buf_size(xlnet_layer.buf_size),
qk_buf_size(xlnet_layer.qk_buf_size),
dir(xlnet_layer.dir),
ifCheck(xlnet_layer.ifCheck),
gemm_file(xlnet_layer.gemm_file),
layer_weight_device(xlnet_layer.stream,xlnet_layer.hidden_dim,xlnet_layer.hidden_dim_ff){
//set cublas algorithm
cudaGetDevice(&gpu_id);
setCublas();
copyCublasAlgo(xlnet_layer.cublas_algo,xlnet_layer.cublas_func);
//set device variable
allocDeviceMem();
}
template <typename T>
void XlnetLayer<T>::allocDeviceMem() {
deviceMalloc(&k_head_r, seq_len*2*hidden_dim);
deviceMalloc(&qkv_buf,buf_size*3);
query_buf=qkv_buf;
key_buf=qkv_buf+buf_size;
value_buf=qkv_buf+2*buf_size;
deviceMalloc(&q_buf,buf_size);
deviceMalloc(&k_buf,buf_size);
deviceMalloc(&qk_buf,qk_buf_size);
deviceMalloc(&q_buf_bd, buf_size);
deviceMalloc(&k_buf_bd, batch_size*seq_len*2*hidden_dim);
deviceMalloc(&qk_buf_bd, batch_size*seq_len*head_num*seq_len*2);
deviceMalloc(&qk_buf_bd_shift, batch_size*seq_len*head_num*seq_len);
deviceMalloc(&q_buf_ef, buf_size);
deviceMalloc(&k_buf_ef, batch_size*2*hidden_dim);
deviceMalloc(&qk_buf_ef, batch_size*head_num*seq_len*2);
deviceMalloc(&qk_buf_ef_trans, batch_size*head_num*seq_len*2);
deviceMalloc(&qk_buf_ef_seg, batch_size*head_num*seq_len*seq_len);
deviceMalloc(&qk_buf_ef_seg_trans, batch_size*head_num*seq_len*seq_len);
deviceMalloc(&attn_score, batch_size*head_num*seq_len*seq_len);
deviceMalloc(&value_buf_trans,buf_size);
deviceMalloc(&attn_vec, batch_size*head_num*seq_len*size_per_head);
deviceMalloc(&attn_vec_trans, batch_size*head_num*seq_len*size_per_head);
deviceMalloc(&attn_out, batch_size*hidden_dim*seq_len);
deviceMalloc(&attn_layernorm, batch_size*hidden_dim*seq_len);
deviceMalloc(&output_fc1, batch_size*seq_len*hidden_dim_ff);
deviceMalloc(&output_fc2, batch_size*seq_len*hidden_dim);
deviceMalloc(&output_layernorm, batch_size*seq_len*hidden_dim);
}
template <typename T>
void XlnetLayer<T>::setLayerWeight(LayerWeightHost<T> & layer_weight_host){
layer_weight_device.copyFromHost(layer_weight_host);
}
template <typename T>
XlnetLayer<T>::~XlnetLayer() {
//std::cout << "Deconstruct XlnetLayer" <<std::endl;
deviceFree(k_head_r);
deviceFree(qkv_buf);
deviceFree(q_buf);
deviceFree(k_buf);
deviceFree(qk_buf);
deviceFree(q_buf_bd);
deviceFree(k_buf_bd);
deviceFree(qk_buf_bd);
deviceFree(qk_buf_bd_shift);
deviceFree(q_buf_ef);
deviceFree(k_buf_ef);
deviceFree(qk_buf_ef);
deviceFree(qk_buf_ef_trans);
deviceFree(qk_buf_ef_seg);
deviceFree(qk_buf_ef_seg_trans);
deviceFree(attn_score);
deviceFree(value_buf_trans);
deviceFree(attn_vec);
deviceFree(attn_vec_trans);
deviceFree(attn_out);
deviceFree(attn_layernorm);
deviceFree(output_fc1);
deviceFree(output_fc2);
deviceFree(output_layernorm);
}
//The explicit instantiation part
template class XlnetLayer<__half>;
template class XlnetLayer<float>;
// InputData
InputData::InputData(int batch_size, int seq_len){
this->batch_size=batch_size;
this->seq_len=seq_len;
}
InputData::~InputData(){
}
// InputDataHost
InputDataHost::InputDataHost(int batch_size, int seq_len):InputData(batch_size,seq_len){
inp_k=new int[batch_size * seq_len];
input_mask=new float[batch_size * seq_len];
seg_id=new int[batch_size * seq_len];
}
InputDataHost::~InputDataHost(){
delete []inp_k;
delete []input_mask;
delete []seg_id;
}
void InputDataHost::fillInputData(std::string file_name){
cnpy::npz_t my_npz = cnpy::npz_load(file_name);
setByNpz(my_npz, "input_ids:0", inp_k, batch_size * seq_len);
setByNpz(my_npz, "input_mask:0", input_mask, batch_size * seq_len);
setByNpz(my_npz, "segment_ids:0", seg_id, batch_size * seq_len);
}
// InputDataDevice
InputDataDevice::InputDataDevice(cudaStream_t stream,int batch_size, int seq_len):InputData(batch_size,seq_len){
//std::cout<<"Construct InputDataDevice: "<<this->batch_size<<" "<<this->seq_len<<std::endl;
this->stream=stream;
deviceMalloc(&inp_k, batch_size * seq_len);
deviceMalloc(&input_mask, batch_size * seq_len);
deviceMalloc(&seg_id, batch_size * seq_len);
}
void InputDataDevice::copyFromHost(InputDataHost& inputDataHost){
ck(cudaMemcpyAsync(inp_k, inputDataHost.inp_k,batch_size * seq_len*sizeof(int),cudaMemcpyHostToDevice,stream));
ck(cudaMemcpyAsync(input_mask, inputDataHost.input_mask,batch_size * seq_len*sizeof(float),cudaMemcpyHostToDevice,stream));
ck(cudaMemcpyAsync(seg_id, inputDataHost.seg_id,batch_size * seq_len*sizeof(int),cudaMemcpyHostToDevice,stream));
}
InputDataDevice::~InputDataDevice(){
//std::cout << "Deconstruct InputDataDevice" <<std::endl;
deviceFree(inp_k);
deviceFree(input_mask);
deviceFree(seg_id);
}
/*************************LoadDataPre********************************/
template <typename T>
PreWeight<T>::PreWeight(int hidden_dim, int num_token){
this->num_token=num_token;
this->hidden_dim=hidden_dim;
this->params_word_emb_k=NULL;
}
template <typename T>
PreWeight<T>::~PreWeight(){
}
template <typename T>
PreWeightDevice<T>::PreWeightDevice(cudaStream_t stream,int hidden_dim, int num_token):PreWeight<T>(hidden_dim,num_token){
//std::cout<<"Construct PreWeightDevice: "<<this->hidden_dim<<std::endl;
this->stream=stream;
deviceMalloc(&(this->params_word_emb_k), num_token* hidden_dim);
}
template <typename T>
void PreWeightDevice<T>::copyFromHost(PreWeightHost<T>& preWeightHost){
deviceMemcpyHtoD(this->stream,this->params_word_emb_k,preWeightHost.params_word_emb_k,
this->num_token* this->hidden_dim);
}
template <typename T>
PreWeightDevice<T>::~PreWeightDevice(){
//std::cout << "Deconstruct PreWeightDevice" <<std::endl;
deviceFree(this->params_word_emb_k);
}
template <typename T>
PreWeightHost<T>::PreWeightHost(int hidden_dim, int num_token)
:PreWeight<T>(hidden_dim,num_token){
this->params_word_emb_k=new T[num_token* hidden_dim];
}
template <typename T>
void PreWeightHost<T>::fillPreWeight(std::string file_name){
cnpy::npz_t params_npz= cnpy::npz_load(file_name);
setByNpz(params_npz, "model/transformer/word_embedding/lookup_table:0",
this->params_word_emb_k, this->num_token* this->hidden_dim);
}
template <typename T>
PreWeightHost<T>::~PreWeightHost(){
delete []this->params_word_emb_k;
}
/**************************LayerWeight*******************************/
template <typename T>
LayerWeight<T>::LayerWeight(int hidden_dim, int hidden_dim_ff){
this->hidden_dim=hidden_dim;
this->hidden_dim_ff=hidden_dim_ff;
}
template <typename T>
LayerWeight<T>::~LayerWeight(){
}
template <typename T>
LayerWeightDevice<T>::LayerWeightDevice(cudaStream_t stream,int hidden_dim, int hidden_dim_ff):
LayerWeight<T>(hidden_dim, hidden_dim_ff){
//std::cout<<"Construct LayerWeightDevice: "<<this->hidden_dim<<" "<<this->hidden_dim_ff<<std::endl;
this->stream=stream;
deviceMalloc(&this->attr_kernel_QKV,3*hidden_dim * hidden_dim);
this->attr_kernel_Q=this->attr_kernel_QKV;
this->attr_kernel_K=this->attr_kernel_QKV+hidden_dim * hidden_dim;
this->attr_kernel_V=this->attr_kernel_QKV+2*hidden_dim * hidden_dim;
deviceMalloc(&this->attr_bias_Q_w, hidden_dim);
deviceMalloc(&this->attr_pos_emb,hidden_dim*hidden_dim);
deviceMalloc(&this->attr_bias_Q_r, hidden_dim);
deviceMalloc(&this->attr_seg_embed,2*hidden_dim);
deviceMalloc(&this->attr_bias_Q_s, hidden_dim);
deviceMalloc(&this->attr_proj_o, hidden_dim*hidden_dim);
deviceMalloc(&this->attr_layernorm_gamma, hidden_dim);
deviceMalloc(&this->attr_layernorm_beta, hidden_dim);
deviceMalloc(&this->attr_fc1_kernel,hidden_dim*hidden_dim_ff);
deviceMalloc(&this->attr_fc1_bias, hidden_dim_ff);
deviceMalloc(&this->attr_fc2_kernel,hidden_dim*hidden_dim_ff);
deviceMalloc(&this->attr_fc2_bias, hidden_dim);
deviceMalloc(&this->attr_ff_gamma, hidden_dim);
deviceMalloc(&this->attr_ff_beta, hidden_dim);
}
template <typename T>
LayerWeightDevice<T>::LayerWeightDevice(LayerWeightDevice<T> const& layer_weight_device):
LayerWeight<T>(layer_weight_device.hidden_dim, layer_weight_device.hidden_dim_ff){
//std::cout<<"COPY Constructor LayerWeightDevice Without Value COPY: "<<this->hidden_dim<<" "<<this->hidden_dim_ff<<std::endl;
this->stream=layer_weight_device.stream;
deviceMalloc(&this->attr_kernel_QKV,3*this->hidden_dim * this->hidden_dim);
this->attr_kernel_Q=this->attr_kernel_QKV;
this->attr_kernel_K=this->attr_kernel_QKV+this->hidden_dim * this->hidden_dim;
this->attr_kernel_V=this->attr_kernel_QKV+2*this->hidden_dim * this->hidden_dim;
deviceMalloc(&this->attr_bias_Q_w, this->hidden_dim);
deviceMalloc(&this->attr_pos_emb,this->hidden_dim*this->hidden_dim);
deviceMalloc(&this->attr_bias_Q_r, this->hidden_dim);
deviceMalloc(&this->attr_seg_embed,2*this->hidden_dim);
deviceMalloc(&this->attr_bias_Q_s, this->hidden_dim);
deviceMalloc(&this->attr_proj_o, this->hidden_dim*this->hidden_dim);
deviceMalloc(&this->attr_layernorm_gamma, this->hidden_dim);
deviceMalloc(&this->attr_layernorm_beta, this->hidden_dim);
deviceMalloc(&this->attr_fc1_kernel,this->hidden_dim*this->hidden_dim_ff);
deviceMalloc(&this->attr_fc1_bias, this->hidden_dim_ff);
deviceMalloc(&this->attr_fc2_kernel,this->hidden_dim*this->hidden_dim_ff);
deviceMalloc(&this->attr_fc2_bias, this->hidden_dim);
deviceMalloc(&this->attr_ff_gamma, this->hidden_dim);
deviceMalloc(&this->attr_ff_beta, this->hidden_dim);
}
template <typename T>
void LayerWeightDevice<T>::copyFromHost(LayerWeightHost<T>& layer_weight_host){
deviceMemcpyHtoD(stream,this->attr_kernel_Q, layer_weight_host.attr_kernel_Q,this->hidden_dim* this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_kernel_K, layer_weight_host.attr_kernel_K,this->hidden_dim* this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_kernel_V, layer_weight_host.attr_kernel_V,this->hidden_dim* this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_bias_Q_w,layer_weight_host.attr_bias_Q_w, this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_pos_emb,layer_weight_host.attr_pos_emb,this->hidden_dim*this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_bias_Q_r,layer_weight_host.attr_bias_Q_r, this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_seg_embed,layer_weight_host.attr_seg_embed,2*this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_bias_Q_s,layer_weight_host.attr_bias_Q_s, this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_proj_o,layer_weight_host.attr_proj_o, this->hidden_dim*this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_layernorm_gamma, layer_weight_host.attr_layernorm_gamma, this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_layernorm_beta, layer_weight_host.attr_layernorm_beta, this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_fc1_kernel,layer_weight_host.attr_fc1_kernel,this->hidden_dim*this->hidden_dim_ff);
deviceMemcpyHtoD(stream,this->attr_fc1_bias, layer_weight_host.attr_fc1_bias, this->hidden_dim_ff);
deviceMemcpyHtoD(stream,this->attr_fc2_kernel,layer_weight_host.attr_fc2_kernel,this->hidden_dim*this->hidden_dim_ff);
deviceMemcpyHtoD(stream,this->attr_fc2_bias,layer_weight_host.attr_fc2_bias, this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_ff_gamma, layer_weight_host.attr_ff_gamma, this->hidden_dim);
deviceMemcpyHtoD(stream,this->attr_ff_beta,layer_weight_host.attr_ff_beta, this->hidden_dim);
}
template <typename T>
LayerWeightDevice<T>::~LayerWeightDevice(){
//std::cout<<"Deconstruct LayerWeightDevice"<<std::endl;
deviceFree(this->attr_kernel_QKV);
deviceFree(this->attr_bias_Q_w);
deviceFree(this->attr_pos_emb);
deviceFree(this->attr_bias_Q_r);
deviceFree(this->attr_seg_embed);
deviceFree(this->attr_bias_Q_s);
deviceFree(this->attr_proj_o);
deviceFree(this->attr_layernorm_gamma);
deviceFree(this->attr_layernorm_beta);
deviceFree(this->attr_fc1_kernel);
deviceFree(this->attr_fc1_bias);
deviceFree(this->attr_fc2_kernel);
deviceFree(this->attr_fc2_bias);
deviceFree(this->attr_ff_gamma);
deviceFree(this->attr_ff_beta);
}
template <typename T>
LayerWeightHost<T>::LayerWeightHost(int hidden_dim, int hidden_dim_ff):
LayerWeight<T>(hidden_dim, hidden_dim_ff){
//std::cout<<"Constructor LayerWeightHost: "<<this->hidden_dim<<" "<<this->hidden_dim_ff<<std::endl;
this->attr_kernel_Q=new T[hidden_dim* this->hidden_dim];
this->attr_kernel_K=new T[hidden_dim* this->hidden_dim];
this->attr_kernel_V=new T[hidden_dim* this->hidden_dim];
this->attr_bias_Q_w=new T[hidden_dim];
this->attr_pos_emb=new T[hidden_dim*hidden_dim];
this->attr_bias_Q_r=new T[hidden_dim];
this->attr_seg_embed=new T[hidden_dim*2];
this->attr_bias_Q_s=new T[hidden_dim];
this->attr_proj_o=new T[hidden_dim*hidden_dim];
this->attr_layernorm_gamma=new T[hidden_dim];
this->attr_layernorm_beta=new T[hidden_dim];
this->attr_fc1_kernel=new T[hidden_dim*hidden_dim_ff];
this->attr_fc1_bias=new T[hidden_dim_ff];
this->attr_fc2_kernel=new T[hidden_dim*hidden_dim_ff];
this->attr_fc2_bias=new T[hidden_dim];
this->attr_ff_gamma=new T[hidden_dim];
this->attr_ff_beta=new T[hidden_dim];
}
template <typename T>
LayerWeightHost<T>::LayerWeightHost(LayerWeightHost<T> const& layer_weight_host):
LayerWeight<T>(layer_weight_host.hidden_dim, layer_weight_host.hidden_dim_ff){
//std::cout<<"COPY Constructor LayerWeightHost: "<<this->hidden_dim<<" "<<this->hidden_dim_ff<<std::endl;
this->attr_kernel_Q=new T[this->hidden_dim* this->hidden_dim];
this->attr_kernel_K=new T[this->hidden_dim* this->hidden_dim];
this->attr_kernel_V=new T[this->hidden_dim* this->hidden_dim];
this->attr_bias_Q_w=new T[this->hidden_dim];
this->attr_pos_emb=new T[this->hidden_dim*this->hidden_dim];
this->attr_bias_Q_r=new T[this->hidden_dim];
this->attr_seg_embed=new T[this->hidden_dim*2];
this->attr_bias_Q_s=new T[this->hidden_dim];
this->attr_proj_o=new T[this->hidden_dim*this->hidden_dim];
this->attr_layernorm_gamma=new T[this->hidden_dim];
this->attr_layernorm_beta=new T[this->hidden_dim];
this->attr_fc1_kernel=new T[this->hidden_dim*this->hidden_dim_ff];
this->attr_fc1_bias=new T[this->hidden_dim_ff];
this->attr_fc2_kernel=new T[this->hidden_dim*this->hidden_dim_ff];
this->attr_fc2_bias=new T[this->hidden_dim];
this->attr_ff_gamma=new T[this->hidden_dim];
this->attr_ff_beta=new T[this->hidden_dim];
memcpy(this->attr_kernel_Q, layer_weight_host.attr_kernel_Q,this->hidden_dim* this->hidden_dim*sizeof(T));
memcpy(this->attr_kernel_K, layer_weight_host.attr_kernel_K,this->hidden_dim* this->hidden_dim*sizeof(T));
memcpy(this->attr_kernel_V, layer_weight_host.attr_kernel_V,this->hidden_dim* this->hidden_dim*sizeof(T));
memcpy(this->attr_bias_Q_w,layer_weight_host.attr_bias_Q_w, this->hidden_dim*sizeof(T));
memcpy(this->attr_pos_emb,layer_weight_host.attr_pos_emb,this->hidden_dim*this->hidden_dim*sizeof(T));
memcpy(this->attr_bias_Q_r,layer_weight_host.attr_bias_Q_r, this->hidden_dim*sizeof(T));
memcpy(this->attr_seg_embed,layer_weight_host.attr_seg_embed,2*this->hidden_dim*sizeof(T));
memcpy(this->attr_bias_Q_s,layer_weight_host.attr_bias_Q_s, this->hidden_dim*sizeof(T));
memcpy(this->attr_proj_o,layer_weight_host.attr_proj_o, this->hidden_dim*this->hidden_dim*sizeof(T));
memcpy(this->attr_layernorm_gamma, layer_weight_host.attr_layernorm_gamma, this->hidden_dim*sizeof(T));
memcpy(this->attr_layernorm_beta, layer_weight_host.attr_layernorm_beta, this->hidden_dim*sizeof(T));
memcpy(this->attr_fc1_kernel,layer_weight_host.attr_fc1_kernel,this->hidden_dim*this->hidden_dim_ff*sizeof(T));
memcpy(this->attr_fc1_bias, layer_weight_host.attr_fc1_bias, this->hidden_dim_ff*sizeof(T));
memcpy(this->attr_fc2_kernel,layer_weight_host.attr_fc2_kernel,this->hidden_dim*this->hidden_dim_ff*sizeof(T));
memcpy(this->attr_fc2_bias,layer_weight_host.attr_fc2_bias, this->hidden_dim*sizeof(T));
memcpy(this->attr_ff_gamma, layer_weight_host.attr_ff_gamma, this->hidden_dim*sizeof(T));
memcpy(this->attr_ff_beta,layer_weight_host.attr_ff_beta, this->hidden_dim*sizeof(T));
}
template <typename T>
void LayerWeightHost<T>::fillLayerWeight(int i_layer,std::string file_name){
cnpy::npz_t params_npz= cnpy::npz_load(file_name);
std::string str;
str=paraName(i_layer, "/rel_attn/q/kernel:0");
setByNpz(params_npz, str, this->attr_kernel_Q, this->hidden_dim * this->hidden_dim);
str=paraName(i_layer, "/rel_attn/k/kernel:0");
setByNpz(params_npz, str, this->attr_kernel_K, this->hidden_dim * this->hidden_dim);
str=paraName(i_layer, "/rel_attn/v/kernel:0");
setByNpz(params_npz, str, this->attr_kernel_V, this->hidden_dim * this->hidden_dim);
str=paraName("model/transformer/r_w_bias:0");
setByNpz(params_npz, str, this->attr_bias_Q_w, this->hidden_dim, i_layer*this->hidden_dim);
str=paraName(i_layer, "/rel_attn/r/kernel:0");
setByNpz(params_npz, str, this->attr_pos_emb,this->hidden_dim*this->hidden_dim);
str=paraName("model/transformer/r_r_bias:0");
setByNpz(params_npz, str, this->attr_bias_Q_r, this->hidden_dim, i_layer*this->hidden_dim);
str=paraName("model/transformer/seg_embed:0");
setByNpz(params_npz, str, this->attr_seg_embed, 2*this->hidden_dim, i_layer*2*this->hidden_dim);
str=paraName("model/transformer/r_s_bias:0");
setByNpz(params_npz, str, this->attr_bias_Q_s,this->hidden_dim, i_layer*this->hidden_dim);
str=paraName(i_layer, "/rel_attn/o/kernel:0");
setByNpz(params_npz, str,this->attr_proj_o ,this->hidden_dim*this->hidden_dim);
str=paraName(i_layer, "/rel_attn/LayerNorm/gamma:0");
setByNpz(params_npz, str,this->attr_layernorm_gamma, this->hidden_dim);
str=paraName(i_layer, "/rel_attn/LayerNorm/beta:0");
setByNpz(params_npz, str,this->attr_layernorm_beta, this->hidden_dim);
str=paraName(i_layer, "/ff/layer_1/kernel:0");
setByNpz(params_npz, str,this->attr_fc1_kernel, this->hidden_dim*this->hidden_dim_ff);
str=paraName(i_layer, "/ff/layer_1/bias:0");
setByNpz(params_npz, str,this->attr_fc1_bias,this->hidden_dim_ff);
str=paraName(i_layer, "/ff/layer_2/kernel:0");
setByNpz(params_npz, str,this->attr_fc2_kernel, this->hidden_dim*this->hidden_dim_ff);
str=paraName(i_layer, "/ff/layer_2/bias:0");
setByNpz(params_npz, str,this->attr_fc2_bias, this->hidden_dim);
str=paraName(i_layer, "/ff/LayerNorm/gamma:0");
setByNpz(params_npz, str,this->attr_ff_gamma, this->hidden_dim);
str=paraName(i_layer, "/ff/LayerNorm/beta:0");
setByNpz(params_npz, str,this->attr_ff_beta, this->hidden_dim);
}
template <typename T>
LayerWeightHost<T>::~LayerWeightHost(){
delete []this->attr_kernel_Q;
delete []this->attr_kernel_K;
delete []this->attr_kernel_V;
delete []this->attr_bias_Q_w;
delete []this->attr_pos_emb;
delete []this->attr_bias_Q_r;
delete []this->attr_seg_embed;
delete []this->attr_bias_Q_s;
delete []this->attr_proj_o;
delete []this->attr_layernorm_gamma;
delete []this->attr_layernorm_beta;
delete []this->attr_fc1_kernel;
delete []this->attr_fc1_bias;
delete []this->attr_fc2_kernel;
delete []this->attr_fc2_bias;
delete []this->attr_ff_gamma;
delete []this->attr_ff_beta;
}
//The explicit instantiation part
template class PreWeightDevice<__half>;
template class PreWeightDevice<float>;
template class PreWeightHost<__half>;
template class PreWeightHost<float>;
template class LayerWeightDevice<__half>;
template class LayerWeightDevice<float>;
template class LayerWeightHost<__half>;
template class LayerWeightHost<float>;
#include <cuda.h>
#include "DataFormats/EcalDigi/interface/EcalDataFrame.h"
#include "DataFormats/EcalDigi/interface/EcalMGPASample.h"
#include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHit.h"
#include "DataFormats/Math/interface/approx_exp.h"
#include "DataFormats/Math/interface/approx_log.h"
#include "FWCore/Utilities/interface/CMSUnrollLoop.h"
#include "TimeComputationKernels.h"
#include "KernelHelpers.h"
//#define DEBUG
//#define ECAL_RECO_CUDA_DEBUG
namespace ecal {
namespace multifit {
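// Helper: bit (MAXSAMPLES - 1 - sample) of sample_mask decides whether a sample participates,
// i.e. the most significant of the MAXSAMPLES bits corresponds to sample 0.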
__device__ __forceinline__ bool use_sample(unsigned int sample_mask, unsigned int sample) {
return sample_mask & (0x1 << (EcalDataFrame::MAXSAMPLES - (sample + 1)));
}
__global__ void kernel_time_compute_nullhypot(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
bool const* useless_sample_values,
SampleVector::Scalar* chi2s,
SampleVector::Scalar* sum0s,
SampleVector::Scalar* sumAAs,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ltx = threadIdx.x;
int ch = tx / nsamples;
int nchannels_per_block = blockDim.x / nsamples;
// threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
int sample = tx % nsamples;
// shared mem inits
extern __shared__ char sdata[];
char* s_sum0 = sdata;
SampleVector::Scalar* s_sum1 = reinterpret_cast<SampleVector::Scalar*>(s_sum0 + nchannels_per_block * nsamples);
SampleVector::Scalar* s_sumA = s_sum1 + nchannels_per_block * nsamples;
SampleVector::Scalar* s_sumAA = s_sumA + nchannels_per_block * nsamples;
// TODO make sure no div by 0
const auto inv_error =
useless_sample_values[tx] ? 0.0 : 1.0 / (sample_value_errors[tx] * sample_value_errors[tx]);
const auto sample_value = sample_values[tx];
s_sum0[ltx] = useless_sample_values[tx] ? 0 : 1;
s_sum1[ltx] = inv_error;
s_sumA[ltx] = sample_value * inv_error;
s_sumAA[ltx] = sample_value * sample_value * inv_error;
__syncthreads();
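// Tree reduction over the 10 samples of this channel: samples [5,9] are folded onto [0,4],
// then [2,3] onto [0,1] (sample 3 is added twice in that step), and the final step combines
// [0,1] and subtracts the double-counted sample 3.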
// 5 threads for [0, 4] samples
if (sample < 5) {
s_sum0[ltx] += s_sum0[ltx + 5];
s_sum1[ltx] += s_sum1[ltx + 5];
s_sumA[ltx] += s_sumA[ltx + 5];
s_sumAA[ltx] += s_sumAA[ltx + 5];
}
__syncthreads();
if (sample < 2) {
// note double counting of sample 3
s_sum0[ltx] += s_sum0[ltx + 2] + s_sum0[ltx + 3];
s_sum1[ltx] += s_sum1[ltx + 2] + s_sum1[ltx + 3];
s_sumA[ltx] += s_sumA[ltx + 2] + s_sumA[ltx + 3];
s_sumAA[ltx] += s_sumAA[ltx + 2] + s_sumAA[ltx + 3];
}
__syncthreads();
if (sample == 0) {
// note, subtract to remove the double counting of sample == 3
const auto sum0 = s_sum0[ltx] + s_sum0[ltx + 1] - s_sum0[ltx + 3];
const auto sum1 = s_sum1[ltx] + s_sum1[ltx + 1] - s_sum1[ltx + 3];
const auto sumA = s_sumA[ltx] + s_sumA[ltx + 1] - s_sumA[ltx + 3];
const auto sumAA = s_sumAA[ltx] + s_sumAA[ltx + 1] - s_sumAA[ltx + 3];
const auto chi2 = sum0 > 0 ? (sumAA - sumA * sumA / sum1) / sum0 : static_cast<ScalarType>(0);
chi2s[ch] = chi2;
sum0s[ch] = sum0;
sumAAs[ch] = sumAA;
#ifdef DEBUG_TC_NULLHYPOT
if (ch == 0) {
printf("chi2 = %f sum0 = %d sumAA = %f\n", chi2, static_cast<int>(sum0), sumAA);
}
#endif
}
}
constexpr float fast_expf(float x) { return unsafe_expf<6>(x); }
constexpr float fast_logf(float x) { return unsafe_logf<7>(x); }
//#define DEBUG_TC_MAKERATIO
//
// launch ctx parameters are
// 45 threads per channel, X channels per block, Y blocks
// 45 comes from: 10 samples for i <- 0 to 9 and for j <- i+1 to 9
// TODO: it might be much better to use 32 threads per channel instead of 45
// to simplify the synchronization
//
__global__ void kernel_time_compute_makeratio(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids_eb,
uint32_t const* dids_ee,
bool const* useless_sample_values,
char const* pedestal_nums,
ConfigurationParameters::type const* amplitudeFitParametersEB,
ConfigurationParameters::type const* amplitudeFitParametersEE,
ConfigurationParameters::type const* timeFitParametersEB,
ConfigurationParameters::type const* timeFitParametersEE,
SampleVector::Scalar const* sumAAsNullHypot,
SampleVector::Scalar const* sum0sNullHypot,
SampleVector::Scalar* tMaxAlphaBetas,
SampleVector::Scalar* tMaxErrorAlphaBetas,
SampleVector::Scalar* g_accTimeMax,
SampleVector::Scalar* g_accTimeWgt,
TimeComputationState* g_state,
unsigned const int timeFitParameters_sizeEB,
unsigned const int timeFitParameters_sizeEE,
ConfigurationParameters::type const timeFitLimits_firstEB,
ConfigurationParameters::type const timeFitLimits_firstEE,
ConfigurationParameters::type const timeFitLimits_secondEB,
ConfigurationParameters::type const timeFitLimits_secondEE,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nthreads_per_channel = 45; // n=10, n(n-1)/2
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockDim.x * blockIdx.x;
const int ch = gtx / nthreads_per_channel;
const int ltx = threadIdx.x % nthreads_per_channel;
const int ch_start = ch * nsamples;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
// remove inactive threads
// threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
const auto did = DetId{dids[inputCh]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto* amplitudeFitParameters = isBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
const auto* timeFitParameters = isBarrel ? timeFitParametersEB : timeFitParametersEE;
const auto timeFitParameters_size = isBarrel ? timeFitParameters_sizeEB : timeFitParameters_sizeEE;
const auto timeFitLimits_first = isBarrel ? timeFitLimits_firstEB : timeFitLimits_firstEE;
const auto timeFitLimits_second = isBarrel ? timeFitLimits_secondEB : timeFitLimits_secondEE;
extern __shared__ char smem[];
ScalarType* shr_chi2s = reinterpret_cast<ScalarType*>(smem);
ScalarType* shr_time_wgt = shr_chi2s + blockDim.x;
ScalarType* shr_time_max = shr_time_wgt + blockDim.x;
ScalarType* shrTimeMax = shr_time_max + blockDim.x;
ScalarType* shrTimeWgt = shrTimeMax + blockDim.x;
// map tx -> (sample_i, sample_j)
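// Triangular enumeration of the 45 (i, j) pairs with i < j over 10 samples:
// ltx 0..8 -> (0,1)..(0,9), ltx 9..16 -> (1,2)..(1,9), ..., ltx 42..43 -> (7,8),(7,9), ltx 44 -> (8,9).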
int sample_i, sample_j = 0;
if (ltx >= 0 && ltx <= 8) {
sample_i = 0;
sample_j = 1 + ltx;
} else if (ltx <= 16) {
sample_i = 1;
sample_j = 2 + ltx - 9;
} else if (ltx <= 23) {
sample_i = 2;
sample_j = 3 + ltx - 17;
} else if (ltx <= 29) {
sample_i = 3;
sample_j = 4 + ltx - 24;
} else if (ltx <= 34) {
sample_i = 4;
sample_j = 5 + ltx - 30;
} else if (ltx <= 38) {
sample_i = 5;
sample_j = 6 + ltx - 35;
} else if (ltx <= 41) {
sample_i = 6;
sample_j = 7 + ltx - 39;
} else if (ltx <= 43) {
sample_i = 7;
sample_j = 8 + ltx - 42;
} else if (ltx <= 44) {
sample_i = 8;
sample_j = 9;
} else
assert(false);
const auto tx_i = ch_start + sample_i;
const auto tx_j = ch_start + sample_j;
//
// note, given the way we partition the block, with 45 threads per channel
// we will end up with inactive threads which need to be dragged along
// through the synching point
//
bool const condForUselessSamples = useless_sample_values[tx_i] || useless_sample_values[tx_j] ||
sample_values[tx_i] <= 1 || sample_values[tx_j] <= 1;
//
// see cpu implementation for explanation
//
ScalarType chi2 = std::numeric_limits<ScalarType>::max();
ScalarType tmax = 0;
ScalarType tmaxerr = 0;
shrTimeMax[threadIdx.x] = 0;
shrTimeWgt[threadIdx.x] = 0;
bool internalCondForSkipping1 = true;
bool internalCondForSkipping2 = true;
if (!condForUselessSamples) {
const auto rtmp = sample_values[tx_i] / sample_values[tx_j];
const auto invampl_i = 1.0 / sample_values[tx_i];
const auto relErr2_i = sample_value_errors[tx_i] * sample_value_errors[tx_i] * invampl_i * invampl_i;
const auto invampl_j = 1.0 / sample_values[tx_j];
const auto relErr2_j = sample_value_errors[tx_j] * sample_value_errors[tx_j] * invampl_j * invampl_j;
const auto err1 = rtmp * rtmp * (relErr2_i + relErr2_j);
auto err2 = sample_value_errors[tx_j] * (sample_values[tx_i] - sample_values[tx_j]) * (invampl_j * invampl_j);
// TODO non-divergent branch for a block if each block has 1 channel
// otherwise non-divergent for groups of 45 threads
// at this point, pedestal_nums[ch] can be either 0, 1 or 2
if (pedestal_nums[ch] == 2)
err2 *= err2 * 0.5;
const auto err3 = (0.289 * 0.289) * (invampl_j * invampl_j);
const auto total_error = std::sqrt(err1 + err2 + err3);
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto alphabeta = alpha * beta;
const auto invalphabeta = 1.0 / alphabeta;
// variables instead of a struct
const auto ratio_index = sample_i;
const auto ratio_step = sample_j - sample_i;
const auto ratio_value = rtmp;
const auto ratio_error = total_error;
const auto rlim_i_j = fast_expf(static_cast<ScalarType>(sample_j - sample_i) / beta) - 0.001;
internalCondForSkipping1 = !(total_error < 1.0 && rtmp > 0.001 && rtmp < rlim_i_j);
if (!internalCondForSkipping1) {
//
// precompute.
// in cpu version this was done conditionally
// however easier to do it here (precompute) and then just filter out
// if not needed
//
const auto l_timeFitLimits_first = timeFitLimits_first;
const auto l_timeFitLimits_second = timeFitLimits_second;
if (ratio_step == 1 && ratio_value >= l_timeFitLimits_first && ratio_value <= l_timeFitLimits_second) {
const auto time_max_i = static_cast<ScalarType>(ratio_index);
auto u = timeFitParameters[timeFitParameters_size - 1];
CMS_UNROLL_LOOP
for (int k = timeFitParameters_size - 2; k >= 0; k--)
u = u * ratio_value + timeFitParameters[k];
auto du = (timeFitParameters_size - 1) * (timeFitParameters[timeFitParameters_size - 1]);
for (int k = timeFitParameters_size - 2; k >= 1; k--)
du = du * ratio_value + k * timeFitParameters[k];
const auto error2 = ratio_error * ratio_error * du * du;
const auto time_max = error2 > 0 ? (time_max_i - u) / error2 : static_cast<ScalarType>(0);
const auto time_wgt = error2 > 0 ? 1.0 / error2 : static_cast<ScalarType>(0);
// store into shared mem
// note, this name is essentially identical to the one used
// below.
shrTimeMax[threadIdx.x] = error2 > 0 ? time_max : 0;
shrTimeWgt[threadIdx.x] = error2 > 0 ? time_wgt : 0;
} else {
shrTimeMax[threadIdx.x] = 0;
shrTimeWgt[threadIdx.x] = 0;
}
// continue with ratios
const auto stepOverBeta = static_cast<SampleVector::Scalar>(ratio_step) / beta;
const auto offset = static_cast<SampleVector::Scalar>(ratio_index) + alphabeta;
const auto rmin = std::max(ratio_value - ratio_error, 0.001);
const auto rmax = std::min(ratio_value + ratio_error,
fast_expf(static_cast<SampleVector::Scalar>(ratio_step) / beta) - 0.001);
const auto time1 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmin)) / alpha) - 1.0);
const auto time2 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmax)) / alpha) - 1.0);
// set these guys
tmax = 0.5 * (time1 + time2);
tmaxerr = 0.5 * std::sqrt((time1 - time2) * (time1 - time2));
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d tmax = %f tmaxerr = %f time1 = %f time2 = %f offset = %f rmin = %f rmax = %f\n",
ch,
ltx,
tmax,
tmaxerr,
time1,
time2,
offset,
rmin,
rmax);
#endif
SampleVector::Scalar sumAf = 0;
SampleVector::Scalar sumff = 0;
const int itmin = std::max(-1, static_cast<int>(std::floor(tmax - alphabeta)));
auto loffset = (static_cast<ScalarType>(itmin) - tmax) * invalphabeta;
// TODO: data dependence
for (int it = itmin + 1; it < nsamples; it++) {
loffset += invalphabeta;
if (useless_sample_values[ch_start + it])
continue;
const auto inverr2 = 1.0 / (sample_value_errors[ch_start + it] * sample_value_errors[ch_start + it]);
const auto term1 = 1.0 + loffset;
const auto f = (term1 > 1e-6) ? fast_expf(alpha * (fast_logf(term1) - loffset)) : 0;
sumAf += sample_values[ch_start + it] * (f * inverr2);
sumff += f * (f * inverr2);
}
const auto sumAA = sumAAsNullHypot[ch];
const auto sum0 = sum0sNullHypot[ch];
chi2 = sumAA;
// TODO: sum0 can not be 0 below, need to introduce the check upfront
if (sumff > 0) {
chi2 = sumAA - sumAf * (sumAf / sumff);
}
chi2 /= sum0;
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d sumAf = %f sumff = %f sumAA = %f sum0 = %d tmax = %f tmaxerr = %f chi2 = %f\n",
ch,
ltx,
sumAf,
sumff,
sumAA,
static_cast<int>(sum0),
tmax,
tmaxerr,
chi2);
#endif
if (chi2 > 0 && tmax > 0 && tmaxerr > 0)
internalCondForSkipping2 = false;
else
chi2 = std::numeric_limits<ScalarType>::max();
}
}
// store into smem
shr_chi2s[threadIdx.x] = chi2;
__syncthreads();
// find min chi2 - quite crude for now
// TODO validate/check
char iter = nthreads_per_channel / 2 + nthreads_per_channel % 2;
bool oddElements = nthreads_per_channel % 2;
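// Pairwise min-reduction over the 45 threads of this channel; iter follows the sequence
// 23, 12, 6, 3, 2, 1, and for an odd-sized half the last active thread simply keeps its own value.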
CMS_UNROLL_LOOP
while (iter >= 1) {
if (ltx < iter)
// for odd ns, the last guy will just store itself
// exception is for ltx == 0 and iter==1
shr_chi2s[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_chi2s[threadIdx.x]
: std::min(shr_chi2s[threadIdx.x], shr_chi2s[threadIdx.x + iter]);
__syncthreads();
oddElements = iter % 2;
iter = iter == 1 ? iter / 2 : iter / 2 + iter % 2;
}
// filter out inactive or useless samples threads
if (!condForUselessSamples && !internalCondForSkipping1 && !internalCondForSkipping2) {
// min chi2, now compute weighted average of tmax measurements
// see cpu version for more explanation
const auto chi2min = shr_chi2s[threadIdx.x - ltx];
const auto chi2Limit = chi2min + 1.0;
const auto inverseSigmaSquared = chi2 < chi2Limit ? 1.0 / (tmaxerr * tmaxerr) : 0.0;
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d chi2min = %f chi2Limit = %f inverseSigmaSquared = %f\n",
ch,
ltx,
chi2min,
chi2Limit,
inverseSigmaSquared);
#endif
// store into shared mem and run reduction
// TODO: check if cooperative groups would be better
// TODO: check if shuffling intrinsics are better
shr_time_wgt[threadIdx.x] = inverseSigmaSquared;
shr_time_max[threadIdx.x] = tmax * inverseSigmaSquared;
} else {
shr_time_wgt[threadIdx.x] = 0;
shr_time_max[threadIdx.x] = 0;
}
__syncthreads();
// reduce to compute time_max and time_wgt
iter = nthreads_per_channel / 2 + nthreads_per_channel % 2;
oddElements = nthreads_per_channel % 2;
CMS_UNROLL_LOOP
while (iter >= 1) {
if (ltx < iter) {
shr_time_wgt[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_time_wgt[threadIdx.x]
: shr_time_wgt[threadIdx.x] + shr_time_wgt[threadIdx.x + iter];
shr_time_max[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_time_max[threadIdx.x]
: shr_time_max[threadIdx.x] + shr_time_max[threadIdx.x + iter];
shrTimeMax[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shrTimeMax[threadIdx.x]
: shrTimeMax[threadIdx.x] + shrTimeMax[threadIdx.x + iter];
shrTimeWgt[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shrTimeWgt[threadIdx.x]
: shrTimeWgt[threadIdx.x] + shrTimeWgt[threadIdx.x + iter];
}
__syncthreads();
oddElements = iter % 2;
iter = iter == 1 ? iter / 2 : iter / 2 + iter % 2;
}
// load from shared memory the 0th guy (will contain accumulated values)
// compute
// store into global mem
if (ltx == 0) {
const auto tmp_time_max = shr_time_max[threadIdx.x];
const auto tmp_time_wgt = shr_time_wgt[threadIdx.x];
// we are done if the number of time ratios is 0
if (tmp_time_wgt == 0 && tmp_time_max == 0) {
g_state[ch] = TimeComputationState::Finished;
return;
}
// no div by 0
const auto tMaxAlphaBeta = tmp_time_max / tmp_time_wgt;
const auto tMaxErrorAlphaBeta = 1.0 / std::sqrt(tmp_time_wgt);
tMaxAlphaBetas[ch] = tMaxAlphaBeta;
tMaxErrorAlphaBetas[ch] = tMaxErrorAlphaBeta;
g_accTimeMax[ch] = shrTimeMax[threadIdx.x];
g_accTimeWgt[ch] = shrTimeWgt[threadIdx.x];
g_state[ch] = TimeComputationState::NotFinished;
#ifdef DEBUG_TC_MAKERATIO
printf("ch = %d time_max = %f time_wgt = %f\n", ch, tmp_time_max, tmp_time_wgt);
printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f timeMax = %f timeWgt = %f\n",
ch,
tMaxAlphaBeta,
tMaxErrorAlphaBeta,
shrTimeMax[threadIdx.x],
shrTimeWgt[threadIdx.x]);
#endif
}
}
/// launch ctx parameters are
/// 10 threads per channel, N channels per block, Y blocks
/// TODO: do we need to keep the state around or can be removed?!
//#define DEBUG_FINDAMPLCHI2_AND_FINISH
__global__ void kernel_time_compute_findamplchi2_and_finish(
SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids_eb,
uint32_t const* dids_ee,
bool const* useless_samples,
SampleVector::Scalar const* g_tMaxAlphaBeta,
SampleVector::Scalar const* g_tMaxErrorAlphaBeta,
SampleVector::Scalar const* g_accTimeMax,
SampleVector::Scalar const* g_accTimeWgt,
ConfigurationParameters::type const* amplitudeFitParametersEB,
ConfigurationParameters::type const* amplitudeFitParametersEE,
SampleVector::Scalar const* sumAAsNullHypot,
SampleVector::Scalar const* sum0sNullHypot,
SampleVector::Scalar const* chi2sNullHypot,
TimeComputationState* g_state,
SampleVector::Scalar* g_ampMaxAlphaBeta,
SampleVector::Scalar* g_ampMaxError,
SampleVector::Scalar* g_timeMax,
SampleVector::Scalar* g_timeError,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
// configure shared mem
// per block, we need #threads per block * 2 * sizeof(ScalarType)
// we run with N channels per block
extern __shared__ char smem[];
ScalarType* shr_sumAf = reinterpret_cast<ScalarType*>(smem);
ScalarType* shr_sumff = shr_sumAf + blockDim.x;
if (ch >= nchannels)
return;
auto state = g_state[ch];
const auto did = DetId{dids[inputCh]};
const auto* amplitudeFitParameters =
did.subdetId() == EcalBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
// TODO is that better than storing into global and launching another kernel
// for the first 10 threads
if (state == TimeComputationState::NotFinished) {
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto alphabeta = alpha * beta;
const auto invalphabeta = 1.0 / alphabeta;
const auto tMaxAlphaBeta = g_tMaxAlphaBeta[ch];
const auto sample_value = sample_values[gtx];
const auto sample_value_error = sample_value_errors[gtx];
const auto inverr2 =
useless_samples[gtx] ? static_cast<ScalarType>(0) : 1.0 / (sample_value_error * sample_value_error);
const auto offset = (static_cast<ScalarType>(sample) - tMaxAlphaBeta) * invalphabeta;
const auto term1 = 1.0 + offset;
const auto f = term1 > 1e-6 ? fast_expf(alpha * (fast_logf(term1) - offset)) : static_cast<ScalarType>(0.0);
const auto sumAf = sample_value * (f * inverr2);
const auto sumff = f * (f * inverr2);
// store into shared mem
shr_sumAf[threadIdx.x] = sumAf;
shr_sumff[threadIdx.x] = sumff;
} else {
shr_sumAf[threadIdx.x] = 0;
shr_sumff[threadIdx.x] = 0;
}
__syncthreads();
// reduce
// unroll completely here (but hardcoded)
if (sample < 5) {
shr_sumAf[threadIdx.x] += shr_sumAf[threadIdx.x + 5];
shr_sumff[threadIdx.x] += shr_sumff[threadIdx.x + 5];
}
__syncthreads();
if (sample < 2) {
// will need to subtract for ltx = 3, we double count here
shr_sumAf[threadIdx.x] += shr_sumAf[threadIdx.x + 2] + shr_sumAf[threadIdx.x + 3];
shr_sumff[threadIdx.x] += shr_sumff[threadIdx.x + 2] + shr_sumff[threadIdx.x + 3];
}
__syncthreads();
if (sample == 0) {
// exit if the state is done
// note, we do not exit before all __syncthreads() are finished
if (state == TimeComputationState::Finished) {
g_timeMax[ch] = 5;
g_timeError[ch] = -999;
return;
}
// subtract to avoid double counting
const auto sumff = shr_sumff[threadIdx.x] + shr_sumff[threadIdx.x + 1] - shr_sumff[threadIdx.x + 3];
const auto sumAf = shr_sumAf[threadIdx.x] + shr_sumAf[threadIdx.x + 1] - shr_sumAf[threadIdx.x + 3];
const auto ampMaxAlphaBeta = sumff > 0 ? sumAf / sumff : 0;
const auto sumAA = sumAAsNullHypot[ch];
const auto sum0 = sum0sNullHypot[ch];
const auto nullChi2 = chi2sNullHypot[ch];
if (sumff > 0) {
const auto chi2AlphaBeta = (sumAA - sumAf * sumAf / sumff) / sum0;
if (chi2AlphaBeta > nullChi2) {
// null hypothesis is better
state = TimeComputationState::Finished;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d chi2AlphaBeta = %f nullChi2 = %f sumAA = %f sumAf = %f sumff = %f sum0 = %f\n",
ch,
chi2AlphaBeta,
nullChi2,
sumAA,
sumAf,
sumff,
sum0);
#endif
}
// store to global
g_ampMaxAlphaBeta[ch] = ampMaxAlphaBeta;
} else {
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d sum0 = %f sumAA = %f sumff = %f sumAf = %f\n", ch, sum0, sumAA, sumff, sumAf);
#endif
state = TimeComputationState::Finished;
}
// store the state to global and finish calcs
g_state[ch] = state;
if (state == TimeComputationState::Finished) {
// store default values into global
g_timeMax[ch] = 5;
g_timeError[ch] = -999;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d finished state\n", ch);
#endif
return;
}
const auto ampMaxError = g_ampMaxError[ch];
const auto test_ratio = ampMaxAlphaBeta / ampMaxError;
const auto accTimeMax = g_accTimeMax[ch];
const auto accTimeWgt = g_accTimeWgt[ch];
const auto tMaxAlphaBeta = g_tMaxAlphaBeta[ch];
const auto tMaxErrorAlphaBeta = g_tMaxErrorAlphaBeta[ch];
// branch to separate large vs small pulses
// see cpu version for more info
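// test_ratio = ampMaxAlphaBeta / ampMaxError selects the method: > 10 uses the pure ratio-based time,
// 5..10 uses a linear blend of the ratio and alpha-beta estimates, otherwise the alpha-beta fit result is kept.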
if (test_ratio > 5.0 && accTimeWgt > 0) {
const auto tMaxRatio = accTimeWgt > 0 ? accTimeMax / accTimeWgt : static_cast<ScalarType>(0);
const auto tMaxErrorRatio = accTimeWgt > 0 ? 1.0 / std::sqrt(accTimeWgt) : static_cast<ScalarType>(0);
if (test_ratio > 10.0) {
g_timeMax[ch] = tMaxRatio;
g_timeError[ch] = tMaxErrorRatio;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d tMaxRatio = %f tMaxErrorRatio = %f\n", ch, tMaxRatio, tMaxErrorRatio);
#endif
} else {
const auto timeMax = (tMaxAlphaBeta * (10.0 - ampMaxAlphaBeta / ampMaxError) +
tMaxRatio * (ampMaxAlphaBeta / ampMaxError - 5.0)) /
5.0;
const auto timeError = (tMaxErrorAlphaBeta * (10.0 - ampMaxAlphaBeta / ampMaxError) +
tMaxErrorRatio * (ampMaxAlphaBeta / ampMaxError - 5.0)) /
5.0;
state = TimeComputationState::Finished;
g_state[ch] = state;
g_timeMax[ch] = timeMax;
g_timeError[ch] = timeError;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d timeMax = %f timeError = %f\n", ch, timeMax, timeError);
#endif
}
} else {
state = TimeComputationState::Finished;
g_state[ch] = state;
g_timeMax[ch] = tMaxAlphaBeta;
g_timeError[ch] = tMaxErrorAlphaBeta;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f\n", ch, tMaxAlphaBeta, tMaxErrorAlphaBeta);
#endif
}
}
}
__global__ void kernel_time_compute_fixMGPAslew(uint16_t const* digis_eb,
uint16_t const* digis_ee,
SampleVector::Scalar* sample_values,
SampleVector::Scalar* sample_value_errors,
bool* useless_sample_values,
unsigned const int sample_mask,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
const int inputGtx = ch >= offsetForInputs ? gtx - offsetForInputs * nsamples : gtx;
const auto* digis = ch >= offsetForInputs ? digis_ee : digis_eb;
// remove thread for sample 0, oversubscribing is easier than ....
if (ch >= nchannels || sample == 0)
return;
if (!use_sample(sample_mask, sample))
return;
const auto gainIdPrev = ecalMGPA::gainId(digis[inputGtx - 1]);
const auto gainIdNext = ecalMGPA::gainId(digis[inputGtx]);
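// A gain switch between consecutive samples (gainId increases, i.e. the MGPA drops to a lower gain)
// can distort the preceding sample because of the MGPA slew, so that sample is marked useless below.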
if (gainIdPrev >= 1 && gainIdPrev <= 3 && gainIdNext >= 1 && gainIdNext <= 3 && gainIdPrev < gainIdNext) {
sample_values[gtx - 1] = 0;
sample_value_errors[gtx - 1] = 1e+9;
useless_sample_values[gtx - 1] = true;
}
}
__global__ void kernel_time_compute_ampl(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids,
bool const* useless_samples,
SampleVector::Scalar const* g_timeMax,
SampleVector::Scalar const* amplitudeFitParametersEB,
SampleVector::Scalar const* amplitudeFitParametersEE,
SampleVector::Scalar* g_amplitudeMax,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr ScalarType corr4 = 1.;
constexpr ScalarType corr6 = 1.;
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
if (ch >= nchannels)
return;
const auto did = DetId{dids[ch]};
const auto* amplitudeFitParameters =
did.subdetId() == EcalBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
// configure shared mem
extern __shared__ char smem[];
ScalarType* shr_sum1 = reinterpret_cast<ScalarType*>(smem);
auto* shr_sumA = shr_sum1 + blockDim.x;
auto* shr_sumF = shr_sumA + blockDim.x;
auto* shr_sumAF = shr_sumF + blockDim.x;
auto* shr_sumFF = shr_sumAF + blockDim.x;
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto timeMax = g_timeMax[ch];
const auto pedestalLimit = timeMax - (alpha * beta) - 1.0;
const auto sample_value = sample_values[gtx];
const auto sample_value_error = sample_value_errors[gtx];
const auto inverr2 =
sample_value_error > 0 ? 1. / (sample_value_error * sample_value_error) : static_cast<ScalarType>(0);
const auto termOne = 1 + (sample - timeMax) / (alpha * beta);
const auto f = termOne > 1.e-5 ? fast_expf(alpha * fast_logf(termOne) - (sample - timeMax) / beta)
: static_cast<ScalarType>(0.);
bool const cond = ((sample < pedestalLimit) || (f > 0.6 * corr6 && sample <= timeMax) ||
(f > 0.4 * corr4 && sample >= timeMax)) &&
!useless_samples[gtx];
// store into shared mem
shr_sum1[threadIdx.x] = cond ? inverr2 : static_cast<ScalarType>(0);
shr_sumA[threadIdx.x] = cond ? sample_value * inverr2 : static_cast<ScalarType>(0);
shr_sumF[threadIdx.x] = cond ? f * inverr2 : static_cast<ScalarType>(0);
shr_sumAF[threadIdx.x] = cond ? (f * inverr2) * sample_value : static_cast<ScalarType>(0);
shr_sumFF[threadIdx.x] = cond ? f * (f * inverr2) : static_cast<ScalarType>(0);
// reduction
if (sample <= 4) {
shr_sum1[threadIdx.x] += shr_sum1[threadIdx.x + 5];
shr_sumA[threadIdx.x] += shr_sumA[threadIdx.x + 5];
shr_sumF[threadIdx.x] += shr_sumF[threadIdx.x + 5];
shr_sumAF[threadIdx.x] += shr_sumAF[threadIdx.x + 5];
shr_sumFF[threadIdx.x] += shr_sumFF[threadIdx.x + 5];
}
__syncthreads();
if (sample < 2) {
// note: we double count sample 3
shr_sum1[threadIdx.x] += shr_sum1[threadIdx.x + 2] + shr_sum1[threadIdx.x + 3];
shr_sumA[threadIdx.x] += shr_sumA[threadIdx.x + 2] + shr_sumA[threadIdx.x + 3];
shr_sumF[threadIdx.x] += shr_sumF[threadIdx.x + 2] + shr_sumF[threadIdx.x + 3];
shr_sumAF[threadIdx.x] += shr_sumAF[threadIdx.x + 2] + shr_sumAF[threadIdx.x + 3];
shr_sumFF[threadIdx.x] += shr_sumFF[threadIdx.x + 2] + shr_sumFF[threadIdx.x + 3];
}
__syncthreads();
if (sample == 0) {
const auto sum1 = shr_sum1[threadIdx.x] + shr_sum1[threadIdx.x + 1] - shr_sum1[threadIdx.x + 3];
const auto sumA = shr_sumA[threadIdx.x] + shr_sumA[threadIdx.x + 1] - shr_sumA[threadIdx.x + 3];
const auto sumF = shr_sumF[threadIdx.x] + shr_sumF[threadIdx.x + 1] - shr_sumF[threadIdx.x + 3];
const auto sumAF = shr_sumAF[threadIdx.x] + shr_sumAF[threadIdx.x + 1] - shr_sumAF[threadIdx.x + 3];
const auto sumFF = shr_sumFF[threadIdx.x] + shr_sumFF[threadIdx.x + 1] - shr_sumFF[threadIdx.x + 3];
const auto denom = sumFF * sum1 - sumF * sumF;
const auto condForDenom = sum1 > 0 && std::abs(denom) > 1.e-20;
const auto amplitudeMax = condForDenom ? (sumAF * sum1 - sumA * sumF) / denom : static_cast<ScalarType>(0.);
// store into global mem
g_amplitudeMax[ch] = amplitudeMax;
}
}
//#define ECAL_RECO_CUDA_TC_INIT_DEBUG
__global__ void kernel_time_computation_init(uint16_t const* digis_eb,
uint32_t const* dids_eb,
uint16_t const* digis_ee,
uint32_t const* dids_ee,
float const* rms_x12,
float const* rms_x6,
float const* rms_x1,
float const* mean_x12,
float const* mean_x6,
float const* mean_x1,
float const* gain12Over6,
float const* gain6Over1,
SampleVector::Scalar* sample_values,
SampleVector::Scalar* sample_value_errors,
SampleVector::Scalar* ampMaxError,
bool* useless_sample_values,
char* pedestal_nums,
uint32_t const offsetForHashes,
uint32_t const offsetForInputs,
unsigned const int sample_maskEB,
unsigned const int sample_maskEE,
int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int tx = threadIdx.x + blockDim.x * blockIdx.x;
const int ch = tx / nsamples;
const int inputTx = ch >= offsetForInputs ? tx - offsetForInputs * nsamples : tx;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
const auto* digis = ch >= offsetForInputs ? digis_ee : digis_eb;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
// threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
// indices/inits
const int sample = tx % nsamples;
const int input_ch_start = inputCh * nsamples;
SampleVector::Scalar pedestal = 0.;
int num = 0;
// configure shared mem
extern __shared__ char smem[];
ScalarType* shrSampleValues = reinterpret_cast<SampleVector::Scalar*>(smem);
ScalarType* shrSampleValueErrors = shrSampleValues + blockDim.x;
// 0 and 1 sample values
const auto adc0 = ecalMGPA::adc(digis[input_ch_start]);
const auto gainId0 = ecalMGPA::gainId(digis[input_ch_start]);
const auto adc1 = ecalMGPA::adc(digis[input_ch_start + 1]);
const auto gainId1 = ecalMGPA::gainId(digis[input_ch_start + 1]);
const auto did = DetId{dids[inputCh]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto sample_mask = did.subdetId() == EcalBarrel ? sample_maskEB : sample_maskEE;
const auto hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId())
: offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId());
// set pedestal
// TODO this branch is non-divergent for a group of 10 threads
if (gainId0 == 1 && use_sample(sample_mask, 0)) {
pedestal = static_cast<SampleVector::Scalar>(adc0);
num = 1;
const auto diff = adc1 - adc0;
if (gainId1 == 1 && use_sample(sample_mask, 1) && std::abs(diff) < 3 * rms_x12[hashedId]) {
pedestal = (pedestal + static_cast<SampleVector::Scalar>(adc1)) / 2.0;
num = 2;
}
} else {
pedestal = mean_x12[ch];
}
// ped subtracted and gain-renormalized samples.
const auto gainId = ecalMGPA::gainId(digis[inputTx]);
const auto adc = ecalMGPA::adc(digis[inputTx]);
bool bad = false;
SampleVector::Scalar sample_value, sample_value_error;
// TODO divergent branch
// TODO: piece below is general both for amplitudes and timing
// potentially there is a way to reduce the amount of code...
if (!use_sample(sample_mask, sample)) {
bad = true;
sample_value = 0;
sample_value_error = 0;
} else if (gainId == 1) {
sample_value = static_cast<SampleVector::Scalar>(adc) - pedestal;
sample_value_error = rms_x12[hashedId];
} else if (gainId == 2) {
sample_value = (static_cast<SampleVector::Scalar>(adc) - mean_x6[hashedId]) * gain12Over6[hashedId];
sample_value_error = rms_x6[hashedId] * gain12Over6[hashedId];
} else if (gainId == 3) {
sample_value =
(static_cast<SampleVector::Scalar>(adc) - mean_x1[hashedId]) * gain6Over1[hashedId] * gain12Over6[hashedId];
sample_value_error = rms_x1[hashedId] * gain6Over1[hashedId] * gain12Over6[hashedId];
} else {
sample_value = 0;
sample_value_error = 0;
bad = true;
}
// TODO: make sure we save things correctly when sample is useless
const auto useless_sample = (sample_value_error <= 0) | bad;
useless_sample_values[tx] = useless_sample;
sample_values[tx] = sample_value;
sample_value_errors[tx] = useless_sample ? 1e+9 : sample_value_error;
// DEBUG
#ifdef ECAL_RECO_CUDA_TC_INIT_DEBUG
if (ch == 0) {
printf("sample = %d sample_value = %f sample_value_error = %f useless = %c\n",
sample,
sample_value,
sample_value_error,
useless_sample ? '1' : '0');
}
#endif
// store into the shared mem
shrSampleValues[threadIdx.x] = sample_value_error > 0 ? sample_value : std::numeric_limits<ScalarType>::min();
shrSampleValueErrors[threadIdx.x] = sample_value_error;
__syncthreads();
// perform the reduction with min
if (sample < 5) {
// note, if equal -> we keep the value with lower sample as for cpu
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 5]
? shrSampleValueErrors[threadIdx.x + 5]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = std::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 5]);
}
__syncthreads();
// a bit of an overkill, but easier than comparing across 3 values
if (sample < 3) {
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 3]
? shrSampleValueErrors[threadIdx.x + 3]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = std::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 3]);
}
__syncthreads();
if (sample < 2) {
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 2]
? shrSampleValueErrors[threadIdx.x + 2]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = std::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 2]);
}
__syncthreads();
if (sample == 0) {
// we only need the max error
const auto maxSampleValueError = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 1]
? shrSampleValueErrors[threadIdx.x + 1]
: shrSampleValueErrors[threadIdx.x];
// # pedestal samples used
pedestal_nums[ch] = num;
// this is used downstream
ampMaxError[ch] = maxSampleValueError;
// DEBUG
#ifdef ECAL_RECO_CUDA_TC_INIT_DEBUG
if (ch == 0) {
printf("pedestal_nums = %d ampMaxError = %f\n", num, maxSampleValueError);
}
#endif
}
}
///
/// launch context parameters: 1 thread per channel
///
//#define DEBUG_TIME_CORRECTION
__global__ void kernel_time_correction_and_finalize(
// SampleVector::Scalar const* g_amplitude,
::ecal::reco::StorageScalarType const* g_amplitudeEB,
::ecal::reco::StorageScalarType const* g_amplitudeEE,
uint16_t const* digis_eb,
uint32_t const* dids_eb,
uint16_t const* digis_ee,
uint32_t const* dids_ee,
float const* amplitudeBinsEB,
float const* amplitudeBinsEE,
float const* shiftBinsEB,
float const* shiftBinsEE,
SampleVector::Scalar const* g_timeMax,
SampleVector::Scalar const* g_timeError,
float const* g_rms_x12,
float const* timeCalibConstant,
float* g_jitterEB,
float* g_jitterEE,
float* g_jitterErrorEB,
float* g_jitterErrorEE,
uint32_t* flagsEB,
uint32_t* flagsEE,
const int amplitudeBinsSizeEB,
const int amplitudeBinsSizeEE,
ConfigurationParameters::type const timeConstantTermEB,
ConfigurationParameters::type const timeConstantTermEE,
float const offsetTimeValueEB,
float const offsetTimeValueEE,
ConfigurationParameters::type const timeNconstEB,
ConfigurationParameters::type const timeNconstEE,
ConfigurationParameters::type const amplitudeThresholdEB,
ConfigurationParameters::type const amplitudeThresholdEE,
ConfigurationParameters::type const outOfTimeThreshG12pEB,
ConfigurationParameters::type const outOfTimeThreshG12pEE,
ConfigurationParameters::type const outOfTimeThreshG12mEB,
ConfigurationParameters::type const outOfTimeThreshG12mEE,
ConfigurationParameters::type const outOfTimeThreshG61pEB,
ConfigurationParameters::type const outOfTimeThreshG61pEE,
ConfigurationParameters::type const outOfTimeThreshG61mEB,
ConfigurationParameters::type const outOfTimeThreshG61mEE,
uint32_t const offsetForHashes,
uint32_t const offsetForInputs,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int inputGtx = gtx >= offsetForInputs ? gtx - offsetForInputs : gtx;
const auto* dids = gtx >= offsetForInputs ? dids_ee : dids_eb;
const auto& digis = gtx >= offsetForInputs ? digis_ee : digis_eb;
// filter out outside of range threads
if (gtx >= nchannels)
return;
// need to ref the right ptrs
#define ARRANGE(var) auto* var = gtx >= offsetForInputs ? var##EE : var##EB
ARRANGE(g_amplitude);
ARRANGE(g_jitter);
ARRANGE(g_jitterError);
ARRANGE(flags);
#undef ARRANGE
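// e.g. ARRANGE(g_jitter) expands to: auto* g_jitter = gtx >= offsetForInputs ? g_jitterEE : g_jitterEB;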
const auto did = DetId{dids[inputGtx]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId())
: offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId());
const auto* amplitudeBins = isBarrel ? amplitudeBinsEB : amplitudeBinsEE;
const auto* shiftBins = isBarrel ? shiftBinsEB : shiftBinsEE;
const auto amplitudeBinsSize = isBarrel ? amplitudeBinsSizeEB : amplitudeBinsSizeEE;
const auto timeConstantTerm = isBarrel ? timeConstantTermEB : timeConstantTermEE;
const auto timeNconst = isBarrel ? timeNconstEB : timeNconstEE;
const auto offsetTimeValue = isBarrel ? offsetTimeValueEB : offsetTimeValueEE;
const auto amplitudeThreshold = isBarrel ? amplitudeThresholdEB : amplitudeThresholdEE;
const auto outOfTimeThreshG12p = isBarrel ? outOfTimeThreshG12pEB : outOfTimeThreshG12pEE;
const auto outOfTimeThreshG12m = isBarrel ? outOfTimeThreshG12mEB : outOfTimeThreshG12mEE;
const auto outOfTimeThreshG61p = isBarrel ? outOfTimeThreshG61pEB : outOfTimeThreshG61pEE;
const auto outOfTimeThreshG61m = isBarrel ? outOfTimeThreshG61mEB : outOfTimeThreshG61mEE;
// load some
const auto amplitude = g_amplitude[inputGtx];
const auto rms_x12 = g_rms_x12[hashedId];
const auto timeCalibConst = timeCalibConstant[hashedId];
int myBin = -1;
for (int bin = 0; bin < amplitudeBinsSize; bin++) {
if (amplitude > amplitudeBins[bin])
myBin = bin;
else
break;
}
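// Piecewise-linear time-shift correction: below the first amplitude bin use shiftBins[0], above the
// last bin use shiftBins[last], otherwise interpolate linearly between the two neighbouring bins.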
ScalarType correction = 0;
if (myBin == -1) {
correction = shiftBins[0];
} else if (myBin == amplitudeBinsSize - 1) {
correction = shiftBins[myBin];
} else {
correction = shiftBins[myBin + 1] - shiftBins[myBin];
correction *= (amplitude - amplitudeBins[myBin]) / (amplitudeBins[myBin + 1] - amplitudeBins[myBin]);
correction += shiftBins[myBin];
}
// correction * 1./25.
correction = correction * 0.04;
const auto timeMax = g_timeMax[gtx];
const auto timeError = g_timeError[gtx];
const auto jitter = timeMax - 5 + correction;
const auto jitterError =
std::sqrt(timeError * timeError + timeConstantTerm * timeConstantTerm * 0.04 * 0.04); // 0.04 = 1./25.
#ifdef DEBUG_TIME_CORRECTION
printf("ch = %d timeMax = %f timeError = %f jitter = %f correction = %f\n",
gtx,
timeMax,
timeError,
jitter,
correction);
// }
#endif
// store back to global
g_jitter[inputGtx] = jitter;
g_jitterError[inputGtx] = jitterError;
// set the flag
// TODO: replace with something more efficient (if required),
// for now just to make it work
if (amplitude > amplitudeThreshold * rms_x12) {
auto threshP = outOfTimeThreshG12p;
auto threshM = outOfTimeThreshG12m;
if (amplitude > 3000.) {
for (int isample = 0; isample < nsamples; isample++) {
int gainid = ecalMGPA::gainId(digis[nsamples * inputGtx + isample]);
if (gainid != 1) {
threshP = outOfTimeThreshG61p;
threshM = outOfTimeThreshG61m;
break;
}
}
}
const auto correctedTime = (timeMax - 5) * 25 + timeCalibConst + offsetTimeValue;
const auto nterm = timeNconst * rms_x12 / amplitude;
const auto sigmat = std::sqrt(nterm * nterm + timeConstantTerm * timeConstantTerm);
if (correctedTime > sigmat * threshP || correctedTime < -sigmat * threshM)
flags[inputGtx] |= 0x1 << EcalUncalibratedRecHit::kOutOfTime;
}
}
} // namespace multifit
} // namespace ecal
namespace layer_norm {
template<typename Ktraits>
__global__ __launch_bounds__(Ktraits::THREADS_PER_CTA)
void ln_bwd_kernel(layer_norm::BwdParams params) {
enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA };
enum { WARPS_M = Ktraits::WARPS_M };
enum { WARPS_N = Ktraits::WARPS_N };
enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW };
enum { COLS = Ktraits::COLS };
enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW };
enum { LDGS = Ktraits::LDGS };
enum { NUM_ELTS = Ktraits::ELTS_PER_LDG };
enum { THREADS_PER_WARP = Ktraits::THREADS_PER_WARP };
enum { CTAS_PER_ROW = Ktraits::CTAS_PER_ROW };
using compute_t = typename Ktraits::compute_t;
using index_t = typename Ktraits::index_t;
using Ivec = typename Ktraits::Ivec;
using Ovec = typename Ktraits::Ovec;
using Wvec = typename Ktraits::Wvec;
using Cvec = typename Ktraits::Cvec;
using Reducer = typename Ktraits::Reducer;
using reduce_t = typename Reducer::Type;
extern __shared__ char smem_[];
const index_t tidx = threadIdx.x;
const index_t bidn = blockIdx.x % CTAS_PER_ROW;
const index_t bidm = blockIdx.x / CTAS_PER_ROW;
const index_t lane = tidx % THREADS_PER_WARP;
const index_t warp = tidx / THREADS_PER_WARP;
const index_t warp_m = warp / Ktraits::WARPS_N;
const index_t warp_n = warp % Ktraits::WARPS_N;
const index_t tid_r = warp_n * THREADS_PER_WARP + lane;
const index_t r = bidm * Ktraits::ROWS_PER_CTA + warp_m;
const index_t c = bidn * THREADS_PER_ROW + warp_n * THREADS_PER_WARP + lane;
static_assert(COLS == THREADS_PER_ROW * LDGS * NUM_ELTS * CTAS_PER_ROW);
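// Thread-to-data mapping: each of the WARPS_M warps handles one row of the CTA's row tile, while along
// a row the THREADS_PER_ROW threads per CTA (times CTAS_PER_ROW CTAs) cover all COLS columns with LDGS
// vectorized loads of NUM_ELTS elements each; that is exactly what the static_assert above encodes.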
Cvec dzy_sum[LDGS];
Cvec dz_sum[LDGS];
memset(dzy_sum, 0, sizeof(dzy_sum));
memset(dz_sum, 0, sizeof(dz_sum));
compute_t * smem_wgrad = reinterpret_cast<compute_t*>(smem_);
char *smem_dgrad = smem_ + Ktraits::SMEM_BYTES_WGRAD;
Reducer reducer(params, bidm, bidn, warp_m, warp_n, lane, smem_dgrad);
Sum<reduce_t> sum;
constexpr float rn = 1.f / float(COLS);
Wvec gamma[LDGS];
index_t idx = c;
#pragma unroll
for( int it = 0; it < LDGS; it++ ) {
gamma[it].load_from(params.gamma, idx);
idx += Ktraits::VEC_COLS_PER_LDG;
}
// TODO if ROWS_PER_CTA does not divide rows, we might get divergence in the
// last blocks with syncthreads!
// grid stride over rows
#pragma unroll 1
for( int row = r; row < params.rows; row += params.ctas_per_col * ROWS_PER_CTA ) {
const compute_t mu_r = static_cast<const compute_t *>(params.mu)[row];
const compute_t rs_r = static_cast<const compute_t *>(params.rs)[row];
Ivec x[LDGS];
Ovec dz[LDGS];
index_t idx = row * Ktraits::VEC_COLS + c;
#pragma unroll
for( int it = 0; it < LDGS; it++ ) {
dz[it].load_from(params.dz, idx);
x[it].load_from(params.x, idx);
idx += Ktraits::VEC_COLS_PER_LDG;
}
compute_t dy[LDGS * NUM_ELTS];
compute_t y[LDGS * NUM_ELTS];
compute_t mdy_local = 0.f;
compute_t mdyy_local = 0.f;
#pragma unroll
for( int it = 0; it < LDGS; it++ ) {
#pragma unroll
for( int jt = 0; jt < NUM_ELTS; jt++ ) {
compute_t x_tmp = x[it].data.elt[jt];
compute_t y_tmp = rs_r * (x_tmp - mu_r);
compute_t dy_tmp = compute_t(gamma[it].data.elt[jt]);
dy_tmp *= compute_t(dz[it].data.elt[jt]);
compute_t dz_tmp = dz[it].data.elt[jt];
mdy_local += dy_tmp;
mdyy_local += dy_tmp * y_tmp;
dy[it * NUM_ELTS + jt] = dy_tmp;
y[it * NUM_ELTS + jt] = y_tmp;
dzy_sum[it].data.elt[jt] += dz_tmp * y_tmp;
dz_sum[it].data.elt[jt] += dz_tmp;
}
}
reduce_t result = reducer.allreduce({mdy_local, mdyy_local}, sum);
mdy_local = layer_norm::Get<0>::of<reduce_t, compute_t>(result) * rn;
mdyy_local = layer_norm::Get<1>::of<reduce_t, compute_t>(result) * rn;
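// mdy_local and mdyy_local are now the per-row means of dy and dy*y; the input gradient below is the
// standard layer-norm backward formula dx = rs * (dy - y * mean(dy*y) - mean(dy)).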
Ivec dx[LDGS];
idx = row * Ktraits::VEC_COLS + c;
#pragma unroll
for( int it = 0; it < LDGS; it++ ) {
#pragma unroll
for( int jt = 0; jt < NUM_ELTS; jt++ ) {
compute_t dy_tmp = dy[it * NUM_ELTS + jt];
compute_t y_tmp = y[it * NUM_ELTS + jt];
compute_t dx_tmp = rs_r * (dy_tmp - (mdyy_local * y_tmp + mdy_local));
dx[it].data.elt[jt] = dx_tmp;
}
dx[it].store_to(params.dx, idx);
idx += Ktraits::VEC_COLS_PER_LDG;
}
} // end: grid stride loop
if( WARPS_M == 1 ) {
idx = r * Ktraits::VEC_COLS + c;
#pragma unroll
for( int it = 0; it < LDGS; it++ ) {
dz_sum[it].store_to(params.dbeta_part, idx);
dzy_sum[it].store_to(params.dgamma_part, idx);
idx += Ktraits::VEC_COLS_PER_LDG;
}
} else {
static_assert(WARPS_M == 1 || Ktraits::CTAS_PER_ROW == 1, "Multiple rows per CTA not supported for Multi-CTA.");
// Finalize reduction of part dgamma and dbeta for this CTA
// by reducing over the rows held across the WARPS_M warps
// Assumption: blockSize divides hidden size.
enum { NUM_RES = COLS / Ktraits::THREADS_PER_CTA };
static_assert(NUM_RES * Ktraits::THREADS_PER_CTA == COLS, "");
idx = warp_m * Ktraits::VEC_COLS + tid_r;
#pragma unroll
for( int it = 0; it < LDGS; it++ ) {
dz_sum[it].store_to(smem_wgrad, idx);
idx += THREADS_PER_ROW;
}
__syncthreads();
compute_t cta_dz_sum[NUM_RES];
memset(cta_dz_sum, 0, sizeof(compute_t) * NUM_RES);
for( int it = 0; it < ROWS_PER_CTA; it++ ) {
for( int jt = 0; jt < NUM_RES; jt++ ) {
cta_dz_sum[jt] += smem_wgrad[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA];
}
}
__syncthreads();
idx = warp_m * Ktraits::VEC_COLS + tid_r;
#pragma unroll
for( int it = 0; it < LDGS; it++ ) {
dzy_sum[it].store_to(smem_wgrad, idx);
idx += THREADS_PER_ROW;
}
__syncthreads();
compute_t cta_dzy_sum[NUM_RES];
memset(cta_dzy_sum, 0, sizeof(compute_t) * NUM_RES);
for( int it = 0; it < ROWS_PER_CTA; it++ ) {
for( int jt = 0; jt < NUM_RES; jt++ ) {
cta_dzy_sum[jt] += smem_wgrad[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA];
}
}
compute_t *dgamma_part = static_cast<compute_t *>(params.dgamma_part) + bidm * COLS + tidx;
for( int jt = 0; jt < NUM_RES; jt++ ) {
*dgamma_part = cta_dzy_sum[jt];
dgamma_part += Ktraits::THREADS_PER_CTA;
}
compute_t *dbeta_part = static_cast<compute_t *>(params.dbeta_part) + bidm * COLS + tidx;
for( int jt = 0; jt < NUM_RES; jt++ ) {
*dbeta_part = cta_dz_sum[jt];
dbeta_part += Ktraits::THREADS_PER_CTA;
}
}
}
template<typename Kernel_traits>
__global__ __launch_bounds__(Kernel_traits::THREADS_PER_CTA)
void ln_bwd_finalize_kernel(BwdParams params)
{
using compute_t = typename Kernel_traits::compute_t;
using weight_t = typename Kernel_traits::weight_t;
using index_t = typename Kernel_traits::index_t;
using Reducer = typename Kernel_traits::Reducer;
using reduce_t = typename Reducer::Type;
Sum<reduce_t> sum;
enum { NUM_ELT = Kernel_traits::ELTS_PER_LDG };
enum { THREADS_PER_WARP = Kernel_traits::THREADS_PER_WARP };
__shared__ char smem_[Kernel_traits::SMEM_BYTES_PER_CTA];
constexpr uint32_t bidm = 0;
const uint32_t bidn = blockIdx.x;
const uint32_t tidx = threadIdx.x;
const uint32_t warp = tidx / THREADS_PER_WARP;
const uint32_t lane = tidx % THREADS_PER_WARP;
Reducer reducer(params, bidm, bidn, 0, 0, lane, smem_);
const uint32_t c = bidn * THREADS_PER_WARP + lane;
const uint32_t c_out = bidn * THREADS_PER_WARP / 2 + lane;
constexpr uint32_t COL_STRIDE = Kernel_traits::CTAS * THREADS_PER_WARP;
for( uint32_t col = c, col_out = c_out; col < Kernel_traits::COLS; col += COL_STRIDE, col_out += COL_STRIDE / 2 ) {
// Each thread sums over NUM_ELT columns.
Vec<compute_t, NUM_ELT> dbeta_local, dgamma_local;
memset(&dgamma_local, 0, sizeof(dgamma_local));
memset(&dbeta_local, 0, sizeof(dbeta_local));
for( uint32_t row = warp; row < params.ctas_per_col; row += Kernel_traits::ROWS_PER_CTA ) {
index_t idx = row * Kernel_traits::COLS + col;
Vec<compute_t, NUM_ELT> dbeta_part, dgamma_part;
dbeta_part.load_from(params.dbeta_part, idx);
dgamma_part.load_from(params.dgamma_part, idx);
#pragma unroll
for( int it = 0; it < NUM_ELT; it++ ) {
dgamma_local.data.elt[it] += dgamma_part.data.elt[it];
dbeta_local.data.elt[it] += dbeta_part.data.elt[it];
}
}
void * smem_gamma = smem_;
void * smem_beta = &smem_[Kernel_traits::SMEM_BYTES_TRANSPOSE];
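// Transposed store into shared memory with an XOR swizzle (write_col = lane ^ write_row), likely to
// avoid shared-memory bank conflicts; the transposed read below undoes it via read_col = w ^ read_row.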
const int write_row = warp;
const int write_col = lane ^ write_row;
const int write_idx = write_row * THREADS_PER_WARP + write_col;
dgamma_local.store_to(smem_gamma, write_idx);
dbeta_local.store_to(smem_beta, write_idx);
__syncthreads();
// It would probably be safe to reuse the first row of smem_beta and smem_gamma
void * smem_gamma_out = &smem_[2 * Kernel_traits::SMEM_BYTES_TRANSPOSE];
void * smem_beta_out = &smem_[2 * Kernel_traits::SMEM_BYTES_TRANSPOSE + Kernel_traits::SMEM_BYTES_OUTPUT];
// More than one iter iff ROWS_PER_CTA < 32.
for( int w = warp; w < THREADS_PER_WARP; w += Kernel_traits::ROWS_PER_CTA ) {
const int read_row = lane;
const int read_col = w ^ read_row;
const int read_idx = read_row * THREADS_PER_WARP + read_col;
memset(&dbeta_local, 0, sizeof(dbeta_local));
memset(&dgamma_local, 0, sizeof(dgamma_local));
// Load beta and gamma transposed
if(read_row < Kernel_traits::ROWS_PER_CTA){
dbeta_local.load_from(smem_beta, read_idx);
dgamma_local.load_from(smem_gamma, read_idx);
}
// Call reducer on the loaded value(s) and convert.
#pragma unroll
for( int it = 0; it < NUM_ELT; it++ ) {
compute_t b_i = dbeta_local.data.elt[it];
compute_t g_i = dgamma_local.data.elt[it];
b_i = reducer.allreduce(b_i, sum);
g_i = reducer.allreduce(g_i, sum);
dgamma_local.data.elt[it] = g_i;
dbeta_local.data.elt[it] = b_i;
}
// Leader stores the result at the current column.
if(lane == 0){
dgamma_local.store_to(smem_gamma_out, w);
dbeta_local.store_to(smem_beta_out, w);
}
}
// All writes done.
__syncthreads();
// Pack and store: 2-wide stores with half the threads.
if( warp == Kernel_traits::ROWS_PER_CTA - 1 && lane < THREADS_PER_WARP / 2 ) {
using src_t = typename TypeToVec2<compute_t>::Type;
using dst_t = typename TypeToVec2<weight_t>::Type;
Vec<src_t, NUM_ELT> dbeta_vec2, dgamma_vec2;
Vec<dst_t, NUM_ELT> dbeta_out2, dgamma_out2;
dgamma_vec2.load_from(smem_gamma_out, lane);
dbeta_vec2.load_from(smem_beta_out, lane);
#pragma unroll
for( int it = 0; it < NUM_ELT; it++ ) {
dgamma_out2.data.elt[it] = Converter<src_t,dst_t>::convert(dgamma_vec2.data.elt[it]);
dbeta_out2.data.elt[it] = Converter<src_t,dst_t>::convert(dbeta_vec2.data.elt[it]);
}
dgamma_out2.store_to(params.dgamma, col_out);
dbeta_out2.store_to(params.dbeta, col_out);
}
}
}
} // namespace layer_norm
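// Illustrative host-side sketch (not part of the original kernel): the shared-memory
// transpose above swizzles the column index with an XOR (write_col = lane ^ write_row,
// read_col = w ^ read_row). For any fixed row r, { lane ^ r : lane = 0..31 } is a
// permutation of 0..31, so both the warp-wide writes and the transposed reads touch 32
// distinct shared-memory banks (assuming 4-byte banks). A minimal host check of that
// property:
static inline bool xor_swizzle_is_conflict_free()
{
    for( int row = 0; row < 32; ++row ) {
        unsigned int banks = 0;                  // bitmask of banks touched by this row
        for( int lane = 0; lane < 32; ++lane ) {
            banks |= 1u << (lane ^ row);         // bank index for this (row, lane) pair
        }
        if( banks != 0xffffffffu ) return false;
    }
    return true;                                 // every row hits all 32 banks exactly once
}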
/**
**************************************************************************
* \file dct8x8_kernel2.cu
* \brief Contains 2nd kernel implementations of DCT and IDCT routines, used in
* JPEG internal data processing. Optimized device code.
*
* This code implements the traditional approach to the forward and inverse Discrete
* Cosine Transform of 8x8 blocks of image pixels, as in the JPEG standard.
* The data processing is done using floating point representation.
* The routine that performs quantization of coefficients can be found in
* the dct8x8_kernel_quantization.cu file.
*/
#pragma once
#include "Common.h"
__constant__ float C_a = 1.387039845322148f; //!< a = (2^0.5) * cos( pi / 16); Used in forward and inverse DCT.
__constant__ float C_b = 1.306562964876377f; //!< b = (2^0.5) * cos( pi / 8); Used in forward and inverse DCT.
__constant__ float C_c = 1.175875602419359f; //!< c = (2^0.5) * cos(3 * pi / 16); Used in forward and inverse DCT.
__constant__ float C_d = 0.785694958387102f; //!< d = (2^0.5) * cos(5 * pi / 16); Used in forward and inverse DCT.
__constant__ float C_e = 0.541196100146197f; //!< e = (2^0.5) * cos(3 * pi / 8); Used in forward and inverse DCT.
__constant__ float C_f = 0.275899379282943f; //!< f = (2^0.5) * cos(7 * pi / 16); Used in forward and inverse DCT.
/**
* Normalization constant that is used in forward and inverse DCT
*/
__constant__ float C_norm = 0.3535533905932737f; // 1 / (8^0.5)
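#include <math.h> // host math for the illustrative check below
/**
* Illustrative host-side sketch (not part of the original sample): the constants above
* are sqrt(2) * cos(k * pi / 16) for k = 1, 2, 3, 5, 6, 7, and C_norm = 1 / sqrt(8).
* They can be regenerated on the host and compared against the values hard-coded above:
*/
static inline bool CheckKernel2DCTConstants()
{
    const double Pi = 3.14159265358979323846;
    const double Expected[7] = {1.387039845322148, 1.306562964876377, 1.175875602419359,
                                0.785694958387102, 0.541196100146197, 0.275899379282943,
                                0.3535533905932737};
    const int K[6] = {1, 2, 3, 5, 6, 7};
    for (int i = 0; i < 6; i++)
    {
        if (fabs(sqrt(2.0) * cos(K[i] * Pi / 16.0) - Expected[i]) > 1e-12)
            return false;
    }
    return fabs(1.0 / sqrt(8.0) - Expected[6]) <= 1e-12;
}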
/**
* Width of data block (2nd kernel)
*/
#define KER2_BLOCK_WIDTH 32
/**
* Height of data block (2nd kernel)
*/
#define KER2_BLOCK_HEIGHT 16
/**
* LOG2 of width of data block (2nd kernel)
*/
#define KER2_BW_LOG2 5
/**
* LOG2 of height of data block (2nd kernel)
*/
#define KER2_BH_LOG2 4
/**
* Stride of shared memory buffer (2nd kernel)
*/
#define KER2_SMEMBLOCK_STRIDE (KER2_BLOCK_WIDTH+1)
/**
**************************************************************************
* Performs in-place DCT of vector of 8 elements.
*
* \param Vect0 [IN] - Pointer to the first element of vector
* \param Step [IN] - Value to add to ptr to access other elements
*
* \return None
*/
__device__ void CUDAsubroutineInplaceDCTvector(float *Vect0, int Step)
{
float *Vect1 = Vect0 + Step;
float *Vect2 = Vect1 + Step;
float *Vect3 = Vect2 + Step;
float *Vect4 = Vect3 + Step;
float *Vect5 = Vect4 + Step;
float *Vect6 = Vect5 + Step;
float *Vect7 = Vect6 + Step;
float X07P = (*Vect0) + (*Vect7);
float X16P = (*Vect1) + (*Vect6);
float X25P = (*Vect2) + (*Vect5);
float X34P = (*Vect3) + (*Vect4);
float X07M = (*Vect0) - (*Vect7);
float X61M = (*Vect6) - (*Vect1);
float X25M = (*Vect2) - (*Vect5);
float X43M = (*Vect4) - (*Vect3);
float X07P34PP = X07P + X34P;
float X07P34PM = X07P - X34P;
float X16P25PP = X16P + X25P;
float X16P25PM = X16P - X25P;
(*Vect0) = C_norm * (X07P34PP + X16P25PP);
(*Vect2) = C_norm * (C_b * X07P34PM + C_e * X16P25PM);
(*Vect4) = C_norm * (X07P34PP - X16P25PP);
(*Vect6) = C_norm * (C_e * X07P34PM - C_b * X16P25PM);
(*Vect1) = C_norm * (C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M);
(*Vect3) = C_norm * (C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M);
(*Vect5) = C_norm * (C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M);
(*Vect7) = C_norm * (C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M);
}
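/**
* Illustrative host-side reference (not part of the original sample). The butterfly above
* computes the orthonormal 8-point DCT-II,
*     X_k = c_k * sum_{n=0..7} x_n * cos((2n+1) * k * pi / 16),
* with c_0 = 1/sqrt(8) and c_k = 1/2 for k > 0, so a naive O(N^2) routine such as the
* following can be used to cross-check the fast version on the host:
*/
static inline void HostReferenceDCT8(const float *In, float *Out)
{
    const double Pi = 3.14159265358979323846;
    for (int k = 0; k < 8; k++)
    {
        const double Ck = (k == 0) ? (1.0 / sqrt(8.0)) : 0.5;
        double Acc = 0.0;
        for (int n = 0; n < 8; n++)
            Acc += In[n] * cos((2 * n + 1) * k * Pi / 16.0);
        Out[k] = (float)(Ck * Acc);
    }
}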
/**
**************************************************************************
* Performs in-place IDCT of vector of 8 elements.
*
* \param Vect0 [IN] - Pointer to the first element of vector
* \param Step [IN] - Value to add to ptr to access other elements
*
* \return None
*/
__device__ void CUDAsubroutineInplaceIDCTvector(float *Vect0, int Step)
{
float *Vect1 = Vect0 + Step;
float *Vect2 = Vect1 + Step;
float *Vect3 = Vect2 + Step;
float *Vect4 = Vect3 + Step;
float *Vect5 = Vect4 + Step;
float *Vect6 = Vect5 + Step;
float *Vect7 = Vect6 + Step;
float Y04P = (*Vect0) + (*Vect4);
float Y2b6eP = C_b * (*Vect2) + C_e * (*Vect6);
float Y04P2b6ePP = Y04P + Y2b6eP;
float Y04P2b6ePM = Y04P - Y2b6eP;
float Y7f1aP3c5dPP = C_f * (*Vect7) + C_a * (*Vect1) + C_c * (*Vect3) + C_d * (*Vect5);
float Y7a1fM3d5cMP = C_a * (*Vect7) - C_f * (*Vect1) + C_d * (*Vect3) - C_c * (*Vect5);
float Y04M = (*Vect0) - (*Vect4);
float Y2e6bM = C_e * (*Vect2) - C_b * (*Vect6);
float Y04M2e6bMP = Y04M + Y2e6bM;
float Y04M2e6bMM = Y04M - Y2e6bM;
float Y1c7dM3f5aPM = C_c * (*Vect1) - C_d * (*Vect7) - C_f * (*Vect3) - C_a * (*Vect5);
float Y1d7cP3a5fMM = C_d * (*Vect1) + C_c * (*Vect7) - C_a * (*Vect3) + C_f * (*Vect5);
(*Vect0) = C_norm * (Y04P2b6ePP + Y7f1aP3c5dPP);
(*Vect7) = C_norm * (Y04P2b6ePP - Y7f1aP3c5dPP);
(*Vect4) = C_norm * (Y04P2b6ePM + Y7a1fM3d5cMP);
(*Vect3) = C_norm * (Y04P2b6ePM - Y7a1fM3d5cMP);
(*Vect1) = C_norm * (Y04M2e6bMP + Y1c7dM3f5aPM);
(*Vect5) = C_norm * (Y04M2e6bMM - Y1d7cP3a5fMM);
(*Vect2) = C_norm * (Y04M2e6bMM + Y1d7cP3a5fMM);
(*Vect6) = C_norm * (Y04M2e6bMP - Y1c7dM3f5aPM);
}
/**
**************************************************************************
* Performs 8x8 block-wise Forward Discrete Cosine Transform of the given
* image plane and outputs result to the array of coefficients. 2nd implementation.
* This kernel is designed to process the image in tiles of 8x8 blocks,
* utilizing full warp capacity, under the assumption that 8 threads
* per 8x8 block are sufficient.
*
* \param SrcDst [OUT] - Coefficients plane
* \param ImgStride [IN] - Stride of SrcDst
*
* \return None
*/
__global__ void CUDAkernel2DCT(float *SrcDst, int ImgStride)
{
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
register int reg1, reg2;
//int OffsThreadInRow = FMUL(threadIdx.y, BLOCK_SIZE) + threadIdx.x;
reg1 = threadIdx.y;
reg1 <<= BLOCK_SIZE_LOG2;
reg1 += threadIdx.x;
int OffsThreadInRow = reg1;
//int OffsThreadInCol = FMUL(threadIdx.z, BLOCK_SIZE);
reg1 = threadIdx.z;
reg1 <<= BLOCK_SIZE_LOG2;
int OffsThreadInCol = reg1;
//SrcDst += FMUL(FMUL(blockIdx.y, KER2_BLOCK_HEIGHT) + OffsThreadInCol, ImgStride) + FMUL(blockIdx.x, KER2_BLOCK_WIDTH) + OffsThreadInRow;
reg1 = blockIdx.y;
reg1 <<= KER2_BH_LOG2;
reg1 += OffsThreadInCol;
reg1 = FMUL(reg1, ImgStride);
reg2 = blockIdx.x;
reg2 <<= KER2_BW_LOG2;
reg1 += reg2;
reg1 += OffsThreadInRow;
SrcDst += reg1;
//float *bl_ptr = block + FMUL(OffsThreadInCol, KER2_SMEMBLOCK_STRIDE) + OffsThreadInRow;
reg1 = OffsThreadInCol;
reg1 = FMUL(reg1, KER2_SMEMBLOCK_STRIDE);
reg1 += OffsThreadInRow;
float *bl_ptr = block + reg1;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
#if 1
__syncthreads();
#endif
//process columns
CUDAsubroutineInplaceDCTvector(block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow, KER2_SMEMBLOCK_STRIDE);
#if 1
__syncthreads();
#endif
//process rows
CUDAsubroutineInplaceDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
#if 1
__syncthreads();
#endif
bl_ptr = block + reg1;
SrcDst -= FMUL(ImgStride, 7);
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
}
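/**
* Illustrative launch sketch (not part of the original sample). The index arithmetic
* above implies a thread block of (BLOCK_SIZE, KER2_BLOCK_WIDTH / BLOCK_SIZE,
* KER2_BLOCK_HEIGHT / BLOCK_SIZE) threads -- i.e. (8, 4, 2) assuming BLOCK_SIZE == 8
* from Common.h -- and one block per KER2_BLOCK_WIDTH x KER2_BLOCK_HEIGHT (32x16) tile,
* so the image dimensions are assumed to be multiples of 32 and 16 respectively:
*/
#if 0
void LaunchCUDAkernel2DCT(float *PlaneDevice, int ImgWidth, int ImgHeight, int ImgStride)
{
    dim3 Threads(BLOCK_SIZE, KER2_BLOCK_WIDTH / BLOCK_SIZE, KER2_BLOCK_HEIGHT / BLOCK_SIZE);
    dim3 Grid(ImgWidth / KER2_BLOCK_WIDTH, ImgHeight / KER2_BLOCK_HEIGHT);
    CUDAkernel2DCT<<<Grid, Threads>>>(PlaneDevice, ImgStride);
}
#endif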
/**
**************************************************************************
* Performs 8x8 block-wise Inverse Discrete Cosine Transform of the given
* coefficients plane and outputs result to the image. 2nd implementation.
* This kernel is designed to process the image in tiles of 8x8 blocks,
* utilizing full warp capacity, under the assumption that 8 threads
* per 8x8 block are sufficient.
*
* \param SrcDst [OUT] - Coefficients plane
* \param ImgStride [IN] - Stride of SrcDst
*
* \return None
*/
__global__ void CUDAkernel2IDCT(float *SrcDst, int ImgStride)
{
__shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE];
register int reg1, reg2;
//int OffsThreadInRow = FMUL(threadIdx.y, BLOCK_SIZE) + threadIdx.x;
reg1 = threadIdx.y;
reg1 <<= BLOCK_SIZE_LOG2;
reg1 += threadIdx.x;
int OffsThreadInRow = reg1;
//int OffsThreadInCol = FMUL(threadIdx.z, BLOCK_SIZE);
reg1 = threadIdx.z;
reg1 <<= BLOCK_SIZE_LOG2;
int OffsThreadInCol = reg1;
//SrcDst += FMUL(FMUL(blockIdx.y, KER2_BLOCK_HEIGHT) + OffsThreadInCol, ImgStride) + FMUL(blockIdx.x, KER2_BLOCK_WIDTH) + OffsThreadInRow;
reg1 = blockIdx.y;
reg1 <<= KER2_BH_LOG2;
reg1 += OffsThreadInCol;
reg1 = FMUL(reg1, ImgStride);
reg2 = blockIdx.x;
reg2 <<= KER2_BW_LOG2;
reg1 += reg2;
reg1 += OffsThreadInRow;
SrcDst += reg1;
//float *bl_ptr = block + FMUL(OffsThreadInCol, KER2_SMEMBLOCK_STRIDE) + OffsThreadInRow;
reg1 = OffsThreadInCol;
reg1 = FMUL(reg1, KER2_SMEMBLOCK_STRIDE);
reg1 += OffsThreadInRow;
float *bl_ptr = block + reg1;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(bl_ptr) = *(SrcDst);
#if 1
__syncthreads();
#endif
//process columns
CUDAsubroutineInplaceIDCTvector(block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow, KER2_SMEMBLOCK_STRIDE);
#if 1
__syncthreads();
#endif
//process rows
CUDAsubroutineInplaceIDCTvector(block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow - threadIdx.x, 1);
#if 1
__syncthreads();
#endif
bl_ptr = block + reg1;
SrcDst -= FMUL(ImgStride, 7);
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
SrcDst += ImgStride;
bl_ptr += KER2_SMEMBLOCK_STRIDE;
*(SrcDst) = *(bl_ptr);
}
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/strings/split/split.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h> // upper_bound()
#include <thrust/copy.h> // copy_if()
#include <thrust/count.h> // count_if()
#include <thrust/reduce.h> // maximum()
#include <thrust/transform.h> // transform()
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<const char*, size_type>;
namespace {
/**
* @brief Base class for delimiter-based tokenizers.
*
* These are common methods used by both split and rsplit tokenizer functors.
*/
struct base_split_tokenizer {
__device__ const char* get_base_ptr() const
{
return d_strings.child(strings_column_view::chars_column_index).data<char>();
}
__device__ string_view const get_string(size_type idx) const
{
return d_strings.element<string_view>(idx);
}
__device__ bool is_valid(size_type idx) const { return d_strings.is_valid(idx); }
/**
* @brief Initialize token elements for all strings.
*
* The process_tokens() only handles creating tokens for strings that contain
* delimiters. This function will initialize the output tokens for all
* strings by assigning null entries for null and empty strings and the
* string itself for strings with no delimiters.
*
* The tokens are placed in output order so that all tokens for each output
* column are stored consecutively in `d_all_tokens`.
*
* @param idx Index of string in column
* @param column_count Number of columns in output
* @param d_all_tokens Tokens vector for all strings
*/
__device__ void init_tokens(size_type idx,
size_type column_count,
string_index_pair* d_all_tokens) const
{
auto d_tokens = d_all_tokens + idx;
if (is_valid(idx)) {
auto d_str = get_string(idx);
*d_tokens = string_index_pair{d_str.data(), d_str.size_bytes()};
--column_count;
d_tokens += d_strings.size();
}
// this is like fill() but output needs to be strided
for (size_type col = 0; col < column_count; ++col)
d_tokens[d_strings.size() * col] = string_index_pair{nullptr, 0};
}
base_split_tokenizer(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: d_strings(d_strings), d_delimiter(d_delimiter), max_tokens(max_tokens)
{
}
protected:
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
size_type max_tokens;
};
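// Illustrative sketch (not part of the original source): the tokenizers above and below
// lay tokens out column-major, i.e. the token that lands in output column `col` for
// string `str_idx` lives at d_all_tokens[col * strings_count + str_idx], so every output
// column is a contiguous run of string_index_pair entries. For example, with 3 strings
// and 2 output columns, column 0 occupies slots [0, 3) and column 1 occupies slots [3, 6).
inline size_type token_slot(size_type col, size_type str_idx, size_type strings_count)
{
  return col * strings_count + str_idx;  // column-major token layout
}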
/**
* @brief The tokenizer functions for split().
*
* The methods here count delimiters, tokens, and output token elements
* for each string in a strings column.
*/
struct split_tokenizer_fn : base_split_tokenizer {
/**
* @brief This will create tokens around each delimiter, honoring the boundaries of the
* string in which the delimiter resides.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the delimiter in the chars column
* @param d_token_counts Token counts for each string
* @param d_positions The beginning byte position of each delimiter
* @param positions_count Number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx,
size_type const* d_token_counts,
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
string_index_pair* d_all_tokens) const
{
size_type str_idx = d_indexes[idx];
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // the first delimiter for the string rules them all
--str_idx; // all of these are off by 1 from the upper_bound call
size_type token_count = d_token_counts[str_idx]; // max_tokens already included
const char* const base_ptr = get_base_ptr(); // d_positions values are based on this ptr
// this string's tokens output
auto d_tokens = d_all_tokens + str_idx;
// this string
const string_view d_str = get_string(str_idx);
const char* str_ptr = d_str.data(); // beginning of the string
const char* const str_end_ptr = str_ptr + d_str.size_bytes(); // end of the string
// build the index-pair of each token for this string
for (size_type col = 0; col < token_count; ++col) {
auto next_delim = ((idx + col) < positions_count) // boundary check for delims in last string
? (base_ptr + d_positions[idx + col]) // start of next delimiter
: str_end_ptr; // or end of this string
auto eptr = (next_delim < str_end_ptr) // make sure delimiter is inside this string
&& (col + 1 < token_count) // and this is not the last token
? next_delim
: str_end_ptr;
// store the token into the output vector
d_tokens[col * d_strings.size()] =
string_index_pair{str_ptr, static_cast<size_type>(eptr - str_ptr)};
// point past this delimiter
str_ptr = eptr + d_delimiter.size_bytes();
}
}
/**
* @brief Returns `true` if the byte at `idx` is the start of the delimiter.
*
* @param idx Index of a byte in the chars column.
* @param d_offsets Offsets values to locate the chars ranges.
* @param chars_bytes Total number of characters to process.
* @return true if delimiter is found starting at position `idx`
*/
__device__ bool is_delimiter(size_type idx, // chars index
int32_t const* d_offsets,
size_type chars_bytes) const
{
auto d_chars = get_base_ptr() + d_offsets[0];
if (idx + d_delimiter.size_bytes() > chars_bytes) return false;
return d_delimiter.compare(d_chars + idx, d_delimiter.size_bytes()) == 0;
}
/**
* @brief This counts the tokens for strings that contain delimiters.
*
* @param idx Index of a delimiter
* @param d_positions Start positions of all the delimiters
* @param positions_count The number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_counts The token counts for all the strings
*/
__device__ void count_tokens(size_type idx, // delimiter index
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
size_type* d_counts) const
{
size_type str_idx = d_indexes[idx];
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // first delimiter found handles all of them for this string
auto const delim_length = d_delimiter.size_bytes();
string_view const d_str = get_string(str_idx - 1);
const char* const base_ptr = get_base_ptr();
size_type delim_count = 0; // re-count delimiters to compute the token-count
size_type last_pos = d_positions[idx] - delim_length;
while ((idx < positions_count) && (d_indexes[idx] == str_idx)) {
// make sure the whole delimiter is inside the string before counting it
auto d_pos = d_positions[idx];
if (((base_ptr + d_pos + delim_length - 1) < (d_str.data() + d_str.size_bytes())) &&
((d_pos - last_pos) >= delim_length)) {
++delim_count; // only count if the delimiter fits
last_pos = d_pos; // overlapping delimiters are ignored too
}
++idx;
}
// the number of tokens is delim_count+1 but capped to max_tokens
d_counts[str_idx - 1] =
((max_tokens > 0) && (delim_count + 1 > max_tokens)) ? max_tokens : delim_count + 1;
}
split_tokenizer_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: base_split_tokenizer(d_strings, d_delimiter, max_tokens)
{
}
};
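// Illustrative host-side sketch (not part of the original source) of the token-count
// rule implemented by count_tokens() above for non-null strings: delimiters are counted
// left-to-right, overlapping matches are skipped, and the result is (delimiter_count + 1)
// capped at max_tokens when max_tokens > 0.
inline size_type expected_token_count(char const* str,
                                      size_type str_bytes,
                                      char const* delim,
                                      size_type delim_bytes,
                                      size_type max_tokens)
{
  if (delim_bytes <= 0) return 1;  // whitespace splitting is handled by a separate tokenizer
  size_type count = 0;
  size_type pos   = 0;
  while (pos + delim_bytes <= str_bytes) {
    bool match = true;
    for (size_type i = 0; i < delim_bytes; ++i)
      if (str[pos + i] != delim[i]) {
        match = false;
        break;
      }
    if (match) {
      ++count;
      pos += delim_bytes;  // stepping past the match ignores overlapping hits
    } else {
      ++pos;
    }
  }
  size_type tokens = count + 1;
  return (max_tokens > 0 && tokens > max_tokens) ? max_tokens : tokens;
}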
/**
* @brief The tokenizer functions for split().
*
* The methods here count delimiters, tokens, and output token elements
* for each string in a strings column.
*
* Same as split_tokenizer_fn except tokens are counted from the end of each string.
*/
struct rsplit_tokenizer_fn : base_split_tokenizer {
/**
* @brief This will create tokens around each delimiter, honoring the boundaries of the
* string in which the delimiter resides.
*
* The tokens are processed from the end of each string so the `max_tokens`
* is honored correctly.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the delimiter in the chars column
* @param d_token_counts Token counts for each string
* @param d_positions The ending byte position of each delimiter
* @param positions_count Number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx, // delimiter position index
size_type const* d_token_counts, // token counts for each string
size_type const* d_positions, // end of each delimiter
size_type positions_count, // total number of delimiters
size_type const* d_indexes, // string indices for each delimiter
string_index_pair* d_all_tokens) const
{
size_type str_idx = d_indexes[idx];
if ((idx + 1 < positions_count) && d_indexes[idx + 1] == str_idx)
return; // the last delimiter for the string rules them all
--str_idx; // all of these are off by 1 from the upper_bound call
size_type token_count = d_token_counts[str_idx]; // max_tokens already included
const char* const base_ptr = get_base_ptr(); // d_positions values are based on this ptr
// this string's tokens output
auto d_tokens = d_all_tokens + str_idx;
// this string
const string_view d_str = get_string(str_idx);
const char* const str_begin_ptr = d_str.data(); // beginning of the string
const char* str_ptr = str_begin_ptr + d_str.size_bytes(); // end of the string
// build the index-pair of each token for this string
for (size_type col = 0; col < token_count; ++col) {
auto prev_delim = (idx >= col) // boundary check for delims in first string
? (base_ptr + d_positions[idx - col] + 1) // end of prev delimiter
: str_begin_ptr; // or the start of this string
auto sptr = (prev_delim > str_begin_ptr) // make sure delimiter is inside the string
&& (col + 1 < token_count) // and this is not the last token
? prev_delim
: str_begin_ptr;
// store the token into the output -- building the array backwards
d_tokens[d_strings.size() * (token_count - 1 - col)] =
string_index_pair{sptr, static_cast<size_type>(str_ptr - sptr)};
str_ptr = sptr - d_delimiter.size_bytes(); // get ready for the next prev token
}
}
/**
* @brief Returns `true` if the byte at `idx` is the end of the delimiter.
*
* @param idx Index of a byte in the chars column.
* @param d_offsets Offsets values to locate the chars ranges.
* @return true if delimiter is found ending at position `idx`
*/
__device__ bool is_delimiter(size_type idx, int32_t const* d_offsets, size_type) const
{
auto delim_length = d_delimiter.size_bytes();
if (idx < delim_length - 1) return false;
auto d_chars = get_base_ptr() + d_offsets[0];
return d_delimiter.compare(d_chars + idx - (delim_length - 1), delim_length) == 0;
}
/**
* @brief This counts the tokens for strings that contain delimiters.
*
* Token counting starts at the end of the string to honor the `max_tokens`
* appropriately.
*
* @param idx Index of a delimiter
* @param d_positions End positions of all the delimiters
* @param positions_count The number of delimiters
* @param d_indexes Indices of the strings for each delimiter
* @param d_counts The token counts for all the strings
*/
__device__ void count_tokens(size_type idx,
size_type const* d_positions,
size_type positions_count,
size_type const* d_indexes,
size_type* d_counts) const
{
size_type str_idx = d_indexes[idx]; // 1-based string index created by upper_bound()
if ((idx > 0) && d_indexes[idx - 1] == str_idx)
return; // first delimiter found handles all of them for this string
auto const delim_length = d_delimiter.size_bytes();
const string_view d_str = get_string(str_idx - 1); // -1 for 0-based index
const char* const base_ptr = get_base_ptr();
size_type delim_count = 0;
size_type last_pos = d_positions[idx] - delim_length;
while ((idx < positions_count) && (d_indexes[idx] == str_idx)) {
// make sure the whole delimiter is inside the string before counting it
auto d_pos = d_positions[idx];
if (((base_ptr + d_pos + 1 - delim_length) >= d_str.data()) &&
((d_pos - last_pos) >= delim_length)) {
++delim_count; // only count if the delimiter fits
last_pos = d_pos; // overlapping delimiters are also ignored
}
++idx;
}
// the number of tokens is delim_count+1 but capped to max_tokens
d_counts[str_idx - 1] =
((max_tokens > 0) && (delim_count + 1 > max_tokens)) ? max_tokens : delim_count + 1;
}
rsplit_tokenizer_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
size_type max_tokens)
: base_split_tokenizer(d_strings, d_delimiter, max_tokens)
{
}
};
/**
* @brief Generic split function called by split() and rsplit().
*
* This function will first count the number of delimiters in the entire strings
* column. Next it records the position of all the delimiters. These positions
* are used for the remainder of the code to build string_index_pair elements
* for each output column.
*
* The number of tokens for each string is computed by analyzing the delimiter
* position values and mapping them to each string.
* The number of output columns is determined by the string with the most tokens.
* Next the `string_index_pairs` for the entire column are created using the
* delimiter positions and their string indices vector.
*
* Finally, each column is built by creating a vector of tokens (`string_index_pairs`)
* according to their position in each string. The first token from each string goes
* into the first output column, the 2nd token from each string goes into the 2nd
* output column, etc.
*
* Output should be comparable to Pandas `split()` with `expand=True` but the
* rows/columns are transposed.
*
* ```
* import pandas as pd
* pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__'])
* print(pd_series.str.split(pat='_', expand=True))
* 0 1 2 3 4 5 6
* 0 '' None None None None None None
* 1 None None None None None None None
* 2 a b None None None None None
* 3 '' a b '' None None None
* 4 '' '' aa '' bb '' ''
* 5 '' a '' bbb '' '' c
* 6 '' aa b '' ccc '' ''
*
* print(pd_series.str.split(pat='_', n=1, expand=True))
* 0 1
* 0 '' None
* 1 None None
* 2 a b
* 3 '' a_b_
* 4 '' _aa__bb__
* 5 '' a__bbb___c
* 6 '' aa_b__ccc__
*
* print(pd_series.str.split(pat='_', n=2, expand=True))
* 0 1 2
* 0 '' None None
* 1 None None None
* 2 a b None
* 3 '' a b_
* 4 '' '' aa__bb__
* 5 '' a _bbb___c
* 6 '' aa b__ccc__
* ```
*
* @tparam Tokenizer provides unique functions for split/rsplit.
* @param strings_column The strings to split
* @param tokenizer Tokenizer for counting and producing tokens
* @return table of columns for the output of the split
*/
template <typename Tokenizer>
std::unique_ptr<table> split_fn(strings_column_view const& strings_column,
Tokenizer tokenizer,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<std::unique_ptr<column>> results;
auto const strings_count = strings_column.size();
if (strings_count == 0) {
results.push_back(make_empty_column(data_type{type_id::STRING}));
return std::make_unique<table>(std::move(results));
}
auto d_offsets = strings_column.offsets().data<int32_t>();
d_offsets += strings_column.offset(); // nvbug-2808421 : do not combine with the previous line
auto const chars_bytes =
cudf::detail::get_value<int32_t>(
strings_column.offsets(), strings_column.offset() + strings_count, stream) -
cudf::detail::get_value<int32_t>(strings_column.offsets(), strings_column.offset(), stream);
// count the number of delimiters in the entire column
auto const delimiter_count =
thrust::count_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(chars_bytes),
[tokenizer, d_offsets, chars_bytes] __device__(size_type idx) {
return tokenizer.is_delimiter(idx, d_offsets, chars_bytes);
});
// create vector of every delimiter position in the chars column
rmm::device_uvector<size_type> delimiter_positions(delimiter_count, stream);
auto d_positions = delimiter_positions.data();
auto copy_end = thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(chars_bytes),
delimiter_positions.begin(),
[tokenizer, d_offsets, chars_bytes] __device__(size_type idx) {
return tokenizer.is_delimiter(idx, d_offsets, chars_bytes);
});
// create vector of string indices for each delimiter
rmm::device_uvector<size_type> string_indices(delimiter_count, stream); // these will
auto d_string_indices = string_indices.data(); // be strings that only contain delimiters
thrust::upper_bound(rmm::exec_policy(stream),
d_offsets,
d_offsets + strings_count,
delimiter_positions.begin(),
copy_end,
string_indices.begin());
// compute the number of tokens per string
rmm::device_uvector<size_type> token_counts(strings_count, stream);
auto d_token_counts = token_counts.data();
// first, initialize token counts for strings without delimiters in them
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_token_counts,
[tokenizer] __device__(size_type idx) {
// null are 0, all others 1
return static_cast<size_type>(tokenizer.is_valid(idx));
});
// now compute the number of tokens in each string
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
delimiter_count,
[tokenizer, d_positions, delimiter_count, d_string_indices, d_token_counts] __device__(
size_type idx) {
tokenizer.count_tokens(idx, d_positions, delimiter_count, d_string_indices, d_token_counts);
});
// the columns_count is the maximum number of tokens for any string
auto const columns_count = thrust::reduce(rmm::exec_policy(stream),
token_counts.begin(),
token_counts.end(),
0,
thrust::maximum<size_type>{});
// boundary case: if no columns, return one null column (custrings issue #119)
if (columns_count == 0) {
results.push_back(std::make_unique<column>(
data_type{type_id::STRING},
strings_count,
rmm::device_buffer{0, stream, mr}, // no data
cudf::detail::create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr),
strings_count));
}
// create working area to hold all token positions
rmm::device_uvector<string_index_pair> tokens(columns_count * strings_count, stream);
string_index_pair* d_tokens = tokens.data();
// initialize the token positions
// -- accounts for nulls, empty, and strings with no delimiter in them
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[tokenizer, columns_count, d_tokens] __device__(size_type idx) {
tokenizer.init_tokens(idx, columns_count, d_tokens);
});
// get the positions for every token using the delimiter positions
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
delimiter_count,
[tokenizer,
d_token_counts,
d_positions,
delimiter_count,
d_string_indices,
d_tokens] __device__(size_type idx) {
tokenizer.process_tokens(
idx, d_token_counts, d_positions, delimiter_count, d_string_indices, d_tokens);
});
// Create each column.
// - Each pair points to the strings for that column for each row.
// - Create the strings column from the vector using the strings factory.
for (size_type col = 0; col < columns_count; ++col) {
auto column_tokens = d_tokens + (col * strings_count);
results.emplace_back(
make_strings_column(column_tokens, column_tokens + strings_count, stream, mr));
}
return std::make_unique<table>(std::move(results));
}
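// Illustrative host-side sketch (not part of the original source) of the
// delimiter-position -> string-index mapping used in split_fn() above: upper_bound over
// the string start offsets yields, for each delimiter byte position, the 1-based index
// of the string containing it (the tokenizers then subtract 1). For example, with start
// offsets {0, 3, 3} for three strings of sizes 3, 0, and 5, a delimiter at byte 4 maps
// to index 3, i.e. the third string, which spans bytes [3, 8).
inline size_type string_index_for_position(int32_t const* offsets,
                                           size_type strings_count,
                                           size_type position)
{
  // equivalent of the thrust::upper_bound call above: index of the first start offset
  // strictly greater than `position`, interpreted as a 1-based string index
  size_type idx = 0;
  while (idx < strings_count && offsets[idx] <= position) ++idx;
  return idx;
}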
/**
* @brief Base class for whitespace tokenizers.
*
* These are common methods used by both split and rsplit tokenizer functors.
*/
struct base_whitespace_split_tokenizer {
// count tokens, i.e. runs of non-whitespace characters
__device__ size_type count_tokens(size_type idx) const
{
if (d_strings.is_null(idx)) return 0;
const string_view d_str = d_strings.element<string_view>(idx);
size_type token_count = 0;
// run of whitespace is considered a single delimiter
bool spaces = true;
auto itr = d_str.begin();
while (itr != d_str.end()) {
char_utf8 ch = *itr;
if (spaces == (ch <= ' '))
itr++;
else {
token_count += static_cast<size_type>(spaces);
spaces = !spaces;
}
}
if (max_tokens && (token_count > max_tokens)) token_count = max_tokens;
if (token_count == 0) token_count = 1; // always at least 1 token
return token_count;
}
base_whitespace_split_tokenizer(column_device_view const& d_strings, size_type max_tokens)
: d_strings(d_strings), max_tokens(max_tokens)
{
}
protected:
column_device_view const d_strings;
size_type max_tokens; // maximum number of tokens
};
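// Illustrative host-side sketch (not part of the original source) of the whitespace
// token-count rule above: any run of characters <= ' ' acts as a single delimiter, the
// count is capped at max_tokens (when non-zero), and non-null strings always report at
// least one token. This is a byte-level approximation; count_tokens() above iterates
// UTF-8 code points, but the two agree for ASCII input.
inline size_type expected_whitespace_token_count(char const* str,
                                                 size_type bytes,
                                                 size_type max_tokens)
{
  size_type token_count = 0;
  bool in_token         = false;
  for (size_type i = 0; i < bytes; ++i) {
    auto const c        = static_cast<unsigned char>(str[i]);
    bool const is_space = (c <= ' ');           // whitespace test matching count_tokens()
    if (!is_space && !in_token) ++token_count;  // a new token starts here
    in_token = !is_space;
  }
  if (max_tokens && token_count > max_tokens) token_count = max_tokens;
  return token_count == 0 ? 1 : token_count;  // always at least 1 token
}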
/**
* @brief The tokenizer functions for split() with whitespace.
*
* The whitespace tokenizer has no delimiter and handles one or more
* consecutive whitespace characters as a single delimiter.
*/
struct whitespace_split_tokenizer_fn : base_whitespace_split_tokenizer {
/**
* @brief This will create tokens delimited by runs of whitespace characters.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the string to process
* @param d_token_counts Token counts for each string
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx,
size_type const* d_token_counts,
string_index_pair* d_all_tokens) const
{
string_index_pair* d_tokens = d_all_tokens + idx;
if (d_strings.is_null(idx)) return;
string_view const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
whitespace_string_tokenizer tokenizer(d_str);
size_type token_count = d_token_counts[idx];
size_type token_idx = 0;
position_pair token{0, 0};
while (tokenizer.next_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_tokens[d_strings.size() * (token_idx++)] =
string_index_pair{d_str.data() + token.first, (token.second - token.first)};
}
if (token_count == max_tokens)
d_tokens[d_strings.size() * (token_idx - 1)] =
string_index_pair{d_str.data() + token.first, (d_str.size_bytes() - token.first)};
}
whitespace_split_tokenizer_fn(column_device_view const& d_strings, size_type max_tokens)
: base_whitespace_split_tokenizer(d_strings, max_tokens)
{
}
};
/**
* @brief The tokenizer functions for rsplit() with whitespace.
*
* The whitespace tokenizer has no delimiter and handles one or more
* consecutive whitespace characters as a single delimiter.
*
* This one processes tokens from the end of each string.
*/
struct whitespace_rsplit_tokenizer_fn : base_whitespace_split_tokenizer {
/**
* @brief This will create tokens delimited by runs of whitespace characters.
*
* Each token is placed in `d_all_tokens` so they align consecutively
* with other tokens for the same output column.
* That is, `d_tokens[col * strings_count + string_index]` is the token at column `col`
* for string at `string_index`.
*
* @param idx Index of the string to process
* @param d_token_counts Token counts for each string
* @param d_all_tokens All output tokens for the strings column
*/
__device__ void process_tokens(size_type idx, // string position index
size_type const* d_token_counts,
string_index_pair* d_all_tokens) const
{
string_index_pair* d_tokens = d_all_tokens + idx;
if (d_strings.is_null(idx)) return;
string_view const d_str = d_strings.element<cudf::string_view>(idx);
if (d_str.empty()) return;
whitespace_string_tokenizer tokenizer(d_str, true);
size_type token_count = d_token_counts[idx];
size_type token_idx = 0;
position_pair token{0, 0};
while (tokenizer.prev_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_tokens[d_strings.size() * (token_count - 1 - token_idx)] =
string_index_pair{d_str.data() + token.first, (token.second - token.first)};
++token_idx;
}
if (token_count == max_tokens)
d_tokens[d_strings.size() * (token_count - token_idx)] =
string_index_pair{d_str.data(), token.second};
}
whitespace_rsplit_tokenizer_fn(column_device_view const& d_strings, size_type max_tokens)
: base_whitespace_split_tokenizer(d_strings, max_tokens)
{
}
};
/**
* @brief Generic split function called by split() and rsplit() using whitespace as a delimiter.
*
* The number of tokens for each string is computed by counting consecutive characters
* between runs of whitespace in each string. The number of output columns is determined
* by the string with the most tokens. Next the string_index_pairs for the entire column
* is created.
*
* Finally, each column is built by creating a vector of tokens (string_index_pairs)
* according to their position in each string. The first token from each string goes
* into the first output column, the 2nd token from each string goes into the 2nd
* output column, etc.
*
* This can be compared to Pandas `split()` with no delimiter and with `expand=True` but
* with the rows/columns transposed.
*
* import pandas as pd
* pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc '])
* print(pd_series.str.split(pat=None, expand=True))
* 0 1 2
* 0 None None None
* 1 None None None
* 2 a b None
* 3 a b None
* 4 aa bb None
* 5 a bbb c
* 6 aa b ccc
*
* print(pd_series.str.split(pat=None, n=1, expand=True))
* 0 1
* 0 None None
* 1 None None
* 2 a b
* 3 a b
* 4 aa bb
* 5 a bbb c
* 6 aa b ccc
*
* print(pd_series.str.split(pat=None, n=2, expand=True))
* 0 1 2
* 0 None None None
* 1 None None None
* 2 a b None
* 3 a b None
* 4 aa bb None
* 5 a bbb c
* 6 aa b ccc
*
* @tparam Tokenizer provides unique functions for split/rsplit.
* @param strings_count The number of strings in the column
* @param tokenizer Tokenizer for counting and producing tokens
* @return table of columns for the output of the split
*/
template <typename Tokenizer>
std::unique_ptr<table> whitespace_split_fn(size_type strings_count,
Tokenizer tokenizer,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// compute the number of tokens per string
rmm::device_uvector<size_type> token_counts(strings_count, stream);
auto d_token_counts = token_counts.data();
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_token_counts,
[tokenizer] __device__(size_type idx) { return tokenizer.count_tokens(idx); });
// column count is the maximum number of tokens for any string
size_type const columns_count = thrust::reduce(rmm::exec_policy(stream),
token_counts.begin(),
token_counts.end(),
0,
thrust::maximum<size_type>{});
std::vector<std::unique_ptr<column>> results;
// boundary case: if no columns, return one null column (issue #119)
if (columns_count == 0) {
results.push_back(std::make_unique<column>(
data_type{type_id::STRING},
strings_count,
rmm::device_buffer{0, stream, mr}, // no data
cudf::detail::create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr),
strings_count));
}
// get the positions for every token
rmm::device_uvector<string_index_pair> tokens(columns_count * strings_count, stream);
string_index_pair* d_tokens = tokens.data();
thrust::fill(rmm::exec_policy(stream),
d_tokens,
d_tokens + (columns_count * strings_count),
string_index_pair{nullptr, 0});
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[tokenizer, d_token_counts, d_tokens] __device__(size_type idx) {
tokenizer.process_tokens(idx, d_token_counts, d_tokens);
});
// Create each column.
// - Each pair points to a string for that column for each row.
// - Create the strings column from the vector using the strings factory.
for (size_type col = 0; col < columns_count; ++col) {
auto column_tokens = d_tokens + (col * strings_count);
results.emplace_back(
make_strings_column(column_tokens, column_tokens + strings_count, stream, mr));
}
return std::make_unique<table>(std::move(results));
}
} // namespace
std::unique_ptr<table> split(
strings_column_view const& strings_column,
string_scalar const& delimiter = string_scalar(""),
size_type maxsplit = -1,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
size_type max_tokens = 0;
if (maxsplit > 0) max_tokens = maxsplit + 1; // makes consistent with Pandas
auto strings_device_view = column_device_view::create(strings_column.parent(), stream);
if (delimiter.size() == 0) {
return whitespace_split_fn(strings_column.size(),
whitespace_split_tokenizer_fn{*strings_device_view, max_tokens},
stream,
mr);
}
string_view d_delimiter(delimiter.data(), delimiter.size());
return split_fn(
strings_column, split_tokenizer_fn{*strings_device_view, d_delimiter, max_tokens}, stream, mr);
}
std::unique_ptr<table> rsplit(
strings_column_view const& strings_column,
string_scalar const& delimiter = string_scalar(""),
size_type maxsplit = -1,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
size_type max_tokens = 0;
if (maxsplit > 0) max_tokens = maxsplit + 1; // makes consistent with Pandas
auto strings_device_view = column_device_view::create(strings_column.parent(), stream);
if (delimiter.size() == 0) {
return whitespace_split_fn(strings_column.size(),
whitespace_rsplit_tokenizer_fn{*strings_device_view, max_tokens},
stream,
mr);
}
string_view d_delimiter(delimiter.data(), delimiter.size());
return split_fn(
strings_column, rsplit_tokenizer_fn{*strings_device_view, d_delimiter, max_tokens}, stream, mr);
}
} // namespace detail
// external APIs
std::unique_ptr<table> split(strings_column_view const& strings_column,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::split(strings_column, delimiter, maxsplit, rmm::cuda_stream_default, mr);
}
std::unique_ptr<table> rsplit(strings_column_view const& strings_column,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rsplit(strings_column, delimiter, maxsplit, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
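// Illustrative usage sketch (not part of the original source): the public API above can
// be exercised as below, assuming `scv` is a populated cudf::strings_column_view.
// maxsplit = 2 caps the output at 3 columns, matching the Pandas n=2 example in the
// comments above; rsplit() takes the same arguments but tokenizes from the end.
inline std::unique_ptr<cudf::table> split_on_underscore(cudf::strings_column_view const& scv)
{
  return cudf::strings::split(scv, cudf::string_scalar("_"), 2);
}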
#pragma once
#include <gunrock/util/array_utils.cuh>
namespace gunrock {
namespace oprtr {
#define FORALL_BLOCKSIZE 256
#define FORALL_GRIDSIZE 256
/*template <
typename T,
typename SizeT,
typename ApplyLambda>
__global__ void ForAll_Kernel(
T *d_array,
ApplyLambda apply,
SizeT length)
{
const SizeT STRIDE = (SizeT) blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length)
{
apply(d_array, i);
i += STRIDE;
}
}*/
template <typename ArrayT, typename SizeT, typename ApplyLambda>
__global__ void ForAll_Kernel(ArrayT array, ApplyLambda apply, SizeT length) {
// typedef typename ArrayT::SizeT SizeT;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length) {
apply(array + 0, i);
i += STRIDE;
}
}
template <typename ArrayT, typename SizeT, typename ApplyLambda>
__global__ void SharedForAll_Kernel(ArrayT array, ApplyLambda apply, SizeT length){
extern __shared__ char shared_array[];
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
SizeT aligned_length = ((length + blockDim.x - 1)/blockDim.x) * blockDim.x;
for (; i < aligned_length; i += STRIDE){
apply(array + 0, i, shared_array);
/* __syncthreads();
if (blockDim.x * blockIdx.x + threadIdx.x == 0){
printf("%d points done\n", i);
}*/
}
}
/*template <
typename T_in,
typename T_out,
typename SizeT,
typename ApplyLambda>
__global__ void ForAll_Kernel(
T_in *d_ins,
T_out *d_outs,
ApplyLambda apply,
SizeT length)
{
const SizeT STRIDE = (SizeT) blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length)
{
apply(d_ins, d_outs, i);
i += STRIDE;
}
}*/
template <typename ArrayT_out, typename ArrayT_in, typename SizeT,
typename ApplyLambda>
__global__ void ForAll_Kernel(ArrayT_out array_out, ArrayT_in array_in,
ApplyLambda apply, SizeT length) {
// typedef typename ArrayT_in::SizeT SizeT;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
// printf("(%d, %d) length = %d\n", blockIdx.x, threadIdx.x, length);
while (i < length) {
// printf("Applying %d\n", i);
apply(array_out + 0, array_in + 0, i);
i += STRIDE;
}
}
template <typename ArrayT_out, typename ArrayT_in1, typename ArrayT_in2,
typename SizeT, typename ApplyLambda>
__global__ void ForAll_Kernel(ArrayT_out array_out, ArrayT_in1 array_in1,
ArrayT_in2 array_in2, ApplyLambda apply,
SizeT length) {
// typedef typename ArrayT_in::SizeT SizeT;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
// printf("(%d, %d) length = %d\n", blockIdx.x, threadIdx.x, length);
while (i < length) {
// printf("Applying %d\n", i);
apply(array_out + 0, array_in1 + 0, array_in2 + 0, i);
i += STRIDE;
}
}
/*template <
typename T,
typename SizeT,
typename CondLambda,
typename ApplyLambda>
__global__ void ForAllCond_Kernel(
T *d_array,
CondLambda cond,
ApplyLambda apply,
SizeT length)
{
const SizeT STRIDE = (SizeT) blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length)
{
if (cond(d_array, i))
apply(d_array, i);
i += STRIDE;
}
}*/
template <typename ArrayT, typename SizeT, typename CondLambda,
typename ApplyLambda>
__global__ void ForAllCond_Kernel(ArrayT array, CondLambda cond,
ApplyLambda apply, SizeT length) {
// typedef typename ArrayT::SizeT SizeT;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length) {
if (cond(array + 0, i)) apply(array + 0, i);
i += STRIDE;
}
}
/*template <
typename T_in,
typename T_out,
typename SizeT,
typename CondLambda,
typename ApplyLambda>
__global__ void ForAllCond_Kernel(
T_in *d_ins,
T_out *d_outs,
CondLambda cond,
ApplyLambda apply,
SizeT length)
{
const SizeT STRIDE = (SizeT) blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length)
{
if (cond(d_ins, d_outs, i))
apply(d_ins, d_outs, i);
i += STRIDE;
}
}*/
template <typename ArrayT_out, typename ArrayT_in, typename SizeT,
typename CondLambda, typename ApplyLambda>
__global__ void ForAllCond_Kernel(ArrayT_out array_out, ArrayT_in array_in,
CondLambda cond, ApplyLambda apply,
SizeT length) {
// typedef typename ArrayT_in::SizeT SizeT;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length) {
if (cond(array_out + 0, array_in + 0, i))
apply(array_out + 0, array_in + 0, i);
i += STRIDE;
}
}
template <typename ArrayT_out, typename ArrayT_in1, typename ArrayT_in2,
typename SizeT, typename CondLambda, typename ApplyLambda>
__global__ void ForAllCond_Kernel(ArrayT_out array_out, ArrayT_in1 array_in1,
ArrayT_in2 array_in2, CondLambda cond,
ApplyLambda apply, SizeT length) {
// typedef typename ArrayT_in::SizeT SizeT;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
while (i < length) {
if (cond(array_out + 0, array_in1 + 0, array_in2 + 0, i))
apply(array_out + 0, array_in1 + 0, array_in2 + 0, i);
i += STRIDE;
}
}
template <typename T, typename SizeT, typename ApplyLambda>
cudaError_t ForAll(T *elements, ApplyLambda apply, SizeT length,
util::Location target = util::DEVICE,
cudaStream_t stream = 0) {
cudaError_t retval = cudaSuccess;
if ((target & util::HOST) == util::HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++) apply(elements, i);
}
if ((target & util::DEVICE) == util::DEVICE) {
ForAll_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
elements, apply, length);
}
return retval;
}
template <typename T_out, typename T_in, typename SizeT, typename ApplyLambda>
cudaError_t ForAll(T_out *elements_out, T_in *elements_in, ApplyLambda apply,
SizeT length, util::Location target = util::HOST,
cudaStream_t stream = 0) {
cudaError_t retval = cudaSuccess;
if ((target & util::HOST) == util::HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++) apply(elements_out, elements_in, i);
}
if ((target & util::DEVICE) == util::DEVICE) {
ForAll_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
elements_out, elements_in, apply, length);
}
return retval;
}
template <typename T_out, typename T_in1, typename T_in2, typename SizeT,
typename ApplyLambda>
cudaError_t ForAll(T_out *elements_out, T_in1 *elements_in1,
T_in2 *elements_in2, ApplyLambda apply, SizeT length,
util::Location target = util::HOST,
cudaStream_t stream = 0) {
cudaError_t retval = cudaSuccess;
if ((target & util::HOST) == util::HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++)
apply(elements_out, elements_in1, elements_in2, i);
}
if ((target & util::DEVICE) == util::DEVICE) {
ForAll_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
elements_out, elements_in1, elements_in2, apply, length);
}
return retval;
}
template <typename T, typename SizeT, typename CondLambda, typename ApplyLambda>
cudaError_t ForAllCond(T *elements, CondLambda cond, ApplyLambda apply,
SizeT length, util::Location target = util::DEVICE,
cudaStream_t stream = 0) {
cudaError_t retval = cudaSuccess;
if ((target & util::HOST) == util::HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++)
if (cond(elements, i)) apply(elements, i);
}
if ((target & util::DEVICE) == util::DEVICE) {
ForAllCond_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
elements, cond, apply, length);
}
return retval;
}
template <typename T_out, typename T_in, typename SizeT, typename CondLambda,
typename ApplyLambda>
cudaError_t ForAllCond(T_out *elements_out, T_in *elements_in, CondLambda cond,
ApplyLambda apply, SizeT length,
util::Location target = util::DEVICE,
cudaStream_t stream = 0) {
cudaError_t retval = cudaSuccess;
if ((target & util::HOST) == util::HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++)
if (cond(elements_out, elements_in, i))
apply(elements_out, elements_in, i);
}
if ((target & util::DEVICE) == util::DEVICE) {
ForAllCond_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
elements_out, elements_in, cond, apply, length);
}
return retval;
}
template <typename T_out, typename T_in1, typename T_in2, typename SizeT,
typename CondLambda, typename ApplyLambda>
cudaError_t ForAllCond(T_out *elements_out, T_in1 *elements_in1,
T_in2 *elements_in2, CondLambda cond, ApplyLambda apply,
SizeT length, util::Location target = util::DEVICE,
cudaStream_t stream = 0) {
cudaError_t retval = cudaSuccess;
if ((target & util::HOST) == util::HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++)
if (cond(elements_out, elements_in1, elements_in2, i))
apply(elements_out, elements_in1, elements_in2, i);
}
if ((target & util::DEVICE) == util::DEVICE) {
ForAllCond_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
elements_out, elements_in1, elements_in2, cond, apply, length);
}
return retval;
}
} // namespace oprtr
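// Illustrative usage sketch (not part of the original header), assuming a device buffer
// d_values of length n: ForAll applies the lambda at every index on the selected target
// (HOST runs an OpenMP loop, DEVICE launches ForAll_Kernel). Note that the helpers return
// cudaSuccess without checking the kernel launch, and device-side lambdas require nvcc's
// --extended-lambda flag.
#if 0
inline cudaError_t FillWithIndex(int *d_values, int n, cudaStream_t stream) {
  return oprtr::ForAll(
      d_values, [] __host__ __device__(int *values, int i) { values[i] = i; }, n,
      util::DEVICE, stream);
}
#endif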
namespace util {
// Array1D member definitions of SharedForAll / ForAll / ForAllCond start here
template <typename SizeT, typename ValueT, ArrayFlag FLAG,
unsigned int cudaHostRegisterFlag>
template <typename ApplyLambda>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::SharedForAll(
// ArrayT array,
ApplyLambda apply,
SizeT length, //= PreDefinedValues<SizeT>::InvalidValue,
Location target, // = util::LOCATION_DEFAULT,
cudaStream_t stream, // = 0,
unsigned sh_mem_size, // = 0 size of dynamic shared memory
dim3 grid_size, // = util::PreDefinedValues<int>::InvalidValue
dim3 block_size) // = util::PreDefinedValues<int>::InvalidValue
{
cudaError_t retval = cudaSuccess;
if (length == PreDefinedValues<SizeT>::InvalidValue) length = this->GetSize();
if (target == LOCATION_DEFAULT) target = this->setted | this->allocated;
if ((target & HOST) == HOST) {
//#pragma omp parallel for
// for (SizeT i = 0; i < length; i++) apply((*this) + 0, i, sh_mem_size);
}
if ((target & DEVICE) == DEVICE) {
//if (!util::isValid(grid_size)) grid_size = FORALL_GRIDSIZE;
//if (!util::isValid(block_size)) block_size = FORALL_BLOCKSIZE;
oprtr::SharedForAll_Kernel<<<grid_size, block_size, sh_mem_size, stream>>>((*this), apply,
length);
}
return retval;
}
template <typename SizeT, typename ValueT, ArrayFlag FLAG,
unsigned int cudaHostRegisterFlag>
template <typename ApplyLambda>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::ForAll(
// ArrayT array,
ApplyLambda apply,
SizeT length, //= PreDefinedValues<SizeT>::InvalidValue,
Location target, // = util::LOCATION_DEFAULT,
cudaStream_t stream, // = 0,
int grid_size, // = util::PreDefinedValues<int>::InvalidValue
int block_size) // = util::PreDefinedValues<int>::InvalidValue
{
cudaError_t retval = cudaSuccess;
if (length == PreDefinedValues<SizeT>::InvalidValue) length = this->GetSize();
if (target == LOCATION_DEFAULT) target = this->setted | this->allocated;
if ((target & HOST) == HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++) apply((*this) + 0, i);
}
if ((target & DEVICE) == DEVICE) {
if (!util::isValid(grid_size)) grid_size = FORALL_GRIDSIZE;
if (!util::isValid(block_size)) block_size = FORALL_BLOCKSIZE;
oprtr::ForAll_Kernel<<<grid_size, block_size, 0, stream>>>((*this), apply,
length);
}
return retval;
}
template <typename SizeT, typename ValueT, ArrayFlag FLAG,
unsigned int cudaHostRegisterFlag>
template <typename ArrayT_in, typename ApplyLambda>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::ForAll(
ArrayT_in &array_in,
// ArrayT_out array_out,
ApplyLambda apply,
SizeT length, // = PreDefinedValues<SizeT>::InvalidValue,
Location target, // = LOCATION_DEFAULT,
cudaStream_t stream) // = 0)
{
// typedef typename ArrayT_in::SizeT SizeT;
cudaError_t retval = cudaSuccess;
if (length == PreDefinedValues<SizeT>::InvalidValue) length = this->GetSize();
if (target == util::LOCATION_DEFAULT) target = this->setted | this->allocated;
if ((target & HOST) == HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++) apply((*this) + 0, array_in + 0, i);
}
if ((target & DEVICE) == DEVICE) {
// printf("Launch kernel, length = %d\n", length);
oprtr::ForAll_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
(*this), array_in, apply, length);
}
return retval;
}
template <typename SizeT, typename ValueT, ArrayFlag FLAG,
unsigned int cudaHostRegisterFlag>
template <typename ArrayT_in1, typename ArrayT_in2, typename ApplyLambda>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::ForAll(
ArrayT_in1 &array_in1, ArrayT_in2 &array_in2,
// ArrayT_out array_out,
ApplyLambda apply,
SizeT length, // = PreDefinedValues<SizeT>::InvalidValue,
Location target, // = LOCATION_DEFAULT,
cudaStream_t stream) // = 0)
{
// typedef typename ArrayT_in::SizeT SizeT;
cudaError_t retval = cudaSuccess;
if (length == PreDefinedValues<SizeT>::InvalidValue) length = this->GetSize();
if (target == util::LOCATION_DEFAULT) target = this->setted | this->allocated;
if ((target & HOST) == HOST) {
// util::PrintMsg("Launching on HOST, length = " + std::to_string(length));
#pragma omp parallel for
for (SizeT i = 0; i < length; i++) {
// util::PrintMsg(std::to_string(i) + " " + std::to_string((*this)[i]));
apply((*this) + 0, array_in1 + 0, array_in2 + 0, i);
}
}
if ((target & DEVICE) == DEVICE) {
// util::PrintMsg("Launching on DEVICE, length = " +
// std::to_string(length));
oprtr::ForAll_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
(*this), array_in1, array_in2, apply, length);
}
return retval;
}
template <typename SizeT, typename ValueT, ArrayFlag FLAG,
unsigned int cudaHostRegisterFlag>
template <typename CondLambda, typename ApplyLambda>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::ForAllCond(
// ArrayT array,
CondLambda cond, ApplyLambda apply,
SizeT length, // = PreDefinedValues<SizeT>::InvalidValue,
Location target, // = LOCATION_DEFAULT,
cudaStream_t stream) // = 0)
{
// typedef typename ArrayT::SizeT SizeT;
cudaError_t retval = cudaSuccess;
if (length == PreDefinedValues<SizeT>::InvalidValue) length = this->GetSize();
if (target == LOCATION_DEFAULT) target = this->setted | this->allocated;
if ((target & HOST) == HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++)
if (cond((*this) + 0, i)) apply((*this) + 0, i);
}
if ((target & DEVICE) == DEVICE) {
oprtr::ForAllCond_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
(*this), cond, apply, length);
}
return retval;
}
template <typename SizeT, typename ValueT, ArrayFlag FLAG,
unsigned int cudaHostRegisterFlag>
template <typename ArrayT_in, typename CondLambda, typename ApplyLambda>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::ForAllCond(
ArrayT_in &array_in,
// ArrayT_out array_out,
CondLambda cond, ApplyLambda apply,
SizeT length, // = PreDefinedValues<SizeT>::InvalidValue,
Location target, // = LOCATION_DEFAULT,
cudaStream_t stream) // = 0)
{
// typedef typename ArrayT_in::SizeT SizeT;
cudaError_t retval = cudaSuccess;
if (length == PreDefinedValues<SizeT>::InvalidValue) length = this->GetSize();
if (target == util::LOCATION_DEFAULT) target = this->setted | this->allocated;
if ((target & HOST) == HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++)
if (cond((*this) + 0, array_in + 0, i))
apply((*this) + 0, array_in + 0, i);
}
if ((target & DEVICE) == DEVICE) {
oprtr::ForAllCond_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
(*this), array_in, cond, apply, length);
}
return retval;
}
template <typename SizeT, typename ValueT, ArrayFlag FLAG,
unsigned int cudaHostRegisterFlag>
template <typename ArrayT_in1, typename ArrayT_in2, typename CondLambda,
typename ApplyLambda>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::ForAllCond(
ArrayT_in1 &array_in1, ArrayT_in2 &array_in2,
// ArrayT_out array_out,
CondLambda cond, ApplyLambda apply,
SizeT length, // = PreDefinedValues<SizeT>::InvalidValue,
Location target, // = LOCATION_DEFAULT,
cudaStream_t stream) // = 0)
{
// typedef typename ArrayT_in::SizeT SizeT;
cudaError_t retval = cudaSuccess;
if (length == PreDefinedValues<SizeT>::InvalidValue) length = this->GetSize();
if (target == util::LOCATION_DEFAULT) target = this->setted | this->allocated;
if ((target & HOST) == HOST) {
#pragma omp parallel for
for (SizeT i = 0; i < length; i++)
if (cond((*this) + 0, array_in1 + 0, array_in2 + 0, i))
apply((*this) + 0, array_in1 + 0, array_in2 + 0, i);
}
if ((target & DEVICE) == DEVICE) {
oprtr::ForAllCond_Kernel<<<FORALL_GRIDSIZE, FORALL_BLOCKSIZE, 0, stream>>>(
(*this), array_in1, array_in2, cond, apply, length);
}
return retval;
}
} // namespace util
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
// ---------------------------------------------------------------------------------------- //
// (file boundary: next source file)
// ---------------------------------------------------------------------------------------- //
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void segmentMaxLinearKernel(void* input, sd::LongType const* inputShape, int* starts, int* lengths,
sd::LongType numOfClasses, void* output, sd::LongType const* outputShape) {
__shared__ T* val;
__shared__ sd::LongType xLen, zLen, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ int threadsPerSegment, start, finish;
auto segment = blockIdx.x;
if (threadIdx.x == 0) {
// threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
// segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
extern __shared__ unsigned char shmem[];
val = reinterpret_cast<T*>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
z[zIndex] = x[shape::getIndexOffset(start, inputShape)];
val[segment] = z[zIndex];
}
}
__syncthreads();
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]);
}
}
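// For reference, the semantics these kernels implement on a vector input: every
// element whose segment id is c contributes to out[c] by max-reduction. A plain
// host-side sketch (the helper name segment_max_host is hypothetical):
#if 0
#include <algorithm>
#include <limits>
#include <vector>

std::vector<float> segment_max_host(const std::vector<float>& x,
                                    const std::vector<int>& seg, int numOfClasses) {
  std::vector<float> out(numOfClasses, -std::numeric_limits<float>::infinity());
  for (size_t i = 0; i < x.size(); ++i) out[seg[i]] = std::max(out[seg[i]], x[i]);
  return out;
}
// e.g. x = {1, 3, 2, 5, 4}, seg = {0, 0, 1, 1, 2}  ->  out = {3, 5, 4}
#endif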
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void unsortedSegmentMaxLinearKernel(void* input, sd::LongType const* inputShape, void* indices,
sd::LongType const* indicesShape, int* starts, int* lengths,
sd::LongType numOfClasses, void* output,
sd::LongType const* outputShape) {
__shared__ T* val;
__shared__ sd::LongType xLen, zLen, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ I* y; // int threadsPerSegment, start, finish;
auto segment = blockIdx.x;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
y = reinterpret_cast<I*>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape);
// start = starts[segment];
// finish = start + lengths[segment];
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)];
else
z[zIndex] = -DataTypeUtils::max<T>();
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment) {
sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void segmentMaxTadKernel(void* inputBuf, sd::LongType const* inputShape, sd::LongType const* inputTads,
sd::LongType const* inputTadOffsets, I* indices, int* starts, int* lengths,
sd::LongType numOfClasses, void* outputBuf, sd::LongType const* outputShape,
sd::LongType const* outputTads, sd::LongType const* outputTadOffsets,
T filler = 0) {
__shared__ T* val;
__shared__ sd::LongType len, zIndex, total;
__shared__ T* z;
__shared__ int start, finish;
__shared__ I segment;
if (threadIdx.x == 0) {
segment = indices[blockIdx.x]; // / threadsPerSegment;
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
  if (idx < total) {
auto x = reinterpret_cast<T*>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]);
// z[zIndex] = x[xIndex];
}
} else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
if (lengths[segment]) sd::math::atomics::sd_atomicMax(&z[zIndex], x[xIndex]);
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void segmentMaxFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
// int numClasses = output->sizeAt(0);
// if input is a vector: (as if in doc sample)
// sd::LongType idx = indices->e<sd::LongType>(0);
output->assign(-DataTypeUtils::infOrMax<T>());
auto stream = context->getCudaStream();
indices->syncToHost();
sd::LongType numOfClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(256, 512, 256);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens});
if (input->isVector()) {
segmentMaxLinearKernel<T, I><<<numOfClasses, input->lengthOf(), numOfClasses * 32 + 32, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(),
output->specialShapeInfo());
} else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
segmentMaxTadKernel<T, I><<<packX.numberOfTads(), 512, 2048, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(),
output->specialShapeInfo(), outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens});
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMaxFunctor_, (context, input, indices, output),
SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentMaxFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
sd::LongType numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
output->assign(DataTypeUtils::infOrMax<T>());
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), row, classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
unsortedSegmentMaxLinearKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
} else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
output->assign(-DataTypeUtils::max<T>());
segmentMaxTadKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(),
output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses,
NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMaxFunctor_,
(context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// segment max backprop
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void segmentMaxBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput,
sd::LongType const* forwardShape, void* eps,
sd::LongType const* epsShape, void* indicesBuf,
sd::LongType const* indicesShape, void* outputBuf,
sd::LongType const* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ sd::LongType xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradIn = reinterpret_cast<T*>(forwardOutput);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape);
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
if (sd::math::sd_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) {
z[zOffset] = gradOut[gradOffsetO];
}
}
}
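/* Backward rule implemented above (a restatement, not new logic): with
 *   y_c = max over { x_i : s(i) = c }        (the forward result, recomputed into gradIn)
 * the gradient is routed only to elements that attained their segment's maximum:
 *   dL/dx_i = dL/dy_{s(i)}  if |x_i - y_{s(i)}| <= 1e-6,  and 0 otherwise,
 * so ties within a segment all receive the incoming gradient.
 */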
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void segmentMaxBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput,
sd::LongType const* forwardShape, void* eps, sd::LongType const* epsShape,
void* indicesBuf, sd::LongType const* indicesShape, void* outputBuf,
sd::LongType const* outputShape, sd::LongType const* inputTad,
sd::LongType const* inputOffsets, sd::LongType const* gradInTad,
sd::LongType const* gradInOffsets, sd::LongType const* gradOutTad,
sd::LongType const* gradOutOffsets, sd::LongType const* outTad,
sd::LongType const* outOffsets) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ sd::LongType xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradIn = reinterpret_cast<T*>(forwardOutput);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[yIndex];
T* current = x + inputOffsets[i];
T* currentOut = z + outOffsets[i];
T* in = gradIn + gradInOffsets[segment];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
if (sd::math::sd_abs(in[e] - current[e]) <= T(1.e-6)) currentOut[e] = outGrad[e];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
sd::Status segmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
NDArray* output) {
// int numOfClasses = gradOut->sizeAt(0);
// if input is a vector: (as if in doc sample)
auto stream = context->getCudaStream();
NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(),
context); //->shapeInfo(), context);
segmentMaxFunctor_<T, I>(context, input, indices, &tempRes);
NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes});
if (input->isVector()) {
sd::LongType loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1);
segmentMaxBPLinearKernel<T, I><<<1 + gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
output->specialBuffer(), output->specialShapeInfo());
} else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
sd::LongType const* gradInTads = packGradIn.specialShapeInfo();
sd::LongType const* gradInTadOffsets = packGradIn.specialOffsets();
sd::LongType const* gradOutTads = packGradOut.specialShapeInfo();
sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets();
segmentMaxBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets,
gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes});
return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
sd::Status segmentMaxFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMaxFunctorBP_,
(context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static sd::Status unsortedSegmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) {
// int numOfClasses = gradOut->sizeAt(0);
// if input is a vector: (as if in doc sample)
auto stream = context->getCudaStream();
NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(),
context); //->shapeInfo(), context);
unsortedSegmentMaxFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes);
NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes});
if (input->isVector()) {
sd::LongType loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1);
segmentMaxBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
output->specialBuffer(), output->specialShapeInfo());
} else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
sd::LongType const* gradInTads = packGradIn.specialShapeInfo();
sd::LongType const* gradInTadOffsets = packGradIn.specialOffsets();
sd::LongType const* gradOutTads = packGradOut.specialShapeInfo();
sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets();
segmentMaxBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets,
gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes});
return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
sd::Status unsortedSegmentMaxFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
sd::LongType numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMaxFunctorBP_,
(context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
} // namespace helpers
} // namespace ops
} // namespace sd
// ---------------------------------------------------------------------------------------- //
// (file boundary: next source file)
// ---------------------------------------------------------------------------------------- //
#define CUDA_NUM_THREADS 256
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#define EPS 1e-8
#define SAFE_DIV(a, b) ( (b==0)? ( (a)/(EPS) ): ( (a)/(b) ) )
template <typename scalar_t>
__global__ void kernel_resample2d_update_output(const int n,
const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size, int dilation) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t val = 0.0f;
scalar_t sum = 0.0f;
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t sigma = DIM3_INDEX(input2, b, 2, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
for (int fy = 0; fy < kernel_size/2; fy += 1) {
int yT = max(min( int (floor(yf)-fy*dilation), idim_h-1), 0);
int yB = max(min( int (floor(yf)+(fy+1)*dilation),idim_h-1), 0);
for (int fx = 0; fx < kernel_size/2; fx += 1) {
int xL = max(min( int (floor(xf)-fx*dilation ), idim_w-1), 0);
int xR = max(min( int (floor(xf)+(fx+1)*dilation), idim_w-1), 0);
scalar_t xL_ = ( static_cast<scalar_t>( fx *dilation)+alpha );
scalar_t xR_ = ( static_cast<scalar_t>((1.+fx)*dilation)-alpha );
scalar_t yT_ = ( static_cast<scalar_t>( fy *dilation)+beta );
scalar_t yB_ = ( static_cast<scalar_t>((1.+fy)*dilation)-beta );
scalar_t xL_P = exp(SAFE_DIV(-xL_*xL_, 2*sigma*sigma));
scalar_t xR_P = exp(SAFE_DIV(-xR_*xR_, 2*sigma*sigma));
scalar_t yT_P = exp(SAFE_DIV(-yT_*yT_, 2*sigma*sigma));
scalar_t yB_P = exp(SAFE_DIV(-yB_*yB_, 2*sigma*sigma));
// if (sigma==0){
// printf("xL_P %.10f\n", xL_P);
// // printf("%.10f\n", -(xL_*xL_)/(2*sigma*sigma));
// }
val += static_cast<scalar_t> (yT_P*xL_P * DIM3_INDEX(input1, b, c, yT, xL));
val += static_cast<scalar_t> (yT_P*xR_P * DIM3_INDEX(input1, b, c, yT, xR));
val += static_cast<scalar_t> (yB_P*xL_P * DIM3_INDEX(input1, b, c, yB, xL));
val += static_cast<scalar_t> (yB_P*xR_P * DIM3_INDEX(input1, b, c, yB, xR));
sum += (yT_P*xL_P + yT_P*xR_P + yB_P*xL_P + yB_P*xR_P);
}
}
output[index] = SAFE_DIV(val, sum);
}
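/* What the forward kernel above computes, written out: for an output pixel (x, y) with
 * flow (dx, dy, sigma), let xf = x + dx, yf = y + dy, alpha = xf - floor(xf) and
 * beta = yf - floor(yf). Each tap pair (fx, fy) contributes separable Gaussian weights
 * over the tap distances t in { fx*dilation + alpha, (fx+1)*dilation - alpha } (and the
 * analogous vertical distances), with w(t) = exp(-t^2 / (2*sigma^2)), i.e.
 *
 *   out(x, y) = sum_{i,j} w(ty_i) * w(tx_j) * I(y_i, x_j)  /  sum_{i,j} w(ty_i) * w(tx_j),
 *
 * with SAFE_DIV replacing a zero denominator by EPS when sigma == 0.
 */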
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input1(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size, int dilation) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t sum = 0.0f;
// scalar_t *xL_P = new scalar_t [kernel_size*kernel_size/4];
// scalar_t *xR_P = new scalar_t [kernel_size*kernel_size/4];
// scalar_t *yT_P = new scalar_t [kernel_size*kernel_size/4];
// scalar_t *yB_P = new scalar_t [kernel_size*kernel_size/4];
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t sigma = DIM3_INDEX(input2, b, 2, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - int(xf); // alpha
scalar_t beta = yf - int(yf); // beta
for (int fy = 0; fy < kernel_size/2; fy += 1) {
for (int fx = 0; fx < kernel_size/2; fx += 1) {
scalar_t xL_ = ( static_cast<scalar_t>( fx *dilation)+alpha );
scalar_t xR_ = ( static_cast<scalar_t>((1.+fx)*dilation)-alpha );
scalar_t yT_ = ( static_cast<scalar_t>( fy *dilation)+beta );
scalar_t yB_ = ( static_cast<scalar_t>((1.+fy)*dilation)-beta );
// scalar_t xL_ = ( alpha+static_cast<scalar_t>(fx) );
// scalar_t xR_ = ( 1.-alpha+static_cast<scalar_t>(fx) );
// scalar_t yT_ = ( beta+static_cast<scalar_t>(fy) );
// scalar_t yB_ = ( 1-beta+static_cast<scalar_t>(fy) );
scalar_t xL_P = exp(SAFE_DIV(-xL_*xL_, 2*sigma*sigma));
scalar_t xR_P = exp(SAFE_DIV(-xR_*xR_, 2*sigma*sigma));
scalar_t yT_P = exp(SAFE_DIV(-yT_*yT_, 2*sigma*sigma));
scalar_t yB_P = exp(SAFE_DIV(-yB_*yB_, 2*sigma*sigma));
// scalar_t xL_P = exp(SAFE_DIV(-xL_*xL_,2*sigma*sigma));
// scalar_t xR_P = exp(-(xR_*xR_)/(2*sigma*sigma));
// scalar_t yT_P = exp(-(yT_*yT_)/(2*sigma*sigma));
// scalar_t yB_P = exp(-(yB_*yB_)/(2*sigma*sigma));
sum += (yT_P*xL_P + yT_P*xR_P + yB_P*xL_P + yB_P*xR_P);
// printf("%f\n", SAFE_DIV(-xL_*xL_, 2*sigma*sigma));
}
}
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
for (int fy = 0; fy < kernel_size/2; fy += 1) {
int yT = max(min( int (floor(yf)-fy*dilation), idim_h-1), 0);
int yB = max(min( int (floor(yf)+(fy+1)*dilation),idim_h-1), 0);
// int yT = max(min( int (floor(yf)-fy ), idim_h-1), 0);
// int yB = max(min( int (floor(yf)+fy+1), idim_h-1), 0);
for (int fx = 0; fx < kernel_size/2; fx += 1) {
int xL = max(min( int (floor(xf)-fx*dilation ), idim_w-1), 0);
int xR = max(min( int (floor(xf)+(fx+1)*dilation), idim_w-1), 0);
// int xL = max(min( int (floor(xf)-fx ), idim_w-1), 0);
// int xR = max(min( int (floor(xf)+fx+1), idim_w-1), 0);
scalar_t xL_ = ( static_cast<scalar_t>( fx *dilation)+alpha );
scalar_t xR_ = ( static_cast<scalar_t>((1.+fx)*dilation)-alpha );
scalar_t yT_ = ( static_cast<scalar_t>( fy *dilation)+beta );
scalar_t yB_ = ( static_cast<scalar_t>((1.+fy)*dilation)-beta );
// scalar_t xL_ = ( alpha+static_cast<scalar_t>(fx) );
// scalar_t xR_ = ( 1.-alpha+static_cast<scalar_t>(fx) );
// scalar_t yT_ = ( beta+static_cast<scalar_t>(fy) );
// scalar_t yB_ = ( 1-beta+static_cast<scalar_t>(fy) );
scalar_t xL_P = exp(SAFE_DIV(-xL_*xL_, 2*sigma*sigma));
scalar_t xR_P = exp(SAFE_DIV(-xR_*xR_, 2*sigma*sigma));
scalar_t yT_P = exp(SAFE_DIV(-yT_*yT_, 2*sigma*sigma));
scalar_t yB_P = exp(SAFE_DIV(-yB_*yB_, 2*sigma*sigma));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT), (xL)), SAFE_DIV(yT_P*xL_P, sum) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT), (xR)), SAFE_DIV(yT_P*xR_P, sum) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB), (xL)), SAFE_DIV(yB_P*xL_P, sum) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB), (xR)), SAFE_DIV(yB_P*xR_P, sum) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input2(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size, int dilation) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t grad1 = 0.0;
scalar_t grad2 = 0.0;
scalar_t sum = 0.0;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t sigma = DIM3_INDEX(input2, b, 2, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
scalar_t sumgrad = 0.0;
for (int fy = 0; fy < kernel_size/2; fy += 1) {
int yT = max(min( int (floor(yf)-fy*dilation), idim_h-1), 0);
int yB = max(min( int (floor(yf)+(fy+1)*dilation),idim_h-1), 0);
for (int fx = 0; fx < kernel_size/2; fx += 1) {
int xL = max(min( int (floor(xf)-fx*dilation ), idim_w-1), 0);
int xR = max(min( int (floor(xf)+(fx+1)*dilation), idim_w-1), 0);
scalar_t xL_ = ( static_cast<scalar_t>( fx *dilation)+alpha );
scalar_t xR_ = ( static_cast<scalar_t>((1.+fx)*dilation)-alpha );
scalar_t yT_ = ( static_cast<scalar_t>( fy *dilation)+beta );
scalar_t yB_ = ( static_cast<scalar_t>((1.+fy)*dilation)-beta );
scalar_t xL_P = exp(SAFE_DIV(-xL_*xL_, 2*sigma*sigma));
scalar_t xR_P = exp(SAFE_DIV(-xR_*xR_, 2*sigma*sigma));
scalar_t yT_P = exp(SAFE_DIV(-yT_*yT_, 2*sigma*sigma));
scalar_t yB_P = exp(SAFE_DIV(-yB_*yB_, 2*sigma*sigma));
sum += (yT_P*xL_P + yT_P*xR_P + yB_P*xL_P + yB_P*xR_P);
for (int ch = 0; ch < odim_c; ++ch) {
if (c==0) {
grad1 += SAFE_DIV(xL_ * yT_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xL), -sigma*sigma);
grad1 -= SAFE_DIV(xR_ * yT_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xR), -sigma*sigma);
grad1 += SAFE_DIV(xL_ * yB_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xL), -sigma*sigma);
grad1 -= SAFE_DIV(xR_ * yB_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xR), -sigma*sigma);
sumgrad += SAFE_DIV(( xL_*yT_P*xL_P - xR_*yT_P*xR_P + xL_*yB_P*xL_P - xR_*yB_P*xR_P ), -sigma*sigma);
}
else if (c==1) {
grad1 += SAFE_DIV(yT_ * yT_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xL), -sigma*sigma);
grad1 += SAFE_DIV(yT_ * yT_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xR), -sigma*sigma);
grad1 -= SAFE_DIV(yB_ * yB_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xL), -sigma*sigma);
grad1 -= SAFE_DIV(yB_ * yB_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xR), -sigma*sigma);
sumgrad += SAFE_DIV(( yT_*yT_P*xL_P + yT_*yT_P*xR_P - yB_*yB_P*xL_P - yB_*yB_P*xR_P ), -sigma*sigma);
}
else if (c==2) {
grad1 += SAFE_DIV((yT_*yT_+xL_*xL_) * yT_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xL), sigma*sigma*sigma);
grad1 += SAFE_DIV((yT_*yT_+xR_*xR_) * yT_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xR), sigma*sigma*sigma);
grad1 += SAFE_DIV((yB_*yB_+xL_*xL_) * yB_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xL), sigma*sigma*sigma);
grad1 += SAFE_DIV((yB_*yB_+xR_*xR_) * yB_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xR), sigma*sigma*sigma);
sumgrad += SAFE_DIV(( (yT_*yT_+xL_*xL_)*yT_P*xL_P + (yT_*yT_+xR_*xR_)*yT_P*xR_P + (yB_*yB_+xL_*xL_)*yB_P*xL_P + (yB_*yB_+xR_*xR_)*yB_P*xR_P ), sigma*sigma*sigma);
}
}
}
}
for (int fy = 0; fy < kernel_size/2; fy += 1) {
int yT = max(min( int (floor(yf)-fy*dilation), idim_h-1), 0);
int yB = max(min( int (floor(yf)+(fy+1)*dilation),idim_h-1), 0);
for (int fx = 0; fx < kernel_size/2; fx += 1) {
int xL = max(min( int (floor(xf)-fx*dilation ), idim_w-1), 0);
int xR = max(min( int (floor(xf)+(fx+1)*dilation), idim_w-1), 0);
scalar_t xL_ = ( static_cast<scalar_t>( fx *dilation)+alpha );
scalar_t xR_ = ( static_cast<scalar_t>((1.+fx)*dilation)-alpha );
scalar_t yT_ = ( static_cast<scalar_t>( fy *dilation)+beta );
scalar_t yB_ = ( static_cast<scalar_t>((1.+fy)*dilation)-beta );
scalar_t xL_P = exp(SAFE_DIV(-xL_*xL_, 2*sigma*sigma));
scalar_t xR_P = exp(SAFE_DIV(-xR_*xR_, 2*sigma*sigma));
scalar_t yT_P = exp(SAFE_DIV(-yT_*yT_, 2*sigma*sigma));
scalar_t yB_P = exp(SAFE_DIV(-yB_*yB_, 2*sigma*sigma));
for (int ch = 0; ch < odim_c; ++ch) {
grad2 += sumgrad/odim_c * yT_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xL);
grad2 += sumgrad/odim_c * yT_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yT, xR);
grad2 += sumgrad/odim_c * yB_P * xL_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xL);
grad2 += sumgrad/odim_c * yB_P * xR_P * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, yB, xR);
}
}
}
gradInput[index] = SAFE_DIV(grad1, sum) - SAFE_DIV(grad2, sum*sum);
}
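/* The sign and power of sigma in the branches above follow from differentiating the
 * Gaussian weight w(t) = exp(-t^2 / (2*sigma^2)):
 *
 *   dw/dt     = -(t / sigma^2) * w(t)   -> the dx/dy branches divide by -sigma*sigma,
 *                                          with dt/d(dx) = +1 for left taps
 *                                          (t = fx*dilation + alpha) and -1 for right
 *                                          taps (t = (fx+1)*dilation - alpha);
 *   dw/dsigma =  (t^2 / sigma^3) * w(t) -> the sigma branch divides by sigma^3.
 *
 * The final line combines them via the quotient rule for out = val / sum:
 * grad = grad1 / sum - grad2 / sum^2.
 */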
void resample2d_kernel_forward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& output,
int kernel_size,
int dilation) {
int n = output.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
// TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
kernel_resample2d_update_output<scalar_t><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
input1.data<scalar_t>(),
input1_size,
input1_stride,
input2.data<scalar_t>(),
input2_size,
input2_stride,
output.data<scalar_t>(),
output_size,
output_stride,
kernel_size,
dilation);
}));
// TODO: ATen-equivalent check
// THCudaCheck(cudaGetLastError());
}
void resample2d_kernel_backward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& gradOutput,
at::Tensor& gradInput1,
at::Tensor& gradInput2,
int kernel_size,
int dilation) {
int n = gradOutput.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3));
const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3));
const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3));
const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3));
AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
kernel_resample2d_backward_input1<scalar_t><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
input1.data<scalar_t>(),
input1_size,
input1_stride,
input2.data<scalar_t>(),
input2_size,
input2_stride,
gradOutput.data<scalar_t>(),
gradOutput_size,
gradOutput_stride,
gradInput1.data<scalar_t>(),
gradInput1_size,
gradInput1_stride,
kernel_size,
dilation
);
}));
const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3));
const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3));
n = gradInput2.numel();
AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {
kernel_resample2d_backward_input2<scalar_t><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
input1.data<scalar_t>(),
input1_size,
input1_stride,
input2.data<scalar_t>(),
input2_size,
input2_stride,
gradOutput.data<scalar_t>(),
gradOutput_size,
gradOutput_stride,
gradInput2.data<scalar_t>(),
gradInput2_size,
gradInput2_stride,
kernel_size,
dilation
);
}));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(cudaGetLastError());
}
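// How these two launchers get exposed to Python is not part of this file; a hypothetical
// pybind11 binding sketch for a PyTorch C++ extension (the module layout and exposed
// names are assumptions, not taken from this project):
#if 0
#include <torch/extension.h>

void resample2d_kernel_forward(at::Tensor& input1, at::Tensor& input2, at::Tensor& output,
                               int kernel_size, int dilation);
void resample2d_kernel_backward(at::Tensor& input1, at::Tensor& input2, at::Tensor& gradOutput,
                                at::Tensor& gradInput1, at::Tensor& gradInput2,
                                int kernel_size, int dilation);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &resample2d_kernel_forward, "resample2d forward (CUDA)");
  m.def("backward", &resample2d_kernel_backward, "resample2d backward (CUDA)");
}
#endif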
// ---------------------------------------------------------------------------------------- //
// (file boundary: next source file)
// ---------------------------------------------------------------------------------------- //
namespace hvvr {
template <PixelFormat PixelFormat>
CUDA_DEVICE void writeSurface(vector4 val, Texture2D tex, unsigned int x, unsigned int y) {
if (PixelFormat == PixelFormat::RGBA32F) {
surf2Dwrite(float4(val), tex.d_surfaceObject, x * sizeof(float4), y);
} else if (PixelFormat == PixelFormat::RGBA16) {
surf2Dwrite(ToColor4Unorm16(val), tex.d_surfaceObject, x * sizeof(uint64_t), y);
} else {
surf2Dwrite(ToColor4Unorm8SRgb(val), tex.d_surfaceObject, x * sizeof(uchar4), y);
}
}
// 4-tap B-spline, based on http://vec3.ca/bicubic-filtering-in-fewer-taps/
CUDA_DEVICE vector4 bicubicFast(Texture2D tex, vector2 coord) {
vector2 pixCoord = coord * vector2(tex.width, tex.height);
vector2 pixCenter = vector2(floorf(pixCoord.x - 0.5f), floorf(pixCoord.y - 0.5f)) + 0.5f;
vector2 iDim = vector2(1.0f / tex.width, 1.0f / tex.height);
vector2 one = vector2(1.0f, 1.0f);
// fractionalOffset
vector2 f = pixCoord - pixCenter;
vector2 f2 = f * f;
vector2 f3 = f2 * f;
vector2 omf2 = (one - f) * (one - f);
vector2 omf3 = omf2 * (one - f);
float sixth = (1.0f / 6.0f);
vector2 w0 = sixth * omf3;
vector2 w1 = ((4.0f / 6.0f) * one + 0.5f * f3 - f2);
vector2 w3 = sixth * f3;
vector2 w2 = one - w0 - w1 - w3;
vector2 s0 = w0 + w1;
vector2 s1 = w2 + w3;
vector2 f0 = w1 / (w0 + w1);
vector2 f1 = w3 / (w2 + w3);
vector2 t0 = (pixCenter - one + f0) * iDim;
vector2 t1 = (pixCenter + one + f1) * iDim;
auto T = tex.d_texObject;
// and sample and blend
return vector4(tex2D<float4>(T, t0.x, t0.y)) * s0.x * s0.y + vector4(tex2D<float4>(T, t1.x, t0.y)) * s1.x * s0.y +
vector4(tex2D<float4>(T, t0.x, t1.y)) * s0.x * s1.y + vector4(tex2D<float4>(T, t1.x, t1.y)) * s1.x * s1.y;
}
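/* The weights above are the uniform cubic B-spline evaluated at fractional offset f:
 *
 *   w0 = (1 - f)^3 / 6,   w1 = (3*f^3 - 6*f^2 + 4) / 6,   w3 = f^3 / 6,   w2 = 1 - w0 - w1 - w3.
 *
 * The "fewer taps" trick folds the 4 taps per axis into 2 by leaning on hardware bilinear
 * filtering: sample at offsets f0 = w1 / (w0 + w1) and f1 = w3 / (w2 + w3) around
 * pixCenter -/+ 1 and blend with s0 = w0 + w1 and s1 = w2 + w3, so the full 4x4 bicubic
 * footprint costs 4 texture fetches instead of 16.
 */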
struct EccentricityToTexCoordMapping {
EccentricityMap eccentricityMap;
float texMapSize;
float invTexMapSize;
float invMaxEccentricity;
};
void GPUCamera::getEccentricityMap(EccentricityToTexCoordMapping& map) const {
map.eccentricityMap = eccentricityMap;
map.texMapSize = (float)polarTextures.raw.height;
map.invTexMapSize = 1.0f / polarTextures.raw.height;
map.invMaxEccentricity = 1.0f / maxEccentricityRadians;
}
// eccentricity is in the range [0,maxEccentricityRadians]
CUDA_DEVICE float eccentricityToTexCoord(float eccentricity, EccentricityToTexCoordMapping eToTexMap) {
return (eToTexMap.eccentricityMap.applyInverse(eccentricity) + 0.5f) * eToTexMap.invTexMapSize;
}
CUDA_DEVICE vector2 getNormalizedCoord(int x, int y, int width, int height) {
return vector2(((float)x + 0.5f) / (float)width, ((float)y + 0.5f) / (float)height);
}
// Aligned along z axis
CUDA_DEVICE vector3 angularEyeCoordToDirection(float theta, float e) {
float z = -cosf(e);
float xyLength = sqrtf(1.0f - z * z);
vector2 xy = vector2(cosf(-theta), sinf(-theta)) * xyLength;
return {xy.x, xy.y, z};
}
CUDA_DEVICE void eyeSpaceDirectionToAngularEyeCoord(vector3 dir, float& theta, float& eccentricity) {
eccentricity = acosf(-dir.z);
// Angle of rotation about z, measured from x
theta = -atan2f(dir.y, dir.x);
}
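/* The angular eye coordinates used here, for reference: eccentricity e is the angle from
 * the gaze axis (-z) and theta the rotation about z measured from +x (negated), so
 *
 *   dir(theta, e) = ( sin(e)*cos(-theta), sin(e)*sin(-theta), -cos(e) ),
 *   e = acos(-dir.z),   theta = -atan2(dir.y, dir.x),
 *
 * which matches angularEyeCoordToDirection (sqrt(1 - z^2) = sin(e) for e in [0, pi]) and
 * eyeSpaceDirectionToAngularEyeCoord above.
 */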
CUDA_DEVICE void polarTextureCoordToAngularEyeCoord(vector2 coord,
EccentricityToTexCoordMapping eToTexMap,
float& theta,
float& eccentricity) {
eccentricity = eToTexMap.eccentricityMap.apply(coord.y * eToTexMap.texMapSize - 0.5f);
theta = (2.0f * Pi * coord.x) - Pi;
}
CUDA_DEVICE vector2 angularEyeCoordToPolarTextureCoord(float theta,
float eccentricity,
EccentricityToTexCoordMapping eToTexMap) {
float x = (theta + Pi) / (2.0f * Pi);
float y = eccentricityToTexCoord(eccentricity, eToTexMap);
return {x, y};
}
CUDA_DEVICE void computeMoments3x3Window(
cudaTextureObject_t tex, vector2 coord, vector2 invDim, vector4& m_1, vector4& m_2) {
float offsets[3] = {-1.0f, 0.0f, 1.0f};
m_1 = vector4(0.0f);
m_2 = vector4(0.0f);
for (int x = 0; x < 3; ++x) {
for (int y = 0; y < 3; ++y) {
vector4 c(tex2D<float4>(tex, coord.x + (offsets[x] * invDim.x), coord.y + (offsets[y] * invDim.y)));
m_1 += c;
m_2 += c * c;
}
}
float weight = 1.0f / 9.0f;
m_1 *= weight;
m_2 *= weight;
}
CUDA_DEVICE vector4 sqrt(vector4 v) {
return vector4(sqrtf(v.x), sqrtf(v.y), sqrtf(v.z), sqrtf(v.w));
}
CUDA_DEVICE vector4 clampToNeighborhood(vector4 oldValue,
GPUCamera::PolarTextures polarTex,
vector2 coord,
TemporalFilterSettings settings) {
vector4 m_1 = vector4(tex2D<float4>(polarTex.moment1.d_texObject, coord.x, coord.y));
vector4 m_2 = vector4(tex2D<float4>(polarTex.moment2.d_texObject, coord.x, coord.y));
vector4 stdDev = sqrt(m_2 - (m_1 * m_1));
// Arbitrary
float scaleFactor = settings.stddevMultiplier;
vector4 minC = m_1 - (stdDev * scaleFactor);
vector4 maxC = m_1 + (stdDev * scaleFactor);
return clamp(oldValue, minC, maxC);
}
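/* Neighborhood clamping, written out: ComputeMoments below stores the first and second
 * moments of the 3x3 window, m1 = E[c] and m2 = E[c^2], so the per-channel standard
 * deviation is sigma = sqrt(m2 - m1^2), and the reprojected history sample is clamped to
 *
 *   [ m1 - k*sigma,  m1 + k*sigma ],   k = settings.stddevMultiplier,
 *
 * the usual variance-clipping step of temporal antialiasing.
 */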
template <PixelFormat PixelFormat>
CUDA_KERNEL void ComputeMoments(GPUCamera::PolarTextures polarTex) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < polarTex.raw.width && j < polarTex.raw.height) {
vector4 m_1, m_2;
vector2 invDim = vector2(1.0f / polarTex.raw.width, 1.0f / polarTex.raw.height);
vector2 coord = vector2(invDim.x * i, invDim.y * j);
computeMoments3x3Window(polarTex.raw.d_texObject, coord, invDim, m_1, m_2);
writeSurface<PixelFormat>(m_1, polarTex.moment1, i, j);
writeSurface<PixelFormat>(m_2, polarTex.moment2, i, j);
}
}
template <PixelFormat PixelFormat>
CUDA_KERNEL void FoveatedPolarToScreenSpaceKernel(GPUCamera::PolarTextures polarTex,
Texture2D resultTexture,
GPUImage resultImage,
matrix3x3 sampleSpaceToEyeSpaceMatrix,
EccentricityToTexCoordMapping eToTexMap,
Texture2D previousResultTexture,
matrix4x4 eyeSpaceToPreviousSampleSpaceMatrix,
TemporalFilterSettings settings) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < resultImage.width() && j < resultImage.height()) {
vector2 normalizedCoord = getNormalizedCoord(i, j, resultImage.width(), resultImage.height());
vector3 sampleSpacePoint = vector3(normalizedCoord, 1.0f);
vector3 eyeSpaceDirection = normalize(sampleSpaceToEyeSpaceMatrix * sampleSpacePoint);
float theta, eccentricity;
eyeSpaceDirectionToAngularEyeCoord(eyeSpaceDirection, theta, eccentricity);
/** Display full mapping, for debugging
theta = normalizedCoord.x * 2.0f * Pi;
eccentricity = normalizedCoord.y * eToTexMap.invMaxEccentricity;
*/
vector2 coord = angularEyeCoordToPolarTextureCoord(theta, eccentricity, eToTexMap);
vector4 newValue = bicubicFast(polarTex.raw, coord);
vector4 result = newValue;
vector4 surfaceResult = result;
float tValue = tex2D<float>(polarTex.depth.d_texObject, coord.x, coord.y);
if (tValue < CUDA_INF) {
vector3 currentEyePosition = angularEyeCoordToDirection(theta, eccentricity) * tValue;
vector4 prevSamplePosition = eyeSpaceToPreviousSampleSpaceMatrix * vector4(currentEyePosition, 1.0f);
vector2 oldTexCoord = vector2(prevSamplePosition.x, prevSamplePosition.y) * (1.0f / prevSamplePosition.z);
float alpha = settings.alpha;
vector4 oldValue = newValue;
if (oldTexCoord.x > 0 && oldTexCoord.y > 0 && oldTexCoord.x < 1 && oldTexCoord.y < 1 && tValue > 0) {
oldValue = vector4(tex2D<float4>(previousResultTexture.d_texObject, oldTexCoord.x, oldTexCoord.y));
}
vector4 clampedOldValue = clampToNeighborhood(oldValue, polarTex, coord, settings);
            // Make alpha depend on eccentricity: higher in the fovea, lower toward the periphery
float normalizedE = eccentricity * eToTexMap.invMaxEccentricity;
float mn = 0.2f, mx = 0.35f;
float t = clamp((normalizedE - mn) / (mx - mn), 0.f, 1.f);
alpha = lerp(0.5f, alpha, t);
// Heuristic hack! Turn down TAA clamping in the periphery
normalizedE = sqrtf(sqrtf(sqrtf(sqrtf(normalizedE))));
clampedOldValue.x = lerp(clampedOldValue.x, oldValue.x, normalizedE);
clampedOldValue.y = lerp(clampedOldValue.y, oldValue.y, normalizedE);
clampedOldValue.z = lerp(clampedOldValue.z, oldValue.z, normalizedE);
clampedOldValue.w = lerp(clampedOldValue.w, oldValue.w, normalizedE);
surfaceResult = alpha * newValue + (1.0f - alpha) * clampedOldValue;
result = surfaceResult;
}
if (PixelFormat == PixelFormat::RGBA32F) {
vector4* output = (vector4*)resultImage.data();
output[resultImage.stride() * j + i] = result;
} else {
uint32_t* output = (uint32_t*)resultImage.data();
output[resultImage.stride() * j + i] = ToColor4Unorm8SRgb(result);
}
writeSurface<PixelFormat>(surfaceResult, resultTexture, i, j);
}
}
CUDA_DEVICE float getEccentricity(unsigned i, unsigned j, Texture2D tex, matrix3x3 sampleSpaceToEyeSpaceMatrix) {
vector2 normalizedCoord = getNormalizedCoord(i, j, tex.width, tex.height);
vector3 eyeSpaceDirection =
normalize(sampleSpaceToEyeSpaceMatrix * vector3(normalizedCoord.x, normalizedCoord.y, 1.0f));
float theta, eccentricity;
eyeSpaceDirectionToAngularEyeCoord(eyeSpaceDirection, theta, eccentricity);
return eccentricity;
}
CUDA_DEVICE vector4 texelFetch(Texture2D tex, unsigned i, unsigned j) {
vector2 coord = getNormalizedCoord(i, j, tex.width, tex.height);
return vector4(tex2D<float4>(tex.d_texObject, coord.x, coord.y));
}
// The reliance on eccentricity is a pure guess; a better implementation would make this more principled, or
// at least try to obtain the formula used (but not published) in
// https://research.nvidia.com/sites/default/files/publications/supplementary.pdf
template <PixelFormat PixelFormat>
CUDA_KERNEL void SeparableFilterUsingEccentricity(Texture2D output,
Texture2D input,
vector2i step,
ContrastEnhancementSettings settings,
matrix3x3 sampleSpaceToEyeSpaceMatrix) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < output.width && j < output.height) {
float eccentricity = getEccentricity(i, j, output, sampleSpaceToEyeSpaceMatrix);
int filterRadius = 5;
vector4 valueSum = vector4(0.0f, 0.0f, 0.0f, 0.0f);
float weightSum = 0.0f;
for (int R = -filterRadius; R <= filterRadius; ++R) {
vector2i tapLoc(clamp((int)i + R * step.x, (int)0, (int)output.width - 1),
clamp((int)j + R * step.y, (int)0, (int)output.height - 1));
float normDist = fabsf(float(R)) / float(filterRadius + 0.1);
float weight = powf(1.0f - normDist, sqrtf(eccentricity));
valueSum += texelFetch(input, tapLoc.x, tapLoc.y) * weight;
weightSum += weight;
}
vector4 result = valueSum / weightSum;
surf2Dwrite(ToColor4Unorm8SRgb(result), output.d_surfaceObject, i * sizeof(uchar4), j);
}
}
/** From https://research.nvidia.com/sites/default/files/publications/supplementary.pdf
 They had a vec2 for sigma; we currently have a float, so we don't need to take its length */
__device__ vector4 enhanceContrast(vector4 pix, vector4 pmean, float sigma, float f_e) {
    // compute amount of contrast enhancement
// based on degree of foveation (sigma)
float cScale = 1.f + sigma * f_e;
vector4 scaledColor = pmean + (pix - pmean) * cScale;
return clamp(scaledColor, 0.0f, 1.0f);
}
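/* i.e. the enhanced color is c' = clamp( pmean + (pix - pmean) * (1 + sigma*f_e), 0, 1 ):
 * the pixel is pushed away from its local mean by a factor that grows with the amount of
 * foveation-induced blur (sigma) and the user gain f_e.
 */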
template <PixelFormat PixelFormat>
CUDA_KERNEL void FinishConstrastEnhancement(GPUImage resultImage,
Texture2D unfilteredTexture,
Texture2D filteredTexture,
ContrastEnhancementSettings settings,
matrix3x3 sampleSpaceToEyeSpaceMatrix,
EccentricityToTexCoordMapping eToTexMap) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < resultImage.width() && j < resultImage.height()) {
// TODO: compute sigma
float eccentricity = getEccentricity(i, j, unfilteredTexture, sampleSpaceToEyeSpaceMatrix);
float sigma = 8.0f;
float t = eccentricity * eToTexMap.invMaxEccentricity;
sigma *= max(0.001f, clamp(t * t, 0.f, 1.f));
vector4 pix = texelFetch(unfilteredTexture, i, j);
vector4 pmean = texelFetch(filteredTexture, i, j);
vector4 result = enhanceContrast(pix, pmean, sigma, settings.f_e);
uint32_t* output = (uint32_t*)resultImage.data();
output[resultImage.stride() * j + i] = ToColor4Unorm8SRgb(result);
}
}
void GPUCamera::foveatedPolarToScreenSpace(const matrix4x4& eyeToEyePrevious,
const matrix3x3& eyePreviousToSamplePrevious,
const matrix3x3& sampleToEye) {
KernelDim dim = KernelDim(resultImage.width(), resultImage.height(), CUDA_GROUP_WIDTH, CUDA_GROUP_HEIGHT);
KernelDim polDim =
KernelDim(polarTextures.raw.width, polarTextures.raw.height, CUDA_GROUP_WIDTH, CUDA_GROUP_HEIGHT);
matrix4x4 eyeToSamplePrevious = matrix4x4(eyePreviousToSamplePrevious) * eyeToEyePrevious;
EccentricityToTexCoordMapping eToTexMap;
getEccentricityMap(eToTexMap);
ComputeMoments<PixelFormat::RGBA16><<<polDim.grid, polDim.block, 0, stream>>>(polarTextures);
switch (outputModeToPixelFormat(outputMode)) {
case PixelFormat::RGBA32F:
FoveatedPolarToScreenSpaceKernel<PixelFormat::RGBA32F><<<dim.grid, dim.block, 0, stream>>>(
polarTextures, resultTexture, resultImage, sampleToEye, eToTexMap, previousResultTexture,
eyeToSamplePrevious, temporalFilterSettings);
break;
case PixelFormat::RGBA8_SRGB:
FoveatedPolarToScreenSpaceKernel<PixelFormat::RGBA8_SRGB><<<dim.grid, dim.block, 0, stream>>>(
polarTextures, resultTexture, resultImage, sampleToEye, eToTexMap, previousResultTexture,
eyeToSamplePrevious, temporalFilterSettings);
break;
default:
assert(false);
}
if (contrastEnhancementSettings.enable) {
assert(outputModeToPixelFormat(outputMode) == PixelFormat::RGBA8_SRGB);
SeparableFilterUsingEccentricity<PixelFormat::RGBA8_SRGB>
<<<dim.grid, dim.block, 0, stream>>>(contrastEnhancementBuffers.horizontallyFiltered, resultTexture, {0, 1},
contrastEnhancementSettings, sampleToEye);
SeparableFilterUsingEccentricity<PixelFormat::RGBA8_SRGB><<<dim.grid, dim.block, 0, stream>>>(
contrastEnhancementBuffers.fullyFiltered, contrastEnhancementBuffers.horizontallyFiltered, {1, 0},
contrastEnhancementSettings, sampleToEye);
FinishConstrastEnhancement<PixelFormat::RGBA8_SRGB>
<<<dim.grid, dim.block, 0, stream>>>(resultImage, resultTexture, contrastEnhancementBuffers.fullyFiltered,
contrastEnhancementSettings, sampleToEye, eToTexMap);
}
std::swap(previousResultTexture, resultTexture);
}
void GPUCamera::updateEyeSpaceFoveatedSamples(BeamBatch& eyeHierarchy) {
ArrayView<hvvr::DirectionalBeam> eyeBeams = eyeHierarchy.directionalBeams;
d_batchSpaceBeams = GPUBuffer<DirectionalBeam>(eyeBeams.cbegin(), eyeBeams.cend());
validSampleCount = uint32_t(d_batchSpaceBeams.size());
// Allocate and calculate eye-space frusta
uint32_t blockCount = ((uint32_t)eyeBeams.size() + BLOCK_SIZE - 1) / BLOCK_SIZE;
GPUBuffer<SimpleRayFrustum> d_foveatedEyeSpaceTileFrusta =
GPUBuffer<SimpleRayFrustum>(blockCount * TILES_PER_BLOCK);
GPUBuffer<SimpleRayFrustum> d_foveatedEyeSpaceBlockFrusta = GPUBuffer<SimpleRayFrustum>(blockCount);
ComputeEyeSpaceFrusta(d_batchSpaceBeams, d_foveatedEyeSpaceTileFrusta, d_foveatedEyeSpaceBlockFrusta);
DynamicArray<SimpleRayFrustum> simpleTileFrusta(d_foveatedEyeSpaceTileFrusta.size());
DynamicArray<SimpleRayFrustum> simpleBlockFrusta(d_foveatedEyeSpaceBlockFrusta.size());
d_foveatedEyeSpaceTileFrusta.readback(simpleTileFrusta.data());
d_foveatedEyeSpaceBlockFrusta.readback(simpleBlockFrusta.data());
for (int i = 0; i < eyeHierarchy.tileFrusta3D.size(); ++i) {
eyeHierarchy.tileFrusta3D[i] = Frustum(simpleTileFrusta[i].origins, simpleTileFrusta[i].directions);
}
for (int i = 0; i < eyeHierarchy.blockFrusta3D.size(); ++i) {
eyeHierarchy.blockFrusta3D[i] = Frustum(simpleBlockFrusta[i].origins, simpleBlockFrusta[i].directions);
}
}
} // namespace hvvr
// ---------------------------------------------------------------------------------------- //
// (file boundary: next source file)
// ---------------------------------------------------------------------------------------- //
void invert_cpu(float* data, int actualsize, float* log_determinant) {
int maxsize = actualsize;
int n = actualsize;
*log_determinant = 0.0;
if (actualsize == 1) { // special case, dimensionality == 1
*log_determinant = ::logf(data[0]);
data[0] = 1.0 / data[0];
} else if(actualsize >= 2) { // dimensionality >= 2
for (int i=1; i < actualsize; i++) data[i] /= data[0]; // normalize row 0
for (int i=1; i < actualsize; i++) {
for (int j=i; j < actualsize; j++) { // do a column of L
float sum = 0.0;
for (int k = 0; k < i; k++)
sum += data[j*maxsize+k] * data[k*maxsize+i];
data[j*maxsize+i] -= sum;
}
if (i == actualsize-1) continue;
for (int j=i+1; j < actualsize; j++) { // do a row of U
float sum = 0.0;
for (int k = 0; k < i; k++)
sum += data[i*maxsize+k]*data[k*maxsize+j];
data[i*maxsize+j] =
(data[i*maxsize+j]-sum) / data[i*maxsize+i];
}
}
for(int i=0; i<actualsize; i++) {
*log_determinant += ::log10(fabs(data[i*n+i]));
//printf("log_determinant: %e\n",*log_determinant);
}
for ( int i = 0; i < actualsize; i++ ) // invert L
for ( int j = i; j < actualsize; j++ ) {
float x = 1.0;
if ( i != j ) {
x = 0.0;
for ( int k = i; k < j; k++ )
x -= data[j*maxsize+k]*data[k*maxsize+i];
}
data[j*maxsize+i] = x / data[j*maxsize+j];
}
for ( int i = 0; i < actualsize; i++ ) // invert U
for ( int j = i; j < actualsize; j++ ) {
if ( i == j ) continue;
float sum = 0.0;
for ( int k = i; k < j; k++ )
sum += data[k*maxsize+j]*( (i==k) ? 1.0 : data[i*maxsize+k] );
data[i*maxsize+j] = -sum;
}
for ( int i = 0; i < actualsize; i++ ) // final inversion
for ( int j = 0; j < actualsize; j++ ) {
float sum = 0.0;
for ( int k = ((i>j)?i:j); k < actualsize; k++ )
sum += ((j==k)?1.0:data[j*maxsize+k])*data[k*maxsize+i];
data[j*maxsize+i] = sum;
}
} else {
PRINT("Error: Invalid dimensionality for invert(...)\n");
}
}
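/* Sketch of what invert_cpu does, for reference: it forms an in-place Crout LU
 * factorization A = L*U with unit-diagonal U (row 0 is normalized first, then a column of
 * L and a row of U per step), accumulates
 *
 *   log|det A| = sum_i log|l_ii|        (base-10 logs in the general branch),
 *
 * then inverts L and U in place and writes A^{-1} = U^{-1} * L^{-1} back into data.
 */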
///////////////////////////////////////////////////////////////////////////////
// Validate command line arguments
///////////////////////////////////////////////////////////////////////////////
int validateArguments(int argc, char** argv, int* num_clusters, int* target_num_clusters) {
if(argc <= 5 && argc >= 4) {
// parse num_clusters
if(!sscanf(argv[1],"%d",num_clusters)) {
printf("Invalid number of starting clusters\n\n");
printUsage(argv);
return 1;
}
// Check bounds for num_clusters
if(*num_clusters < 1) {
printf("Invalid number of starting clusters\n\n");
printUsage(argv);
return 1;
}
// parse infile
FILE* infile = fopen(argv[2],"r");
if(!infile) {
printf("Invalid infile.\n\n");
printUsage(argv);
return 2;
}
// parse target_num_clusters
if(argc == 5) {
if(!sscanf(argv[4],"%d",target_num_clusters)) {
printf("Invalid number of desired clusters.\n\n");
printUsage(argv);
return 4;
}
if(*target_num_clusters > *num_clusters) {
printf("target_num_clusters must be less than equal to num_clusters\n\n");
printUsage(argv);
return 4;
}
} else {
*target_num_clusters = 0;
}
// Clean up so the EPA is happy
fclose(infile);
return 0;
} else {
printUsage(argv);
return 1;
}
}
///////////////////////////////////////////////////////////////////////////////
// Print usage statement
///////////////////////////////////////////////////////////////////////////////
void printUsage(char** argv)
{
printf("Usage: %s num_clusters infile outfile [target_num_clusters]\n",argv[0]);
printf("\t num_clusters: The number of starting clusters\n");
printf("\t infile: ASCII space-delimited FCS data file\n");
printf("\t outfile: Clustering results output file\n");
printf("\t target_num_clusters: A desired number of clusters. Must be less than or equal to num_clusters\n");
}
void writeCluster(FILE* f, clusters_t &clusters, const int c, const int num_dimensions) {
fprintf(f,"Probability: %f\n", clusters.pi[c]);
fprintf(f,"N: %f\n",clusters.N[c]);
fprintf(f,"Means: ");
for(int i=0; i<num_dimensions; i++){
fprintf(f,"%f ",clusters.means[c*num_dimensions+i]);
}
fprintf(f,"\n");
fprintf(f,"\nR Matrix:\n");
for(int i=0; i<num_dimensions; i++) {
for(int j=0; j<num_dimensions; j++) {
fprintf(f,"%f ", clusters.R[c*num_dimensions*num_dimensions+i*num_dimensions+j]);
}
fprintf(f,"\n");
}
fflush(f);
}
/*
* Seeds the cluster centers (means) with random data points
*/
void seed_clusters(clusters_t* clusters, float* fcs_data, int num_clusters, int num_dimensions, int num_events) {
    float fraction;
if(num_clusters > 1) {
fraction = (num_events-1.0f)/(num_clusters-1.0f);
} else {
fraction = 0.0;
}
srand((unsigned int) time(NULL));
// Sets the means from evenly distributed points in the input data
for(int c=0; c < num_clusters; c++) {
clusters->N[c] = (float)num_events/(float)num_clusters;
for(int d=0; d < num_dimensions; d++)
clusters->means[c*num_dimensions+d] = fcs_data[((int)(c*fraction))*num_dimensions+d];
}
}
void add_clusters(clusters_t &clusters, const int c1, const int c2, clusters_t &temp_cluster, const int num_dimensions) {
float wt1,wt2;
wt1 = (clusters.N[c1]) / (clusters.N[c1] + clusters.N[c2]);
wt2 = 1.0f - wt1;
// Compute new weighted means
for(int i=0; i<num_dimensions;i++) {
temp_cluster.means[i] = wt1*clusters.means[c1*num_dimensions+i] + wt2*clusters.means[c2*num_dimensions+i];
}
// Compute new weighted covariance
for(int i=0; i<num_dimensions; i++) {
for(int j=i; j<num_dimensions; j++) {
// Compute R contribution from cluster1
temp_cluster.R[i*num_dimensions+j] = ((temp_cluster.means[i]-clusters.means[c1*num_dimensions+i])
*(temp_cluster.means[j]-clusters.means[c1*num_dimensions+j])
+clusters.R[c1*num_dimensions*num_dimensions+i*num_dimensions+j])*wt1;
// Add R contribution from cluster2
temp_cluster.R[i*num_dimensions+j] += ((temp_cluster.means[i]-clusters.means[c2*num_dimensions+i])
*(temp_cluster.means[j]-clusters.means[c2*num_dimensions+j])
+clusters.R[c2*num_dimensions*num_dimensions+i*num_dimensions+j])*wt2;
            // Because it's symmetric...
temp_cluster.R[j*num_dimensions+i] = temp_cluster.R[i*num_dimensions+j];
}
}
// Compute pi
temp_cluster.pi[0] = clusters.pi[c1] + clusters.pi[c2];
// compute N
temp_cluster.N[0] = clusters.N[c1] + clusters.N[c2];
float log_determinant;
// Copy R to Rinv matrix
memcpy(temp_cluster.Rinv,temp_cluster.R,sizeof(float)*num_dimensions*num_dimensions);
// Invert the matrix
invert_cpu(temp_cluster.Rinv,num_dimensions,&log_determinant);
// Compute the constant
temp_cluster.constant[0] = (-num_dimensions)*0.5f*::logf(2.0f*PI)-0.5f*log_determinant;
// avgvar same for all clusters
temp_cluster.avgvar[0] = clusters.avgvar[0];
}
void copy_cluster(clusters_t &dest, const int c_dest, clusters_t &src, const int c_src, const int num_dimensions) {
dest.N[c_dest] = src.N[c_src];
dest.pi[c_dest] = src.pi[c_src];
dest.constant[c_dest] = src.constant[c_src];
dest.avgvar[c_dest] = src.avgvar[c_src];
memcpy(&(dest.means[c_dest*num_dimensions]),&(src.means[c_src*num_dimensions]),sizeof(float)*num_dimensions);
memcpy(&(dest.R[c_dest*num_dimensions*num_dimensions]),&(src.R[c_src*num_dimensions*num_dimensions]),sizeof(float)*num_dimensions*num_dimensions);
memcpy(&(dest.Rinv[c_dest*num_dimensions*num_dimensions]),&(src.Rinv[c_src*num_dimensions*num_dimensions]),sizeof(float)*num_dimensions*num_dimensions);
// do we need to copy memberships?
}
void printCluster(clusters_t &clusters, const int c, const int num_dimensions) {
writeCluster(stdout,clusters,c,num_dimensions);
}
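// Editor's note (descriptive comment, added): cluster_distance() below scores a
// candidate merge of clusters c1 and c2. add_clusters() builds the merged cluster
// in temp_cluster (weighted means, pooled covariance, summed pi and N), and the
// returned value N1*constant1 + N2*constant2 - Nmerged*constant_merged
// approximates the decrease in log-likelihood incurred by replacing the two
// clusters with their merge; the pair with the smallest value is combined during
// GMM order reduction.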
float cluster_distance(clusters_t &clusters, const int c1, const int c2, clusters_t &temp_cluster, const int num_dimensions) {
// Add the clusters together, this updates pi,means,R,N and stores in temp_cluster
add_clusters(clusters,c1,c2,temp_cluster,num_dimensions);
return clusters.N[c1]*clusters.constant[c1] + clusters.N[c2]*clusters.constant[c2] - temp_cluster.N[0]*temp_cluster.constant[0];
}
// Free the cluster data structures on host
void freeCluster(clusters_t* c) {
free(c->N);
free(c->pi);
free(c->constant);
free(c->avgvar);
free(c->means);
free(c->R);
free(c->Rinv);
free(c->memberships);
}
// Free the cluster data structures on device
void freeClusterDevice(clusters_t* c) {
CUDA_SAFE_CALL(cudaFree(c->N));
CUDA_SAFE_CALL(cudaFree(c->pi));
CUDA_SAFE_CALL(cudaFree(c->constant));
CUDA_SAFE_CALL(cudaFree(c->avgvar));
CUDA_SAFE_CALL(cudaFree(c->means));
CUDA_SAFE_CALL(cudaFree(c->R));
CUDA_SAFE_CALL(cudaFree(c->Rinv));
CUDA_SAFE_CALL(cudaFree(c->memberships));
}
// Setup the cluster data structures on host
void setupCluster(clusters_t* c, const int num_clusters, const int num_events, const int num_dimensions) {
c->N = (float*) malloc(sizeof(float)*num_clusters);
c->pi = (float*) malloc(sizeof(float)*num_clusters);
c->constant = (float*) malloc(sizeof(float)*num_clusters);
c->avgvar = (float*) malloc(sizeof(float)*num_clusters);
c->means = (float*) malloc(sizeof(float)*num_dimensions*num_clusters);
c->R = (float*) malloc(sizeof(float)*num_dimensions*num_dimensions*num_clusters);
c->Rinv = (float*) malloc(sizeof(float)*num_dimensions*num_dimensions*num_clusters);
c->memberships = (float*) malloc(sizeof(float)*num_events*num_clusters);
}
// Setup the cluster data structures on device
clusters_t* setupClusterDevice(clusters_t* c, const int num_clusters, const int num_events, const int num_dimensions) {
CUDA_SAFE_CALL(cudaMalloc((void**) &c->N, sizeof(float)*num_clusters));
CUDA_SAFE_CALL(cudaMalloc((void**) &c->pi, sizeof(float)*num_clusters));
CUDA_SAFE_CALL(cudaMalloc((void**) &c->constant, sizeof(float)*num_clusters));
CUDA_SAFE_CALL(cudaMalloc((void**) &c->avgvar, sizeof(float)*num_clusters));
CUDA_SAFE_CALL(cudaMalloc((void**) &c->means, sizeof(float)*num_dimensions*num_clusters));
CUDA_SAFE_CALL(cudaMalloc((void**) &c->R, sizeof(float)*num_dimensions*num_dimensions*num_clusters));
CUDA_SAFE_CALL(cudaMalloc((void**) &c->Rinv, sizeof(float)*num_dimensions*num_dimensions*num_clusters));
CUDA_SAFE_CALL(cudaMalloc((void**) &c->memberships,
sizeof(float)*num_events*(num_clusters+NUM_CLUSTERS_PER_BLOCK-num_clusters % NUM_CLUSTERS_PER_BLOCK)));
clusters_t* d_clusters;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_clusters, sizeof(clusters_t)));
// Copy Cluster data to device
CUDA_SAFE_CALL(cudaMemcpy(d_clusters, c, sizeof(clusters_t), cudaMemcpyHostToDevice));
DEBUG("Finished copying cluster data to device.\n");
return d_clusters;
}
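// Editor's note (descriptive comment, added): setupClusterDevice() leaves the
// caller's host-resident struct (temp_clusters) holding *device* pointers for
// every array member, and additionally copies that struct of pointers into a
// device-resident clusters_t (the returned d_clusters) so kernels can take a
// single clusters_t*. copyClusterFromDevice()/copyClusterToDevice() below use
// the host-resident copy of those device pointers to move the array contents.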
void copyClusterFromDevice(clusters_t* c, clusters_t *c_tmp, clusters_t* d_c, const int num_clusters, const int num_dimensions) {
if (d_c != NULL)
CUDA_SAFE_CALL(cudaMemcpy(c_tmp, d_c, sizeof(clusters_t),cudaMemcpyDeviceToHost));
// copy all of the arrays from the structs
CUDA_SAFE_CALL(cudaMemcpy(c->N, c_tmp->N, sizeof(float)*num_clusters,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(c->pi, c_tmp->pi, sizeof(float)*num_clusters,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(c->constant, c_tmp->constant, sizeof(float)*num_clusters,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(c->avgvar, c_tmp->avgvar, sizeof(float)*num_clusters,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(c->means, c_tmp->means, sizeof(float)*num_dimensions*num_clusters,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(c->R, c_tmp->R, sizeof(float)*num_dimensions*num_dimensions*num_clusters,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(c->Rinv, c_tmp->Rinv, sizeof(float)*num_dimensions*num_dimensions*num_clusters,cudaMemcpyDeviceToHost));
}
void copyClusterToDevice(clusters_t* c, clusters_t *c_tmp, const int num_clusters, const int num_dimensions) {
CUDA_SAFE_CALL(cudaMemcpy(c_tmp->N, c->N, sizeof(float)*num_clusters,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(c_tmp->pi, c->pi, sizeof(float)*num_clusters,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(c_tmp->constant, c->constant, sizeof(float)*num_clusters,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(c_tmp->avgvar, c->avgvar, sizeof(float)*num_clusters,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(c_tmp->means, c->means, sizeof(float)*num_dimensions*num_clusters,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(c_tmp->R, c->R, sizeof(float)*num_dimensions*num_dimensions*num_clusters,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(c_tmp->Rinv, c->Rinv, sizeof(float)*num_dimensions*num_dimensions*num_clusters,cudaMemcpyHostToDevice));
}
clusters_t* cluster(int original_num_clusters, int desired_num_clusters,
int* final_num_clusters, int num_dimensions, int num_events,
float* fcs_data_by_event) {
int regroup_iterations = 0;
int params_iterations = 0;
int reduce_iterations = 0;
int ideal_num_clusters = original_num_clusters;
int stop_number;
// Number of clusters to stop iterating at.
if(desired_num_clusters == 0) {
stop_number = 1;
} else {
stop_number = desired_num_clusters;
}
// Transpose the event data (allows coalesced access pattern in E-step kernel)
// This has consecutive values being from the same dimension of the data
// (num_dimensions by num_events matrix)
float* fcs_data_by_dimension = (float*) malloc(sizeof(float)*num_events*num_dimensions);
for(int e=0; e<num_events; e++) {
for(int d=0; d<num_dimensions; d++) {
if(isnan(fcs_data_by_event[e*num_dimensions+d])) {
printf("Error: Found NaN value in input data. Exiting.\n");
return NULL;
}
fcs_data_by_dimension[d*num_events+e] = fcs_data_by_event[e*num_dimensions+d];
}
}
PRINT("Number of events: %d\n",num_events);
PRINT("Number of dimensions: %d\n\n",num_dimensions);
PRINT("Starting with %d cluster(s), will stop at %d cluster(s).\n",original_num_clusters,stop_number);
    // This is the shared memory space between the GPUs
clusters_t clusters;
setupCluster(&clusters, original_num_clusters, num_events, num_dimensions);
// another set of clusters for saving the results of the best configuration
clusters_t *saved_clusters = (clusters_t*) malloc(sizeof(clusters_t));
setupCluster(saved_clusters, original_num_clusters, num_events, num_dimensions);
DEBUG("Finished allocating shared cluster structures on host\n");
// hold the result from regroup kernel
float* shared_likelihoods = (float*) malloc(sizeof(float)*NUM_BLOCKS);
float likelihood, old_likelihood;
float min_rissanen = FLT_MAX;
// Used as a temporary cluster for combining clusters in "distance" computations
clusters_t scratch_cluster;
setupCluster(&scratch_cluster, 1, num_events, num_dimensions);
DEBUG("Finished allocating memory on host for clusters.\n");
// Setup the cluster data structures on device
// First allocate structures on the host, CUDA malloc the arrays
// Then CUDA malloc structures on the device and copy them over
clusters_t temp_clusters;
clusters_t *d_clusters = setupClusterDevice(&temp_clusters, original_num_clusters, num_events, num_dimensions);
// allocate device memory for FCS data
float* d_fcs_data_by_event;
float* d_fcs_data_by_dimension;
    // allocate and copy relevant FCS data to device.
int mem_size = num_dimensions * num_events * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc( (void**) &d_fcs_data_by_event, mem_size));
CUDA_SAFE_CALL(cudaMalloc( (void**) &d_fcs_data_by_dimension, mem_size));
CUDA_SAFE_CALL(cudaMemcpy( d_fcs_data_by_event, fcs_data_by_event, mem_size,cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL(cudaMemcpy( d_fcs_data_by_dimension, fcs_data_by_dimension, mem_size,cudaMemcpyHostToDevice) );
DEBUG("GPU: Finished copying FCS data to device.\n");
//////////////// Initialization done, starting kernels ////////////////
DEBUG("Invoking seed_clusters kernel.\n");
// seed_clusters sets initial pi values,
// finds the means / covariances and copies it to all the clusters
seed_clusters_kernel<<< 1, NUM_THREADS_MSTEP >>>( d_fcs_data_by_event, d_clusters, num_dimensions, original_num_clusters, num_events);
// Computes the R matrix inverses, and the gaussian constant
constants_kernel<<<original_num_clusters, NUM_THREADS_MSTEP>>>(d_clusters,original_num_clusters,num_dimensions);
// copy clusters from the device
copyClusterFromDevice(&clusters, &temp_clusters, d_clusters, original_num_clusters, num_dimensions);
//seed_clusters(&clusters,fcs_data_by_event,original_num_clusters,num_dimensions,num_events);
DEBUG("Starting Clusters\n");
for(int c=0; c < original_num_clusters; c++) {
DEBUG("Cluster #%d\n",c);
DEBUG("\tN: %f\n",clusters.N[c]);
DEBUG("\tpi: %f\n",clusters.pi[c]);
// means
DEBUG("\tMeans: ");
for(int d=0; d < num_dimensions; d++) {
DEBUG("%.2f ",clusters.means[c*num_dimensions+d]);
}
DEBUG("\n");
DEBUG("\tR:\n\t");
for(int d=0; d < num_dimensions; d++) {
for(int e=0; e < num_dimensions; e++)
DEBUG("%.2f ",clusters.R[c*num_dimensions*num_dimensions+d*num_dimensions+e]);
DEBUG("\n\t");
}
DEBUG("R-inverse:\n\t");
for(int d=0; d < num_dimensions; d++) {
for(int e=0; e < num_dimensions; e++)
DEBUG("%.2f ",clusters.Rinv[c*num_dimensions*num_dimensions+d*num_dimensions+e]);
DEBUG("\n\t");
}
DEBUG("\n");
DEBUG("\tAvgvar: %e\n",clusters.avgvar[c]);
DEBUG("\tConstant: %e\n",clusters.constant[c]);
}
// synchronize after first gpu does the seeding, copy result to all gpus
copyClusterToDevice(&clusters, &temp_clusters, original_num_clusters, num_dimensions);
// Calculate an epsilon value
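    // Editor's note: the expression below scales the convergence threshold with the
    // per-cluster parameter count (1 mixing weight + d means + d*(d+1)/2 covariance
    // terms) and with log(num_events*num_dimensions); the 0.001 factor appears to be
    // an empirical tolerance.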
float epsilon = (1+num_dimensions+0.5f*(num_dimensions+1)*num_dimensions)*
::logf((float)num_events*num_dimensions)*0.001f;
int iters;
//epsilon = 1e-6;
PRINT("Gaussian.cu: epsilon = %f\n",epsilon);
float* d_likelihoods;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_likelihoods, sizeof(float)*NUM_BLOCKS));
// Variables for GMM reduce order
float distance, min_distance = 0.0;
float rissanen;
int min_c1, min_c2;
for(int num_clusters=original_num_clusters; num_clusters >= stop_number; num_clusters--) {
/*************** EM ALGORITHM *****************************/
// do initial E-step
// Calculates a cluster membership probability
// for each event and each cluster.
DEBUG("Invoking E-step kernels.");
estep1<<<dim3(num_clusters,NUM_BLOCKS), NUM_THREADS_ESTEP>>>(d_fcs_data_by_dimension,d_clusters,num_dimensions,num_events);
estep2<<<NUM_BLOCKS, NUM_THREADS_ESTEP>>>(d_fcs_data_by_dimension,d_clusters,num_dimensions,num_clusters,num_events,d_likelihoods);
regroup_iterations++;
// Copy the likelihood totals from each block, sum them up to get a total
CUDA_SAFE_CALL(cudaMemcpy(shared_likelihoods,d_likelihoods,sizeof(float)*NUM_BLOCKS,cudaMemcpyDeviceToHost));
likelihood = 0.0;
for(int i=0;i<NUM_BLOCKS;i++) {
likelihood += shared_likelihoods[i];
}
DEBUG("Likelihood: %e\n",likelihood);
float change = epsilon*2;
PRINT("Performing EM algorithm on %d clusters.\n",num_clusters);
iters = 0;
// This is the iterative loop for the EM algorithm.
// It re-estimates parameters, re-computes constants, and then regroups the events
// These steps keep repeating until the change in likelihood is less than some epsilon
while(iters < MIN_ITERS || (fabs(change) > epsilon && iters < MAX_ITERS)) {
old_likelihood = likelihood;
DEBUG("Invoking reestimate_parameters (M-step) kernel.");
// This kernel computes a new N, pi isn't updated until compute_constants though
mstep_N<<<num_clusters, NUM_THREADS_MSTEP>>>(d_clusters,num_dimensions,num_clusters,num_events);
CUDA_SAFE_CALL(cudaMemcpy(clusters.N,temp_clusters.N,sizeof(float)*num_clusters,cudaMemcpyDeviceToHost));
dim3 gridDim1(num_clusters, num_dimensions);
dim3 blockDim1(NUM_THREADS_MSTEP, 1);
mstep_means<<<gridDim1, blockDim1>>>(d_fcs_data_by_dimension,d_clusters,
num_dimensions,num_clusters,num_events);
CUDA_SAFE_CALL(cudaMemcpy(clusters.means,temp_clusters.means,
sizeof(float)*num_clusters*num_dimensions,cudaMemcpyDeviceToHost));
// Reduce means for all clusters, copy back to device
for(int c=0; c < num_clusters; c++) {
DEBUG("Cluster %d Means:", c);
for(int d=0; d < num_dimensions; d++) {
if(clusters.N[c] > 0.5f) {
clusters.means[c*num_dimensions+d] /= clusters.N[c];
} else {
clusters.means[c*num_dimensions+d] = 0.0f;
}
DEBUG(" %f",clusters.means[c*num_dimensions+d]);
}
DEBUG("\n");
}
CUDA_SAFE_CALL(cudaMemcpy(temp_clusters.means,clusters.means,
sizeof(float)*num_clusters*num_dimensions,cudaMemcpyHostToDevice));
// Covariance is symmetric, so we only need to compute N*(N+1)/2 matrix elements per cluster
dim3 gridDim2((num_clusters+NUM_CLUSTERS_PER_BLOCK-1)/NUM_CLUSTERS_PER_BLOCK,
num_dimensions*(num_dimensions+1)/2);
mstep_covariance2<<<gridDim2, blockDim1>>>(d_fcs_data_by_dimension,d_clusters,
num_dimensions,num_clusters,num_events);
CUDA_SAFE_CALL(cudaMemcpy(clusters.R,temp_clusters.R,
sizeof(float)*num_clusters*num_dimensions*num_dimensions,cudaMemcpyDeviceToHost));
DEBUG("After cov2\tR:\n\t");
for(int c=0; c < num_clusters; c++)
for(int d=0; d < num_dimensions; d++)
for(int e=0; e < num_dimensions; e++)
DEBUG("%.2f ",clusters.R[c*num_dimensions*num_dimensions+d*num_dimensions+e]);
DEBUG("\n");
// Reduce R for all clusters, copy back to device
{
for(int c=0; c < num_clusters; c++) {
if(clusters.N[c] > 0.5f) {
for(int d=0; d < num_dimensions*num_dimensions; d++) {
clusters.R[c*num_dimensions*num_dimensions+d] /= clusters.N[c];
}
} else {
for(int i=0; i < num_dimensions; i++) {
for(int j=0; j < num_dimensions; j++) {
if(i == j) {
clusters.R[c*num_dimensions*num_dimensions+i*num_dimensions+j] = 1.0;
} else {
clusters.R[c*num_dimensions*num_dimensions+i*num_dimensions+j] = 0.0;
}
}
}
}
}
}
CUDA_SAFE_CALL(cudaMemcpy(temp_clusters.R,clusters.R,
sizeof(float)*num_clusters*num_dimensions*num_dimensions,cudaMemcpyHostToDevice));
//CUT_CHECK_ERROR("M-step Kernel execution failed: ");
params_iterations++;
DEBUG("Invoking constants kernel.");
// Inverts the R matrices, computes the constant, normalizes cluster probabilities
constants_kernel<<<num_clusters, NUM_THREADS_MSTEP>>>(d_clusters,num_clusters,num_dimensions);
CUDA_SAFE_CALL(cudaMemcpy(clusters.constant, temp_clusters.constant,
sizeof(float)*num_clusters,cudaMemcpyDeviceToHost));
for(int temp_c=0; temp_c < num_clusters; temp_c++)
DEBUG("Cluster %d constant: %e\n",temp_c,clusters.constant[temp_c]);
DEBUG("Invoking regroup (E-step) kernel with %d blocks.\n",NUM_BLOCKS);
// Compute new cluster membership probabilities for all the events
estep1<<<dim3(num_clusters,NUM_BLOCKS), NUM_THREADS_ESTEP>>>(d_fcs_data_by_dimension,d_clusters,num_dimensions,num_events);
estep2<<<NUM_BLOCKS, NUM_THREADS_ESTEP>>>(d_fcs_data_by_dimension,d_clusters,num_dimensions,num_clusters,num_events,d_likelihoods);
regroup_iterations++;
// check if kernel execution generated an error
//CUT_CHECK_ERROR("Kernel execution failed");
// Copy the likelihood totals from each block, sum them up to get a total
CUDA_SAFE_CALL(cudaMemcpy(shared_likelihoods,d_likelihoods,sizeof(float)*NUM_BLOCKS,cudaMemcpyDeviceToHost));
{
likelihood = 0.0;
for(int i=0;i<NUM_BLOCKS;i++) {
likelihood += shared_likelihoods[i];
}
DEBUG("Likelihood: %e\n",likelihood);
}
change = likelihood - old_likelihood;
DEBUG("GPU 0: Change in likelihood: %e\n",change);
iters++;
}
DEBUG("GPU done with EM loop\n");
// copy all of the arrays from the device
copyClusterFromDevice(&clusters, &temp_clusters, NULL, num_clusters, num_dimensions);
CUDA_SAFE_CALL(cudaMemcpy(clusters.memberships, temp_clusters.memberships, sizeof(float)*num_events*num_clusters,cudaMemcpyDeviceToHost));
DEBUG("GPU done with copying cluster data from device\n");
// Calculate Rissanen Score
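        // Editor's note: this is the MDL criterion, -log_likelihood + 0.5*K*log(N*D),
        // where K = num_clusters*(1 + D + D*(D+1)/2) - 1 is the number of free
        // parameters of the mixture (the weights sum to 1, hence the -1), N = num_events
        // and D = num_dimensions. The configuration with the smallest score is kept.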
rissanen = -likelihood + 0.5f*(num_clusters*(1.0f+num_dimensions+0.5f*(num_dimensions+1.0f)*num_dimensions)-1.0f)*::logf((float)num_events*num_dimensions);
PRINT("\nLikelihood: %e\n",likelihood);
PRINT("\nRissanen Score: %e\n",rissanen);
// Save the cluster data the first time through, so we have a base rissanen score and result
// Save the cluster data if the solution is better and the user didn't specify a desired number
// If the num_clusters equals the desired number, stop
if(num_clusters == original_num_clusters || (rissanen < min_rissanen && desired_num_clusters == 0) || (num_clusters == desired_num_clusters)) {
min_rissanen = rissanen;
ideal_num_clusters = num_clusters;
memcpy(saved_clusters->N,clusters.N,sizeof(float)*num_clusters);
memcpy(saved_clusters->pi,clusters.pi,sizeof(float)*num_clusters);
memcpy(saved_clusters->constant,clusters.constant,sizeof(float)*num_clusters);
memcpy(saved_clusters->avgvar,clusters.avgvar,sizeof(float)*num_clusters);
memcpy(saved_clusters->means,clusters.means,sizeof(float)*num_dimensions*num_clusters);
memcpy(saved_clusters->R,clusters.R,sizeof(float)*num_dimensions*num_dimensions*num_clusters);
memcpy(saved_clusters->Rinv,clusters.Rinv,sizeof(float)*num_dimensions*num_dimensions*num_clusters);
memcpy(saved_clusters->memberships,clusters.memberships,sizeof(float)*num_events*num_clusters);
}
/**************** Reduce GMM Order ********************/
// Don't want to reduce order on the last iteration
if(num_clusters > stop_number) {
//startTimer(timers.cpu);
{
// First eliminate any "empty" clusters
for(int i=num_clusters-1; i >= 0; i--) {
if(clusters.N[i] < 0.5) {
DEBUG("Cluster #%d has less than 1 data point in it.\n",i);
for(int j=i; j < num_clusters-1; j++) {
copy_cluster(clusters,j,clusters,j+1,num_dimensions);
}
num_clusters--;
}
}
min_c1 = 0;
min_c2 = 1;
DEBUG("Number of non-empty clusters: %d\n",num_clusters);
// For all combinations of subclasses...
                // If the number of clusters gets really big, we might need to do a non-exhaustive search
// Even with 100*99/2 combinations this doesn't seem to take too long
for(int c1=0; c1<num_clusters;c1++) {
for(int c2=c1+1; c2<num_clusters;c2++) {
// compute distance function between the 2 clusters
distance = cluster_distance(clusters,c1,c2,scratch_cluster,num_dimensions);
// Keep track of minimum distance
if((c1 ==0 && c2 == 1) || distance < min_distance) {
min_distance = distance;
min_c1 = c1;
min_c2 = c2;
}
}
}
PRINT("\nMinimum distance between (%d,%d). Combining clusters\n",min_c1,min_c2);
// Add the two clusters with min distance together
add_clusters(clusters,min_c1,min_c2,scratch_cluster,num_dimensions);
// Copy new combined cluster into the main group of clusters, compact them
copy_cluster(clusters,min_c1,scratch_cluster,0,num_dimensions);
for(int i=min_c2; i < num_clusters-1; i++) {
//printf("Copying cluster %d to cluster %d\n",i+1,i);
copy_cluster(clusters,i,clusters,i+1,num_dimensions);
}
}
// Copy the clusters back to the device
copyClusterToDevice(&clusters, &temp_clusters, num_clusters, num_dimensions);
} // GMM reduction block
reduce_iterations++;
} // outer loop from M to 1 clusters
PRINT("\nFinal rissanen Score was: %f, with %d clusters.\n",min_rissanen,ideal_num_clusters);
CUDA_SAFE_CALL(cudaFree(d_likelihoods));
CUDA_SAFE_CALL(cudaFree(d_fcs_data_by_event));
CUDA_SAFE_CALL(cudaFree(d_fcs_data_by_dimension));
CUDA_SAFE_CALL(cudaFree(d_clusters));
freeCluster(&scratch_cluster);
freeCluster(&clusters);
freeClusterDevice(&temp_clusters);
free(fcs_data_by_dimension);
free(shared_likelihoods);
*final_num_clusters = ideal_num_clusters;
return saved_clusters;
}
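// --- Editor's illustrative sketch (hypothetical, not from the original file) ---
// A typical driver for the pieces in this file would parse arguments with
// validateArguments(), load the ASCII FCS data (readData() here is a placeholder
// name, not a function defined in this excerpt), run cluster(), and dump each
// resulting cluster with writeCluster():
//
//   int num_clusters, target_num_clusters, final_num_clusters;
//   if (validateArguments(argc, argv, &num_clusters, &target_num_clusters))
//       return 1;
//   int num_dimensions, num_events;
//   float* data = readData(argv[2], &num_dimensions, &num_events);   // placeholder
//   clusters_t* result = cluster(num_clusters, target_num_clusters,
//                                &final_num_clusters, num_dimensions,
//                                num_events, data);
//   FILE* out = fopen(argv[3], "w");
//   for (int c = 0; c < final_num_clusters; c++)
//       writeCluster(out, *result, c, num_dimensions);
//   fclose(out);
// --------------------------------------------------------------------------------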
#include <cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int Bits,
int BlockSize>
struct TPointHistOneByte {
const int InnerHistBitsCount = Bits - 5;
float* Histogram;
static constexpr int GetHistSize() {
return BlockSize * 32;
}
static constexpr int AddPointsBatchSize() {
return TLoadSize<LoadSize()>::Size();
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 700
const int NN = 2;
#else
const int NN = 4;
#endif
return NN;
}
static constexpr int GetBlockSize() {
return BlockSize;
}
static constexpr ELoadSize LoadSize() {
#if __CUDA_ARCH__ < 500
return ELoadSize::OneElement;
#else
return ELoadSize::FourElements;
// return ELoadSize::TwoElements;
#endif
}
static constexpr int BlockLoadSize(ECIndexLoadType indexLoadType) {
return TLoadSize<LoadSize()>::Size() * BlockSize * Unroll(indexLoadType);
}
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistOneByte(float* hist) {
static_assert(Bits >= 5, "Error: this hist is for 5-8 bits");
const int histSize = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
hist[i] = 0;
}
Histogram = hist + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = (threadIdx.x + i) & 3;
int bin = (ci >> (24 - 8 * f)) & 255;
// int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> Bits) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Histogram[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Histogram[offset] += statToAdd;
}
}
}
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + i) & 3;
int bins[N];
float stats[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = (ci[k] >> (24 - 8 * f)) & 255;
// bins[k] = bfe(ci[k], 24 - 8 * f, 8);
stats[k] = (bins[k] >> Bits) == 0 ? t[k] : 0.0f;
}
int offsets[N];
int higherBin[N];
const int mask = (1 << InnerHistBitsCount) - 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
higherBin[k] = (bins[k] >> 5) & mask;
offsets[k] = 4 * higherBin[k] + f + ((bins[k] & 31) << 5);
}
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
if (pass == higherBin[j]) {
Histogram[offsets[j]] += stats[j];
}
}
}
} else {
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
Histogram[offsets[j]] += stats[j];
}
}
}
}
template <int N>
__forceinline__ __device__ void AddPoints(const ui32* ci, const float* t) {
const int NN = AddPointsBatchSize();
static_assert(N % NN == 0, "Error: incorrect stripe size");
#pragma unroll
for (int k = 0; k < N; k += NN) {
AddPointsImpl<NN>(ci + k, t + k);
}
}
__forceinline__ __device__ void Reduce() {
Histogram -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Histogram[i];
}
Histogram[warpHistSize + start] = sum;
}
}
__syncthreads();
        // now we have only a 1024-entry histogram
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Histogram + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Histogram[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
TFeatureInBlock group = features[fid];
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize + group.FoldOffsetInGroup;
if (fold < features[fid].Folds) {
const float val = Histogram[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
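    // Editor's note (descriptive comment, added): the ComputeHistOneByte overloads
    // below dispatch on the feature bin count. For maxBins <= 128 they use the
    // two-statistics-per-pass histograms (ComputeHist2OneByteBits<5..7>), running one
    // statistic through the single-stat PASS first when numStats is odd; only the
    // 8-bit case (maxBins <= 255) uses TPointHistOneByte for every statistic. Grid
    // sizing in PASS scales numBlocks.x so that roughly blocksPerSm blocks per SM
    // stay resident.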
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
/*
* Single part
*/
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
}
#include "cuda_kernels.h"
#include "cub/cub.cuh"
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <climits>
#include <cfloat>
#include <vector>
#include <type_traits>
namespace fastertransformer
{
/* ********************************** common kernel *********************************** */
template <typename T>
__global__ void init_kernel(bool* finished,
int* sequence_length,
int* word_ids,
T* cum_log_probs,
const int sentence_id,
const int beam_width,
const int batch_size)
{
const bool IS_FP16 = std::is_same<T, half>::value;
const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : 1e20f;
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * beam_width; index += blockDim.x * gridDim.x)
{
finished[index] = false;
sequence_length[index] = 0;
word_ids[index] = sentence_id;
cum_log_probs[index] = (index % beam_width == 0) ? (T)0.0f: -MAX_T_VAL;
}
}
template <typename T>
void init_kernelLauncher(bool* finished,
int* sequence_length,
int* word_ids,
T* cum_log_probs,
const int sentence_id,
const int batch_size,
const int beam_width,
cudaStream_t stream)
{
dim3 grid((int)ceil(batch_size * beam_width * 1.0 / 256));
dim3 block(256);
init_kernel<T><<<grid, block, 0, stream>>>(finished,
sequence_length,
word_ids,
cum_log_probs,
sentence_id,
beam_width,
batch_size);
}
__global__ void sampling_init_kernel(bool* finished,
int* sequence_length,
int* word_ids,
const int start_id,
const int batch_size)
{
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size; index += blockDim.x * gridDim.x)
{
finished[index] = false;
sequence_length[index] = 0;
word_ids[index] = start_id;
}
}
void sampling_init_kernelLauncher(bool* finished,
int* sequence_length,
int* word_ids,
const int start_id,
const int batch_size,
cudaStream_t stream)
{
dim3 grid((int)ceil(batch_size * 1.0 / 256));
dim3 block(256);
sampling_init_kernel<<<grid, block, 0, stream>>>(finished,
sequence_length,
word_ids,
start_id,
batch_size);
}
template <typename T>
__global__ void embedding_lookup_sine_position_encoding_kernel(T* from_tensor,
const T* embedding_table,
const T* position_encoding,
const int* word_ids,
const int batch_size,
const int hidden_units)
{
// 1. lookup from embedding table
// 2. multiply hidden_dim**0.5
// 3. add the position encoding
T scale = (T)sqrtf(float(hidden_units));
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * hidden_units; index += blockDim.x * gridDim.x)
{
const int row_index = index / hidden_units;
const int col_index = index % hidden_units;
from_tensor[index] = embedding_table[word_ids[row_index] * hidden_units + col_index] * scale + position_encoding[col_index];
}
}
template <typename T>
void embedding_lookup_sine_position_encoding_kernel_launcher(T* from_tensor,
const T* embedding_table,
const T* position_encoding,
const int* word_ids,
const int batch_size,
const int hidden_units,
cudaStream_t stream)
{
dim3 grid(min(batch_size, 65536));
dim3 block(min(hidden_units, 1024));
embedding_lookup_sine_position_encoding_kernel<T><<<grid, block, 0, stream>>>(from_tensor,
embedding_table,
position_encoding,
word_ids,
batch_size,
hidden_units);
}
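// Editor's note (descriptive comment, added): the launcher above assumes
// from_tensor is [batch_size, hidden_units], embedding_table is
// [vocab_size, hidden_units], word_ids is [batch_size], and position_encoding
// points at the single row of the sinusoidal table for the current step; the
// embedding row is scaled by sqrt(hidden_units) before the position row is added.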
// TODO Add half2 implementation
template <typename T>
__global__ void embedding_position_lookups_kernel(T* from_tensor,
const T* embedding_table,
const T* pos_table,
const int* word_ids,
const int local_batch_size,
const int batch_size,
const int hidden_units,
int step,
int ite,
int max_input_len,
const int* start_lengths)
{
int timestep = step - 1;
    // if the input is padded in the batch, the word_id and pos_table indices must be shifted back by the length of the padding.
int len_padding = max_input_len - start_lengths[local_batch_size * ite + blockIdx.x];
int idx_word_id = (step == max_input_len) ? timestep - len_padding : timestep;
int idx_pos_table = timestep - len_padding;
// printf("batch id: %d, len_padding: %d, max_input_len: %d\n", local_batch_size * ite + blockIdx.x, len_padding, max_input_len);
int *word_ids_buf = (int*)word_ids + idx_word_id * batch_size + local_batch_size * ite;
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < local_batch_size * hidden_units; index += blockDim.x * gridDim.x)
{
const int row_index = index / hidden_units;
const int col_index = index % hidden_units;
from_tensor[index] = embedding_table[word_ids_buf[row_index] * hidden_units + col_index]
+ pos_table[idx_pos_table * hidden_units + col_index];
}
}
template <typename T>
void embedding_position_lookups_kernel_launcher(T* from_tensor,
const T* embedding_table,
const T* pos_table,
const int* word_ids,
const int local_batch_size,
const int batch_size,
const int hidden_units,
int step,
int ite,
int max_input_len,
const int* start_lengths,
cudaStream_t stream)
{
dim3 grid(min(local_batch_size, 65536));
dim3 block(min(hidden_units, 1024));
embedding_position_lookups_kernel<T><<<grid, block, 0, stream>>>(from_tensor,
embedding_table,
pos_table,
word_ids,
local_batch_size,
batch_size,
hidden_units,
step,
ite,
max_input_len,
start_lengths);
}
template <typename T> __launch_bounds__(1024, 1)
__global__ void start_id_embedding_position_lookups_kernel(T* from_tensor,
int* output_ids,
const T* embedding_table,
const T* pos_table,
const int* word_ids,
const int start_step,
const int length,
const int max_length,
const int batch_size,
const int hidden_units)
{
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * length * hidden_units; index += blockDim.x * gridDim.x)
{
// transpose the word_ids [batch, length] (part of [batch, max_length]) to output_ids [length, batch]
if(index < batch_size * max_length)
{
const int seq_id = index % max_length;
const int batch_id = index / max_length;
if(seq_id < length)
output_ids[seq_id * batch_size + batch_id] = word_ids[index];
// output_ids[index] = word_ids[index];
}
// embedding lookup from word ids [batch, length] (part of [batch, max_length]) and [vocab, hidden] to generate embedding [batch, length, hidden]
const int word_index = index / hidden_units;
const int word_index_row = word_index / length;
const int word_index_col = word_index % length;
const int real_word_index = word_index_row * max_length + word_index_col;
const int step = start_step + word_index % length;
const int col_index = index % hidden_units;
from_tensor[index] = embedding_table[word_ids[real_word_index] * hidden_units + col_index]
+ pos_table[(step - 1) * hidden_units + col_index];
}
}
template <typename T>
void start_id_embedding_position_lookups_kernel_launcher(T* from_tensor,
int *output_ids,
const T* embedding_table,
const T* pos_table,
const int* word_ids,
const int start_step,
const int length,
const int max_length,
const int batch_size,
const int hidden_units,
cudaStream_t stream)
{
dim3 grid(min(batch_size * length, 65536));
dim3 block(min(hidden_units, 1024));
start_id_embedding_position_lookups_kernel<T><<<grid, block, 0, stream>>>(from_tensor,
output_ids,
embedding_table,
pos_table,
word_ids,
start_step,
length,
max_length,
batch_size,
hidden_units);
}
// TODO Add half2 implementation
template <typename T>
__global__ void apply_temperature_penalty_kernel(T* logits,
const T temperature_inverse,
const int m,
const int vocab_size,
const int vocab_size_padd)
{
const bool IS_FP16 = std::is_same<T, half>::value;
const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX;
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < m * vocab_size_padd; index += blockDim.x * gridDim.x)
{
if(index % vocab_size_padd < vocab_size) logits[index] = logits[index] * temperature_inverse;
else logits[index] = -MAX_T_VAL;
}
}
template <typename T>
void apply_temperature_penalty_kernelLauncher(T* logits,
const T temperature,
const int m,
const int vocab_size,
const int vocab_size_padd,
cudaStream_t stream) {
dim3 grid(min(m, 65536));
dim3 block(min(vocab_size_padd, 1024));
const T temperature_inverse = (T)(1.f / (float) temperature);
apply_temperature_penalty_kernel<T><<<grid, block, 0, stream>>>(logits,
temperature_inverse,
m,
vocab_size,
vocab_size_padd);
}
template <typename T>
__global__ void apply_repetition_penalty_kernel(T* logits,
const float penalty,
int* start_ids,
int* output_ids,
const int batch_size,
const int local_batch_size,
const int vocab_size,
const int vocab_size_padd,
const int* start_lengths,
const int max_input_len,
const int step,
const int ite) {
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < local_batch_size * step; index += blockDim.x * gridDim.x) {
int tid = index / local_batch_size;
int lid = index % local_batch_size;
int bid = lid + ite * local_batch_size;
bool is_mask = (tid >= start_lengths[bid] && tid < max_input_len);
if (is_mask) continue; // padding has nothing to do with repetition penalty.
int vid;
if (tid < start_lengths[bid]) { // get tokens from context input
int idx = bid * max_input_len + tid; // start_ids shape: (batch_size, max_input_len)
vid = start_ids[idx];
} else { // get tokens from previous output
int idx = batch_size * tid + local_batch_size * ite + lid; // output_ids shape: (input_len + output_len, batch_size)
vid = output_ids[idx];
}
if(vid >= vocab_size) continue;
int idx_out = lid * vocab_size_padd + vid; // logits shape: (local_batch_size, vocab_size_padd)
logits[idx_out] = logits[idx_out] < T(0) ? float(logits[idx_out]) * penalty : float(logits[idx_out]) / penalty;
}
}
template <typename T>
void apply_repetition_penalty_kernelLauncher(T* logits,
const float penalty,
int* start_ids,
int* output_ids,
const int batch_size,
const int local_batch_size,
const int vocab_size,
const int vocab_size_padd,
const int* start_lengths,
const int max_input_len,
const int step,
const int ite,
cudaStream_t stream) {
dim3 block(512);
dim3 grid((int)(ceil(local_batch_size * step / 512.)));
apply_repetition_penalty_kernel<T><<<grid, block, 0, stream>>>(logits,
penalty,
start_ids,
output_ids,
batch_size,
local_batch_size,
vocab_size,
vocab_size_padd,
start_lengths,
max_input_len,
step,
ite);
}
__global__ void set_start_ids_kernel(int* out_ids,
const int* in_ids,
const int max_start_len,
const int step,
const int ite,
const int batch_size,
const int local_batch_size,
const int end_id)
{
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < local_batch_size)
{
int in_id = in_ids[(ite * local_batch_size + id) * max_start_len + step];
if(in_id != end_id)
out_ids[step * batch_size + ite * local_batch_size + id] = in_ids[(ite * local_batch_size + id) * max_start_len + step];
}
}
void set_start_ids_kernelLauncher(int* out_ids,
const int* in_ids,
const int max_start_len,
const int step,
const int ite,
const int batch_size,
const int local_batch_size,
const int end_id,
cudaStream_t stream)
{
dim3 grid((int)(ceil(local_batch_size / 512.)));
set_start_ids_kernel<<<grid, 512, 0, stream>>>(out_ids,
in_ids,
max_start_len,
step,
ite,
batch_size,
local_batch_size,
end_id);
}
template <typename T>
__global__ void kernel_padding_kernel(T *padded_kernel, const T *kernel,
const int row_dim, const int col_dim, const int padded_col_dim)
{
for(int id = threadIdx.x + blockIdx.x * blockDim.x; id < row_dim * padded_col_dim; id += blockDim.x * gridDim.x)
{
int row_id = id / padded_col_dim;
int col_id = id % padded_col_dim;
if(col_id < col_dim)
{
padded_kernel[id] = kernel[row_id * col_dim + col_id];
}
else
{
padded_kernel[id] = (T)(0.0f);
}
}
}
template <typename T>
void kernel_padding_kernelLauncher(T *padded_kernel, const T *kernel,
const int row_dim, const int col_dim, const int padded_col_dim, cudaStream_t stream)
{
// pad 0 into the kernel from shape [row_dim, col_dim] to [row_dim, padded_col_dim]
dim3 block(512);
dim3 grid(min(65536, (int)(ceil(row_dim * padded_col_dim / 512.)) ));
kernel_padding_kernel<<<grid, block, 0, stream>>>(padded_kernel, kernel, row_dim, col_dim, padded_col_dim);
}
template <typename T1, typename T2>
__global__ void bias_padding_kernel(T1 *padded_bias, const T2 *bias,
const int col_dim, const int padded_col_dim)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < col_dim)
{
padded_bias[index] = (T1)bias[index];
}
else if(index >= col_dim && index < padded_col_dim)
{
padded_bias[index] = (T1)(std::is_same<T1, half>::value ? -60000 : -1e20f);
}
}
template <typename T1, typename T2>
void bias_padding_kernelLauncher(T1 *padded_bias, const T2 *bias,
const int col_dim, const int padded_col_dim, cudaStream_t stream)
{
// pad -max into the bias from shape [col_dim] to [padded_col_dim]
dim3 block(512);
dim3 grid( (int)(ceil(padded_col_dim / 512.)) );
assert(grid.x < 65536);
bias_padding_kernel<<<grid, block, 0, stream>>>(padded_bias, bias, col_dim, padded_col_dim);
}
/* *************************** end of common kernel *********************************** */
/* ********************************** BeamSearch kernel *********************************** */
template<typename T>
__global__
void broadcast_kernel(T* log_probs,
T* cum_log_probs,
const int vocab_size,
const int N)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int bid = tid / vocab_size;
if(tid < N)
log_probs[tid] += cum_log_probs[bid];
}
void broadcast_kernelLauncher(float* log_probs,
float* cum_log_probs,
const int batch_size,
const int beam_width,
const int vocab_size,
cudaStream_t stream)
{
int N = batch_size * beam_width * vocab_size;
dim3 block(1024);
dim3 grid((N - 1) / block.x + 1);
broadcast_kernel<float><<<grid, block, 0, stream>>>(log_probs, cum_log_probs, vocab_size, N);
}
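// Editor's note (descriptive comment, added): in update_kernel below, word_ids
// arrives holding flattened indices into the per-sample [beam_width * vocab_size]
// score space produced by the top-K selection, so beam_id = id / vocab_size and
// word_id = id % vocab_size recover the source beam and token; the kernel then
// rewrites word_ids/output_ids with the token id and parent_ids with the beam id.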
template <typename T>
__global__
void update_kernel(T* log_probs, T* cum_log_probs,
bool* finished,
int* parent_ids, int* sequence_length,
int* word_ids, int* output_ids,
const int batch_size, const int beam_width,
const int vocab_size, const int end_id,
int* finished_count)
{
int tid = threadIdx.x;
sequence_length[tid] = finished[tid] ? sequence_length[tid] : sequence_length[tid] + 1;
int beam_id = word_ids[tid] / vocab_size;
int word_id = word_ids[tid] % vocab_size;
cum_log_probs[tid] = log_probs[word_ids[tid]];
sequence_length[tid] = sequence_length[beam_id];
finished[tid] = word_id == end_id ? 1 : 0;
parent_ids[tid] = beam_id;
word_ids[tid] = word_id;
output_ids[tid] = word_id;
}
void update_kernelLauncher(float* log_probs, float* cum_log_probs,
bool* finished,
int* parent_ids, int* sequence_length,
int* word_ids, int* output_ids,
const int batch_size, const int beam_width,
const int vocab_size, cudaStream_t stream,
const int end_id, int* finished_count)
{
dim3 grid(1);
dim3 block(batch_size * beam_width);
assert(block.x <= 1024);
update_kernel<float><<<grid, block, 0, stream>>>(log_probs, cum_log_probs,
finished, parent_ids, sequence_length,
word_ids, output_ids, batch_size,
beam_width, vocab_size, end_id,
finished_count);
}
template <typename T>
__global__
void update_kernel_v2(bool* finished, int* parent_ids,
int* sequence_length,
int* word_ids, int* output_ids,
const int vocab_size, const int end_id,
const int batch_size, const int beam_width,
int* finished_count)
{
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < batch_size * beam_width; index += blockDim.x * gridDim.x)
{
sequence_length[index] = finished[index] ? sequence_length[index] : sequence_length[index] + 1;
int beam_id = word_ids[index] / vocab_size;
int word_id = word_ids[index] % vocab_size;
sequence_length[index] = sequence_length[beam_id];
finished[index] = word_id == end_id ? 1 : 0;
parent_ids[index] = beam_id;
word_ids[index] = word_id;
output_ids[index] = word_id;
}
}
void update_kernelLauncher_v2(bool* finished, int* parent_ids,
int* sequence_length, int* word_ids,
int* output_ids,
int* finished_count,
DecodingBeamsearchArguments args,
cudaStream_t stream)
{
dim3 grid((int)ceil(args.batch_size_ * args.beam_width_ * 1.0 / 256));
dim3 block(256);
update_kernel_v2<float><<<grid, block, 0, stream>>>(finished, parent_ids,
sequence_length, word_ids,
output_ids, args.vocab_size_padded_,
args.end_id_,
args.batch_size_, args.beam_width_,
finished_count);
}
template <typename T>
__global__ void update_KV_cache_kernel(const T* __restrict key_src_cache,
T* key_tgt_cache,
const T* __restrict value_src_cache,
T* value_tgt_cache,
const int* beam_ids,
const bool* finished,
const int batch_size,
const int beam_width,
const int hidden_dim,
const int cache_size,
const int step,
const int decoder_layers)
{
int layer_id = blockIdx.x / batch_size / beam_width / step;
int batch_id = (blockIdx.x % (batch_size * beam_width * step)) / (beam_width * step);
int beam_id = (blockIdx.x % (beam_width * step)) / step;
if(finished[batch_id * beam_width + beam_id]) return;
int step_id = blockIdx.x % step;
int hidden_id = step_id * batch_size * beam_width * hidden_dim +
beam_ids[batch_id * beam_width + beam_id] * hidden_dim;
int tgt_hidden_id = step_id * batch_size * beam_width * hidden_dim +
batch_id * beam_width * hidden_dim + beam_id * hidden_dim;
const T* key_src_ptr = key_src_cache + layer_id * cache_size;
T* key_tgt_ptr = key_tgt_cache + layer_id * cache_size;
const T* value_src_ptr = value_src_cache + layer_id * cache_size;
T* value_tgt_ptr = value_tgt_cache + layer_id * cache_size;
for(int tid = threadIdx.x; tid < hidden_dim; tid += blockDim.x)
{
key_tgt_ptr[tgt_hidden_id + tid] = key_src_ptr[hidden_id + tid];
value_tgt_ptr[tgt_hidden_id + tid] = value_src_ptr[hidden_id + tid];
}
}
template <>
__global__ void update_KV_cache_kernel(const half* __restrict key_src_cache,
half* key_tgt_cache,
const half* __restrict value_src_cache,
half* value_tgt_cache,
const int* beam_ids,
const bool* finished,
const int batch_size,
const int beam_width,
const int hidden_dim,
const int cache_size,
const int step,
const int decoder_layers)
{
int layer_id = blockIdx.x / batch_size / beam_width / step;
int batch_id = (blockIdx.x % (batch_size * beam_width * step)) / (beam_width * step);
int beam_id = (blockIdx.x % (beam_width * step)) / step;
if(finished[batch_id * beam_width + beam_id]) return;
int step_id = blockIdx.x % step;
int hidden_id = (step_id * batch_size * beam_width * hidden_dim +
beam_ids[batch_id * beam_width + beam_id] * hidden_dim) / 2;
int tgt_hidden_id = (step_id * batch_size * beam_width * hidden_dim +
batch_id * beam_width * hidden_dim + beam_id * hidden_dim) / 2;
const half2* key_src_ptr = (const half2*)key_src_cache + layer_id * cache_size / 2;
half2* key_tgt_ptr = (half2*)key_tgt_cache + layer_id * cache_size / 2;
const half2* value_src_ptr = (const half2*)value_src_cache + layer_id * cache_size / 2;
half2* value_tgt_ptr = (half2*)value_tgt_cache + layer_id * cache_size / 2;
for(int tid = threadIdx.x; tid < hidden_dim / 2; tid += blockDim.x)
{
key_tgt_ptr[tgt_hidden_id + tid] = key_src_ptr[hidden_id + tid];
value_tgt_ptr[tgt_hidden_id + tid] = value_src_ptr[hidden_id + tid];
}
}
template <typename T>
__global__ void update_KV_batch_major_cache_kernel(const T* __restrict key_src_cache,
T* key_tgt_cache,
const T* __restrict value_src_cache,
T* value_tgt_cache,
const int* beam_ids,
const bool* finished,
const int batch_size,
const int beam_width,
const int size_per_head,
const int cache_size,
const int step,
const int max_seq_len,
const int decoder_layers)
{
int layer_id = blockIdx.z;
int head_id = blockIdx.y;
int bb_id = blockIdx.x;
int batch_id = bb_id / beam_width;
int beam_id = bb_id % beam_width;
if(finished[batch_id * beam_width + beam_id]) return;
const int hidden_dim = size_per_head * gridDim.y;
int src_offset = layer_id * cache_size +
(beam_ids[batch_id * beam_width + beam_id] * hidden_dim +
head_id * size_per_head) * max_seq_len;
int tgt_offset = layer_id * cache_size +
((batch_id * beam_width + beam_id) * hidden_dim +
head_id * size_per_head) * max_seq_len;
// for better memory access always do 16 byte loads.
// [B, H, Dh/x, L, x] and [B, H, L, Dh/x, x] (i.e. [B, H, L, Dh])
auto key_src_ptr = reinterpret_cast<const uint4*>(key_src_cache + src_offset);
auto value_src_ptr = reinterpret_cast<const uint4*>(value_src_cache + src_offset);
auto key_tgt_ptr = reinterpret_cast<uint4*>(key_tgt_cache + tgt_offset);
auto value_tgt_ptr = reinterpret_cast<uint4*>(value_tgt_cache + tgt_offset);
constexpr int x = (sizeof(T) == 4)? 4 : 8;
// step starts from 1
#if 0
constexpr int WARP_SIZE = 32;
const int num_warps = blockDim.x / WARP_SIZE;
const int warp_id = threadIdx.x / WARP_SIZE;
const int lane_id = threadIdx.x % WARP_SIZE;
for (int dhx = warp_id; dhx < size_per_head/x; dhx += num_warps)
{
for (int tid = lane_id; tid < step; tid += WARP_SIZE)
{
key_tgt_ptr[dhx * max_seq_len + tid] = key_src_ptr[dhx * max_seq_len + tid];
}
}
#else
// seems to be a bit faster
for (int tid = threadIdx.x; tid < max_seq_len * size_per_head/x; tid += blockDim.x)
{
// could consider fast int division here
if (tid % max_seq_len < step)
{
key_tgt_ptr[tid] = key_src_ptr[tid];
}
}
#endif
for (int tid = threadIdx.x; tid < step * size_per_head/x; tid += blockDim.x)
{
value_tgt_ptr[tid] = value_src_ptr[tid];
}
}
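// Note added for clarity (not in the original source): the uint4 casts above
// implement the "always do 16 byte loads" comment. With x = 16 / sizeof(T)
// (4 for float, 8 for half), one uint4 load/store moves exactly x elements,
// so the loops iterate over size_per_head/x vectorized columns instead of
// size_per_head scalars. This assumes size_per_head is a multiple of x and
// that the cache pointers are 16-byte aligned.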
template <typename T>
void update_KV_cache_kernelLauncher(T** key_cache,
T** value_cache,
const int* beam_ids,
const bool* finished,
const int batch_size,
const int beam_width,
const int head_num,
const int size_per_head,
const int step,
const int decoder_max_seq_len,
const int cache_size,
const int decoder_layers,
cudaStream_t stream)
{
int src_id = step & 0x1;
int tgt_id = 1 - src_id;
if (decoder_max_seq_len < 0)
{
int hidden_dim = head_num * size_per_head;
dim3 grid(decoder_layers * batch_size * beam_width * step);
dim3 block(min(1024, hidden_dim));
block.x = block.x / (4 / sizeof(T));
update_KV_cache_kernel<<<grid, block, 0, stream>>>(
key_cache[src_id], key_cache[tgt_id],
value_cache[src_id], value_cache[tgt_id],
beam_ids, finished,
batch_size, beam_width, hidden_dim, cache_size, step, decoder_layers);
}
else
{
dim3 grid(batch_size * beam_width, head_num, decoder_layers);
constexpr int block_sz = 128;
update_KV_batch_major_cache_kernel<<<grid, block_sz, 0, stream>>>(
key_cache[src_id], key_cache[tgt_id],
value_cache[src_id], value_cache[tgt_id],
beam_ids, finished,
batch_size, beam_width, size_per_head, cache_size, step,
decoder_max_seq_len, decoder_layers);
}
}
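// Illustrative usage sketch (not part of the original source; buffer names are
// hypothetical): the launcher above ping-pongs between two cache buffers.
// At step s it reads buffer (s & 0x1) and writes buffer 1 - (s & 0x1), so the
// buffer written at step s becomes the source at step s + 1. A caller would
// therefore keep two device allocations per cache and pass them as a pair:
//
//   float* key_cache[2]   = {d_key_buf_a,   d_key_buf_b};   // each of size
//   float* value_cache[2] = {d_value_buf_a, d_value_buf_b}; // decoder_layers * cache_size
//   for (int step = 1; step <= max_steps; ++step) {
//     // ... beam search fills beam_ids / finished for this step ...
//     update_KV_cache_kernelLauncher(key_cache, value_cache, beam_ids, finished,
//                                    batch_size, beam_width, head_num, size_per_head,
//                                    step, decoder_max_seq_len, cache_size,
//                                    decoder_layers, stream);
//   }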
template <typename T>
__global__
void apply_logit_penalties_kernel(int step,
int vocab_size,
int beam_width,
T* log_probs,
int* current_ids,
int* previous_ids,
int* parent_ids,
int end_id,
float inv_temp,
float len_penalty,
float repeat_penalty,
int* vocab_mask) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int bbid = blockIdx.y;
int bbsize = gridDim.y;
int batchid = bbid / beam_width;
// int beamid = bbid % beam_width;
for (int i = tid + bid*blockDim.x; i < vocab_size; i += blockDim.x*gridDim.x) {
log_probs[i+bbid*vocab_size] *= inv_temp;
}
if (tid == 0 && bid == 0) {
// apply repetition penalty (this can apply the penalty multiple times to a repeated word).
int prev_id = current_ids[bbid];
if (log_probs[prev_id+bbid*vocab_size] > T(0)) {
log_probs[prev_id+bbid*vocab_size] = float(log_probs[prev_id+bbid*vocab_size]) / repeat_penalty;
} else {
log_probs[prev_id+bbid*vocab_size] = float(log_probs[prev_id+bbid*vocab_size]) * repeat_penalty;
}
if (step > 1) {
int parent_beamid = parent_ids[bbsize*(step-2) + bbid];
for (int i = step-2; i > 0; --i) {
prev_id = previous_ids[bbsize*i+batchid*beam_width+parent_beamid];
if (log_probs[prev_id+bbid*vocab_size] > T(0)) {
log_probs[prev_id+bbid*vocab_size] = float(log_probs[prev_id+bbid*vocab_size]) / repeat_penalty;
} else {
log_probs[prev_id+bbid*vocab_size] = float(log_probs[prev_id+bbid*vocab_size]) * repeat_penalty;
}
//if (i > 0) parent_beamid = parent_ids[bbsize*(i-1)+parent_beamid];
parent_beamid = parent_ids[bbsize*(i-1)+parent_beamid];
}
}
prev_id = previous_ids[batchid*beam_width];
if (log_probs[prev_id+bbid*vocab_size] > T(0)) {
log_probs[prev_id+bbid*vocab_size] = float(log_probs[prev_id+bbid*vocab_size]) / repeat_penalty;
} else {
log_probs[prev_id+bbid*vocab_size] = float(log_probs[prev_id+bbid*vocab_size]) * repeat_penalty;
}
// apply length penalty
if (log_probs[end_id+bbid*vocab_size] > T(0)) {
log_probs[end_id+bbid*vocab_size] = float(log_probs[end_id+bbid*vocab_size]) / len_penalty;
} else {
log_probs[end_id+bbid*vocab_size] = float(log_probs[end_id+bbid*vocab_size]) * len_penalty;
}
}
}
template <typename T>
void apply_logit_penalties(int step,
T* log_probs,
int* current_ids,
int* previous_ids,
int* parent_ids,
GptArguments args,
cudaStream_t stream) {
int vocab_size = args.vocab_size_padded_;
int beam_width = 1;
int batch_size = args.batch_size_;
dim3 block(256);
dim3 grid((vocab_size + block.x - 1)/block.x, beam_width*batch_size);
apply_logit_penalties_kernel<T><<<grid, block, 0, stream>>> (step,
vocab_size,
beam_width,
log_probs,
current_ids,
previous_ids,
parent_ids,
args.end_id_,
1.f/args.temperature_,
args.len_penalty,
args.repetition_penalty_,
args.vocab_mask);
}
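// Scalar restatement of the penalty rule used in the kernel above (added for
// clarity, not part of the original source): a previously generated token's
// logit is divided by the repetition penalty when positive and multiplied by
// it when negative, so for penalties > 1 the token always becomes less likely
// regardless of the logit's sign.
//
//   static inline float apply_repetition_penalty(float logit, float penalty) {
//     return logit > 0.f ? logit / penalty : logit * penalty;
//   }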
extern __shared__ char transposeTileBuf_g[];
template <typename data_type>
__global__ void transpose_kernel(data_type * __restrict__ out, const data_type *__restrict__ in, int height, int width, int tH, int tW, int stride)
// int tH, int tW should be template parameters for the best performance; we do not do that since the task is tiny.
// batch stride (blockIdx.z dimension) for fully packed tensor == height * width
{
data_type *tile = (data_type *)transposeTileBuf_g;
int tidx = threadIdx.x % tW;
int tidy = threadIdx.x / tW;
int xIndex = blockIdx.x * tW + tidx;
int yIndex = blockIdx.y * tH + tidy;
int indexIn = xIndex + yIndex * width;
if ((xIndex < width) && (yIndex < height))
{
tile[tidy * tW + tidx] = in[blockIdx.z * stride + indexIn];
}
tidx = threadIdx.x % tH;
tidy = threadIdx.x / tH;
xIndex = blockIdx.y * tH + tidx;
yIndex = blockIdx.x * tW + tidy;
int indexOut = xIndex + yIndex * height;
__syncthreads();
if ((xIndex < height) && (yIndex < width))
{
out[blockIdx.z * stride + indexOut] = tile[tidx * tW + tidy];
}
}
template <typename data_type>
void transpose(data_type *out, const data_type *in, int batch, int height, int width, int stride, cudaStream_t stream)
{
int tW, tH;
if ((width <= 1) || (height <= 1) )
{
assert(0);
}
if (height <= width)
{
tH = std::min((height / 2) * 2, 16);
tW = std::min(256 / tH, width);
}
else
{
tW = std::min((width / 2) * 2, 16);
tH = std::min(256 / tW, height);
}
assert(tW <= width);
assert(tH <= height);
dim3 grid((width + tW - 1) / tW, (height + tH - 1) / tH, batch);
transpose_kernel<data_type><<<grid, tW * tH, tH * tW * sizeof(data_type), stream>>>(out, in, height, width, tH, tW, stride);
}
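// Usage sketch (illustrative only; the device buffers are hypothetical):
// transpose a batch of 8 fully packed, row-major 37x64 float matrices, i.e.
// stride == height * width:
//
//   transpose<float>(d_out, d_in, /*batch=*/8, /*height=*/37, /*width=*/64,
//                    /*stride=*/37 * 64, stream);
//
// The launcher clamps the tile's shorter side to at most 16 and sizes the
// other side so that tW * tH <= 256 threads per block.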
// TODO Add half2 implementation
template <typename DataType_>
__global__ void transpose_axis_01_kernel(DataType_ *out, DataType_ *in, const int dim0, const int dim1, const int dim2)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim0 * dim1 * dim2)
{
const int input_dim2_index = index % dim2;
index = (index - input_dim2_index) / dim2;
const int input_dim1_index = index % dim1;
index = (index - input_dim1_index) / dim1;
const int input_dim0_index = index % dim0;
out[input_dim1_index * dim0 * dim2 +
input_dim0_index * dim2 +
input_dim2_index] = in[input_dim0_index * dim1 * dim2 +
input_dim1_index * dim2 +
input_dim2_index];
}
}
template <typename DataType_>
void transpose_axis_01_kernelLauncher(DataType_ *out, DataType_ *in, const int dim0,
const int dim1, const int dim2, cudaStream_t stream)
{
dim3 block(512);
dim3 grid((int)(ceil(dim0 * dim1 * dim2 / 512.)));
transpose_axis_01_kernel<<<grid, block, 0, stream>>>(out, in, dim0, dim1, dim2);
}
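// Note added for clarity (not in the original source): transpose_axis_01 swaps
// the two leading axes of a contiguous [dim0, dim1, dim2] tensor, i.e.
//
//   out[d1][d0][d2] = in[d0][d1][d2]
//
// which is exactly the index decomposition spelled out in the kernel above;
// dim2 stays innermost, so each thread copies one element and consecutive
// threads touch consecutive addresses along dim2.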
/* *************************** end of BeamSearch kernel *********************************** */
/* ********************************** Sampling kernel *********************************** */
__global__ void topp_initialization_kernel(bool* finished,
int* sequence_length,
int* word_ids,
int* topp_id_val_buf,
int* topp_offset_buf,
const int batch_size,
const int n,
const int start_id)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
if(bid == 0)
{
for(int i = tid; i < batch_size + 1; i+= blockDim.x)
{
topp_offset_buf[i] = i * n;
}
for(int i = tid; i < batch_size; i+= blockDim.x)
{
if(finished != nullptr) finished[i] = false;
if(sequence_length != nullptr) sequence_length[i] = 0;
if(word_ids != nullptr) word_ids[i] = start_id;
}
}
int index = tid + bid * blockDim.x;
while(index < batch_size * n)
{
topp_id_val_buf[index] = index % n;
index += blockDim.x * gridDim.x;
}
}
__global__ void topp_initialization_kernel_v2(bool* finished,
int* sequence_length,
int* word_ids,
int* topp_id_val_buf,
int* topp_offset_buf,
int* begin_topp_offset_buf_,
const int batch_size,
const int n,
const int start_id)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
if(bid == 0)
{
for(int i = tid; i < batch_size + 1; i+= blockDim.x)
{
topp_offset_buf[i] = i * n;
begin_topp_offset_buf_[i] = topp_offset_buf[i];
}
for(int i = tid; i < batch_size; i+= blockDim.x)
{
if(finished != nullptr) finished[i] = false;
if(sequence_length != nullptr) sequence_length[i] = 0;
if(word_ids != nullptr) word_ids[i] = start_id;
}
}
int index = tid + bid * blockDim.x;
while(index < batch_size * n)
{
topp_id_val_buf[index] = index % n;
index += blockDim.x * gridDim.x;
}
}
void topp_initialization_kernelLauncher(bool* finished,
int* sequence_length,
int* word_ids,
int* topp_id_val_buf,
int* topp_offset_buf,
const int n,
DecodingSamplingArguments args,
cudaStream_t stream)
{
// n: the number of columns of logits_buffer for top_p sampling
topp_initialization_kernel<<<32, 512, 0, stream>>>(finished,
sequence_length,
word_ids,
topp_id_val_buf,
topp_offset_buf,
args.batch_size_,
n,
args.start_id_);
}
void topp_initialization_kernelLauncher_v2(bool* finished,
int* sequence_length,
int* word_ids,
int* topp_id_val_buf,
int* topp_offset_buf,
int* begin_topp_offset_buf_,
const int n,
DecodingSamplingArguments args,
cudaStream_t stream)
{
// n: the number of columns of logits_buffer for top_p sampling
topp_initialization_kernel_v2<<<32, 512, 0, stream>>>(finished,
sequence_length,
word_ids,
topp_id_val_buf,
topp_offset_buf,
begin_topp_offset_buf_,
args.batch_size_,
n,
args.start_id_);
}
template <typename T>
size_t get_topp_sort_temp_storage_size(const T* log_probs,
const int* id_vals,
T* sorted_log_probs,
int* sorted_id_vals,
int* topp_offset_buf,
const int batch_size,
const int vocab_size)
{
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage,
temp_storage_bytes,
log_probs,
sorted_log_probs,
id_vals,
sorted_id_vals,
vocab_size * batch_size,
batch_size,
topp_offset_buf, topp_offset_buf + 1);
return temp_storage_bytes;
}
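// Illustrative follow-up (not part of the original source): CUB uses a
// two-phase calling convention, and the helper above performs only the sizing
// phase (passing d_temp_storage == NULL just writes temp_storage_bytes).
// The actual per-batch descending sort would then look roughly like this,
// with temp_storage being a caller-provided device buffer of that size:
//
//   cub::DeviceSegmentedRadixSort::SortPairsDescending(
//       temp_storage, temp_storage_bytes,
//       log_probs, sorted_log_probs,           // keys: one vocab_size segment per batch entry
//       id_vals, sorted_id_vals,               // values: token ids
//       vocab_size * batch_size, batch_size,
//       topp_offset_buf, topp_offset_buf + 1,  // segment begin / end offsets
//       0, sizeof(T) * 8, stream);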
/* *************************** end of Sampling kernel *********************************** */
// TODO Remove the gather_tree_kernel of th_op/utils.cu
// modified from TensorFlow's implementation of tf.contrib.seq2seq.gather_tree
__global__ void gather_tree_kernel(const int batch_size, const int max_time, const int beam_width, const int end_token,
const int* step_ids, const int* parent_ids, int* max_sequence_lengths, int* beams) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batch_size * beam_width; i += gridDim.x * blockDim.x) {
const int batch = i / beam_width;
const int beam = i % beam_width;
const int max_seq_len_b = min(max_time, __ldg(max_sequence_lengths + batch));
if (max_seq_len_b <= 0) {
continue;
}
#define GET_IX(time_ix, beam_ix) (batch_size * beam_width * (time_ix) + beam_width * batch + (beam_ix))
const int initial_beam_ix = GET_IX(max_seq_len_b - 1, beam);
beams[initial_beam_ix] = __ldg(step_ids + initial_beam_ix);
int parent = __ldg(parent_ids + initial_beam_ix) % beam_width;
bool found_bad = false;
for (int level = max_seq_len_b - 2; level >= 0; --level) {
const int level_beam_ix = GET_IX(level, beam);
const int level_parent_ix = GET_IX(level, parent);
if (parent < 0 || parent > beam_width) {
beams[level_beam_ix] = -1;
parent = -1;
found_bad = true;
} else {
beams[level_beam_ix] = __ldg(step_ids + level_parent_ix);
parent = __ldg(parent_ids + level_parent_ix) % beam_width;
}
}
// Not necessary when using a BeamSearchDecoder, but necessary
// when a user feeds in possibly broken trajectory (i.e., non-eos
// entries in a beam following eos entries).
if (!found_bad) {
bool finished = false;
for (int time = 0; time < max_seq_len_b; ++time) {
const int level_beam_ix = GET_IX(time, beam);
if (finished) {
beams[level_beam_ix] = end_token;
} else if (beams[level_beam_ix] == end_token) {
finished = true;
}
}
}
#undef GET_IX
}
}
void gather_tree_kernel_launcher(int max_time, int batch_size, int beam_width,
int* step_ids, int* parent_ids, int* max_sequence_lengths,
int end_token, int* beams, cudaStream_t stream) {
int batchbeam = batch_size * beam_width;
dim3 grid(1), block(batchbeam);
// though the decoder does not support > 1024 for now
if (batchbeam > 1024) {
grid.x = ceil(batch_size * beam_width / 1024.);
block.x = 1024;
}
gather_tree_kernel<<<grid, block, 0, stream>>>(batch_size, max_time, beam_width, end_token,
step_ids, parent_ids, max_sequence_lengths, beams);
}
/* ********************************** Instantiation *********************************** */
template
void embedding_lookup_sine_position_encoding_kernel_launcher(float* from_tensor,
const float* embedding_table,
const float* position_encoding,
const int* word_ids,
const int batch_size,
const int hidden_units,
cudaStream_t stream);
template
void embedding_lookup_sine_position_encoding_kernel_launcher(half* from_tensor,
const half* embedding_table,
const half* position_encoding,
const int* word_ids,
const int batch_size,
const int hidden_units,
cudaStream_t stream);
template
void embedding_position_lookups_kernel_launcher(float* from_tensor,
const float* embedding_table,
const float* pos_table,
const int* word_ids,
const int local_batch_size,
const int batch_size,
const int hidden_units,
int step,
int ite,
int max_input_len,
const int* start_lengths,
cudaStream_t stream);
template
void embedding_position_lookups_kernel_launcher(half* from_tensor,
const half* embedding_table,
const half* pos_table,
const int* word_ids,
const int local_batch_size,
const int batch_size,
const int hidden_units,
int step,
int ite,
int max_input_len,
const int* start_lengths,
cudaStream_t stream);
template
void start_id_embedding_position_lookups_kernel_launcher(float* from_tensor,
int* output_ids,
const float* embedding_table,
const float* pos_table,
const int* word_ids,
const int start_step,
const int length,
const int max_length,
const int batch_size,
const int hidden_units,
cudaStream_t stream);
template
void start_id_embedding_position_lookups_kernel_launcher(half* from_tensor,
int* output_ids,
const half* embedding_table,
const half* pos_table,
const int* word_ids,
const int start_step,
const int length,
const int max_length,
const int batch_size,
const int hidden_units,
cudaStream_t stream);
template void apply_temperature_penalty_kernelLauncher(float* logits,
const float temperature,
const int m,
const int vocab_size,
const int vocab_size_padd,
cudaStream_t stream);
template void apply_temperature_penalty_kernelLauncher(half* logits,
const half temperature,
const int m,
const int vocab_size,
const int vocab_size_padd,
cudaStream_t stream);
template void apply_repetition_penalty_kernelLauncher(float* logits,
const float penalty,
int* start_ids,
int* output_ids,
const int batch_size,
const int local_batch_size,
const int vocab_size,
const int vocab_size_padd,
const int* start_lengths,
const int max_input_len,
const int step,
const int ite,
cudaStream_t stream);
template void apply_repetition_penalty_kernelLauncher(half* logits,
const float penalty,
int* start_ids,
int* output_ids,
const int batch_size,
const int local_batch_size,
const int vocab_size,
const int vocab_size_padd,
const int* start_lengths,
const int max_input_len,
const int step,
const int ite,
cudaStream_t stream);
template void kernel_padding_kernelLauncher(float *padded_kernel, const float *kernel,
const int row_dim, const int col_dim,
const int padded_col_dim, cudaStream_t stream);
template void kernel_padding_kernelLauncher(half *padded_kernel, const half *kernel,
const int row_dim, const int col_dim,
const int padded_col_dim, cudaStream_t stream);
template void bias_padding_kernelLauncher(float *padded_bias, const float *bias, const int col_dim,
const int padded_col_dim, cudaStream_t stream);
template void bias_padding_kernelLauncher(float *padded_bias, const half *bias, const int col_dim,
const int padded_col_dim, cudaStream_t stream);
template void bias_padding_kernelLauncher(half *padded_bias, const half *bias, const int col_dim,
const int padded_col_dim, cudaStream_t stream);
template void update_KV_cache_kernelLauncher(float** key_cache,
float** value_cache,
const int* beam_ids,
const bool* finished,
const int batch_size,
const int beam_width,
const int head_num,
const int size_per_head,
const int step,
const int decoder_max_seq_len,
const int cache_size,
const int decoder_layers,
cudaStream_t stream);
template void update_KV_cache_kernelLauncher(half** key_cache,
half** value_cache,
const int* beam_ids,
const bool* finished,
const int batch_size,
const int beam_width,
const int head_num,
const int size_per_head,
const int step,
const int decoder_max_seq_len,
const int cache_size,
const int decoder_layers,
cudaStream_t stream);
template void apply_logit_penalties(int step,
float* log_probs,
int* current_ids,
int* previous_ids,
int* parent_ids,
GptArguments args,
cudaStream_t stream);
template void apply_logit_penalties(int step,
half* log_probs,
int* current_ids,
int* previous_ids,
int* parent_ids,
GptArguments args,
cudaStream_t stream);
template size_t get_topp_sort_temp_storage_size(const float* log_probs,
const int* id_vals,
float* sorted_log_probs,
int* sorted_id_vals,
int* topp_offset_buf,
const int batch_size,
const int vocab_size);
template size_t get_topp_sort_temp_storage_size(const half* log_probs,
const int* id_vals,
half* sorted_log_probs,
int* sorted_id_vals,
int* topp_offset_buf,
const int batch_size,
const int vocab_size);
template void transpose(float *out,
const float *in,
int batch,int height,
int width,int stride,
cudaStream_t stream);
template void transpose(half *out,
const half *in,
int batch,int height,
int width,int stride,
cudaStream_t stream);
template void transpose_axis_01_kernelLauncher(float *out,
float *in,
const int dim0,
const int dim1,
const int dim2,
cudaStream_t stream);
template void transpose_axis_01_kernelLauncher(half *out,
half *in,
const int dim0,
const int dim1,
const int dim2,
cudaStream_t stream);
template void init_kernelLauncher(bool* finished,
int* sequence_length,
int* word_ids,
float* cum_log_probs,
const int sentence_id,
const int batch_size,
const int beam_width,
cudaStream_t stream);
template void init_kernelLauncher(bool* finished,
int* sequence_length,
int* word_ids,
half* cum_log_probs,
const int sentence_id,
const int batch_size,
const int beam_width,
cudaStream_t stream);
/* *************************** end of Instantiation *********************************** */
} // end of name space fastertransformer
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth bilateral filter
namespace kfusion
{
namespace device
{
__global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
int value = src(y, x);
int tx = min (x - ksz / 2 + ksz, src.cols - 1);
int ty = min (y - ksz / 2 + ksz, src.rows - 1);
float sum1 = 0;
float sum2 = 0;
for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy)
{
for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx)
{
int depth = src(cy, cx);
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
float color2 = (value - depth) * (value - depth);
float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half));
sum1 += depth * weight;
sum2 += weight;
}
}
dst(y, x) = __float2int_rn (sum1 / sum2);
}
}
}
void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel, cudaFuncCachePreferL1) );
bilateral_kernel<<<grid, block>>>(src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth));
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth truncation
namespace kfusion
{
namespace device
{
__global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
if(depth(y, x) > max_dist)
depth(y, x) = 0;
}
}
}
void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
truncate_depth_kernel<<<grid, block>>>(depth, static_cast<ushort>(max_dist * 1000.f));
cudaSafeCall ( cudaGetLastError() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Build depth pyramid
namespace kfusion
{
namespace device
{
__global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
const int D = 5;
int center = src(2 * y, 2 * x);
int tx = min (2 * x - D / 2 + D, src.cols - 1);
int ty = min (2 * y - D / 2 + D, src.rows - 1);
int cy = max (0, 2 * y - D / 2);
int sum = 0;
int count = 0;
for (; cy < ty; ++cy)
for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
{
int val = src(cy, cx);
if (abs (val - center) < sigma_depth_mult3)
{
sum += val;
++count;
}
}
dst(y, x) = (count == 0) ? 0 : sum / count;
}
}
}
void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y));
pyramid_kernel<<<grid, block>>>(source, pyramid, sigma_depth * 3);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute normals
namespace kfusion
{
namespace device
{
__global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
Normal n_out = make_float4(qnan, qnan, qnan, 0.f);
if (x < depth.cols - 1 && y < depth.rows - 1)
{
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
n_out = make_float4(-n.x, -n.y, -n.z, 0.f);
}
}
normals(y, x) = n_out;
}
__global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float4 n = normals(y, x);
if (isnan(n.x))
depth(y, x) = 0;
}
}
}
}
void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
compute_normals_kernel<<<grid, block>>>(depth, reproj, normals);
cudaSafeCall ( cudaGetLastError () );
mask_depth_kernel<<<grid, block>>>(normals, depth);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute computePointNormals
namespace kfusion
{
namespace device
{
__global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
if (x >= depth.cols - 1 || y >= depth.rows - 1)
return;
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f);
points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f);
}
}
}
}
void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
points_normals_kernel<<<grid, block>>>(reproj, depth, points, normals);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists
namespace kfusion
{
namespace device
{
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float xl = (x - c.x) * finv.x;
float yl = (y - c.y) * finv.y;
float lambda = sqrtf (xl * xl + yl * yl + 1);
dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); //meters
}
}
__global__ void cloud_to_depth_kernel(const PtrStep<Point> cloud, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
depth(y, x) = cloud(y, x).z * 1000; // meters -> mm
}
}
}
}
void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
compute_dists_kernel<<<grid, block>>>(depth, dists, make_float2(1.f/f.x, 1.f/f.y), c);
cudaSafeCall ( cudaGetLastError () );
}
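// Note added for clarity (not in the original source): compute_dists converts
// the sensor's z-depth into the Euclidean distance along the pixel's viewing
// ray. For pixel (x, y) with focal lengths f and principal point c, the
// back-projected ray direction is ((x - c.x)/f.x, (y - c.y)/f.y, 1), whose
// length is lambda = sqrt(xl*xl + yl*yl + 1); multiplying the depth (mm) by
// lambda and by 0.001 yields the ray length in meters stored in the Dists map.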
void kfusion::device::cloud_to_depth(const Points& cloud, Depth depth)
{
dim3 block (32, 8);
dim3 grid (divUp (cloud.cols (), block.x), divUp (cloud.rows (), block.y));
cloud_to_depth_kernel<<<grid, block>>>(cloud, depth);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ddst.cols || y >= ddst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
ushort d = 0;
float4 n = make_float4(qnan, qnan, qnan, qnan);
int xs = x * 2;
int ys = y * 2;
int d00 = dsrc(ys+0, xs+0);
int d01 = dsrc(ys+0, xs+1);
int d10 = dsrc(ys+1, xs+0);
int d11 = dsrc(ys+1, xs+1);
if (d00 * d01 != 0 && d10 * d11 != 0)
{
d = (d00 + d01 + d10 + d11)/4;
float4 n00 = nsrc(ys+0, xs+0);
float4 n01 = nsrc(ys+0, xs+1);
float4 n10 = nsrc(ys+1, xs+0);
float4 n11 = nsrc(ys+1, xs+1);
n.x = (n00.x + n01.x + n10.x + n11.x)*0.25;
n.y = (n00.y + n01.y + n10.y + n11.y)*0.25;
n.z = (n00.z + n01.z + n10.z + n11.z)*0.25;
}
ddst(y, x) = d;
ndst(y, x) = n;
}
}
}
void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
int in_cols = depth.cols ();
int in_rows = depth.rows ();
int out_cols = in_cols / 2;
int out_rows = in_rows / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_depth_normals_kernel<<<grid, block>>>(depth, normals, depth_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= vdst.cols || y >= vdst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);
int xs = x * 2;
int ys = y * 2;
float3 d00 = tr(vsrc(ys+0, xs+0));
float3 d01 = tr(vsrc(ys+0, xs+1));
float3 d10 = tr(vsrc(ys+1, xs+0));
float3 d11 = tr(vsrc(ys+1, xs+1));
if (!isnan(d00.x * d01.x * d10.x * d11.x))
{
float3 d = (d00 + d01 + d10 + d11) * 0.25f;
vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);
float3 n00 = tr(nsrc(ys+0, xs+0));
float3 n01 = tr(nsrc(ys+0, xs+1));
float3 n10 = tr(nsrc(ys+1, xs+0));
float3 n11 = tr(nsrc(ys+1, xs+1));
float3 n = (n00 + n01 + n10 + n11)*0.25f;
ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
}
}
}
}
void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out)
{
int out_cols = points.cols () / 2;
int out_rows = points.rows () / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_points_normals_kernel<<<grid, block>>>(points, normals, points_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
int d = depth(y,x);
if (d == 0)
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = reproj(x, y, d * 0.001f);
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
__global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
float3 p = tr(points(y,x));
if (isnan(p.x))
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = p;
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
}
}
void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<ushort>)depth, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<Point>)points, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= normals.cols || y >= normals.rows)
return;
float4 n = normals(y, x);
#if 0
unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f);
unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f);
unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f);
#else
unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f);
unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f);
unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f);
#endif
colors(y, x) = make_uchar4(b, g, r, 0);
}
}
}
void kfusion::device::renderTangentColors(const Normals& normals, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y));
tangent_colors_kernel<<<grid, block>>>(normals, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < output.size)
{
float4 p = cloud[idx];
float8 n = normals[idx];
float12 o;
o.x = p.x;
o.y = p.y;
o.z = p.z;
o.normal_x = n.x;
o.normal_y = n.y;
o.normal_z = n.z;
output.data[idx] = o;
}
}
}
}
void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output)
{
const int block = 256;
int total = (int)output.size ();
mergePointNormalKernel<<<divUp (total, block), block>>>(cloud, normals, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
DEV static real_t terminal_U_penalty(const base_t *s, const int i, const int j, param_t p)
{
return s[i] == U || s[j] == U ? p->terminal_AU_penalty : RCONST(0.);
}
DEV static real_t dangle_3p_energy(const base_t *s,
const int i,
const int j,
const int ip1,
param_t p)
{
return p->dangle_3p[s[i]][s[j]][s[ip1]] + terminal_U_penalty(s,i,j,p);
}
DEV static real_t dangle_5p_energy(const base_t *s,
const int i,
const int j,
const int jm1,
param_t p)
{
return p->dangle_5p[s[i]][s[j]][s[jm1]] + terminal_U_penalty(s,i,j,p);
}
DEV static real_t terminal_stack(const base_t *s,
const int i,
const int j,
const int ip1,
const int jm1,
param_t p)
{
return p->tstack[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p);
}
DEV static real_t terminal_stack_multibranch(const base_t *s,
const int i,
const int j,
const int ip1,
const int jm1,
param_t p)
{
return p->tstackm[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p);
}
DEV static const real_t *lookup_find(const base_t *s, const int d, param_t p)
{
int i;
switch (d) {
case 3:
for (i = 0; i < p->ntriloop; i++)
if (sequences_match(s, p->triloop[i].seq, d+2))
return &p->triloop[i].val;
break;
case 4:
for (i = 0; i < p->ntloop; i++)
if (sequences_match(s, p->tloop[i].seq, d+2))
return &p->tloop[i].val;
break;
case 6:
for (i = 0; i < p->nhexaloop; i++)
if (sequences_match(s, p->hexaloop[i].seq, d+2))
return &p->hexaloop[i].val;
break;
}
return 0;
}
/***
* Energy of a hairpin loop with d unpaired bases, d = j-i-1
* s[i] is paired with s[j]
* s[i+1] is mismatched with s[j-1]
***/
DEV static real_t hairpin_loop_energy(const base_t *s,
const int i,
const int j,
const int d,
param_t p)
{
/* Lookup tables for special hairpin loops */
const real_t *val;
if ((val = lookup_find(&s[i],d,p)))
return *val;
/* Hairpin loop initiation penalty */
real_t e;
if (d > LOOP_MAX)
e = p->hairpin_loop_initiation[LOOP_MAX] + p->Extrapolation_for_large_loops *
LOG((real_t) d / LOOP_MAX);
else
e = p->hairpin_loop_initiation[d];
if (d == 3) {
if (contains_only_base(C,d,&s[i+1]))
e += p->c_hairpin_of_3;
e += terminal_U_penalty(s,i,j,p);
} else {
e += p->tstackh[s[i]][s[j]][s[i+1]][s[j-1]];
if (contains_only_base(C,d,&s[i+1]))
e += p->c_hairpin_slope*d + p->c_hairpin_intercept;
}
if (s[i] == G && s[j] == U && i > 1 && s[i-1] == G && s[i-2] == G)
e += p->bonus_for_GGG_hairpin;
return e;
}
DEV static real_t real_min(real_t a, real_t b) { return a < b ? a : b; }
/***
* Energy of an internal/bulge loop with d1, d2 unpaired bases,
* d1 = ip-i-1, d2 = j-jp-1
* s[i] is paired with s[j]
* s[i+1] is mismatched with s[j-1]
* s[ip-1] is mismatched with s[jp+1]
* s[ip] is paired with s[jp]
***/
DEV static real_t internal_loop_energy(const base_t *s,
const int i,
const int j,
const int ip,
const int jp,
const int d1,
const int d2,
param_t p)
{
/* Bulge loops */
if (d1 == 0 || d2 == 0) {
real_t e = p->bulge_loop_initiation[d1+d2];
if (d1 == 1 || d2 == 1) { /* single-nucleotide bulge */
e += p->stack[s[i]][s[j]][s[ip]][s[jp]];
if ((d1 == 1 && s[i+1] == C && (s[i] == C || s[i+2] == C)) ||
(d2 == 1 && s[j-1] == C && (s[j] == C || s[j-2] == C)))
e += p->Bonus_for_Single_C_bulges_adjacent_to_C;
} else {
e += terminal_U_penalty(s,i,j,p);
e += terminal_U_penalty(s,ip,jp,p);
}
return e;
}
/* Small internal loops */
if (d1 == 1 && d2 == 1)
return p->int11[s[i]][s[i+1]][s[i+2]][s[j-2]][s[j-1]][s[j]];
if (d1 == 2 && d2 == 2)
return p->int22[s[i]][s[ip]][s[j]][s[jp]][s[i+1]][s[i+2]][s[j-1]][s[j-2]];
if (d1 == 1 && d2 == 2)
return p->int21[s[i]][s[j]][s[i+1]][s[j-1]][s[jp+1]][s[ip]][s[jp]];
if (d1 == 2 && d2 == 1)
return p->int21[s[jp]][s[ip]][s[jp+1]][s[ip-1]][s[i+1]][s[j]][s[i]];
/* Larger internal loops */
tab4_t *sp;
if (d1 == 1 || d2 == 1)
sp = &p->tstacki1n;
else if ((d1 == 2 && d2 == 3) || (d1 == 3 && d2 == 2))
sp = &p->tstacki23;
else
sp = &p->tstacki;
return p->internal_loop_initiation[d1+d2] +
real_min(p->fm_array_first_element * abs(d1-d2), p->maximum_correction) +
(*sp)[s[i]][s[j]][s[i+1]][s[j-1]] +
(*sp)[s[jp]][s[ip]][s[jp+1]][s[ip-1]];
}
/* return -ln(e^-a + e^-b) */
DEV static real_t free_energy_sum(const real_t a, const real_t b)
{
if (a < b)
return a - LOG1P(EXP(a-b));
else if (b < a)
return b - LOG1P(EXP(b-a));
else
return a - LOG(2);
}
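/* Note added for clarity (not part of the original source): this is the
 * numerically stable log-sum-exp trick specialized to two terms. Since
 * e^-a + e^-b = e^-a * (1 + e^(a-b)), factoring out the smaller of a and b
 * keeps the argument of EXP() non-positive (no overflow), and LOG1P()
 * preserves precision when the two contributions differ by many orders of
 * magnitude. */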
DEV static void free_energy_accumulate(real_t *a, const real_t b)
{
*a = free_energy_sum(*a,b);
}
DEV HOST static int int_min(int a, int b) { return a < b ? a : b; }
DEV HOST static int ind(int i, int j, int n)
{
return i*n + j;
}
DEV HOST static int upper_triangle_index(int i, int j)
{
return (j*(j-1))/2 + i;
}
DEV HOST inline static int can_pair(int i, int j, int n, const int *bcp)
{
if (i>=0 && j<=n-1 && i != j && j>=0 && i<=n-1){
if (i < j)
return bcp[upper_triangle_index(i, j)];
else
return bcp[upper_triangle_index(j, i)];
}
else
return 0;
}
DEV static int wrap(int i, int n)
{
return i >= n ? i-n : i;
}
DEV static int is_exterior(int i, int j)
{
return j < i;
}
DEV static int is_interior(int i, int j)
{
return i < j;
}
DEV HOST real_t* array_val(real_t *__restrict a, int i, int j, int n, const int *__restrict bcp)
{
return can_pair(i,j,n,bcp) ? &a[ind(i,j,n)] : 0;
}
#ifdef __CUDACC__
#define ISTART blockIdx.x
#define IINC gridDim.x
#else
#define ISTART 0
#define IINC 1
#endif
GLOBAL static void calc_hairpin_stack_exterior_multibranch
(const int d,
const int n,
const base_t *__restrict s,
const int *__restrict bcp,
real_t *__restrict v,
const real_t *__restrict x,
const real_t *__restrict w5,
const real_t *__restrict w3,
const param_t p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,bcp))
continue;
real_t vij = INF;
if (i != n-1 && j != 0) {
/* hairpin loop */
if (is_interior(i,j))
vij = hairpin_loop_energy(s,i,j,d,p);
/* stack */
if (can_pair(i+1,j-1,n,bcp) && !(is_interior(i,j) && d <= LOOP_MIN-2))
free_energy_accumulate(&vij, p->stack[s[i]][s[j]][s[i+1]][s[j-1]] + v[ind(i+1,j-1,n)]);
}
/* exterior loop */
if (is_exterior(i,j)) {
free_energy_accumulate(&vij, w3[i+1] + w5[j-1] + terminal_U_penalty(s,i,j,p));
if (i != n-1)
free_energy_accumulate(&vij, w3[i+2] + w5[j-1] + dangle_3p_energy(s,i,j,i+1,p));
if (j != 0)
free_energy_accumulate(&vij, w3[i+1] + w5[j-2] + dangle_5p_energy(s,i,j,j-1,p));
if (i != n-1 && j != 0)
free_energy_accumulate(&vij, w3[i+2] + w5[j-2] + terminal_stack(s,i,j,i+1,j-1,p));
}
/* multibranch loop */
if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) {
free_energy_accumulate(&vij, x[ind((d-2)%5,i+1,n)] + terminal_U_penalty(s,i,j,p) + p->a + p->c);
if (i != n-2)
free_energy_accumulate(&vij, x[ind((d-3)%5,i+2,n)] + dangle_3p_energy(s,i,j,i+1,p) + p->a + p->b + p->c);
if (j != 1)
free_energy_accumulate(&vij, x[ind((d-3)%5,i+1,n)] + dangle_5p_energy(s,i,j,j-1,p) + p->a + p->b + p->c);
if (i != n-2 && j != 1)
free_energy_accumulate(&vij, x[ind((d-4)%5,i+2,n)] + terminal_stack_multibranch(s,i,j,i+1,j-1,p) + p->a + 2*p->b + p->c);
}
v[ind(i,j,n)] = vij;
}
}
#ifdef __CUDACC__
#define NTHREAD 128
#define THREAD_X 8
#define THREAD_Y 16
#if THREAD_X*THREAD_Y != NTHREAD
#error THREAD_X * THREAD_Y must be equal to NTHREAD
#endif
DEV static void free_energy_reduce(real_t *x, int tid, int nt)
{
__shared__ real_t buf[NTHREAD];
buf[tid] = *x;
for (nt /= 2, __syncthreads(); nt > 0; nt /= 2, __syncthreads())
if (tid < nt)
free_energy_accumulate(&buf[tid], buf[tid+nt]);
if (tid == 0)
*x = buf[0];
}
#endif /* __CUDACC__ */
GLOBAL static void calc_internal
(const int d,
const int n,
const base_t *__restrict s,
const int *__restrict bcp,
real_t *__restrict v,
const param_t p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) ||
(is_interior(i,j) && d <= LOOP_MIN+2) ||
!can_pair(i,j,n,bcp))
continue;
real_t vij = INF;
#ifdef __CUDACC__
const int d1start = threadIdx.x;
const int d1inc = blockDim.x;
#else
const int d1start = 0;
const int d1inc = 1;
#endif
const int dmax = int_min(LOOP_MAX, d-2);
const int d1max = int_min(dmax, n-i-2);
int d1;
for (d1 = d1start; d1 <= d1max; d1 += d1inc) {
const int ip = i+d1+1;
const int d2max = int_min(dmax-d1, j-1);
#ifdef __CUDACC__
const int d2start = d1 > 0 ? threadIdx.y : threadIdx.y + 1;
const int d2inc = blockDim.y;
#else
const int d2start = d1 > 0 ? 0 : 1;
const int d2inc = 1;
#endif
int d2;
for (d2 = d2start; d2 <= d2max; d2 += d2inc) {
const int jp = j-d2-1;
if (can_pair(ip,jp,n,bcp))
free_energy_accumulate(&vij, internal_loop_energy(s,i,j,ip,jp,d1,d2,p)
+ v[ind(ip,jp,n)]);
}
}
#ifdef __CUDACC__
const int tid = threadIdx.x * blockDim.y + threadIdx.y;
free_energy_reduce(&vij, tid, blockDim.x*blockDim.y);
if (tid != 0)
continue;
#endif
free_energy_accumulate(&v[ind(i,j,n)], vij);
}
}
DEV static real_t coaxial_flush(const base_t *s,
const int i,
const int j,
const int ip,
const int jp,
param_t p)
{
return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) +
p->coaxial[s[i]][s[j]][s[ip]][s[jp]];
}
DEV static real_t coaxial_mismatch1(const base_t *s,
const int i,
const int j,
const int ip,
const int jp,
param_t p)
{
return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) +
p->tstackcoax[s[j]][s[i]][s[j+1]][s[i-1]] +
p->coaxstack[s[j+1]][s[i-1]][s[ip]][s[jp]];
}
DEV static real_t coaxial_mismatch2(const base_t *s,
const int i,
const int j,
const int ip,
const int jp,
param_t p)
{
return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) +
p->tstackcoax[s[jp]][s[ip]][s[jp+1]][s[ip-1]] +
p->coaxstack[s[j]][s[i]][s[j+1]][s[jp+1]];
}
GLOBAL static void calc_coaxial
(const int d, /* diagonal - length of bases in between i and j, exclusive */
const int n,
const base_t *__restrict s,
const int *__restrict bcp,
real_t *__restrict v,
const real_t *__restrict y,
const real_t *__restrict w5,
const real_t *__restrict w3,
const param_t p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,bcp))
continue;
const real_t *v1;
real_t vij = INF;
/* exterior */
if (is_exterior(i,j)) {
int k, kstart;
#ifdef __CUDACC__
kstart = threadIdx.x;
const int kinc = blockDim.x;
#else
kstart = 0;
const int kinc = 1;
#endif
for (k = kstart; k < j - LOOP_MIN; k += kinc) {
if ((v1 = array_val(v,k,j-1,n,bcp)))
free_energy_accumulate(&vij, w3[i+1] + w5[k-1] + coaxial_flush(s,k,j-1,j,i,p) + (*v1));
if (j-2 >= 0) {
if (i < n-1 && (v1 = array_val(v,k,j-2,n,bcp)))
free_energy_accumulate(&vij, w3[i+2] + w5[k-1] + coaxial_mismatch2(s,k,j-2,j,i,p) + (*v1));
if ((v1 = array_val(v,k+1,j-2,n,bcp)))
free_energy_accumulate(&vij, w3[i+1] + w5[k-1] + coaxial_mismatch1(s,k+1,j-2,j,i,p) + (*v1));
}
}
#ifdef __CUDACC__
kstart = i+LOOP_MIN+1 + threadIdx.x;
#else
kstart = i+LOOP_MIN+1;
#endif
for (k = kstart; k < n; k += kinc) {
if ((v1 = array_val(v,i+1,k,n,bcp)))
free_energy_accumulate(&vij, w3[k+1] + w5[j-1] + coaxial_flush(s,j,i,i+1,k,p) + (*v1));
if (j > 0 && (v1 = array_val(v,i+2,k,n,bcp)))
free_energy_accumulate(&vij, w3[k+1] + w5[j-2] + coaxial_mismatch1(s,j,i,i+2,k,p) + (*v1));
if ((v1 = array_val(v,i+2,k-1,n,bcp)))
free_energy_accumulate(&vij, w3[k+1] + w5[j-1] + coaxial_mismatch2(s,j,i,i+2,k-1,p) + (*v1));
}
} /* end exterior */
/* multibranch */
if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) {
int ktmp;
#ifdef __CUDACC__
int ktmpstart = i+2 + threadIdx.x;
const int ktmpinc = blockDim.x;
#else
int ktmpstart = i+2;
const int ktmpinc = 1;
#endif
for (ktmp = ktmpstart; ktmp < jtmp-2; ktmp += ktmpinc) {
const int k = wrap(ktmp,n);
if (k != n-1) {
if ((v1 = array_val(v,i+1,k,n,bcp)))
free_energy_accumulate(&vij, coaxial_flush(s,j,i,i+1,k,p) + (*v1) + p->a_2c +
y[ind(k+1,j-1,n)]);
if (ktmp+2 < jtmp-1 && i+1 != n-1 && k+1 != n-1 && (v1 = array_val(v,i+2,k,n,bcp))) {
const real_t tmp = (*v1) + p->a_2b_2c;
free_energy_accumulate(&vij, coaxial_mismatch2(s,j,i,i+2,k,p) + tmp + y[ind(k+2,j-1,n)]);
if (j != 1) {
free_energy_accumulate(&vij, coaxial_mismatch1(s,j,i,i+2,k,p) + tmp + y[ind(k+1,j-2,n)]);
}
}
}
}
#ifdef __CUDACC__
ktmpstart = i+3 + threadIdx.x;
#else
ktmpstart = i+3;
#endif
for (ktmp = ktmpstart; ktmp < jtmp-1; ktmp += ktmpinc) {
const int k = wrap(ktmp,n);
if (k != 0) {
if ((v1 = array_val(v,k,j-1,n,bcp)))
free_energy_accumulate(&vij, coaxial_flush(s,k,j-1,j,i,p) + (*v1) + p->a_2c +
y[ind(i+1,k-1,n)]);
if (j != 1 && ktmp > i+3 && (v1 = array_val(v,k,j-2,n,bcp))) {
const real_t tmp = (*v1) + p->a_2b_2c;
if (k != 1)
free_energy_accumulate(&vij, coaxial_mismatch1(s,k,j-2,j,i,p) + tmp + y[ind(i+1,k-2,n)]);
if (i != n-2)
free_energy_accumulate(&vij, coaxial_mismatch2(s,k,j-2,j,i,p) + tmp + y[ind(i+2,k-1,n)]);
}
}
}
} /* end multibranch */
#ifdef __CUDACC__
free_energy_reduce(&vij, threadIdx.x, blockDim.x);
if (threadIdx.x != 0)
continue;
#endif
free_energy_accumulate(&v[ind(i,j,n)], vij);
} /* end loop over i */
} /* end calc_coaxial */
/***
* For arrays w, wl, xl, two diagonals are stored.
* Element i of the current diagonal - that is, w(i,j) -
* is referenced as w[d%2][i].
* Element i of the previous diagonal - that is, w(i,j-1) -
* is referenced as w((d-1)%2,i)
*
* For array x, five diagonals are stored.
* Similarly to w, x[ind(d%5,i,n)] refers to element i on
* the current diagonal, and x[ind((d-k)%5,i,n)] to element i
* on a previous diagonal d-k.
* Specifically:
*
 * x(i,j)     --> x[ind(d%5,i,n)]
 * x(i+1,j)   --> x[ind((d-1)%5,i+1,n)]
 * x(i+1,j-1) --> x[ind((d-2)%5,i+1,n)]
 * x(i+2,j-1) --> x[ind((d-3)%5,i+2,n)]
 * x(i+1,j-2) --> x[ind((d-3)%5,i+1,n)]
 * x(i+2,j-2) --> x[ind((d-4)%5,i+2,n)]
***/
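/* Worked example (added for clarity, not part of the original source):
 * with n = 10 and diagonal d = 4, the cell (i, j) = (2, 7) satisfies
 * d = j - i - 1 and is stored at x[ind(4 % 5, 2, 10)] = x[42]; its
 * neighbour (i+1, j-1) = (3, 6) lies on diagonal d-2 and is read back as
 * x[ind((d-2) % 5, 3, 10)] = x[23]. */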
GLOBAL static void calc_wl
(const int d, /* diagonal - length of bases in between i and j, exclusive */
const int n,
const base_t *__restrict s,
const int *__restrict bcp,
real_t *__restrict v,
real_t *__restrict z,
real_t *__restrict wq,
real_t *__restrict w,
real_t *__restrict wl,
const param_t p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if (is_exterior(i,j) && i-j <= LOOP_MIN)
continue;
real_t wqtmp = INF, wltmp = INF;
const real_t *v1;
if ((v1 = array_val(v,i,j,n,bcp))) {
const real_t tmp = (*v1) + terminal_U_penalty(s,i,j,p);
free_energy_accumulate(&wqtmp, tmp);
free_energy_accumulate(&wltmp, tmp + p->c);
}
if (i != n-1 && (v1 = array_val(v,i+1,j,n,bcp))) {
const real_t tmp = (*v1) + dangle_5p_energy(s,j,i+1,i,p);
free_energy_accumulate(&wqtmp, tmp);
free_energy_accumulate(&wltmp, tmp + p->b + p->c);
}
if (j != 0 && (v1 = array_val(v,i,j-1,n,bcp))) {
const real_t tmp = (*v1) + dangle_3p_energy(s,j-1,i,j,p);
free_energy_accumulate(&wqtmp, tmp);
free_energy_accumulate(&wltmp, tmp + p->b + p->c);
}
if (i != n-1 && j != 0 && (v1 = array_val(v,i+1,j-1,n,bcp))) {
const real_t tmp = (*v1) + terminal_stack_multibranch(s,j-1,i+1,j,i,p);
free_energy_accumulate(&wqtmp, tmp);
free_energy_accumulate(&wltmp, tmp + 2*p->b + p->c);
}
if (is_interior(i,j))
wq[upper_triangle_index(i,j)] = wqtmp;
/* WL array */
wl[ind(d%2,i,n)] = z[ind(i,j,n)] = wltmp;
if (i != n-1 && d > 0)
free_energy_accumulate(&wl[ind(d%2,i,n)], wl[ind((d-1)%2,i+1,n)] + p->b);
/* W array */
w[ind(d%2,i,n)] = wl[ind(d%2,i,n)];
if (j != 0 && d > 0)
free_energy_accumulate(&w[ind(d%2,i,n)], w[ind((d-1)%2,i,n)] + p->b);
} /* end loop over i */
} /* end calc_wl */
GLOBAL static void calc_xl
(const int d, /* diagonal - length of bases in between i and j, exclusive */
const int n,
const real_t *__restrict z,
const real_t *__restrict yl,
real_t *__restrict xl)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if (is_exterior(i,j) && i-j <= LOOP_MIN)
continue;
#ifdef __CUDACC__
if (threadIdx.x == 0)
xl[ind(d%2,i,n)] = INF;
#else
xl[ind(d%2,i,n)] = INF;
#endif
if (is_interior(i,j) && d <= 2*LOOP_MIN+1)
continue;
#ifdef __CUDACC__
const int kstart = i+1 + threadIdx.x;
const int kinc = blockDim.x;
#else
const int kstart = i+1;
const int kinc = 1;
#endif
int ktmp;
real_t tmp = INF;
for (ktmp = kstart; ktmp < jtmp-1; ktmp += kinc) {
if (ktmp != n-1) {
const int k = wrap(ktmp,n);
free_energy_accumulate(&tmp, z[ind(i,k,n)] + yl[ind(k+1,j,n)]);
}
}
#ifdef __CUDACC__
free_energy_reduce(&tmp, threadIdx.x, blockDim.x);
if (threadIdx.x != 0)
continue;
#endif
free_energy_accumulate(&xl[ind(d%2,i,n)], tmp);
} /* end loop over i */
} /* end calc_xl */
GLOBAL static void calc_z
(const int d, /* diagonal - length of bases in between i and j, exclusive */
const int n,
const base_t *__restrict s,
const int *__restrict bcp,
real_t *__restrict v,
real_t *__restrict z,
real_t *__restrict xl,
real_t *__restrict wq,
const param_t p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) ||
(is_interior(i,j) && d <= 2*LOOP_MIN+1))
continue;
#ifdef __CUDACC__
const int kstart = i+LOOP_MIN+1 + threadIdx.x;
const int kinc = blockDim.x;
#else
const int kstart = i+LOOP_MIN+1;
const int kinc = 1;
#endif
int ktmp;
real_t tmp1 = INF, tmp2 = INF;
for (ktmp = kstart; ktmp < jtmp-LOOP_MIN-1; ktmp += kinc) {
const int k = wrap(ktmp,n);
if (k == n-1)
continue;
real_t *v1, *v2;
if ((v1 = array_val(v,i,k,n,bcp)) && (v2 = array_val(v,k+1,j,n,bcp)))
free_energy_accumulate(&tmp1, (*v1) + (*v2) + coaxial_flush(s,i,k,k+1,j,p));
if (j == 0 || k+1 == n-1)
continue;
if (i != n-1 && (v1 = array_val(v,i+1,k,n,bcp)) && (v2 = array_val(v,k+2,j,n,bcp)))
free_energy_accumulate(&tmp2, (*v1) + (*v2) + coaxial_mismatch1(s,i+1,k,k+2,j,p));
if ((v1 = array_val(v,i,k,n,bcp)) && (v2 = array_val(v,k+2,j-1,n,bcp)))
free_energy_accumulate(&tmp2, (*v1) + (*v2) + coaxial_mismatch2(s,i,k,k+2,j-1,p));
}
#ifdef __CUDACC__
free_energy_reduce(&tmp1, threadIdx.x, blockDim.x);
free_energy_reduce(&tmp2, threadIdx.x, blockDim.x);
if (threadIdx.x != 0)
continue;
#endif
if (is_interior(i,j))
free_energy_accumulate(&wq[upper_triangle_index(i,j)], free_energy_sum(tmp1,tmp2));
const real_t wcoax = free_energy_sum(tmp1 + 2*p->c, tmp2 + 2*p->b + 2*p->c);
free_energy_accumulate(&z[ind(i,j,n)], wcoax);
free_energy_accumulate(&xl[ind(d%2,i,n)], wcoax);
} /* end loop over i */
} /* end calc_z */
GLOBAL static void calc_x
(const int d, /* diagonal - length of bases in between i and j, exclusive */
const int n,
real_t *__restrict yl,
real_t *__restrict y,
const real_t *__restrict w,
const real_t *__restrict wl,
real_t *__restrict xl,
real_t *__restrict x,
const param_t p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if (is_exterior(i,j) && i-j <= LOOP_MIN)
continue;
x[ind(d%5,i,n)] = INF;
if (d > 2*LOOP_MIN+1 || is_exterior(i,j)) {
if (i != n-1)
free_energy_accumulate(&xl[ind(d%2,i,n)], xl[ind((d-1)%2,i+1,n)] + p->b);
/* x array */
x[ind(d%5,i,n)] = xl[ind(d%2,i,n)];
if (j != 0)
free_energy_accumulate(&x[ind(d%5,i,n)], x[ind((d-1)%5,i,n)] + p->b);
}
yl[ind(i,j,n)] = free_energy_sum(wl[ind(d%2,i,n)], xl[ind(d%2,i,n)]);
y[ind(i,j,n)] = free_energy_sum(w[ind(d%2,i,n)], x[ind(d%5,i,n)]);
} /* end loop over i */
} /* end calc_x */
GLOBAL static void init_w5_and_w3(int n, real_t *w5, real_t *w3)
{
w5[-1] = w5[0] = w3[n-1] = w3[n] = 0;
}
GLOBAL static void calc_w5_and_w3(
const int d,
const int n,
real_t *__restrict w5,
real_t *__restrict w3,
const real_t *__restrict wq)
{
#ifdef __CUDACC__
const int istart = threadIdx.x;
const int iinc = blockDim.x;
#else
const int istart = 0;
const int iinc = 1;
#endif
real_t w5tmp = INF, w3tmp = INF;
int i;
for (i = istart; i + LOOP_MIN <= d; i += iinc) {
free_energy_accumulate(&w5tmp, w5[i-1] + wq[upper_triangle_index(i,d+1)]);
free_energy_accumulate(&w3tmp, w3[n-i] + wq[upper_triangle_index(n-d-2,n-i-1)]);
}
#ifdef __CUDACC__
free_energy_reduce(&w5tmp, threadIdx.x, blockDim.x);
free_energy_reduce(&w3tmp, threadIdx.x, blockDim.x);
if (threadIdx.x != 0)
return;
#endif
w5[d+1] = w5[d];
w3[n-d-2] = w3[n-d-1];
free_energy_accumulate(&w5[d+1], w5tmp);
free_energy_accumulate(&w3[n-d-2], w3tmp);
} /* end calc_w5_and_w3 */
prna_t prna_new(const char *s, param_t par, int quiet, int *base_cp)
{
prna_t p = (prna_t) safe_malloc(sizeof(struct prna));
memset(p, 0, sizeof(struct prna));
const int n = p->n = strlen(s);
printf("sequence length = %d\n", n);
p->seq = (base_t *) safe_malloc(n*sizeof(base_t));
p->base_can_pair = base_cp;
sequence_from_string(p->seq, s);
p->v = (real_t *) safe_malloc(n*n*sizeof(real_t));
p->w5 = (real_t *) safe_malloc((n+1)*sizeof(real_t)) + 1;
p->w3 = (real_t *) safe_malloc((n+1)*sizeof(real_t));
real_t *z, *yl, *y, *wq, *w, *wl, *xl, *x;
#ifdef __CUDACC__ /* do multithreaded fill on GPU */
printf("Performing Calculation on GPU\n");
real_t *v, *w5, *w3;
#define ALLOC(a,sz) CU(cudaMalloc(&a,(sz)*sizeof(real_t)))
ALLOC(v,n*n);
ALLOC(w5,n+1);
w5++;
ALLOC(w3,n+1);
ALLOC(z,n*n);
ALLOC(yl,n*n);
ALLOC(y,n*n);
ALLOC(wq,n*(n-1)/2);
ALLOC(w,2*n);
ALLOC(wl,2*n);
ALLOC(xl,2*n);
ALLOC(x,5*n);
param_t dev_par;
CU(cudaMalloc(&dev_par, sizeof(struct param)));
CU(cudaMemcpy(dev_par, par, sizeof(struct param), cudaMemcpyHostToDevice));
base_t *dev_s;
CU(cudaMalloc(&dev_s,n*sizeof(base_t)));
CU(cudaMemcpy(dev_s, p->seq, n*sizeof(base_t), cudaMemcpyHostToDevice));
int *dev_bcp;
CU(cudaMalloc(&dev_bcp,(n*(n-1)/2)*sizeof(int)));
CU(cudaMemcpy(dev_bcp, p->base_can_pair, (n*(n-1)/2)*sizeof(int), cudaMemcpyHostToDevice));
init_w5_and_w3<<<1,1>>>(n,w5,w3);
for (int d = 0; d < n-1; d++) {
calc_hairpin_stack_exterior_multibranch<<<n,1>>>(d, n, dev_s, dev_bcp, v, x, w5, w3, dev_par);
calc_internal<<<n,dim3(THREAD_X,THREAD_Y,1)>>>(d, n, dev_s, dev_bcp, v, dev_par);
calc_coaxial<<<n,NTHREAD>>>(d, n, dev_s, dev_bcp, v, y, w5, w3, dev_par);
calc_wl<<<n,1>>>(d, n, dev_s, dev_bcp, v, z, wq, w, wl, dev_par);
calc_xl<<<n,NTHREAD>>>(d, n, z, yl, xl);
calc_z<<<n,NTHREAD>>>(d, n, dev_s, dev_bcp, v, z, xl, wq, dev_par);
calc_x<<<n,1>>>(d, n, yl, y, w, wl, xl, x, dev_par);
calc_w5_and_w3<<<1,NTHREAD>>>(d, n, w5, w3, wq);
}
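/* Note added for clarity (not part of the original source): each iteration of
 * this loop fills one anti-diagonal d of the dynamic-programming tables; the
 * kernels only read entries from diagonals < d or entries written earlier in
 * the same iteration, and all launches go to the default stream, so they
 * execute in order without explicit synchronization until the final
 * cudaMemcpy calls below. */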
CU(cudaMemcpy(p->v, v, n*n*sizeof(real_t), cudaMemcpyDeviceToHost));
CU(cudaMemcpy(p->w5 - 1, w5 - 1, (n+1)*sizeof(real_t), cudaMemcpyDeviceToHost));
CU(cudaMemcpy(p->w3, w3, (n+1)*sizeof(real_t), cudaMemcpyDeviceToHost));
CU(cudaFree(v));
CU(cudaFree(w5 - 1));
CU(cudaFree(w3));
CU(cudaFree(z));
CU(cudaFree(yl));
CU(cudaFree(y));
CU(cudaFree(wq));
CU(cudaFree(w));
CU(cudaFree(wl));
CU(cudaFree(xl));
CU(cudaFree(x));
CU(cudaFree(dev_par));
CU(cudaFree(dev_s));
CU(cudaFree(dev_bcp));
#else /* do serial fill on CPU */
#define ALLOC(a,sz) a = (real_t *) safe_malloc((sz)*sizeof(real_t))
printf("Performing Calculations on CPU\n");
ALLOC(z,n*n);
ALLOC(yl,n*n);
ALLOC(y,n*n);
ALLOC(wq,n*(n-1)/2);
ALLOC(w,2*n);
ALLOC(wl,2*n);
ALLOC(xl,2*n);
ALLOC(x,5*n);
init_w5_and_w3(n,p->w5,p->w3);
int d;
for (d = 0; d < n-1; d++) {
calc_hairpin_stack_exterior_multibranch(d, n, p->seq, p->base_can_pair, p->v, x, p->w5, p->w3, par);
calc_internal(d, n, p->seq, p->base_can_pair, p->v, par);
calc_coaxial(d, n, p->seq, p->base_can_pair, p->v, y, p->w5, p->w3, par);
calc_wl(d, n, p->seq, p->base_can_pair, p->v, z, wq, w, wl, par);
calc_xl(d, n, z, yl, xl);
calc_z(d, n, p->seq, p->base_can_pair, p->v, z, xl, wq, par);
calc_x(d, n, yl, y, w, wl, xl, x, par);
calc_w5_and_w3(d, n, p->w5, p->w3, wq);
}
free(z);
free(yl);
free(y);
free(wq);
free(w);
free(wl);
free(xl);
free(x);
#endif /* __CUDACC__ */
return p;
} /* end prna_new */
void prna_delete(prna_t p)
{
if (p) {
if (p->seq)
free(p->seq);
if (p->v)
free(p->v);
if (p->w5 - 1)
free(p->w5 - 1);
if (p->w3)
free(p->w3);
free(p);
}
}
#define SHOWARR(a) \
if (p->a) { \
int i, j; \
for (i = 0; i < n; i++) { \
printf("%s%4d: ",#a,i+1); \
for (j = 0; j < n; j++) { \
const real_t *aij = array_val(p->a,i,j,n,bcp); \
printf(RF" ", aij ? (*aij)*RT : INF); \
} \
printf("\n"); \
} \
}
#define SHOW(a) \
if (p->a) { \
int i; \
printf("%s: ",#a); \
for (i = 0; i < n; i++) \
printf(RF" ", p->a[i] * RT); \
printf("\n"); \
}
void prna_show(const prna_t p)
{
int i, n = p->n;
const base_t *s = p->seq;
const int *bcp = p->base_can_pair;
printf("n: %d\n", n);
printf("seq: ");
for (i = 0; i < n; i++)
printf("%c", base_as_char(s[i]));
printf("\n");
SHOWARR(v);
SHOW(w5);
SHOW(w3);
}
static real_t free_energy_of_pair(const prna_t p, int i, int j)
{
const int n = p->n;
//const base_t *s = p->seq;
const int *bcp = p->base_can_pair;
if (can_pair(i,j,n,bcp)){
return *array_val(p->v,i,j,n,bcp) + *array_val(p->v,j,i,n,bcp) - p->w3[0];
}
else
return INF;
}
real_t probability_of_pair(const prna_t p, int i, int j)
{
return exp(-free_energy_of_pair(p,i,j));
}
real_t get_v_array(const prna_t p, int i, int j)
{
const int n = p->n;
const int *bcp = p->base_can_pair;
if (can_pair(i,j,n,bcp)){
return *array_val(p->v,i,j,n,bcp);
}
else
return -INF;
}
real_t get_w3_array(const prna_t p, int i)
{
return p->w3[i];
}
real_t get_w5_array(const prna_t p, int i)
{
return p->w5[i];
}
void prna_write_neg_log10_probabilities(const prna_t p, const char *fn)
{
FILE *f = safe_fopen(fn,"w");
int i, j;
fprintf(f,"%d\n%-8s%-8s-log10(probability)\n",p->n,"i","j");
for (i = 0; i < p->n; i++)
for (j = i+1; j < p->n; j++)
if (can_pair(i,j,p->n,p->base_can_pair))
fprintf(f,"%-8d%-8d" RF "\n", i+1, j+1,
free_energy_of_pair(p,i,j)/LOG(10));
fclose(f);
}
void prna_write_probability_matrix(const prna_t p, const char *fn)
{
FILE *f = safe_fopen(fn,"w");
const int n = p->n;
//const base_t *s = p->seq;
const int *bcp = p->base_can_pair;
int i, j;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++)
fprintf(f,RF" ",
can_pair(i,j,n,bcp) ? probability_of_pair(p,i,j) : 0);
fprintf(f,"\n");
}
fclose(f);
}
static void write_ct_structure(FILE *f, const char *s, int n, const int *pair)
{
char fmt[256];
sprintf(fmt,"%d",n);
int ns = strlen(fmt)+1;
if (ns < 5)
ns = 5;
sprintf(fmt,"%%%dd",ns);
int i;
for (i = 0; i < n; i++) {
fprintf(f,fmt,i+1);
fprintf(f,"%2c ",s[i]);
fprintf(f,fmt,i);
fprintf(f,fmt,i == n-1 ? 0 : i+2);
fprintf(f,fmt,pair[i] == i ? 0 : pair[i]+1);
fprintf(f,fmt,i+1);
fprintf(f,"\n");
}
}
static void unpair(int *pair, int i)
{
const int j = pair[i];
pair[i] = i;
pair[j] = j;
}
static int is_paired(const int *pair, int i)
{
return pair[i] != i;
}
static void remove_helices_shorter_than(int min_helix_length, int *pair, int n)
{
int i;
for (i = 0; i < n-2; i++) {
int j = pair[i];
if (j <= i)
continue;
int npair = 1;
while (pair[i+1] == j-1 || pair[i+2] == j-1 || pair[i+1] == j-2) {
if (pair[i+1] == j-1)
;
else if (pair[i+2] == j-1) {
if (is_paired(pair,i+1))
unpair(pair,i+1);
i++;
} else
j--;
i++;
j--;
npair++;
}
if (npair < min_helix_length) {
unpair(pair,i);
if (i >= 2) {
while (pair[i-1] == j+1 || pair[i-2] == j+1 || pair[i-1] == j+2) {
if (pair[i-1] == j+1)
unpair(pair,i-1);
else if (pair[i-2] == j+1) {
unpair(pair,i-2);
i--;
} else {
unpair(pair,i-1);
j++;
}
i--;
j++;
}
} else if (i == 1) {
while (pair[i-1] == j+1 || pair[i-1] == j+2) {
if (pair[i-1] == j+1)
unpair(pair,i-1);
else {
unpair(pair,i-1);
j++;
}
i--;
j++;
}
}
}
}
} /* end remove_helices_shorter_than */
void prna_write_probknot(const prna_t p, const char *fn, const char *s, int min_helix_length)
{
const int n = p->n;
int *pair = (int *) safe_malloc(n*sizeof(int));
int i;
for (i = 0; i < n; i++) {
pair[i] = i; /* unpaired */
int j;
for (j = 0; j < n; j++)
if (free_energy_of_pair(p,i,j) < free_energy_of_pair(p,i,pair[i]))
pair[i] = j;
}
for (i = 0; i < n; i++)
if (pair[pair[i]] != i)
pair[i] = i; /* unpaired */
if (min_helix_length > 1)
remove_helices_shorter_than(min_helix_length,pair,n);
/* write the structure */
if (fn) {
FILE *f = safe_fopen(fn,"w");
write_ct_structure(f,s,n,pair);
fclose(f);
} else {
write_ct_structure(stdout,s,n,pair);
}
free(pair);
}
int *generate_bcp(const char *s)
{
int length = strlen(s);
int i, j;
int *base_cp = (int *) safe_malloc((length*(length-1)/2)*sizeof(int));
base_t *seq = (base_t *) safe_malloc(length*sizeof(base_t));
sequence_from_string(seq, s);
for (i=0; i<length; i++){
for (j=i+1; j<length; j++){
if ((j-i < LOOP_MIN+1) || !isupper(s[i]) || !isupper(s[j])){
base_cp[upper_triangle_index(i,j)]=0;
}
else{
base_cp[upper_triangle_index(i,j)]=is_canonical_pair(seq[i],seq[j]) && ((i > 0 && j < length-1 && is_canonical_pair(seq[i-1],seq[j+1]))
|| (j-i>=LOOP_MIN+3 && is_canonical_pair(seq[i+1],seq[j-1])));
}
}
}
return base_cp;
}
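/* Hedged usage sketch (illustration only, not part of the original sources): one
plausible way the functions above fit together, assuming a "prna.h" header declares
them and that a param_t can be obtained from the package's parameter-loading code.
param_new_from_text() is a hypothetical name used purely for illustration
(the block is guarded with #if 0 so it does not affect compilation). */
#if 0
#include "prna.h"
int main(void)
{
const char *seq = "GGGGAAAACCCC"; /* toy sequence */
param_t par = param_new_from_text("data_tables"); /* hypothetical loader, see note above */
int *bcp = generate_bcp(seq); /* which (i,j) may pair */
prna_t p = prna_new(seq, par, 0, bcp); /* fills v, w5, w3 (GPU or CPU path) */
prna_write_neg_log10_probabilities(p, "pairs.txt");
prna_write_probknot(p, "structure.ct", seq, 3); /* drop helices shorter than 3 */
prna_delete(p);
free(bcp);
return 0;
}
#endif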
#define DEBUG_DEV
#ifdef DEBUG_DEV
#define getErrorCuda(command)\
command;\
cudaDeviceSynchronize();\
if (cudaPeekAtLastError() != cudaSuccess){\
std::cout << #command << " : " << cudaGetErrorString(cudaGetLastError())\
<< " in file " << __FILE__ << " at line " << __LINE__ << std::endl;\
exit(1);\
}
#endif
#ifndef DEBUG_DEV
#define getErrorCuda(command) command;
#endif
__constant__ float const_stencilWeight[21];
// base case
__global__ void stencil(float *src, float *dst, int size, float *stencilWeight)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx += 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
out += src[idx+i] * stencilWeight[i+10];
}
dst[idx] = out;
}
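// Note: all kernels in this file apply taps i in [-10, 10), i.e. 20 of the 21 entries of
// stencilWeight/const_stencilWeight (index 20 is never read), and start at idx = 11, so
// the accesses src[idx-10 .. idx+9] stay inside the buffers allocated in main().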
// read only cache stencil coefficients
__global__ void stencilReadOnly1(float *src, float *dst, int size, float* stencilWeight)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx += 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
out += src[idx+i] * __ldg(&stencilWeight[i+10]);
}
dst[idx] = out;
}
// read only data
__global__ void stencilReadOnly2(float *src, float *dst, int size, float* stencilWeight)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx += 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
out += __ldg(&src[idx+i]) * stencilWeight[i+10];
}
dst[idx] = out;
}
// read only coefficients and data
__global__ void stencilReadOnly3(float *src, float *dst, int size, float* stencilWeight)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx += 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
out += __ldg(&src[idx+i]) * __ldg(&stencilWeight[i+10]);
}
dst[idx] = out;
}
// constant memory coefficients
__global__ void stencilConst1(float *src, float *dst, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx += 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
out += src[idx+i] * const_stencilWeight[i+10];
}
dst[idx] = out;
}
// constant memory coefficients and data through read only cache
__global__ void stencilConst2(float *src, float *dst, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx += 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
out += __ldg(&src[idx+i]) * const_stencilWeight[i+10];
}
dst[idx] = out;
}
// constant memory coefficients and data from shared
__global__ void stencilShared1(float *src, float *dst, int size)
{
const int base = blockIdx.x * blockDim.x;
__shared__ float buffer[1024+21];
// stage this block's input window src[base .. base+1024+20] into shared memory
for(int i = threadIdx.x; i < 1024+21; i = i + 1024)
{
buffer[i] = src[base+i];
}
// barrier before any thread may exit, so no thread skips the __syncthreads()
__syncthreads();
int idx = base + threadIdx.x + 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
// buffer[threadIdx.x+11+i] == src[idx+i], matching the global-memory kernels
out += buffer[threadIdx.x+11+i] * const_stencilWeight[i+10];
}
dst[idx] = out;
}
// constant memory coefficients and data from shared through read only
__global__ void stencilShared2(float *src, float *dst, int size)
{
const int base = blockIdx.x * blockDim.x;
__shared__ float buffer[1024+21];
// stage this block's input window into shared memory via the read-only cache
for(int i = threadIdx.x; i < 1024+21; i = i + 1024)
{
buffer[i] = __ldg(&src[base+i]);
}
// barrier before any thread may exit, so no thread skips the __syncthreads()
__syncthreads();
int idx = base + threadIdx.x + 11;
if (idx >= size)
return;
float out = 0;
#pragma unroll
for(int i = -10;i < 10; i++)
{
out += buffer[threadIdx.x+11+i] * const_stencilWeight[i+10];
}
dst[idx] = out;
}
bool verify(float *arr, float *corr, int count)
{
// skip the first elements since they may be wrong
for(int i = 11; i < count; i++)
{
if(arr[i] != corr[i])
{
std::cout << "check failed " << i << " " << arr[i] << " != " << corr[i] << std::endl;
exit(1);
}
}
return true;
}
int main()
{
float *a;
float *b;
float *bOut;
float *bCorr;
float *weights;
getErrorCuda(cudaMalloc(&a, sizeof(float)*102400000));
getErrorCuda(cudaMalloc(&b, sizeof(float)*102400000));
getErrorCuda(cudaMallocHost(&bOut, sizeof(float)*102400000));
getErrorCuda(cudaMallocManaged(&bCorr, sizeof(float)*102400000));
getErrorCuda(cudaMallocManaged(&weights, sizeof(float)*21));
cudaDeviceSynchronize();
for(int i = 0; i < 102400000;i++)
{
//a[i] = 0;
//b[i] = 0;
bCorr[i] = 0;
}
cudaMemset(a, 1, 102400000);
cudaMemset(b, 1, 102400000);
cudaMemset(bCorr, 1, 102400000);
cudaMemset(bOut, 1, 102400000);
cudaDeviceSynchronize();
int blockSize = 1024;
int blocks = 10000;
for(int i = 0; i < 21;i++)
weights[i] = i-10;
cudaDeviceSynchronize();
// copy to constant memory
cudaMemcpyToSymbol(const_stencilWeight, weights, sizeof(float)*21);
// run the basic case once to get the "correct" results
getErrorCuda((stencil<<<blocks, blockSize>>>(a, bCorr, 10240000, weights)));
cudaDeviceSynchronize();
getErrorCuda((stencil<<<blocks, blockSize>>>(a, b, 10240000, weights)));
cudaDeviceSynchronize();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
cudaSetDevice(0);
float minTime = 10000;
for(int i = 0; i < 10; i++)
{
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencil<<<blocks, blockSize>>>(a, b, 10240000, weights)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "Non optimized " << (blockSize*blocks)/minTime << " updates/s" << std::endl;
minTime = 10000;
std::cout << std::endl;
for(int i = 0; i < 10; i++)
{
cudaDeviceSynchronize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencilReadOnly1<<<blocks, blockSize>>>(a, b, 10240000, weights)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "read only cache stencil coefficients " <<(blockSize*blocks)/minTime << " updates/s" << std::endl;
minTime = 10000;
for(int i = 0; i < 10; i++)
{
cudaDeviceSynchronize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencilReadOnly2<<<blocks, blockSize>>>(a, b, 10240000, weights)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "read only data " << (blockSize*blocks)/minTime << " updates/s" << std::endl;
minTime = 10000;
for(int i = 0; i < 10; i++)
{
cudaDeviceSynchronize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencilReadOnly3<<<blocks, blockSize>>>(a, b, 10240000, weights)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "read only coefficients and data " << (blockSize*blocks)/minTime << " updates/s" << std::endl;
minTime = 10000;
std::cout << std::endl;
for(int i = 0; i < 10; i++)
{
cudaDeviceSynchronize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencilConst1<<<blocks, blockSize>>>(a, b, 10240000)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "constant memory coefficients " << (blockSize*blocks)/minTime << " updates/s" << std::endl;
minTime = 10000;
for(int i = 0; i < 10; i++)
{
cudaDeviceSynchronize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencilConst2<<<blocks, blockSize>>>(a, b, 10240000)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "constant memory coefficients and data through read only cache " << (blockSize*blocks)/minTime << " updates/s" << std::endl;
std::cout << std::endl;
minTime = 10000;
for(int i = 0; i < 10; i++)
{
cudaDeviceSynchronize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencilShared1<<<blocks, blockSize>>>(a, b, 10240000)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "constant memory coefficients and data from shared " << (blockSize*blocks)/minTime << " updates/s" << std::endl;
minTime = 10000;
for(int i = 0; i < 10; i++)
{
cudaDeviceSynchronize();
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
getErrorCuda((stencilShared2<<<blocks, blockSize>>>(a, b, 10240000)));
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
getErrorCuda(cudaMemcpy(bOut, b, sizeof(float)*10240000, cudaMemcpyDefault));
verify(bOut, bCorr, 1000);
std::chrono::duration<float> elapsed_seconds = end-start;
minTime = std::min(elapsed_seconds.count(), minTime);
}
std::cout << "constant memory coefficients and data from shared thorugh read only " << (blockSize*blocks)/minTime << " updates/s" << std::endl;
minTime = 10000;
}
#include "chrono_multicore/cuda/ChMPM.cuh"
#include "chrono_multicore/cuda/ChMPMUtils.h"
#include "chrono_multicore/cuda/ChCudaHelper.cuh"
#include "chrono_multicore/cuda/ChGPUVector.cuh"
#include <cub/cub.cuh>
#include "chrono_multicore/cuda/matrixf.cuh"
//#define BOX_YIELD
#define SPHERE_YIELD
//#define DRUCKER_PRAGER
namespace chrono {
struct Bounds {
float minimum[3];
float maximum[3];
};
float3 min_bounding_point;
float3 max_bounding_point;
MPM_Settings host_settings;
std::vector<int> particle_node_mapping;
std::vector<int> node_particle_mapping;
std::vector<int> node_start_index;
std::vector<int> particle_number;
unsigned int num_mpm_nodes_active;
// GPU Things
float3* lower_bound;
float3* upper_bound;
gpu_vector<float> pos, vel, JE_JP;
gpu_vector<float> node_mass;
gpu_vector<float> marker_volume;
gpu_vector<float> grid_vel, delta_v;
gpu_vector<float> rhs;
gpu_vector<float> marker_Fe, marker_Fe_hat, marker_Fp;
gpu_vector<float> PolarS, PolarR;
gpu_vector<float> old_vel_node_mpm;
gpu_vector<float> ml, mg, mg_p, ml_p;
gpu_vector<float> dot_g_proj_norm;
gpu_vector<float> marker_plasticity;
CUDA_CONSTANT MPM_Settings device_settings;
CUDA_CONSTANT Bounds system_bounds;
cudaEvent_t start;
cudaEvent_t stop;
float time_measured = 0;
/////// BB Constants
__device__ float alpha = 0.0001;
__device__ float dot_ms_ms = 0;
__device__ float dot_ms_my = 0;
__device__ float dot_my_my = 0;
#define a_min 1e-13
#define a_max 1e13
#define neg_BB1_fallback 0.11
#define neg_BB2_fallback 0.12
#define LOOP_TWO_RING_GPUSP(X) \
cx = GridCoord(xix, inv_bin_edge, system_bounds.minimum[0]); \
cy = GridCoord(xiy, inv_bin_edge, system_bounds.minimum[1]); \
cz = GridCoord(xiz, inv_bin_edge, system_bounds.minimum[2]); \
for (int i = cx - 2; i <= cx + 2; ++i) { \
for (int j = cy - 2; j <= cy + 2; ++j) { \
for (int k = cz - 2; k <= cz + 2; ++k) { \
int current_node = GridHash(i, j, k, device_settings.bins_per_axis_x, device_settings.bins_per_axis_y, \
device_settings.bins_per_axis_z); \
float current_node_locationx = i * bin_edge + system_bounds.minimum[0]; \
float current_node_locationy = j * bin_edge + system_bounds.minimum[1]; \
float current_node_locationz = k * bin_edge + system_bounds.minimum[2]; \
X \
} \
} \
}
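// Hedged sketch (assumption, for clarity only): GridCoord() and GridHash() used by the
// macro above come from ChMPMUtils.h. They are expected to behave roughly like the
// helpers below, i.e. map a coordinate to a bin index along one axis and flatten
// (i,j,k) to a linear grid-node id; the library's exact flattening order may differ.
// The *_sketch versions are illustrative only and are not called anywhere.
int CUDA_HOST_DEVICE GridCoord_sketch(float x, float inv_bin_edge, float minimum) {
return (int)floorf((x - minimum) * inv_bin_edge);
}
int CUDA_HOST_DEVICE GridHash_sketch(int i, int j, int k, int bins_x, int bins_y, int bins_z) {
(void)bins_z; // kept only to mirror the real call signature
return (k * bins_y + j) * bins_x + i; // row-major flattening over the grid nodes
}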
//////========================================================================================================================================================================
////
void CUDA_HOST_DEVICE WeakEqual(const float& x, const float& y, float COMPARE_EPS = FLT_EPSILON) {
if (fabsf(x - y) > COMPARE_EPS) {
printf("%f does not equal %f %.20e\n", x, y, fabsf(x - y));
// exit(1);
}
}
void CUDA_HOST_DEVICE WeakEqual(const Mat33f& a, const Mat33f& b, float COMPARE_EPS = FLT_EPSILON) {
WeakEqual(a[0], b[0], COMPARE_EPS);
WeakEqual(a[1], b[1], COMPARE_EPS);
WeakEqual(a[2], b[2], COMPARE_EPS);
WeakEqual(a[3], b[3], COMPARE_EPS);
WeakEqual(a[4], b[4], COMPARE_EPS);
WeakEqual(a[5], b[5], COMPARE_EPS);
WeakEqual(a[6], b[6], COMPARE_EPS);
WeakEqual(a[7], b[7], COMPARE_EPS);
WeakEqual(a[8], b[8], COMPARE_EPS);
}
CUDA_GLOBAL void kComputeBounds(const float* pos, // input
float3* lower, // output
float3* upper // output
) {
typedef cub::BlockReduce<float3, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(device_settings.num_mpm_markers - block_start, blockDim.x);
const int index = block_start + threadIdx.x;
if (index < device_settings.num_mpm_markers) {
float3 data = make_float3(pos[index * 3 + 0], pos[index * 3 + 1], pos[index * 3 + 2]);
float3 blockUpper = BlockReduce(temp_storage).Reduce(data, float3Max(), num_valid);
__syncthreads();
float3 blockLower = BlockReduce(temp_storage).Reduce(data, float3Min(), num_valid);
if (threadIdx.x == 0) {
AtomicMax(upper, blockUpper);
AtomicMin(lower, blockLower);
}
}
}
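// Hedged sketch (assumption): AtomicMax()/AtomicMin() on float3 used above come from the
// chrono CUDA headers. A per-component float atomic max is typically built on atomicCAS
// as below; this *_sketch helper is illustrative only and is not the library's
// actual implementation.
__device__ float atomicMaxFloat_sketch(float* addr, float value) {
int* addr_as_int = (int*)addr;
int old = *addr_as_int;
int assumed;
do {
assumed = old;
if (__int_as_float(assumed) >= value)
break; // stored value is already >= candidate, nothing to do
old = atomicCAS(addr_as_int, assumed, __float_as_int(value));
} while (assumed != old);
return __int_as_float(old);
}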
////========================================================================================================================================================================
CUDA_GLOBAL void kRasterize(const float* sorted_pos, // input
const float* sorted_vel, // input
float* grid_mass, // output
float* grid_vel) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
const float vix = sorted_vel[p * 3 + 0];
const float viy = sorted_vel[p * 3 + 1];
const float viz = sorted_vel[p * 3 + 2];
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge) * device_settings.mass;
atomicAdd(&grid_mass[current_node], weight); //
atomicAdd(&grid_vel[current_node * 3 + 0], weight * vix);
atomicAdd(&grid_vel[current_node * 3 + 1], weight * viy);
atomicAdd(&grid_vel[current_node * 3 + 2], weight * viz);)
}
}
CUDA_GLOBAL void kRasterize(const float* sorted_pos, // input
float* grid_mass) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge) * device_settings.mass;
atomicAdd(&grid_mass[current_node], weight); //
)
}
}
//
////========================================================================================================================================================================
//
CUDA_GLOBAL void kNormalizeWeights(float* grid_mass, // input
float* grid_vel) { // output
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
float n_mass = grid_mass[i];
if (n_mass > FLT_EPSILON) {
grid_vel[i * 3 + 0] /= n_mass;
grid_vel[i * 3 + 1] /= n_mass;
grid_vel[i * 3 + 2] /= n_mass;
}
}
}
//////========================================================================================================================================================================
////
CUDA_GLOBAL void kComputeParticleVolumes(const float* sorted_pos, // input
float* grid_mass, // output
float* volume) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
float particle_density = 0;
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge);
particle_density += grid_mass[current_node] * weight; //
)
// Inverse density to remove division
particle_density = (bin_edge * bin_edge * bin_edge) / particle_density;
volume[p] = device_settings.mass * particle_density;
}
}
CUDA_GLOBAL void kFeHat(const float* sorted_pos, // input
const float* marker_Fe, // input
const float* grid_vel, // input
float* marker_Fe_hat) { // output
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
Mat33f Fe_hat_t(0.0);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP(
float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
Fe_hat_t[0] += vnx * valx; Fe_hat_t[1] += vny * valx; Fe_hat_t[2] += vnz * valx; //
Fe_hat_t[3] += vnx * valy; Fe_hat_t[4] += vny * valy; Fe_hat_t[5] += vnz * valy; //
Fe_hat_t[6] += vnx * valz; Fe_hat_t[7] += vny * valz; Fe_hat_t[8] += vnz * valz;
// float3 vel(grid_vel[current_node * 3 + 0], grid_vel[current_node * 3 + 1],
// grid_vel[current_node * 3 + 2]); //
// float3 kern = dN(xi - current_node_location, inv_bin_edge); //
// Fe_hat_t += OuterProduct(device_settings.dt * vel, kern);
)
Mat33f m_Fe(marker_Fe, p, device_settings.num_mpm_markers);
Mat33f m_Fe_hat = (Mat33f(1.0) + device_settings.dt * Fe_hat_t) * m_Fe;
m_Fe_hat.Store(marker_Fe_hat, p, device_settings.num_mpm_markers);
}
}
// CUDA_GLOBAL void kSVD(Mat33f* marker_Fe_hat, Mat33f* PolarR, Mat33f* PolarS) {
// const int p = blockIdx.x * blockDim.x + threadIdx.x;
// if (p < device_settings.num_mpm_markers) {
// Mat33f U, V, R, S, W;
// float3 E;
// SVD(marker_Fe_hat[p], U, E, V);
// // Perform polar decomposition F = R*S
// R = MultTranspose(U, V);
// S = V * MultTranspose(Mat33f(E), V);
//
// PolarR[p] = R;
// PolarS[p] = S;
// }
//}
CUDA_GLOBAL void kApplyForces(const float* sorted_pos, // input
const float* marker_Fe_hat, // input
const float* marker_Fe, // input
const float* marker_volume, // input
const float* node_mass, // input
const float* plasticity, // input
float* PolarR, // input
float* PolarS, // input
float* grid_vel) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const Mat33f FE(marker_Fe, p, device_settings.num_mpm_markers);
const Mat33f FE_hat(marker_Fe_hat, p, device_settings.num_mpm_markers);
const float a = -one_third;
const float J = Determinant(FE_hat);
const float Ja = powf(J, a);
#if defined(BOX_YIELD) || defined(SPHERE_YIELD)
const float current_mu = device_settings.mu * expf(device_settings.hardening_coefficient * (plasticity[p]));
#else
const float current_mu = device_settings.mu;
#endif
#if 1
Mat33f JaFE = Ja * FE;
Mat33f UE, VE;
float3 EE;
SVD(JaFE, UE, EE, VE); /* Perform a polar decomposition, FE=RE*SE, RE is the Unitary part*/
Mat33f RE = MultTranspose(UE, VE);
Mat33f SE = VE * MultTranspose(EE, VE);
RE.Store(PolarR, p, device_settings.num_mpm_markers);
PolarS[p + 0 * device_settings.num_mpm_markers] = SE[0];
PolarS[p + 1 * device_settings.num_mpm_markers] = SE[1];
PolarS[p + 2 * device_settings.num_mpm_markers] = SE[2];
PolarS[p + 3 * device_settings.num_mpm_markers] = SE[4];
PolarS[p + 4 * device_settings.num_mpm_markers] = SE[5];
PolarS[p + 5 * device_settings.num_mpm_markers] = SE[8];
#else
const Mat33f A = Potential_Energy_Derivative_Deviatoric(Ja * FE_hat, current_mu, PolarR[p], PolarS[p]);
#endif
const Mat33f H = AdjointTranspose(FE_hat) * (1.0f / J);
const Mat33f A = 2.f * current_mu * (JaFE - RE);
const Mat33f Z_B = Z__B(A, FE_hat, Ja, a, H);
const Mat33f vPEDFepT = device_settings.dt * marker_volume[p] * MultTranspose(Z_B, FE);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
LOOP_TWO_RING_GPUSP( //
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
float fx = vPEDFepT[0] * valx + vPEDFepT[3] * valy + vPEDFepT[6] * valz;
float fy = vPEDFepT[1] * valx + vPEDFepT[4] * valy + vPEDFepT[7] * valz;
float fz = vPEDFepT[2] * valx + vPEDFepT[5] * valy + vPEDFepT[8] * valz;
float mass = node_mass[current_node]; //
if (mass > 0) {
atomicAdd(&grid_vel[current_node * 3 + 0], -fx / mass); //
atomicAdd(&grid_vel[current_node * 3 + 1], -fy / mass); //
atomicAdd(&grid_vel[current_node * 3 + 2], -fz / mass); //
})
}
}
CUDA_GLOBAL void kRhs(const float* node_mass, // input
const float* grid_vel,
float* rhs) {
const int current_node = blockIdx.x * blockDim.x + threadIdx.x;
if (current_node < device_settings.num_mpm_nodes) {
float mass = node_mass[current_node]; //
if (mass > 0) {
rhs[current_node * 3 + 0] = mass * grid_vel[current_node * 3 + 0]; //
rhs[current_node * 3 + 1] = mass * grid_vel[current_node * 3 + 1]; //
rhs[current_node * 3 + 2] = mass * grid_vel[current_node * 3 + 2]; //
} else {
rhs[current_node * 3 + 0] = 0;
rhs[current_node * 3 + 1] = 0;
rhs[current_node * 3 + 2] = 0;
}
}
}
CUDA_GLOBAL void kMultiplyA(const float* sorted_pos, // input
const float* v_array,
const float* old_vel_node_mpm,
const float* PolarR, // input
const float* PolarS, // input
const float* marker_Fe, // input
const float* marker_Fe_hat, // input
const float* marker_volume, // input
const float* plasticity, // input
float* result_array) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = sorted_pos[p * 3 + 0];
const float xiy = sorted_pos[p * 3 + 1];
const float xiz = sorted_pos[p * 3 + 2];
// float VAP[7];
// float delta_F[7] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
Mat33f delta_F(0.0f);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP( //
float vnx = v_array[current_node * 3 + 0]; //
float vny = v_array[current_node * 3 + 1]; //
float vnz = v_array[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
delta_F[0] += vnx * valx; delta_F[1] += vny * valx; delta_F[2] += vnz * valx; //
delta_F[3] += vnx * valy; delta_F[4] += vny * valy; delta_F[5] += vnz * valy; //
delta_F[6] += vnx * valz; delta_F[7] += vny * valz; delta_F[8] += vnz * valz;)
const Mat33f m_FE(marker_Fe, p, device_settings.num_mpm_markers);
delta_F = delta_F * m_FE;
#if defined(BOX_YIELD) || defined(SPHERE_YIELD)
const float current_mu =
2.0f * device_settings.mu * expf(device_settings.hardening_coefficient * (plasticity[p]));
#else
const float current_mu = 2.0f * device_settings.mu;
#endif
Mat33f RE(PolarR, p, device_settings.num_mpm_markers);
const Mat33f F(marker_Fe_hat, p, device_settings.num_mpm_markers);
const float a = -one_third;
const float J = Determinant(F);
const float Ja = powf(J, a);
const Mat33f H = AdjointTranspose(F) * (1.0f / J);
const Mat33f B_Z = B__Z(delta_F, F, Ja, a, H);
const Mat33f WE = TransposeMult(RE, B_Z);
// C is the original second derivative
SymMat33f SE;
SE[0] = PolarS[p + device_settings.num_mpm_markers * 0];
SE[1] = PolarS[p + device_settings.num_mpm_markers * 1];
SE[2] = PolarS[p + device_settings.num_mpm_markers * 2];
SE[3] = PolarS[p + device_settings.num_mpm_markers * 3];
SE[4] = PolarS[p + device_settings.num_mpm_markers * 4];
SE[5] = PolarS[p + device_settings.num_mpm_markers * 5];
const Mat33f C_B_Z = current_mu * (B_Z - Solve_dR(RE, SE, WE));
const Mat33f FE = Ja * F;
const Mat33f A = current_mu * (FE - RE);
const Mat33f P1 = Z__B(C_B_Z, F, Ja, a, H);
const Mat33f P2 = (a * DoubleDot(H, delta_F)) * Z__B(A, F, Ja, a, H);
const Mat33f P3 = (a * Ja * DoubleDot(A, delta_F)) * H;
const Mat33f P4 = (-a * Ja * DoubleDot(A, F)) * H * TransposeMult(delta_F, H);
const Mat33f VAP = marker_volume[p] * MultTranspose(P1 + P2 + P3 + P4, m_FE);
// Mat33f VAP = d2PsidFdFO(delta_F, m_FE_hat, PolarR[p], PolarS[p], current_mu);
// WeakEqual(VAP, VAP2);
LOOP_TWO_RING_GPUSP( //
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
float resx = VAP[0] * valx + VAP[3] * valy + VAP[6] * valz;
float resy = VAP[1] * valx + VAP[4] * valy + VAP[7] * valz;
float resz = VAP[2] * valx + VAP[5] * valy + VAP[8] * valz;
atomicAdd(&result_array[current_node * 3 + 0], resx); atomicAdd(&result_array[current_node * 3 + 1], resy);
atomicAdd(&result_array[current_node * 3 + 2], resz););
}
}
CUDA_GLOBAL void kMultiplyB(const float* v_array,
const float* old_vel_node_mpm,
const float* node_mass,
float* result_array) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
float mass = node_mass[i];
if (mass > 0) {
result_array[i * 3 + 0] += mass * (v_array[i * 3 + 0]);
result_array[i * 3 + 1] += mass * (v_array[i * 3 + 1]);
result_array[i * 3 + 2] += mass * (v_array[i * 3 + 2]);
}
}
}
void MPM_ComputeBounds() {
max_bounding_point = make_float3(-FLT_MAX, -FLT_MAX, -FLT_MAX);
min_bounding_point = make_float3(FLT_MAX, FLT_MAX, FLT_MAX);
cudaMemcpyAsync(lower_bound, &min_bounding_point, sizeof(float3), cudaMemcpyHostToDevice);
cudaMemcpyAsync(upper_bound, &max_bounding_point, sizeof(float3), cudaMemcpyHostToDevice);
kComputeBounds<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, //
lower_bound, //
upper_bound); //
cudaMemcpy(&min_bounding_point, lower_bound, sizeof(float3), cudaMemcpyDeviceToHost);
cudaMemcpy(&max_bounding_point, upper_bound, sizeof(float3), cudaMemcpyDeviceToHost);
min_bounding_point.x = host_settings.kernel_radius * roundf(min_bounding_point.x / host_settings.kernel_radius);
min_bounding_point.y = host_settings.kernel_radius * roundf(min_bounding_point.y / host_settings.kernel_radius);
min_bounding_point.z = host_settings.kernel_radius * roundf(min_bounding_point.z / host_settings.kernel_radius);
max_bounding_point.x = host_settings.kernel_radius * roundf(max_bounding_point.x / host_settings.kernel_radius);
max_bounding_point.y = host_settings.kernel_radius * roundf(max_bounding_point.y / host_settings.kernel_radius);
max_bounding_point.z = host_settings.kernel_radius * roundf(max_bounding_point.z / host_settings.kernel_radius);
max_bounding_point = max_bounding_point + host_settings.kernel_radius * 8;
min_bounding_point = min_bounding_point - host_settings.kernel_radius * 6;
cudaMemcpyToSymbolAsync(system_bounds, &min_bounding_point, sizeof(float3), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbolAsync(system_bounds, &max_bounding_point, sizeof(float3), sizeof(float3), cudaMemcpyHostToDevice);
host_settings.bin_edge = host_settings.kernel_radius * 2;
host_settings.bins_per_axis_x = int(max_bounding_point.x - min_bounding_point.x) / (int)host_settings.bin_edge;
host_settings.bins_per_axis_y = int(max_bounding_point.y - min_bounding_point.y) / (int)host_settings.bin_edge;
host_settings.bins_per_axis_z = int(max_bounding_point.z - min_bounding_point.z) / (int)host_settings.bin_edge;
host_settings.inv_bin_edge = float(1.) / host_settings.bin_edge;
host_settings.num_mpm_nodes =
host_settings.bins_per_axis_x * host_settings.bins_per_axis_y * host_settings.bins_per_axis_z;
cudaCheck(cudaMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
printf("max_bounding_point [%f %f %f]\n", max_bounding_point.x, max_bounding_point.y, max_bounding_point.z);
printf("min_bounding_point [%f %f %f]\n", min_bounding_point.x, min_bounding_point.y, min_bounding_point.z);
printf("Compute DOF [%d %d %d] [%f] %d %d\n", host_settings.bins_per_axis_x, host_settings.bins_per_axis_y,
host_settings.bins_per_axis_z, host_settings.bin_edge, host_settings.num_mpm_nodes,
host_settings.num_mpm_markers);
}
//
void Multiply(gpu_vector<float>& input, gpu_vector<float>& output) {
int size = (int)input.size();
kMultiplyA<<<CONFIG(size)>>>(pos.data_d, // input
input.data_d, //
old_vel_node_mpm.data_d,
PolarR.data_d, // input
PolarS.data_d, // input
marker_Fe.data_d, // input
marker_Fe_hat.data_d, // input
marker_volume.data_d, // input
marker_plasticity.data_d, // input
output.data_d);
kMultiplyB<<<CONFIG(size)>>>(input.data_d, old_vel_node_mpm.data_d, node_mass.data_d, output.data_d);
}
CUDA_GLOBAL void kSubtract(int size, float* x, float* y) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
y[i] = y[i] - x[i];
}
}
template <bool inner>
CUDA_GLOBAL void kResetGlobals() {
if (inner) {
dot_ms_ms = 0;
dot_ms_my = 0;
dot_my_my = 0;
} else {
alpha = 0.0001;
}
}
template <bool even>
CUDA_GLOBAL void kUpdateAlpha(int num_items, float* ml_p, float* ml, float* mg_p, float* mg) {
typedef cub::BlockReduce<float, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(num_items - block_start, blockDim.x);
const int tid = block_start + threadIdx.x;
if (tid < num_items) {
float data, block_sum;
float ms = ml_p[tid] - ml[tid];
float my = mg_p[tid] - mg[tid];
if (even) {
data = ms * ms;
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_ms_ms, block_sum);
}
} else {
data = my * my;
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_my_my, block_sum);
}
}
__syncthreads();
data = ms * my;
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_ms_my, block_sum);
}
}
}
template <bool even>
CUDA_GLOBAL void kAlpha() {
if (even) {
if (dot_ms_my <= 0) {
alpha = neg_BB1_fallback;
} else {
alpha = fminf(a_max, fmaxf(a_min, dot_ms_ms / dot_ms_my));
}
} else {
if (dot_ms_my <= 0) {
alpha = neg_BB2_fallback;
} else {
alpha = fminf(a_max, fmaxf(a_min, dot_ms_my / dot_my_my));
}
}
// printf("alpha: %f %f %f %f \n", alpha, dot_ms_ms, dot_ms_my, dot_my_my);
}
CUDA_GLOBAL void kCompute_ml_p(int num_items, float* ml, float* mg, float* ml_p) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_items) {
ml_p[i] = ml[i] - alpha * mg[i];
// printf("mlps : [%f %f %f]\n", ml_p[i], ml[i], mg[i]);
}
}
CUDA_GLOBAL void kResidual(int num_items, float* mg, float* dot_g_proj_norm) {
typedef cub::BlockReduce<float, num_threads_per_block> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int block_start = blockDim.x * blockIdx.x;
const int num_valid = min(num_items - block_start, blockDim.x);
float data, block_sum;
const int tid = block_start + threadIdx.x;
if (tid < num_items) {
data = mg[tid] * mg[tid];
block_sum = BlockReduce(temp_storage).Reduce(data, cub::Sum(), num_valid);
if (threadIdx.x == 0) {
atomicAdd(&dot_g_proj_norm[0], block_sum);
}
// printf("resid [%f %f]\n", mg[tid], dot_g_proj_norm[0]);
}
}
float time_no_shur = 0;
float time_shur = 0;
void MPM_BBSolver(gpu_vector<float>& r, gpu_vector<float>& delta_v) {
time_shur = 0;
time_no_shur = 0;
const unsigned int size = (unsigned int)r.size();
float lastgoodres = 10e30f;
{
CudaEventTimer timer(start, stop, true, time_no_shur);
dot_g_proj_norm.resize(1);
ml.resize(size);
mg.resize(size);
mg_p.resize(size);
ml_p.resize(size);
ml = delta_v;
mg = 0;
}
{
CudaEventTimer timer(start, stop, true, time_shur);
Multiply(ml, mg);
}
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kSubtract<<<CONFIG(size)>>>(size, r.data_d, mg.data_d);
mg_p = mg;
}
kResetGlobals<false><<<1, 1>>>();
for (int current_iteration = 0; current_iteration < host_settings.num_iterations; current_iteration++) {
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kResetGlobals<true><<<1, 1>>>();
kCompute_ml_p<<<CONFIG(size)>>>(size, ml.data_d, mg.data_d, ml_p.data_d);
mg_p = 0;
}
{
CudaEventTimer timer(start, stop, true, time_shur);
Multiply(ml_p, mg_p);
}
{
CudaEventTimer timer(start, stop, true, time_no_shur);
kSubtract<<<CONFIG(size)>>>(size, r.data_d, mg_p.data_d);
if (current_iteration % 2 == 0) {
kUpdateAlpha<true><<<CONFIG(size)>>>(size, ml_p.data_d, ml.data_d, mg_p.data_d, mg.data_d);
kAlpha<true><<<1, 1>>>();
} else {
kUpdateAlpha<false><<<CONFIG(size)>>>(size, ml_p.data_d, ml.data_d, mg_p.data_d, mg.data_d);
kAlpha<false><<<1, 1>>>();
}
ml = ml_p;
mg = mg_p;
dot_g_proj_norm = 0;
kResidual<<<CONFIG(size)>>>(size, mg.data_d, dot_g_proj_norm.data_d);
dot_g_proj_norm.copyDeviceToHost();
float g_proj_norm = sqrtf(dot_g_proj_norm.data_h[0]);
if (g_proj_norm < lastgoodres) {
lastgoodres = g_proj_norm;
delta_v = ml;
}
// printf("[%f]\n", lastgoodres);
}
}
cudaCheck(cudaPeekAtLastError());
cudaCheck(cudaDeviceSynchronize());
printf("MPM Solver: [%f, %f %f] \n", time_no_shur, time_shur, lastgoodres);
}
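// Hedged reference sketch (illustration only; nothing below is called by the solver above).
// MPM_BBSolver is a Barzilai-Borwein gradient method: with s = x_k - x_(k-1) and
// y = g_k - g_(k-1) it alternates the two classic step sizes
//   BB1: alpha = (s.s)/(s.y)   (even iterations, kAlpha<true>)
//   BB2: alpha = (s.y)/(y.y)   (odd iterations,  kAlpha<false>)
// and falls back to the fixed values defined near the top of this file when s.y <= 0.
// The tiny host-only routine below applies the same recipe to a diagonal quadratic
// 0.5*x'Ax - b'x so the step-size logic is easy to follow in isolation; all names here
// are illustrative.
void BBSolverSketch() {
const int n = 3;
const float A[3] = {4.f, 2.f, 1.f}; // diagonal "system matrix"
const float b[3] = {1.f, 1.f, 1.f};
float x[3] = {0.f, 0.f, 0.f};
float g[3], x_prev[3], g_prev[3];
for (int i = 0; i < n; ++i) g[i] = A[i] * x[i] - b[i]; // gradient
float alpha_bb = 1e-4f; // same starting value as kResetGlobals<false>()
for (int k = 0; k < 30; ++k) {
for (int i = 0; i < n; ++i) { x_prev[i] = x[i]; g_prev[i] = g[i]; }
for (int i = 0; i < n; ++i) x[i] -= alpha_bb * g[i]; // corresponds to kCompute_ml_p
for (int i = 0; i < n; ++i) g[i] = A[i] * x[i] - b[i]; // corresponds to Multiply + kSubtract
float ss = 0.f, sy = 0.f, yy = 0.f; // corresponds to the kUpdateAlpha reductions
for (int i = 0; i < n; ++i) {
const float s = x[i] - x_prev[i];
const float y = g[i] - g_prev[i];
ss += s * s; sy += s * y; yy += y * y;
}
if (sy <= 0.f) { // corresponds to kAlpha<even>
alpha_bb = (k % 2 == 0) ? neg_BB1_fallback : neg_BB2_fallback;
} else {
float bb = (k % 2 == 0) ? (ss / sy) : (sy / yy);
if (bb < a_min) bb = a_min;
if (bb > a_max) bb = a_max;
alpha_bb = bb;
}
}
}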
CUDA_GLOBAL void kIncrementVelocity(float* delta_v, float* old_vel_node_mpm, float* grid_vel) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_nodes) {
grid_vel[i * 3 + 0] += delta_v[i * 3 + 0] - old_vel_node_mpm[i * 3 + 0];
grid_vel[i * 3 + 1] += delta_v[i * 3 + 1] - old_vel_node_mpm[i * 3 + 1];
grid_vel[i * 3 + 2] += delta_v[i * 3 + 2] - old_vel_node_mpm[i * 3 + 2];
}
}
CUDA_GLOBAL void kUpdateParticleVelocity(float* grid_vel,
float* old_vel_node_mpm,
float* pos_marker,
float* vel_marker) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = pos_marker[p * 3 + 0];
const float xiy = pos_marker[p * 3 + 1];
const float xiz = pos_marker[p * 3 + 2];
float3 V_flip;
V_flip.x = vel_marker[p * 3 + 0];
V_flip.y = vel_marker[p * 3 + 1];
V_flip.z = vel_marker[p * 3 + 2];
float3 V_pic = make_float3(0.0, 0.0, 0.0);
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
int cx, cy, cz;
LOOP_TWO_RING_GPUSP(
float weight = N((xix - current_node_locationx) * inv_bin_edge) *
N((xiy - current_node_locationy) * inv_bin_edge) *
N((xiz - current_node_locationz) * inv_bin_edge);
float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
V_pic.x += vnx * weight; //
V_pic.y += vny * weight; //
V_pic.z += vnz * weight; //
V_flip.x += (vnx - old_vel_node_mpm[current_node * 3 + 0]) * weight; //
V_flip.y += (vny - old_vel_node_mpm[current_node * 3 + 1]) * weight; //
V_flip.z += (vnz - old_vel_node_mpm[current_node * 3 + 2]) * weight; //
)
float3 new_vel = (1.0 - alpha) * V_pic + alpha * V_flip;
float speed = Length(new_vel);
if (speed > device_settings.max_velocity) {
new_vel = new_vel * device_settings.max_velocity / speed;
}
vel_marker[p * 3 + 0] = new_vel.x;
vel_marker[p * 3 + 1] = new_vel.y;
vel_marker[p * 3 + 2] = new_vel.z;
}
}
CUDA_GLOBAL void kUpdateDeformationGradient(float* grid_vel,
float* pos_marker,
float* marker_Fe,
float* marker_Fp,
float* plasticity,
float* JE_JP) {
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < device_settings.num_mpm_markers) {
const float xix = pos_marker[p * 3 + 0];
const float xiy = pos_marker[p * 3 + 1];
const float xiz = pos_marker[p * 3 + 2];
Mat33f vel_grad(0.0);
int cx, cy, cz;
const float bin_edge = device_settings.bin_edge;
const float inv_bin_edge = device_settings.inv_bin_edge;
LOOP_TWO_RING_GPUSP(float vnx = grid_vel[current_node * 3 + 0]; //
float vny = grid_vel[current_node * 3 + 1]; //
float vnz = grid_vel[current_node * 3 + 2];
float Tx = (xix - current_node_locationx) * inv_bin_edge; //
float Ty = (xiy - current_node_locationy) * inv_bin_edge; //
float Tz = (xiz - current_node_locationz) * inv_bin_edge; //
float valx = dN(Tx) * inv_bin_edge * N(Ty) * N(Tz); //
float valy = N(Tx) * dN(Ty) * inv_bin_edge * N(Tz); //
float valz = N(Tx) * N(Ty) * dN(Tz) * inv_bin_edge; //
vel_grad[0] += vnx * valx; vel_grad[1] += vny * valx; vel_grad[2] += vnz * valx; //
vel_grad[3] += vnx * valy; vel_grad[4] += vny * valy; vel_grad[5] += vnz * valy; //
vel_grad[6] += vnx * valz; vel_grad[7] += vny * valz; vel_grad[8] += vnz * valz;
)
Mat33f delta_F = (Mat33f(1.0) + device_settings.dt * vel_grad);
Mat33f m_FE(marker_Fe, p, device_settings.num_mpm_markers);
Mat33f m_FPpre(marker_Fp, p, device_settings.num_mpm_markers);
Mat33f Fe_tmp = delta_F * m_FE;
Mat33f F_tmp = Fe_tmp * m_FPpre;
Mat33f U, V;
float3 E;
SVD(Fe_tmp, U, E, V);
float3 E_clamped = E;
#if defined(BOX_YIELD)
// Simple box clamp
E_clamped.x = Clamp(E.x, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
E_clamped.y = Clamp(E.y, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
E_clamped.z = Clamp(E.z, 1.0 - device_settings.theta_c, 1.0 + device_settings.theta_s);
plasticity[p] = fabsf(E.x * E.y * E.z - E_clamped.x * E_clamped.y * E_clamped.z);
#elif defined(SPHERE_YIELD)
// Clamp to sphere (better)
float center = 1.0 + (device_settings.theta_s - device_settings.theta_c) * .5;
float radius = (device_settings.theta_s + device_settings.theta_c) * .5;
float3 offset = E - center;
float lent = Length(offset);
if (lent > radius) {
offset = offset * radius / lent;
}
E_clamped = offset + center;
plasticity[p] = fabsf(E.x * E.y * E.z - E_clamped.x * E_clamped.y * E_clamped.z);
#elif defined(DRUCKER_PRAGER)
float3 eps = make_float3(logf(E.x), logf(E.y), logf(E.z));
float tr_eps = (eps.x + eps.y + eps.z);
float3 eps_hat = make_float3(logf(E.x), logf(E.y), logf(E.z));
float f_norm_eps_hat = Length(eps_hat);
float delta_gp = f_norm_eps_hat +
(3.0f * device_settings.lambda + 2.0f * device_settings.mu) / (2.0f * device_settings.mu) *
tr_eps * 0;//plasticity[p + device_settings.num_mpm_markers];
float delta_qp = 0;
if (delta_gp <= 0) {
// CASE 1
delta_qp = 0;
} else if (f_norm_eps_hat == 0 || tr_eps > 0) {
// CASE 2
delta_qp = f_norm_eps_hat;
E_clamped = make_float3(1.0f, 1.0f, 1.0f);
} else {
// CASE 3
delta_qp = delta_gp;
E_clamped.x = expf(eps.x - delta_gp * eps_hat.x / f_norm_eps_hat);
E_clamped.y = expf(eps.y - delta_gp * eps_hat.y / f_norm_eps_hat);
E_clamped.z = expf(eps.z - delta_gp * eps_hat.z / f_norm_eps_hat);
}
// Holds the plasticity
float qp_new = plasticity[p] + delta_qp;
float theta_Fp = 0.00110865;
// device_settings.h0 + (device_settings.h1 * qp_new - device_settings.h3) * exp(-device_settings.h2 *
// qp_new);
// 35.0f + (9.0f * qp_new - 10.0f) * exp(-.2f * qp_new);
plasticity[p] = qp_new;
plasticity[p + device_settings.num_mpm_markers] =
sqrtf(2.0 / 3.0) * (2.0f * sinf(theta_Fp)) / (3.0f - sinf(theta_Fp));
printf("YLD: [%f %f %f] %f [%f %f]\n", delta_gp, f_norm_eps_hat, tr_eps, eps_hat.x + eps_hat.y + eps_hat.z,
qp_new, plasticity[p + device_settings.num_mpm_markers]);
#endif
// printf("E %d %f %f %f\n", p, E_clamped.x * E_clamped.y * E_clamped.z, E.x * E.y * E.z, plasticity[p]);
// Inverse of Diagonal E_clamped matrix is 1/E_clamped
Mat33f m_FP = V * MultTranspose(Mat33f(1.0 / E_clamped), U) * F_tmp;
float JP_new = Determinant(m_FP);
// Ensure that F_p is purely deviatoric
Mat33f T1 = powf(JP_new, 1.0 / 3.0) * U * MultTranspose(Mat33f(E_clamped), V);
Mat33f T2 = powf(JP_new, -1.0 / 3.0) * m_FP;
JE_JP[p * 2 + 0] = Determinant(T1);
JE_JP[p * 2 + 1] = Determinant(T2);
T1.Store(marker_Fe, p, device_settings.num_mpm_markers);
T2.Store(marker_Fp, p, device_settings.num_mpm_markers);
// printf("JP: %f JE: %f\n", Determinant(marker_Fe[p]), Determinant(marker_Fp[p]));
}
}
void MPM_UpdateDeformationGradient(MPM_Settings& settings,
std::vector<float>& positions,
std::vector<float>& velocities,
std::vector<float>& jejp) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
host_settings = settings;
printf("Solving MPM: %d\n", host_settings.num_iterations);
pos.data_h = positions;
pos.copyHostToDevice();
vel.data_h = velocities;
vel.copyHostToDevice();
cudaCheck(cudaMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
MPM_ComputeBounds();
node_mass.resize(host_settings.num_mpm_nodes);
node_mass = 0;
grid_vel.resize(host_settings.num_mpm_nodes * 3);
grid_vel = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
// ========================================================================================
kRasterize<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
vel.data_d, // input
node_mass.data_d, // output
grid_vel.data_d // output
);
}
printf("kRasterize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kNormalizeWeights<<<CONFIG(host_settings.num_mpm_nodes)>>>(node_mass.data_d, // output
grid_vel.data_d);
}
printf("kNormalizeWeights: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kUpdateDeformationGradient<<<CONFIG(host_settings.num_mpm_markers)>>>(
grid_vel.data_d, pos.data_d, marker_Fe.data_d, marker_Fp.data_d, marker_plasticity.data_d, JE_JP.data_d);
JE_JP.copyDeviceToHost();
}
jejp = JE_JP.data_h;
printf("kUpdateDeformationGradient: %f\n", time_measured);
time_measured = 0;
}
void MPM_Solve(MPM_Settings& settings, std::vector<float>& positions, std::vector<float>& velocities) {
old_vel_node_mpm.resize(host_settings.num_mpm_nodes * 3);
rhs.resize(host_settings.num_mpm_nodes * 3);
old_vel_node_mpm = grid_vel;
// cudaCheck(cudaPeekAtLastError());
// cudaCheck(cudaDeviceSynchronize());
{
CudaEventTimer timer(start, stop, true, time_measured);
kFeHat<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, marker_Fe.data_d, grid_vel.data_d,
marker_Fe_hat.data_d);
}
printf("kFeHat: %f\n", time_measured);
time_measured = 0;
// kSVD<<<CONFIG(host_settings.num_mpm_markers)>>>(marker_Fe_hat.data_d, PolarR.data_d, PolarS.data_d);
{
CudaEventTimer timer(start, stop, true, time_measured);
kApplyForces<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
marker_Fe_hat.data_d, // input
marker_Fe.data_d, // input
marker_volume.data_d, // input
node_mass.data_d, // input
marker_plasticity.data_d, // input
PolarR.data_d, // output
PolarS.data_d, // output
grid_vel.data_d); // output
}
printf("kApplyForces: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kRhs<<<CONFIG(host_settings.num_mpm_nodes)>>>(node_mass.data_d, grid_vel.data_d, rhs.data_d);
}
printf("kRhs: %f\n", time_measured);
time_measured = 0;
delta_v.resize(host_settings.num_mpm_nodes * 3);
delta_v = old_vel_node_mpm;
MPM_BBSolver(rhs, delta_v);
{
CudaEventTimer timer(start, stop, true, time_measured);
kIncrementVelocity<<<CONFIG(host_settings.num_mpm_nodes)>>>(delta_v.data_d, old_vel_node_mpm.data_d,
grid_vel.data_d);
}
printf("kIncrementVelocity: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kUpdateParticleVelocity<<<CONFIG(host_settings.num_mpm_markers)>>>(grid_vel.data_d, old_vel_node_mpm.data_d,
pos.data_d, vel.data_d);
}
printf("kUpdateParticleVelocity: %f\n", time_measured);
time_measured = 0;
vel.copyDeviceToHost();
velocities = vel.data_h;
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
CUDA_GLOBAL void kInitFeFp(float* marker_Fe, float* marker_Fp, float* marker_RE, float* marker_SE) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < device_settings.num_mpm_markers) {
Mat33f T(1.0f);
T.Store(marker_Fe, i, device_settings.num_mpm_markers);
T.Store(marker_Fp, i, device_settings.num_mpm_markers);
T.Store(marker_RE, i, device_settings.num_mpm_markers);
marker_SE[i + device_settings.num_mpm_markers * 0] = 1.0f;
marker_SE[i + device_settings.num_mpm_markers * 1] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 2] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 3] = 1.0f;
marker_SE[i + device_settings.num_mpm_markers * 4] = 0.0f;
marker_SE[i + device_settings.num_mpm_markers * 5] = 1.0f;
}
}
void MPM_Initialize(MPM_Settings& settings, std::vector<float>& positions) {
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
cudaEventCreate(&start);
cudaEventCreate(&stop);
host_settings = settings;
cudaCheck(cudaMalloc(&lower_bound, sizeof(float3)));
cudaCheck(cudaMalloc(&upper_bound, sizeof(float3)));
pos.data_h = positions;
pos.copyHostToDevice();
cudaCheck(cudaMemcpyToSymbolAsync(device_settings, &host_settings, sizeof(MPM_Settings)));
MPM_ComputeBounds();
marker_volume.resize(host_settings.num_mpm_markers);
node_mass.resize(host_settings.num_mpm_nodes);
node_mass = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kRasterize<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
node_mass.data_d); // output
}
printf("kRasterize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kComputeParticleVolumes<<<CONFIG(host_settings.num_mpm_markers)>>>(pos.data_d, // input
node_mass.data_d, // input
marker_volume.data_d); // output
}
printf("kComputeParticleVolumes: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
marker_Fe.resize(host_settings.num_mpm_markers * 9);
marker_Fe_hat.resize(host_settings.num_mpm_markers * 9);
marker_Fp.resize(host_settings.num_mpm_markers * 9);
PolarR.resize(host_settings.num_mpm_markers * 9);
PolarS.resize(host_settings.num_mpm_markers * 6);
JE_JP.resize(host_settings.num_mpm_markers * 2);
marker_plasticity.resize(host_settings.num_mpm_markers * 2);
marker_plasticity = 0;
}
printf("Resize: %f\n", time_measured);
time_measured = 0;
{
CudaEventTimer timer(start, stop, true, time_measured);
kInitFeFp<<<CONFIG(host_settings.num_mpm_markers)>>>(marker_Fe.data_d, // output
marker_Fp.data_d, // output
PolarR.data_d, // output
PolarS.data_d); // output
}
printf("kInitFeFp: %f\n", time_measured);
time_measured = 0;
// cudaCheck(cudaPeekAtLastError());
// cudaCheck(cudaDeviceSynchronize());
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
}
#include <cuda.h>
// GPU implementation of proper Marian top-k operator for TopkNodeOp
// This file contains a lot of code-duplication with src/translator/nth_element.cu
// the goal is to replace the beam-search specific topk search with this code.
// Currently this is only used in the unit tests, but we will move forward and
// make the beam-search more graph and operator-based.
namespace marian {
namespace gpu {
const int MAX_BINS = 500;
const int BLOCK_SIZE = 512;
#define UNROLL_MAXARG_LOOP(n, max) \
if(tid < (n) && tid + (n) < (max)) { \
if(sharedValues[tid + (n)] > sharedValues[tid]) { \
sharedIndices[tid] = sharedIndices[tid + (n)]; \
sharedValues[tid] = sharedValues[tid + (n)]; \
} \
}
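// Note: the UNROLL_MAXARG_LOOP steps (used in the final stage of the reductions below)
// rely on implicit warp-synchronous execution once fewer than 32 threads remain active.
// Since Volta's independent thread scheduling this pattern is normally written with
// volatile shared-memory pointers or an explicit __syncwarp() between steps; the macro
// above keeps the original formulation.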
// finds maximum element (first step)
template <typename T>
__global__ void gMaxElement(IndexType* binIndices, // out: top-k positions
T* binValues, // out: top-k scores
const T* inValues, // this is the probs array, only one with type float or half
int rows, // we iterate over this many rows, row-major layout
int cols, // a row has that many columns, row-major layout
float minimal, // minimal is the smallest possible value. For simplicity we assume we look for the maximum.
bool descending) // This will be the largest possible value if the order is reversed (i.e. we look for the minimum).
{
extern __shared__ float sharedValues[];
__shared__ IndexType sharedIndices[BLOCK_SIZE];
// id of current thread within block
int tid = threadIdx.x;
float flip = descending ? 1.f : -1.f;
// Roll over every row in row-major 2D representation of the data
for(int rowIdx = 0; rowIdx < rows; ++rowIdx) {
int begin = rowIdx * cols; // start index of a row
int end = rowIdx * cols + cols; // end index of a row
// We look at at most blockDim.x * 2 = 1024 values within a block, i.e. each thread reduces two values.
// Here we set the position to begin + blockId * 1024 + threadId. If a row has more values we
// partition the row according to blocks of 1024 values.
int i = begin + blockIdx.x * (blockDim.x * 2) + tid;
// Initialize shared values to minimal value.
sharedValues[tid] = minimal;
// Do first set of comparisons outside loop, saves one iteration.
if(i + blockDim.x < end) { // Are we in a position for which we can access and compare two values in a row partition (shifted by block size)?
// yes, hence compare:
float a = flip * (float)inValues[i]; // value from first half of row partition for this block
float b = flip * (float)inValues[i + blockDim.x]; // value from second half of row partition for this block
if(a > b) { // just a max
sharedIndices[tid] = i;
sharedValues[tid] = a;
} else {
sharedIndices[tid] = i + blockDim.x;
sharedValues[tid] = b;
}
} else if(i < end) { // Are we instead in a position that has access to one value in the row partition (shifting by block size would be out of bounds)?
// Yes, hence save the current value and index as new max, no need to compare.
sharedIndices[tid] = i;
sharedValues[tid] = flip * (float)inValues[i];
} // nothing else to do here
// We move to the next set of 1024 values shifted by block size times number of blocks
// and look at two of them according to thread id.
while(i + 2 * gridDim.x * blockDim.x < end) {
i += 2 * gridDim.x * blockDim.x;
// Check if first value is larger than what we have seen so far
float a = flip * (float)inValues[i];
if(a > sharedValues[tid]) {
// Yes, hence save index and value
sharedIndices[tid] = i;
sharedValues[tid] = a;
}
// Check if second value is larger than what we have seen so far
if(i + blockDim.x < end) {
float b = flip * (float)inValues[i + blockDim.x];
if(b > sharedValues[tid]) {
// Yes, hence save index and value
sharedIndices[tid] = i + blockDim.x;
sharedValues[tid] = b;
}
}
}
// We are done with the first sweep and have populated shared memory, time to wait for the other threads and reduce it all
__syncthreads();
// Reduce over shared memory, here per loop until we hit the last 32 unreduced elements
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < end) {
if(sharedValues[tid + s] > sharedValues[tid]) {
// keep the max
sharedIndices[tid] = sharedIndices[tid + s];
sharedValues[tid] = sharedValues[tid + s];
}
}
__syncthreads();
}
// Reduce over shared memory, here per unrolled code for powers of 2 lower equal 32.
// Because we are at 32 (warp size) the threads run in lock-step and we can abandon syncing.
UNROLL_MAXARG_LOOP(32, end);
UNROLL_MAXARG_LOOP(16, end);
UNROLL_MAXARG_LOOP(8, end);
UNROLL_MAXARG_LOOP(4, end);
UNROLL_MAXARG_LOOP(2, end);
UNROLL_MAXARG_LOOP(1, end);
// OK, we are done with the reduction and in the first thread
if(tid == 0) {
// assign the final maximal value to the bin, one bin per row and block
binIndices[rowIdx * gridDim.x + blockIdx.x] = sharedIndices[0]; // [rows, num_blocks]
binValues[rowIdx * gridDim.x + blockIdx.x] = sharedValues[0]; // [rows, num_blocks]
}
__syncthreads();
}
}
// This runs after the kernel above: we now have the maximum value per row and block and can search further
// for the top-k results. As above, we pretend this only does a maximum search.
// This kernel is restricted to one row (one row per block).
template <typename T>
__global__ void gMaxElementUpdate(IndexType* binIndices, // memory for bin indices
T* binValues, // memory for bin costs
IndexType* outIndices, // result indices
T* outValues, // result costs
T* inValues, // should work well enough with half, uses float everywhere else
const int cols, // size of contiguous memory we search over
const int K, // how many top-K elements?
int numBlocks, // number of blocks/bins used in above function (per row)
float minimal, // value for minimal element
bool descending)
{
extern __shared__ float sharedValues[];
__shared__ int sharedIndices[BLOCK_SIZE];
__shared__ float bestBinCost;
__shared__ int bestBinCostIdx;
const int tid = threadIdx.x;
float flip = descending ? 1.f : -1.f;
// we only look at one row in this kernel
const int rowIdx = blockIdx.x; // index of the row corresponds to block index
const int begin = rowIdx * cols; // start offset for this row relative to inValues tensor start
const int end = rowIdx * cols + cols; // end offset for this row relative to inValues tensor start
int num_bins = numBlocks; // why not just use numBlocks?
// iterate over top-k results
for(int k = 0; k < K; ++k) {
int kthOutIdx = rowIdx * K + k; // offset into output tensor relative to outIndices/outValues tensor start
int i = tid;
sharedValues[tid] = minimal; // initialize to smallest value, everything else will be larger
// as in the function above, the code here does a tree reduction over shared memory to find the single maximum element
if(i + blockDim.x < num_bins) {
float a = binValues[rowIdx * numBlocks + i];
float b = binValues[rowIdx * numBlocks + i + blockDim.x];
if(a > b) {
sharedValues[tid] = a;
sharedIndices[tid] = i;
} else {
sharedValues[tid] = b;
sharedIndices[tid] = i + blockDim.x;
}
} else if(i < num_bins) {
sharedValues[tid] = binValues[rowIdx * numBlocks + i];
sharedIndices[tid] = i;
}
while(i + 2 * blockDim.x < num_bins) {
i += 2 * blockDim.x;
float a = binValues[rowIdx * numBlocks + i];
if(a > sharedValues[tid]) {
sharedValues[tid] = a;
sharedIndices[tid] = i;
}
if(i + blockDim.x < num_bins) {
float b = binValues[rowIdx * numBlocks + i + blockDim.x];
if(b > sharedValues[tid]) {
sharedValues[tid] = b;
sharedIndices[tid] = i + blockDim.x;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < num_bins) {
if(sharedValues[tid + s] > sharedValues[tid]) {
sharedValues[tid] = sharedValues[tid + s];
sharedIndices[tid] = sharedIndices[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, num_bins);
UNROLL_MAXARG_LOOP(16, num_bins);
UNROLL_MAXARG_LOOP(8, num_bins);
UNROLL_MAXARG_LOOP(4, num_bins);
UNROLL_MAXARG_LOOP(2, num_bins);
UNROLL_MAXARG_LOOP(1, num_bins);
if(tid == 0) {
bestBinCost = sharedValues[0];
bestBinCostIdx = rowIdx * numBlocks + sharedIndices[0];
inValues[binIndices[bestBinCostIdx]] = flip * minimal; // this is restored in the last lines of this function
outIndices[kthOutIdx] = binIndices[bestBinCostIdx] - begin; // relative to beginning of row, hence subtract `begin`
outValues[kthOutIdx] = flip * bestBinCost; // undo flip by flipping again
}
__syncthreads();
// Second part of the algorithm: why is this not replacing the first kernel call??
// Also shouldn't we skip here if k == K - 1?
// After marking the previously largest element with "flip * minimal" we populate again
// shared memory with the largest element as in gMaxElement(...)
if(k < K - 1) {
i = begin + (bestBinCostIdx - rowIdx * numBlocks) * (blockDim.x * 2) + tid;
const int dist = num_bins * 2 * blockDim.x;
sharedValues[tid] = minimal;
if(i + blockDim.x < end) {
float a = flip * (float)inValues[i];
float b = flip * (float)inValues[i + blockDim.x];
if(a > b) {
sharedIndices[tid] = i;
sharedValues[tid] = a;
} else {
sharedIndices[tid] = i + blockDim.x;
sharedValues[tid] = b;
}
} else if(i < end) {
sharedIndices[tid] = i;
sharedValues[tid] = flip * (float)inValues[i];
}
while(i + dist < end) {
i += dist;
float a = flip * (float)inValues[i];
if(a > sharedValues[tid]) {
sharedIndices[tid] = i;
sharedValues[tid] = a;
}
if(i + blockDim.x < end) {
float b = flip * (float)inValues[i + blockDim.x];
if(b > sharedValues[tid]) {
sharedIndices[tid] = i + blockDim.x;
sharedValues[tid] = b;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < end) {
if(sharedValues[tid + s] > sharedValues[tid]) {
sharedIndices[tid] = sharedIndices[tid + s];
sharedValues[tid] = sharedValues[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, end);
UNROLL_MAXARG_LOOP(16, end);
UNROLL_MAXARG_LOOP(8, end);
UNROLL_MAXARG_LOOP(4, end);
UNROLL_MAXARG_LOOP(2, end);
UNROLL_MAXARG_LOOP(1, end);
if(tid == 0) {
binIndices[bestBinCostIdx] = sharedIndices[0];
binValues[bestBinCostIdx] = sharedValues[0];
}
__syncthreads();
}
}
// final operation to restore blanked-out input values. They were blanked out for marking
// already found values. Since we want input values to be invariant we restore here.
// @TODO: The lack of constness here might be a problem for concurrent processing (which we currently don't have)
for(int k = tid; k < K; k += blockDim.x) {
int kthOutIdx = rowIdx * K + k;
inValues[begin + outIndices[kthOutIdx]] = outValues[kthOutIdx];
}
}
void TopK(Tensor outVal, Tensor outInd, Ptr<Allocator> allocator, const Tensor in, int k, int axis, bool descending) {
ABORT_IF(axis != in->shape().size() - 1, "Currently only works for last axis");
ABORT_IF(!isFloat(in->type()), "Input should be float type and not {}", in->type());
ABORT_IF(outInd->type() != Type::uint32, "Output should have type {}", Type::uint32);
ABORT_IF(outVal->type() != in->type(), "Output should have type {}", in->type());
cudaSetDevice(outInd->getDeviceId().no);
int cols = in->shape()[-1]; // e.g. in beam search that would be [beam * dimVoc]
int rows = in->shape().elements() / cols; // e.g. in beam search that would be [time * batch]
ABORT_IF(k > cols, "Cannot select more than {} elements for axis {}", cols, axis);
float minimal = NumericLimits<float>(in->type()).lowest; // lowest if looking for max
const int numBlocks = std::min(MAX_BINS, int(cols / (2 * BLOCK_SIZE)) + int(cols % (2 * BLOCK_SIZE) != 0));
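// e.g. for cols = 32000 and BLOCK_SIZE = 512 this gives min(500, 31 + 1) = 32 blocks/bins per row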
auto tempMemInd = allocator->alloc<IndexType>(rows * numBlocks);
MemoryPiece::PtrType tempMemVal;
if(in->type() == Type::float32) {
tempMemVal = allocator->alloc<float>(rows * numBlocks);
// first find the maximum value per row and block and save indices and values to temporary memory
gMaxElement<<<numBlocks, // blocks
BLOCK_SIZE, // threads
BLOCK_SIZE * sizeof(float), // shared memory size
/* stream_ */ 0>>>(
tempMemInd->data<IndexType>(), tempMemVal->data<float>(),
in->data<float>(), rows, cols, minimal, descending);
gMaxElementUpdate<<<rows, // blocks ... seems we can have up to 2^31-1 of these, so we are safe?
BLOCK_SIZE, // threads
BLOCK_SIZE * sizeof(float), // shared memory size
/* stream_ */ 0>>>(
tempMemInd->data<IndexType>(), tempMemVal->data<float>(),
outInd->data<IndexType>(), outVal->data<float>(),
in->data<float>(), cols, k, numBlocks, minimal, descending);
#if COMPILE_FP16
} else if(in->type() == Type::float16) {
tempMemVal = allocator->alloc<__half>(rows * numBlocks);
// first find the maximum value per row and block and save indices and values to temporary memory
gMaxElement<<<numBlocks, // blocks
BLOCK_SIZE, // threads
BLOCK_SIZE * sizeof(float), // shared memory size
/* stream_ */ 0>>>(
tempMemInd->data<IndexType>(), tempMemVal->data<__half>(),
in->data<__half>(), rows, cols, minimal, descending);
gMaxElementUpdate<<<rows, // blocks ... seems we can have up to 2^31-1 of these, so we are safe?
BLOCK_SIZE, // threads
BLOCK_SIZE * sizeof(float), // shared memory size
/* stream_ */ 0>>>(
tempMemInd->data<IndexType>(), tempMemVal->data<__half>(),
outInd->data<IndexType>(), outVal->data<__half>(),
in->data<__half>(), cols, k, numBlocks, minimal, descending);
#endif
} else {
ABORT("Topk not implemented for type {}", in->type());
}
allocator->free(tempMemInd);
allocator->free(tempMemVal);
}
}
}
|
the_stack
|
#define BUILD_DEV __device__
namespace anakin{
namespace saber{
template<typename Dtype>
__global__ void ker_relu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype neg_slop,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > Dtype(0) ? in_var : in_var * neg_slop;
}
}
template<typename Dtype>
__global__ void ker_sigmoid_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = Dtype( Dtype(1) / (Dtype(1)+ exp(-in_var)));
}
}
template<typename Dtype>
__global__ void ker_tanh_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
// tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) = 1 - 2 / (1 + exp(2x)); the second form needs only one exp() call
out_data[out_idx] = Dtype(1) - (Dtype(2) / (Dtype(1) + exp(in_var * 2)));
}
}
template<typename Dtype>
__global__ void ker_stanh_fwd(Dtype * out_data,
const Dtype* in_data, const int count, const Dtype slope, const Dtype coef,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
Dtype var = in_var * slope;
//output_data[j] = param.coef * tanh(param.negative_slope * input_data[j]);
out_data[out_idx] = Dtype( coef * (Dtype(1) - (Dtype(2) / (Dtype(1) + exp(var * 2)))));
}
}
template<typename Dtype>
__global__ void ker_clipped_relu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype clipped_threadhold,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
in_var = in_var > 0 ? in_var : 0;
out_data[out_idx] = in_var < clipped_threadhold? in_var : clipped_threadhold;
}
}
template<typename Dtype>
__global__ void ker_swish_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype beta,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = Dtype( in_var / (Dtype(1)+ exp(-(beta * in_var))));
}
}
template<typename Dtype>
__global__ void ker_elu_fwd(Dtype * out_data,
const Dtype* in_data, const int count, Dtype coef,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
out_data[out_idx] = in_var > 0 ? in_var : coef * (exp(in_var)-1);
}
}
template<typename Dtype>
__global__ void ker_gelu_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
Dtype coeff = 0.5 * (erf(in_var / sqrt(2.0)) + 1); // 0.5 * (1 + erf(x / sqrt(2))); use the CUDA math erf() since std::erf may not be callable from device code
out_data[out_idx] = in_var * coeff;
}
}
template<typename Dtype>
__global__ void ker_prelu_fwd(Dtype * out_data,
const Dtype* in_data, const int count,
const Dtype* slope, bool is_channel_shared,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride) {
CUDA_KERNEL_LOOP(tid, count){
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
Dtype in_var = in_data[in_idx];
if (is_channel_shared) {
out_data[out_idx] = in_var > 0 ? in_var : slope[0] * in_var;
} else {
out_data[out_idx] = in_var > 0 ? in_var : slope[c] * in_var;
}
}
}
template <>
SaberStatus SaberActivation<NV, AK_FLOAT>::create( \
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ActivationParam<NV>& param, Context<NV>& ctx) {
this->_ctx = &ctx;
return SaberSuccess;
}
template <>
SaberStatus SaberActivation<NV, AK_FLOAT>::init( \
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ActivationParam<NV>& param, Context<NV>& ctx) {
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberActivation<NV, AK_FLOAT>::dispatch( \
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ActivationParam<NV>& param) {
Shape in_shape = inputs[0]->valid_shape();
Shape out_shape = outputs[0]->valid_shape();
Shape stride_in = inputs[0]->get_stride();
Shape stride_out = outputs[0]->get_stride();
const float *in_data = (const float*)inputs[0]->data();
float *out_data = (float*)outputs[0]->mutable_data();
const int count = inputs[0]->valid_size();
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
float negative_slope = param.negative_slope;
float coef = param.coef;
switch (param.active) {
//x > 0 ? x : 0
case Active_relu:
ker_relu_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, negative_slope,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
// sigmoid: 1/(exp(-x) + 1)
case Active_sigmoid:
ker_sigmoid_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
// swish: x / (exp(-b * x) + 1)
case Active_swish:
ker_swish_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
// tanh : (exp(x) - exp(-x)) / (exp(x) + exp(-x))
case Active_tanh:
ker_tanh_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
// stanh : b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}
case Active_stanh:
ker_stanh_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, negative_slope, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
// x > 0 ? x : 0;
// x < threshold ? x : threshold
case Active_clipped_relu:
ker_clipped_relu_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
//elu: x > 0 ? x : coef * (exp(x) - 1)
case Active_elu:
ker_elu_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
//gelu: x * 0.5(erf(x/sqrt(2)) + 1)
case Active_gelu:
ker_gelu_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
//prelu: x > 0 ? x : slope[c] * x
case Active_prelu:
auto prelu_param = param.prelu_param;
const float* slope_ptr = (const float*)prelu_param.slope->data();
bool shared = prelu_param.channel_shared;
ker_prelu_fwd<float>
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
slope_ptr, shared,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3]);
break;
}
CUDA_POST_KERNEL_CHECK;
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
return SaberSuccess;
}
// =================================int8 ==================
class ReluDev{
public:
static __device__ float run(float in, float negative_slope, float placeholder) {
return (in > 0.f) ? in : in * negative_slope;
}
};
class SigmoidDev{
public:
static __device__ float run(float in, float placeholder1, float placeholder2) {
return float( float(1) / (float(1)+ exp(-in)));
}
};
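// Illustrative sketch only (an assumption, not part of the original dispatch below): any further
// activation could be plugged into the templated int8 kernels by providing another functor with
// the same static run(in, a, b) interface. The name TanhDev and its use here are hypothetical.
class TanhDev{
public:
    static __device__ float run(float in, float placeholder1, float placeholder2) {
        // tanh(x) = 1 - 2 / (1 + exp(2x)), matching ker_tanh_fwd above
        return 1.f - (2.f / (1.f + exp(2.f * in)));
    }
};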
template <typename Op>
__global__
void ker_act_fwd_fp32_to_int8(char* out_data, const float* in_data,
int in_num, int in_channel_4, int in_height, int in_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float negtive_slope, const float coef, float scale, int count) {
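// Layout note: the output is NCHW_C4, i.e. groups of 4 consecutive channels are packed into one
// char4. Each thread therefore gathers 4 floats that are in_c_stride apart in the NCHW input,
// multiplies by `scale`, applies Op::run, rounds with __float2int_rn, and packs the four results
// into a single char4 write.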
int load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int write_w = (gid) % in_width;
int write_h = (gid / (out_h_stride)) % in_height;
int write_c = (gid / (out_c_stride)) % in_channel_4;
int write_n = (gid / (out_n_stride)) % in_num;
int in_offset = write_n * in_n_stride
+ write_c * in_c_stride * 4
+ write_h * in_h_stride
+ write_w * in_w_stride;
int out_offset = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w;
if (gid < count) {
char4 write;
float temp;
temp = in_data[in_offset] * scale;
temp = Op::run(temp, negtive_slope, coef);
load0 = __float2int_rn(temp);
write.x = static_cast<char>(load0);
in_offset += in_c_stride;
temp = in_data[in_offset] * scale;
temp = Op::run(temp, negtive_slope, coef);
load1 = __float2int_rn(temp);
write.y = static_cast<char>(load1);
in_offset += in_c_stride;
temp = in_data[in_offset] * scale;
temp = Op::run(temp, negtive_slope, coef);
load2 = __float2int_rn(temp);
write.z = static_cast<char>(load2);
in_offset += in_c_stride;
temp = in_data[in_offset] * scale;
temp = Op::run(temp, negtive_slope, coef);
load3 = __float2int_rn(temp);
write.w = static_cast<char>(load3);
((char4*)out_data)[out_offset] = write;
}
}
template <typename Op>
__global__
void ker_act_fwd_int8_to_fp32(float* out_data, const char* in_data,
int in_num, int in_channel_4, int in_height, int in_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float negtive_slope, const float coef, const float scale, int count) {
float load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int read_w = (gid) % in_width;
int read_h = (gid / (in_h_stride)) % in_height;
int read_c = (gid / (in_c_stride)) % in_channel_4;
int read_n = (gid / (in_n_stride)) % in_num;
int in_offset = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w;
int out_offset = read_n * out_n_stride
+ read_c * (out_c_stride << 2)
+ read_h * out_h_stride
+ read_w * out_w_stride;
if (gid < count) {
char4 readin = ((const char4*)in_data)[in_offset];
load0 = static_cast<float>(readin.x) * scale;
load1 = static_cast<float>(readin.y) * scale;
load2 = static_cast<float>(readin.z) * scale;
load3 = static_cast<float>(readin.w) * scale;
load0 = Op::run(load0, negtive_slope, coef);
load1 = Op::run(load1, negtive_slope, coef);
load2 = Op::run(load2, negtive_slope, coef);
load3 = Op::run(load3, negtive_slope, coef);
out_data[out_offset] = load0; out_offset += out_c_stride;
out_data[out_offset] = load1; out_offset += out_c_stride;
out_data[out_offset] = load2; out_offset += out_c_stride;
out_data[out_offset] = load3;
}
}
__global__ void ker_sigmoid_fwd_int8(char * out_data,
const char* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float in_scale = 1.f, float out_scale = 1.f) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
char in_var = in_data[in_idx];
float in = static_cast<float>(in_var) * in_scale;
in = float( float(1) / (float(1)+ exp(-in)));
in /= out_scale;
out_data[out_idx] = static_cast<char>(in);
}
}
template <>
SaberStatus SaberActivation<NV, AK_INT8>::create(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ActivationParam<NV>& param, Context<NV>& ctx) {
this->_ctx = &ctx;
if (inputs[0]->get_dtype() == AK_FLOAT) {
Shape in_shape = inputs[0]->valid_shape();
_int8_input.reshape(in_shape);
_int8_input.set_scale(inputs[0]->get_scale());
_int8_input.set_layout(Layout_NCHW_C4);
}
return SaberSuccess;
}
template <>
SaberStatus SaberActivation<NV, AK_INT8>::init(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ActivationParam<NV>& param, Context<NV>& ctx) {
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
__global__ void ker_clipped_relu_fwd_s8s8(char * out_data,
const char* in_data, const int count, float clipped_threadhold,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float in_scale, float out_scale) {
CUDA_KERNEL_LOOP(tid, count) {
int w = tid % in_w;
int h = (tid / (in_w)) % in_h;
int c = (tid / (in_h * in_w)) % in_c;
int n = (tid / (in_c * in_h * in_w)) % in_n;
int in_idx = n * in_n_stride
+ c * in_c_stride
+ h * in_h_stride
+ w * in_w_stride;
int out_idx = n * out_n_stride
+ c * out_c_stride
+ h * out_h_stride
+ w * out_w_stride;
char in_var = in_data[in_idx];
if (in_var < 0) {
out_data[out_idx] = 0;
} else {
float temp = static_cast<float>(in_var) * in_scale;
if (temp > clipped_threadhold) {
temp = clipped_threadhold * in_scale / out_scale;
out_data[out_idx] = static_cast<char>(__float2int_rn(temp));
} else {
out_data[out_idx] = in_var;
}
}
}
}
__global__
void ker_clipped_relu_fwd_s8s8(void* out_data, const void* in_data, const float clipped_threadhold,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float scale, const float out_scale, int count) {
float load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int read_w = (gid) % valid_width;
int read_h = (gid / (in_h_stride)) % valid_height;
int read_c = (gid / (in_c_stride)) % valid_channel_4;
int read_n = (gid / (in_n_stride)) % valid_num;
int in_offset = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w;
if (gid < count) {
char4 readin = __ldg(&((const char4*)in_data)[in_offset]);
load0 = static_cast<float>(readin.x) * scale;
load1 = static_cast<float>(readin.y) * scale;
load2 = static_cast<float>(readin.z) * scale;
load3 = static_cast<float>(readin.w) * scale;
load0 = load0 > 0 ? load0 : 0;
load0 = load0 < clipped_threadhold? load0 : clipped_threadhold;
load1 = load1 > 0 ? load1 : 0;
load1 = load1 < clipped_threadhold? load1 : clipped_threadhold;
load2 = load2 > 0 ? load2 : 0;
load2 = load2 < clipped_threadhold? load2 : clipped_threadhold;
load3 = load3 > 0 ? load3 : 0;
load3 = load3 < clipped_threadhold? load3 : clipped_threadhold;
char4 store;
store.x = static_cast<char>(__float2int_rn(load0 * out_scale));
store.y = static_cast<char>(__float2int_rn(load1 * out_scale));
store.z = static_cast<char>(__float2int_rn(load2 * out_scale));
store.w = static_cast<char>(__float2int_rn(load3 * out_scale));
((char4*)out_data)[in_offset] = store;
}
}
__global__
void ker_clipped_relu_fwd_s8f32(void* out_data, const void* in_data,
const float clipped_threadhold,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
const float scale, const float out_scale, int count) {
float load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int read_w = (gid) % valid_width;
int read_h = (gid / (in_h_stride)) % valid_height;
int read_c = (gid / (in_c_stride)) % valid_channel_4;
int read_n = (gid / (in_n_stride)) % valid_num;
int scale_index = read_c << 2;
int in_offset = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w;
int out_offset = read_n * out_n_stride
+ read_c * (out_c_stride << 2)
+ read_h * out_h_stride
+ read_w * out_w_stride;
if (gid < count) {
char4 readin = __ldg(&((const char4*)in_data)[in_offset]);
load0 = static_cast<float>(readin.x) * scale;
load1 = static_cast<float>(readin.y) * scale;
load2 = static_cast<float>(readin.z) * scale;
load3 = static_cast<float>(readin.w) * scale;
load0 = load0 > 0 ? load0 : 0;
load0 = load0 < clipped_threadhold? load0 : clipped_threadhold;
load1 = load1 > 0 ? load1 : 0;
load1 = load1 < clipped_threadhold? load1 : clipped_threadhold;
load2 = load2 > 0 ? load2 : 0;
load2 = load2 < clipped_threadhold? load2 : clipped_threadhold;
load3 = load3 > 0 ? load3 : 0;
load3 = load3 < clipped_threadhold? load3 : clipped_threadhold;
((float*)out_data)[out_offset] = load0; out_offset += out_c_stride;
((float*)out_data)[out_offset] = load1; out_offset += out_c_stride;
((float*)out_data)[out_offset] = load2; out_offset += out_c_stride;
((float*)out_data)[out_offset] = load3;
}
}
template <>
SaberStatus SaberActivation<NV, AK_INT8>::dispatch(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ActivationParam<NV>& param) {
const void *in_data = inputs[0]->data();
void *out_data = outputs[0]->mutable_data();
const int count = inputs[0]->valid_size();
int in_c_4 = inputs[0]->channel() / 4;
int out_c_4 = outputs[0]->channel() / 4;
// float negative_slope = param.negative_slope;
float coef = param.coef;
float in_scale = inputs[0]->get_scale()[0];
float out_scale = 1.f / outputs[0]->get_scale()[0];
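// Scale convention: in_scale converts int8 input back to real values (real = q * scale), while
// out_scale is the reciprocal of the output tensor's scale, so the kernels can quantize with a
// single multiply (q = real * out_scale) before __float2int_rn.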
Shape out_stride = outputs[0]->get_stride();
Shape in_shape = inputs[0]->valid_shape();
Shape out_shape = outputs[0]->valid_shape();
// int count = in_shape[0] * in_shape[1] * in_shape[2] * in_shape[3];
cudaStream_t cuda_stream = _ctx->get_compute_stream();
if (inputs[0]->get_dtype() == AK_FLOAT) {
conv_calibrate_fp32_int8_c4(_int8_input, *inputs[0], in_scale, *(this->_ctx));
in_data = _int8_input.data();
} else {
in_data = inputs[0]->data();
}
if (outputs[0]->get_dtype() == AK_INT8) {
switch (param.active) {
case Active_clipped_relu:
ker_clipped_relu_fwd_s8s8
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
in_shape[1] * in_shape[2] * in_shape[3],
in_shape[2] * in_shape[3],
in_shape[3], 1,
out_stride[0], out_stride[1], out_stride[2], out_stride[3],
in_scale, out_scale, count);
break;
default:
LOG(FATAL) << "Not implement this activation in this data config" << param.active;
break;
}
} else if (outputs[0]->get_dtype() == AK_FLOAT) {
switch (param.active) {
case Active_clipped_relu:
ker_clipped_relu_fwd_s8f32
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, coef,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
in_shape[1] * in_shape[2] * in_shape[3],
in_shape[2] * in_shape[3],
in_shape[3], 1,
out_stride[0], out_stride[1], out_stride[2], out_stride[3],
in_scale, out_scale, count);
break;
default:
LOG(FATAL) << "Not implement this activation in this data config" << param.active;
break;
}
} else {
LOG(FATAL) << "not supported yet!!!";
}
CUDA_POST_KERNEL_CHECK;
return SaberSuccess;
}
template class SaberActivation<NV, AK_FLOAT>;
template class SaberActivation<NV, AK_INT8>;
DEFINE_OP_TEMPLATE(SaberActivation, ActivationParam, NV, AK_HALF);
}
}
|
the_stack
|
#pragma once
#include <cuda_runtime.h>
#include <libvis/cuda/cuda_buffer.cuh>
#include <libvis/libvis.h>
#include <math_constants.h>
#include "camera_calibration/cuda/cuda_matrix.cuh"
#include "camera_calibration/models/cuda_camera_model.cuh"
namespace vis {
// TODO: Move the functions below to a better place
__forceinline__ __device__ float3 NormalizeNoCheck(const float3& input) {
float length = sqrtf(input.x * input.x + input.y * input.y + input.z * input.z);
return make_float3(input.x / length, input.y / length, input.z / length);
}
__forceinline__ __device__ float3 CrossProduct(const float3& a, const float3& b) {
return make_float3(a.y * b.z - b.y * a.z,
b.x * a.z - a.x * b.z,
a.x * b.y - b.x * a.y);
}
struct CUDALineTangents {
float3 t1;
float3 t2;
};
/// Computes tangent vectors to the direction which are used to define the
/// local parametrization.
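/// The helper axis is chosen as (0,1,0) when |direction.x| > 0.9 and (1,0,0) otherwise, so the
/// cross product is never taken with a (nearly) parallel vector; t1 and t2 then span the tangent
/// plane of the unit sphere at `direction` and are mutually orthogonal.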
__forceinline__ __device__ void ComputeTangentsForDirectionOrLine(
const float3& direction,
CUDALineTangents* tangents) {
tangents->t1 = NormalizeNoCheck(CrossProduct(direction, (fabs(direction.x) > 0.9f) ? make_float3(0, 1, 0) : make_float3(1, 0, 0)));
tangents->t2 = CrossProduct(direction, tangents->t1); // is already normalized
}
__forceinline__ __device__ void ApplyLocalUpdateToDirection(
float3* direction,
const CUDALineTangents& tangents,
float offset1,
float offset2) {
// Projection onto the sphere in the direction towards the origin.
// NOTE: We could theoretically divide by sqrt(1 + offset1 * offset1 + offset2 * offset2) to normalize here,
// but we do a full renormalization to prevent error accumulation.
*direction = NormalizeNoCheck(
make_float3(direction->x + offset1 * tangents.t1.x + offset2 * tangents.t2.x,
direction->y + offset1 * tangents.t1.y + offset2 * tangents.t2.y,
direction->z + offset1 * tangents.t1.z + offset2 * tangents.t2.z));
}
// opcount = 486
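// The function below evaluates the uniform cubic B-spline surface defined by the 4x4 control
// directions p at the fractional position (frac_x, frac_y), normalizes the result to a unit
// direction, and also returns its 3x2 Jacobian with respect to x and y. The term0..term113
// expressions look machine-generated (presumably by a CAS), so they are not meant to be edited
// by hand.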
template <typename Scalar>
__forceinline__ __device__ void CentralGenericBSpline_Unproject_ComputeResidualAndJacobian(Scalar frac_x, Scalar frac_y, float3 p[4][4], float3* result, CUDAMatrix<Scalar, 3, 2>* dresult_dxy) {
const Scalar term0 = 0.166666666666667f*frac_y;
const Scalar term1 = -term0 + 0.666666666666667f;
const Scalar term2 = (frac_y - 4) * (frac_y - 4);
const Scalar term3 = (frac_x - 4) * (frac_x - 4);
const Scalar term4 = 0.166666666666667f*frac_x;
const Scalar term5 = -term4 + 0.666666666666667f;
const Scalar term6 = p[0][0].x*term5;
const Scalar term7 = (frac_x - 3) * (frac_x - 3);
const Scalar term8 = term4 - 0.5f;
const Scalar term9 = p[0][3].x*term8;
const Scalar term10 = frac_x * frac_x;
const Scalar term11 = 0.5*frac_x*term10;
const Scalar term12 = 19.5f*frac_x - 5.5*term10 + term11 - 21.8333333333333f;
const Scalar term13 = -16*frac_x + 5*term10 - term11 + 16.6666666666667f;
const Scalar term14 = p[0][1].x*term12 + p[0][2].x*term13 + term3*term6 + term7*term9;
const Scalar term15 = term14*term2;
const Scalar term16 = term1*term15;
const Scalar term17 = term0 - 0.5f;
const Scalar term18 = (frac_y - 3) * (frac_y - 3);
const Scalar term19 = p[3][0].x*term5;
const Scalar term20 = p[3][3].x*term8;
const Scalar term21 = p[3][1].x*term12 + p[3][2].x*term13 + term19*term3 + term20*term7;
const Scalar term22 = term18*term21;
const Scalar term23 = term17*term22;
const Scalar term24 = frac_y * frac_y;
const Scalar term25 = 0.5f*frac_y*term24;
const Scalar term26 = -16*frac_y + 5*term24 - term25 + 16.6666666666667f;
const Scalar term27 = p[2][0].x*term5;
const Scalar term28 = p[2][3].x*term8;
const Scalar term29 = p[2][1].x*term12 + p[2][2].x*term13 + term27*term3 + term28*term7;
const Scalar term30 = term26*term29;
const Scalar term31 = 19.5f*frac_y - 5.5f*term24 + term25 - 21.8333333333333f;
const Scalar term32 = p[1][0].x*term5;
const Scalar term33 = p[1][3].x*term8;
const Scalar term34 = p[1][1].x*term12 + p[1][2].x*term13 + term3*term32 + term33*term7;
const Scalar term35 = term31*term34;
const Scalar term36 = term16 + term23 + term30 + term35;
const Scalar term37 = p[0][0].y*term5;
const Scalar term38 = p[0][3].y*term8;
const Scalar term39 = p[0][1].y*term12 + p[0][2].y*term13 + term3*term37 + term38*term7;
const Scalar term40 = term2*term39;
const Scalar term41 = term1*term40;
const Scalar term42 = p[3][0].y*term5;
const Scalar term43 = p[3][3].y*term8;
const Scalar term44 = p[3][1].y*term12 + p[3][2].y*term13 + term3*term42 + term43*term7;
const Scalar term45 = term18*term44;
const Scalar term46 = term17*term45;
const Scalar term47 = p[2][0].y*term5;
const Scalar term48 = p[2][3].y*term8;
const Scalar term49 = p[2][1].y*term12 + p[2][2].y*term13 + term3*term47 + term48*term7;
const Scalar term50 = term26*term49;
const Scalar term51 = p[1][0].y*term5;
const Scalar term52 = p[1][3].y*term8;
const Scalar term53 = p[1][1].y*term12 + p[1][2].y*term13 + term3*term51 + term52*term7;
const Scalar term54 = term31*term53;
const Scalar term55 = term41 + term46 + term50 + term54;
const Scalar term56 = p[0][0].z*term5;
const Scalar term57 = p[0][3].z*term8;
const Scalar term58 = p[0][1].z*term12 + p[0][2].z*term13 + term3*term56 + term57*term7;
const Scalar term59 = term2*term58;
const Scalar term60 = term1*term59;
const Scalar term61 = p[3][0].z*term5;
const Scalar term62 = p[3][3].z*term8;
const Scalar term63 = p[3][1].z*term12 + p[3][2].z*term13 + term3*term61 + term62*term7;
const Scalar term64 = term18*term63;
const Scalar term65 = term17*term64;
const Scalar term66 = p[2][0].z*term5;
const Scalar term67 = p[2][3].z*term8;
const Scalar term68 = p[2][1].z*term12 + p[2][2].z*term13 + term3*term66 + term67*term7;
const Scalar term69 = term26*term68;
const Scalar term70 = p[1][0].z*term5;
const Scalar term71 = p[1][3].z*term8;
const Scalar term72 = p[1][1].z*term12 + p[1][2].z*term13 + term3*term70 + term7*term71;
const Scalar term73 = term31*term72;
const Scalar term74 = term60 + term65 + term69 + term73;
const Scalar term75 = (term36 * term36) + (term55 * term55) + (term74 * term74);
const Scalar term76 = 1.f / sqrt(term75);
const Scalar term77 = term1*term2;
const Scalar term78 = 0.166666666666667f*term3;
const Scalar term79 = 0.166666666666667f*term7;
const Scalar term80 = 1.5f*term10;
const Scalar term81 = -11.0f*frac_x + term80 + 19.5f;
const Scalar term82 = 10*frac_x - term80 - 16;
const Scalar term83 = 2*frac_x;
const Scalar term84 = term83 - 8;
const Scalar term85 = term83 - 6;
const Scalar term86 = term17*term18;
const Scalar term87 = term26*(-p[2][0].x*term78 + p[2][1].x*term81 + p[2][2].x*term82 + p[2][3].x*term79 + term27*term84 + term28*term85) + term31*(-p[1][0].x*term78 + p[1][1].x*term81 + p[1][2].x*term82 + p[1][3].x*term79 + term32*term84 + term33*term85) + term77*(-p[0][0].x*term78 + p[0][1].x*term81 + p[0][2].x*term82 + p[0][3].x*term79 + term6*term84 + term85*term9) + term86*(-p[3][0].x*term78 + p[3][1].x*term81 + p[3][2].x*term82 + p[3][3].x*term79 + term19*term84 + term20*term85);
const Scalar term88b = 1.f / sqrt(term75);
const Scalar term88 = term88b * term88b * term88b;
const Scalar term89 = (1.0f/2.0f)*term16 + (1.0f/2.0f)*term23 + (1.0f/2.0f)*term30 + (1.0f/2.0f)*term35;
const Scalar term90 = (1.0f/2.0f)*term41 + (1.0f/2.0f)*term46 + (1.0f/2.0f)*term50 + (1.0f/2.0f)*term54;
const Scalar term91 = term26*(-p[2][0].y*term78 + p[2][1].y*term81 + p[2][2].y*term82 + p[2][3].y*term79 + term47*term84 + term48*term85) + term31*(-p[1][0].y*term78 + p[1][1].y*term81 + p[1][2].y*term82 + p[1][3].y*term79 + term51*term84 + term52*term85) + term77*(-p[0][0].y*term78 + p[0][1].y*term81 + p[0][2].y*term82 + p[0][3].y*term79 + term37*term84 + term38*term85) + term86*(-p[3][0].y*term78 + p[3][1].y*term81 + p[3][2].y*term82 + p[3][3].y*term79 + term42*term84 + term43*term85);
const Scalar term92 = (1.0f/2.0f)*term60 + (1.0f/2.0f)*term65 + (1.0f/2.0f)*term69 + (1.0f/2.0f)*term73;
const Scalar term93 = term26*(-p[2][0].z*term78 + p[2][1].z*term81 + p[2][2].z*term82 + p[2][3].z*term79 + term66*term84 + term67*term85) + term31*(-p[1][0].z*term78 + p[1][1].z*term81 + p[1][2].z*term82 + p[1][3].z*term79 + term70*term84 + term71*term85) + term77*(-p[0][0].z*term78 + p[0][1].z*term81 + p[0][2].z*term82 + p[0][3].z*term79 + term56*term84 + term57*term85) + term86*(-p[3][0].z*term78 + p[3][1].z*term81 + p[3][2].z*term82 + p[3][3].z*term79 + term61*term84 + term62*term85);
const Scalar term94 = 2*term88*(term87*term89 + term90*term91 + term92*term93);
const Scalar term95 = 1.5f*term24;
const Scalar term96 = 10*frac_y - term95 - 16;
const Scalar term97 = term29*term96;
const Scalar term98 = -11.0f*frac_y + term95 + 19.5f;
const Scalar term99 = term34*term98;
const Scalar term100 = 2*frac_y;
const Scalar term101 = term1*(term100 - 8);
const Scalar term102 = term101*term14;
const Scalar term103 = term17*(term100 - 6);
const Scalar term104 = term103*term21;
const Scalar term105 = term49*term96;
const Scalar term106 = term53*term98;
const Scalar term107 = term101*term39;
const Scalar term108 = term103*term44;
const Scalar term109 = term68*term96;
const Scalar term110 = term72*term98;
const Scalar term111 = term101*term58;
const Scalar term112 = term103*term63;
const Scalar term113 = term88*(term89*(2*term102 + 2*term104 - 0.333333333333333f*term15 + 0.333333333333333f*term22 + 2*term97 + 2*term99) + term90*(2*term105 + 2*term106 + 2*term107 + 2*term108 - 0.333333333333333f*term40 + 0.333333333333333f*term45) + term92*(2*term109 + 2*term110 + 2*term111 + 2*term112 - 0.333333333333333f*term59 + 0.333333333333333f*term64));
(*result).x = term36*term76;
(*result).y = term55*term76;
(*result).z = term74*term76;
(*dresult_dxy)(0, 0) = -term36*term94 + term76*term87;
(*dresult_dxy)(0, 1) = -term113*term36 + term76*(term102 + term104 - 0.166666666666667f*term15 + 0.166666666666667f*term22 + term97 + term99);
(*dresult_dxy)(1, 0) = -term55*term94 + term76*term91;
(*dresult_dxy)(1, 1) = -term113*term55 + term76*(term105 + term106 + term107 + term108 - 0.166666666666667f*term40 + 0.166666666666667f*term45);
(*dresult_dxy)(2, 0) = -term74*term94 + term76*term93;
(*dresult_dxy)(2, 1) = -term113*term74 + term76*(term109 + term110 + term111 + term112 - 0.166666666666667f*term59 + 0.166666666666667f*term64);
}
class CUDACentralGenericModel : public CUDACameraModel {
friend class CentralGenericModel;
public:
template <bool have_replacement>
__forceinline__ __device__ bool UnprojectWithJacobian(float x, float y, float3* result, CUDAMatrix<float, 3, 2>* dresult_dxy, int gx = -9999, int gy = -9999, float3* replacement_direction = nullptr) const {
if (!IsInCalibratedArea(x, y)) {
return false;
}
float2 grid_point = PixelCornerConvToGridPoint(x, y);
grid_point.x += 2;
grid_point.y += 2;
int ix = ::floor(grid_point.x);
int iy = ::floor(grid_point.y);
float frac_x = grid_point.x - (ix - 3);
float frac_y = grid_point.y - (iy - 3);
float3 p[4][4];
for (int y = 0; y < 4; ++ y) {
for (int x = 0; x < 4; ++ x) {
if (have_replacement && ix - 3 + x == gx && iy - 3 + y == gy) {
p[y][x] = *replacement_direction;
} else {
p[y][x] = m_grid(iy - 3 + y, ix - 3 + x);
}
}
}
CentralGenericBSpline_Unproject_ComputeResidualAndJacobian(frac_x, frac_y, p, result, dresult_dxy);
(*dresult_dxy)(0, 0) = PixelScaleToGridScaleX((*dresult_dxy)(0, 0));
(*dresult_dxy)(0, 1) = PixelScaleToGridScaleY((*dresult_dxy)(0, 1));
(*dresult_dxy)(1, 0) = PixelScaleToGridScaleX((*dresult_dxy)(1, 0));
(*dresult_dxy)(1, 1) = PixelScaleToGridScaleY((*dresult_dxy)(1, 1));
(*dresult_dxy)(2, 0) = PixelScaleToGridScaleX((*dresult_dxy)(2, 0));
(*dresult_dxy)(2, 1) = PixelScaleToGridScaleY((*dresult_dxy)(2, 1));
return true;
}
__forceinline__ __device__ bool ProjectWithInitialEstimate(const float3& point, float2* result) const {
// NOTE: We do not handle the special case of ||point|| == 0 here,
// as the resulting NaN/Inf should lead to the point not being
// projected anyway.
float length = sqrtf(point.x * point.x + point.y * point.y + point.z * point.z);
return ProjectDirectionWithInitialEstimate</*have_replacement*/ false>(
make_float3(point.x / length, point.y / length, point.z / length),
result);
}
/// NOTE: This function allows replacing one grid value at the given coordinate (gx, gy) with replacement_direction.
template <bool have_replacement>
__forceinline__ __device__ bool ProjectDirectionWithInitialEstimate(const float3& point_direction, float2* result, int gx = -9999, int gy = -9999, float3* replacement_direction = nullptr) const {
// Levenberg-Marquardt optimization algorithm.
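// Each iteration builds the 2x2 Gauss-Newton system from the unprojection Jacobian
// (H = J^T J, b = J^T r with r = direction - point_direction), solves the damped system
// (H + lambda * I) x = b by elimination, and tentatively steps result -= x. The step is accepted
// (and lambda halved) only if it lowers the squared residual; otherwise lambda is doubled and
// the solve is retried.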
constexpr float kEpsilon = 1e-10f; // NOTE: This threshold has been increased compared to the CPU version, which uses 1e-12f.
const usize kMaxIterations = 100;
double lambda = -1;
for (usize i = 0; i < kMaxIterations; ++i) {
CUDAMatrix<float, 3, 2> ddxy_dxy;
float3 direction;
/*CHECK(*/ UnprojectWithJacobian<have_replacement>(result->x, result->y, &direction, &ddxy_dxy, gx, gy, replacement_direction);
// (Non-squared) residuals.
float dx = direction.x - point_direction.x;
float dy = direction.y - point_direction.y;
float dz = direction.z - point_direction.z;
float cost = dx * dx + dy * dy + dz * dz;
// Accumulate H and b.
float H_0_0 = ddxy_dxy(0, 0) * ddxy_dxy(0, 0) + ddxy_dxy(1, 0) * ddxy_dxy(1, 0) + ddxy_dxy(2, 0) * ddxy_dxy(2, 0);
float H_1_0_and_0_1 = ddxy_dxy(0, 0) * ddxy_dxy(0, 1) + ddxy_dxy(1, 0) * ddxy_dxy(1, 1) + ddxy_dxy(2, 0) * ddxy_dxy(2, 1);
float H_1_1 = ddxy_dxy(0, 1) * ddxy_dxy(0, 1) + ddxy_dxy(1, 1) * ddxy_dxy(1, 1) + ddxy_dxy(2, 1) * ddxy_dxy(2, 1);
float b_0 = dx * ddxy_dxy(0, 0) + dy * ddxy_dxy(1, 0) + dz * ddxy_dxy(2, 0);
float b_1 = dx * ddxy_dxy(0, 1) + dy * ddxy_dxy(1, 1) + dz * ddxy_dxy(2, 1);
if (lambda < 0) {
constexpr double kInitialLambdaFactor = 0.01;
lambda = kInitialLambdaFactor * 0.5 * (H_0_0 + H_1_1);
}
bool update_accepted = false;
for (int lm_iteration = 0; lm_iteration < 10; ++ lm_iteration) {
double H_0_0_LM = H_0_0 + lambda;
double H_1_1_LM = H_1_1 + lambda;
// Solve the system.
double x_1 = (b_1 - H_1_0_and_0_1 / H_0_0_LM * b_0) /
(H_1_1_LM - H_1_0_and_0_1 * H_1_0_and_0_1 / H_0_0_LM);
double x_0 = (b_0 - H_1_0_and_0_1 * x_1) / H_0_0_LM;
// // Perform in-place Cholesky decomposition of H
// H_0_0 = sqrtf(H_0_0);
// H_1_0_and_0_1 = H_1_0_and_0_1 / H_0_0;
// H_1_1 = sqrtf(H_1_1 - H_1_0_and_0_1 * H_1_0_and_0_1);
//
// // Solve H * x = b for x.
// //
// // (H_0_0 0) (H_0_0 H_0_1) (x0) (b0)
// // (H_1_0 H_1_1) * ( 0 H_1_1) * (x1) = (b1)
// //
// // Naming the result of the second multiplication y, we get:
// //
// // (H_0_0 0) (y0) (b0)
// // (H_1_0 H_1_1) * (y1) = (b1)
// //
// // and:
// //
// // (H_0_0 H_0_1) * (x0) = (y0)
// // ( 0 H_1_1) (x1) = (y1)
//
// float y_0 = b_0 / H_0_0;
// float y_1 = (b_1 - H_1_0_and_0_1 * y_0) / H_1_1;
//
// float x_1 = y_1 / H_1_1;
// float x_0 = (y_0 - H_1_0_and_0_1 * x_1) / H_0_0;
// Compute the test state.
float2 test_result = make_float2(result->x - x_0, result->y - x_1);
// Compute the test cost.
#ifdef __CUDA_ARCH__
float test_cost = CUDART_INF_F;
#else
float test_cost = 999999;
#endif
float3 test_direction;
// TODO: The Jacobian is not needed here
if (UnprojectWithJacobian<have_replacement>(test_result.x, test_result.y, &test_direction, &ddxy_dxy, gx, gy, replacement_direction)) {
float test_dx = test_direction.x - point_direction.x;
float test_dy = test_direction.y - point_direction.y;
float test_dz = test_direction.z - point_direction.z;
test_cost = test_dx * test_dx + test_dy * test_dy + test_dz * test_dz;
}
if (test_cost < cost) {
lambda *= 0.5;
*result = test_result;
update_accepted = true;
break;
} else {
lambda *= 2;
}
}
if (!update_accepted) {
// if (cost >= kEpsilon) {
// LOG(WARNING) << "No update found and not converged. Current state: " << result->transpose();
// }
return cost < kEpsilon;
}
if (cost < kEpsilon) {
return true;
}
}
// LOG(WARNING) << "Not converged. Current state: " << result->transpose();
return false;
}
__forceinline__ __device__ int calibration_min_x() const {
return m_calibration_min_x;
}
__forceinline__ __device__ int calibration_min_y() const {
return m_calibration_min_y;
}
__forceinline__ __device__ int calibration_max_x() const {
return m_calibration_max_x;
}
__forceinline__ __device__ int calibration_max_y() const {
return m_calibration_max_y;
}
__forceinline__ __device__ bool IsInCalibratedArea(float x, float y) const {
return x >= m_calibration_min_x && y >= m_calibration_min_y &&
x < m_calibration_max_x + 1 && y < m_calibration_max_y + 1;
}
__forceinline__ __device__ bool is_central_camera_model() const {
return true;
}
/// For x and y in [0, m_grid.width()/height()[, returns the location of that
/// grid point in pixel-corner coordinate origin convention.
__forceinline__ __device__ float2 GridPointToPixelCornerConv(int x, int y) const {
return make_float2(
m_calibration_min_x + ((x - 1.f) / (m_grid.width() - 3.f)) * (m_calibration_max_x + 1 - m_calibration_min_x),
m_calibration_min_y + ((y - 1.f) / (m_grid.height() - 3.f)) * (m_calibration_max_y + 1 - m_calibration_min_y));
}
__forceinline__ __device__ float GridScaleToPixelScaleX(float length) const {
return length * ((m_calibration_max_x + 1 - m_calibration_min_x) / (m_grid.width() - 3.f));
}
__forceinline__ __device__ float GridScaleToPixelScaleY(float length) const {
return length * ((m_calibration_max_y + 1 - m_calibration_min_y) / (m_grid.height() - 3.f));
}
/// Inverse of GridPointToPixelCornerConv().
__forceinline__ __device__ float2 PixelCornerConvToGridPoint(float x, float y) const {
return make_float2(
1.f + (m_grid.width() - 3.f) * (x - m_calibration_min_x) / (m_calibration_max_x + 1 - m_calibration_min_x),
1.f + (m_grid.height() - 3.f) * (y - m_calibration_min_y) / (m_calibration_max_y + 1 - m_calibration_min_y));
}
__forceinline__ __device__ float PixelScaleToGridScaleX(float length) const {
return length * ((m_grid.width() - 3.f) / (m_calibration_max_x + 1 - m_calibration_min_x));
}
__forceinline__ __device__ float PixelScaleToGridScaleY(float length) const {
return length * ((m_grid.height() - 3.f) / (m_calibration_max_y + 1 - m_calibration_min_y));
}
static const int IntrinsicsJacobianSize = 2 * 16;
__forceinline__ __device__ bool ProjectionJacobianWrtIntrinsics(
const float3& local_point,
const float2& projected_pixel,
float numerical_diff_delta,
u32* grid_update_indices,
float* intrinsic_jac_x,
float* intrinsic_jac_y) {
float length = sqrtf(local_point.x * local_point.x + local_point.y * local_point.y + local_point.z * local_point.z);
float3 point_direction =
make_float3(local_point.x / length, local_point.y / length, local_point.z / length);
float2 grid_point = PixelCornerConvToGridPoint(projected_pixel.x, projected_pixel.y);
int ix = ::floor(grid_point.x);
int iy = ::floor(grid_point.y);
if (!(ix >= 1 && iy >= 1 && ix + 2 < m_grid.width() && iy + 2 < m_grid.height())) { // catches NaNs
return false;
}
int local_index = 0;
for (int y = 0; y < 4; ++ y) {
int gy = iy + y - 1;
// CHECK_GE(gy, 0);
// CHECK_LT(gy, m_grid.height());
for (int x = 0; x < 4; ++ x) {
int gx = ix + x - 1;
// CHECK_GE(gx, 0);
// CHECK_LT(gx, m_grid.width());
int sequential_index = gx + gy * m_grid.width();
grid_update_indices[local_index + 0] = 2 * sequential_index + 0;
grid_update_indices[local_index + 1] = 2 * sequential_index + 1;
CUDALineTangents tangents;
ComputeTangentsForDirectionOrLine(m_grid(gy, gx), &tangents);
#pragma unroll
for (int d = 0; d < 2; ++ d) {
float3 test_direction = m_grid(gy, gx);
ApplyLocalUpdateToDirection(
&test_direction, tangents,
(d == 0) ? numerical_diff_delta : 0,
(d == 1) ? numerical_diff_delta : 0);
float2 test_projected_pixel = projected_pixel;
bool success = ProjectDirectionWithInitialEstimate</*have_replacement*/ true>(point_direction, &test_projected_pixel, gx, gy, &test_direction);
if (!success) {
return false;
}
intrinsic_jac_x[local_index + d] = (test_projected_pixel.x - projected_pixel.x) / numerical_diff_delta;
intrinsic_jac_y[local_index + d] = (test_projected_pixel.y - projected_pixel.y) / numerical_diff_delta;
}
local_index += 2;
}
}
// CHECK_EQ(local_index, grid_update_indices->size());
return true;
}
const CUDABuffer_<float3>& grid() const { return m_grid; }
private:
/// Size of the camera images in pixels.
int m_width;
int m_height;
/// Extents of the calibrated image area within the image bounds.
int m_calibration_min_x;
int m_calibration_min_y;
int m_calibration_max_x;
int m_calibration_max_y;
CUDABuffer_<float3> m_grid;
};
}
|
the_stack
|
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/imageProcessing/imageProcessing.h"
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
namespace Saiga
{
namespace CUDA
{
static void checkRes(ImageView<float> ref, ImageView<float> dst)
{
for (int y = 0; y < ref.rows; ++y)
{
for (int x = 0; x < ref.cols; ++x)
{
#if 0
cout << dst(y,x) << " ";
#else
if (std::abs(dst(y, x) - ref(y, x)) > 1e-5)
{
std::cout << "error (" << x << "," << y << ") " << dst(y, x) << "!=" << ref(y, x) << std::endl;
SAIGA_ASSERT(0);
}
#endif
}
// cout << endl;
}
}
// static void checkRes2(const thrust::host_vector<float>& ref, const thrust::host_vector<float>& dst){
// int c = 0;
// for(int i = 0; i < (int)dst.size();++i){
// auto refv = 9.0f;
// if(std::abs(dst[i] - refv) > 1e-5){
// cout << "error " << (i/2048) << "," << (i%2048) << " " << dst[i] << "!=" << refv << endl;
// c++;
// SAIGA_ASSERT(c < 5);
// }
// }
//}
template <int KERNEL_RADIUS>
void convolutionTest2(int w, int h)
{
CUDA_SYNC_CHECK_ERROR();
size_t N = w * h;
size_t readWrites = N * 2 * sizeof(float);
size_t pitch = Saiga::iAlignUp(sizeof(float) * w, 1024 * 1024);
size_t size = pitch * h;
thrust::device_vector<char> src(size, 0);
thrust::device_vector<char> dest(size, 0);
thrust::device_vector<char> tmp(size, 0);
thrust::host_vector<char> h_src = src;
thrust::host_vector<char> h_dest = dest;
thrust::host_vector<char> h_tmp = dest;
thrust::host_vector<char> h_ref = dest;
ImageView<float> imgSrc(h, w, pitch, thrust::raw_pointer_cast(src.data()));
ImageView<float> imgDst(h, w, pitch, thrust::raw_pointer_cast(dest.data()));
ImageView<float> imgTmp(h, w, pitch, thrust::raw_pointer_cast(tmp.data()));
ImageView<float> h_imgSrc(h, w, pitch, thrust::raw_pointer_cast(h_src.data()));
ImageView<float> h_imgDst(h, w, pitch, thrust::raw_pointer_cast(h_dest.data()));
ImageView<float> h_imgTmp(h, w, pitch, thrust::raw_pointer_cast(h_tmp.data()));
int its = 50;
// float sigma = 2.0f;
// thrust::device_vector<float> d_kernel = createGaussianBlurKernel(KERNEL_RADIUS,sigma);
thrust::device_vector<float> d_kernel(2 * KERNEL_RADIUS + 1, 1.0f);
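// With the source image set to all ones below and this all-ones kernel, every output pixel of the
// separable convolution should equal (2 * KERNEL_RADIUS + 1)^2 (e.g. 81 for radius 4), which makes
// the reference results easy to sanity-check.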
thrust::host_vector<float> h_kernel(d_kernel);
{
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < w; ++x)
{
// h_imgSrc(y,x) = (rand()%3) - 1;
h_imgSrc(y, x) = 1;
}
}
src = h_src;
}
#if 0
// cout << "first pixels: " << h_imgSrc(0,0) << " " << h_imgSrc(0,1) << " " << h_imgSrc(1,0) << " " << h_imgSrc(1,1) << endl;
int fx = 508 % w;
int fy = 508 / w;
cout << "debug pixel: " << fx << " " << fy << endl;
for (int j=-KERNEL_RADIUS;j<=KERNEL_RADIUS;j++){
float sum =0;
for (int i=-KERNEL_RADIUS;i<=KERNEL_RADIUS;i++){
auto v = h_imgSrc.clampedRead(fy+j,fx+i);
sum += v;
cout << v << " ";
}
cout << " row sum: " << sum << endl;
}
#endif
Saiga::CUDA::PerformanceTestHelper pth("convolutionTest radius=" + std::to_string(KERNEL_RADIUS) +
" ImageSize: " + std::to_string(w) + "x" + std::to_string(h),
readWrites);
// this takes too long :D
#if 0
{
float time;
{
Saiga::ScopedTimer<float> t(&time);
for(int y = 0; y < h; ++y){
for(int x = 0; x < w; ++x){
float sum = 0;
for (int j=-KERNEL_RADIUS;j<=KERNEL_RADIUS;j++){
float innerSum = 0;
for (int i=-KERNEL_RADIUS;i<=KERNEL_RADIUS;i++){
innerSum += h_imgSrc.clampedRead(y +j ,x + i) * h_kernel[i+KERNEL_RADIUS];
}
sum += innerSum * h_kernel[j+KERNEL_RADIUS];
}
h_imgDst(y,x) = sum;
}
}
}
pth.addMeassurement("CPU Convolve",time);
h_ref = h_dest;
}
#endif
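// CPU reference below relies on kernel separability: for a kernel that factors as
// K(x,y) = k(x) * k(y) (true for the constant all-ones kernel above and for Gaussians),
// the 2D convolution equals a horizontal pass followed by a vertical pass, costing
// 2*(2*KERNEL_RADIUS+1) multiply-adds per pixel instead of (2*KERNEL_RADIUS+1)^2
// for the full 2D sum disabled above.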
{
float time;
{
Saiga::ScopedTimer<float> t(&time);
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < w; ++x)
{
float sum = 0;
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += h_imgSrc.clampedRead(y, x + j) * h_kernel[j + KERNEL_RADIUS];
}
h_imgTmp(y, x) = sum;
}
}
for (int x = 0; x < w; ++x)
{
for (int y = 0; y < h; ++y)
{
float sum = 0;
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += h_imgTmp.clampedRead(y + j, x) * h_kernel[j + KERNEL_RADIUS];
}
h_imgDst(y, x) = sum;
}
}
}
pth.addMeassurement("CPU Convolve Separate", time);
h_ref = h_dest;
}
#if 0
{
thrust::device_vector<float> d_kernel = h_kernel;
dest = src;
auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]()
{
convolveSinglePassSeparateOuterLinear(imgSrc,imgDst,d_kernel,KERNEL_RADIUS);
});
pth.addMeassurement("convolveSinglePassSeparateOuterLinear",st.median);
checkRes(h_ref,thrust::host_vector<float>(dest));
}
{
thrust::device_vector<float> d_kernel = h_kernel;
dest = src;
auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]()
{
convolveSinglePassSeparateOuterHalo(imgSrc,imgDst,d_kernel,KERNEL_RADIUS);
});
pth.addMeassurement("convolveSinglePassSeparateOuterHalo",st.median);
checkRes(h_ref,thrust::host_vector<float>(dest));
}
{
thrust::device_vector<float> d_kernel = h_kernel;
dest = src;
auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]()
{
convolveSinglePassSeparateInner(imgSrc,imgDst,d_kernel,KERNEL_RADIUS);
});
pth.addMeassurement("convolveSinglePassSeparateInner",st.median);
checkRes(h_ref,thrust::host_vector<float>(dest));
}
#endif
{
thrust::device_vector<float> d_kernel = h_kernel;
dest = src;
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() { convolveSinglePassSeparateInner75(imgSrc, imgDst, d_kernel, KERNEL_RADIUS); });
pth.addMeassurement("convolveSinglePassSeparateInner75", st.median);
// checkRes(h_ref,thrust::host_vector<float>(dest));
}
CUDA_SYNC_CHECK_ERROR();
{
thrust::device_vector<float> d_kernel = h_kernel;
// dest = src;
thrust::fill(dest.begin(), dest.end(), (char)0);
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() { convolveSinglePassSeparateInnerShuffle(imgSrc, imgDst, d_kernel, KERNEL_RADIUS); });
pth.addMeassurement("convolveSinglePassSeparateInnerShuffle", st.median);
thrust::host_vector<char> d(dest);
ImageView<float> i(h, w, pitch, thrust::raw_pointer_cast(d.data()));
checkRes(h_imgDst, i);
// checkRes2(h_ref,thrust::host_vector<float>(dest));
}
CUDA_SYNC_CHECK_ERROR();
#if 1
{
dest = src;
tmp = src;
thrust::device_vector<float> d_kernel = h_kernel;
auto st1 = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() { convolveRow(imgSrc, imgTmp, d_kernel, KERNEL_RADIUS); });
pth.addMeassurement("GPU Convolve Separate Row", st1.median);
auto st2 = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(
its, [&]() { convolveCol(imgTmp, imgDst, d_kernel, KERNEL_RADIUS); });
pth.addMeassurement("GPU Convolve Separate Col", st2.median);
pth.addMeassurement("GPU Convolve Separate Total", st1.median + st2.median);
// checkRes(h_ref,thrust::host_vector<float>(dest));
}
#endif
{
auto st = Saiga::measureObject<Saiga::CUDA::ScopedTimer>(its, [&]() {
cudaMemcpy(thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(src.data()), N * sizeof(float),
cudaMemcpyDeviceToDevice);
});
pth.addMeassurement("cudaMemcpy", st.median);
}
CUDA_SYNC_CHECK_ERROR();
}
void convolutionTest()
{
// convolutionTest2<3>(17,53);
int w = 2048;
int h = 1024;
// int w = 512;
// int h = 256;
// convolutionTest2<1>(w,h);
// convolutionTest2<2>(w,h);
// convolutionTest2<3>(w,h);
convolutionTest2<4>(w, h);
// convolutionTest2<5>(w,h);
// convolutionTest2<6>(w,h);
// convolutionTest2<7>(w,h);
// convolutionTest2<8>(w,h);
// convolutionTest2<9>(w,h);
// convolutionTest2<10>(w,h);
// convolutionTest2<11>(w,h);
// convolutionTest2<12>(w,h);
// convolutionTest2<13>(w,h);
// convolutionTest2<14>(w,h);
// convolutionTest2<15>(w,h);
// convolutionTest2<16>(w,h);
}
} // namespace CUDA
} // namespace Saiga
|
the_stack
|
#include <doctest.h>
#include <heteroflow/heteroflow.hpp>
// ----------------------------------------------------------------------------
// Parameters
// ----------------------------------------------------------------------------
const size_t C = std::min(16u, std::thread::hardware_concurrency());
const size_t G = std::min(4u, hf::cuda::num_devices());
// ----------------------------------------------------------------------------
// Kernel
// ----------------------------------------------------------------------------
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] = value;
}
}
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] += value;
}
}
template <typename T>
__global__ void k_single_add(T* ptr, size_t N, int idx, T value) {
ptr[idx] += value;
}
// --------------------------------------------------------
// Testcase: static
// --------------------------------------------------------
TEST_CASE("static" * doctest::timeout(300)) {
hf::Executor executor(C, G);
REQUIRE(executor.num_cpu_workers() == C);
REQUIRE(executor.num_gpu_workers() == G);
REQUIRE(executor.num_workers() == C + G);
hf::Heteroflow hf;
REQUIRE(hf.empty() == true);
REQUIRE(hf.size() == 0);
hf::HostTask host;
hf::SpanTask span;
hf::KernelTask kernel;
hf::CopyTask copy;
REQUIRE(host.empty() == true);
REQUIRE(span.empty() == true);
REQUIRE(kernel.empty() == true);
REQUIRE(copy.empty() == true);
auto host2 = hf.placeholder<hf::HostTask>();
auto span2 = hf.placeholder<hf::SpanTask>();
auto copy2 = hf.placeholder<hf::CopyTask>();
auto kernel2 = hf.placeholder<hf::KernelTask>();
REQUIRE(host2.empty() == false);
REQUIRE(span2.empty() == false);
REQUIRE(copy2.empty() == false);
REQUIRE(kernel2.empty() == false);
REQUIRE(hf.size() == 4);
REQUIRE(hf.empty() == false);
host = host2;
copy = copy2;
kernel = kernel2;
span = span2;
REQUIRE((host == host2 && host.empty() == false));
REQUIRE((copy == copy2 && copy.empty() == false));
REQUIRE((span == span2 && span.empty() == false));
REQUIRE((kernel == kernel2 && kernel.empty() == false));
REQUIRE(hf.size() == 4);
REQUIRE(hf.empty() == false);
hf::HostTask host3(host2);
hf::SpanTask span3(span2);
hf::CopyTask copy3(copy2);
hf::KernelTask kernel3(kernel2);
REQUIRE((host3 == host && host2 == host));
REQUIRE((span3 == span && span2 == span));
REQUIRE((copy3 == copy && copy2 == copy));
REQUIRE((kernel3 == kernel && kernel2 == kernel));
REQUIRE(hf.size() == 4);
REQUIRE(hf.empty() == false);
}
// --------------------------------------------------------
// Testcase: host-tasks
// --------------------------------------------------------
TEST_CASE("host-tasks" * doctest::timeout(300)) {
const size_t num_tasks = 100;
SUBCASE("Empty") {
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
hf::Heteroflow heteroflow;
REQUIRE(heteroflow.size() == 0);
REQUIRE(heteroflow.empty() == true);
executor.run(heteroflow).wait();
}
}
SUBCASE("Placeholder") {
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
hf::Heteroflow heteroflow;
std::atomic<int> counter {0};
std::vector<hf::HostTask> hosts;
for(size_t i=0; i<num_tasks; ++i) {
hosts.emplace_back(
heteroflow.placeholder<hf::HostTask>().name(std::to_string(i))
);
}
for(size_t i=0; i<num_tasks; ++i) {
REQUIRE(hosts[i].name() == std::to_string(i));
REQUIRE(hosts[i].num_dependents() == 0);
REQUIRE(hosts[i].num_successors() == 0);
}
for(auto& host : hosts) {
host.host([&counter](){ counter++; });
}
executor.run(heteroflow).get();
REQUIRE(counter == num_tasks);
}
}
SUBCASE("EmbarrassinglyParallel"){
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
hf::Heteroflow heteroflow;
std::atomic<int> counter {0};
std::vector<hf::HostTask> tasks;
for(size_t i=0;i<num_tasks;i++) {
tasks.emplace_back(heteroflow.host([&counter]() {counter += 1;}));
}
REQUIRE(heteroflow.size() == num_tasks);
executor.run(heteroflow).get();
REQUIRE(counter == num_tasks);
REQUIRE(heteroflow.size() == 100);
counter = 0;
for(size_t i=0;i<num_tasks;i++){
tasks.emplace_back(heteroflow.host([&counter]() {counter += 1;}));
}
REQUIRE(heteroflow.size() == num_tasks * 2);
executor.run(heteroflow).get();
REQUIRE(counter == num_tasks * 2);
REQUIRE(heteroflow.size() == 200);
}
}
SUBCASE("ParallelFor") {
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
// Range for
for(size_t i=0; i<num_tasks; i++) {
hf::Heteroflow heteroflow;
std::atomic<int> counter{0};
auto N = ::rand() % 4098 + 1;
std::vector<int> vec(N, 20);
heteroflow.parallel_for(vec.begin(), vec.end(), [&](int i){
counter += i;
});
executor.run(heteroflow).wait();
auto res = std::accumulate(vec.begin(), vec.end(), 0, std::plus<int>());
REQUIRE(counter == res);
}
// Index for
for(size_t i=0; i<num_tasks; i++) {
std::atomic<int> counter{0};
hf::Heteroflow heteroflow;
auto N = ::rand() % 4098 + 1;
auto S = std::min(::rand()%10, N) + 1;
heteroflow.parallel_for(0, N, S, [&](int){ ++counter; });
executor.run(heteroflow).wait();
auto res = 0;
for(auto i=0; i<N; i+=S) {
++res;
}
REQUIRE(counter == res);
}
}
}
SUBCASE("BinarySequence"){
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
hf::Heteroflow heteroflow;
std::atomic<int> counter {0};
std::vector<hf::HostTask> tasks;
for(size_t i=0;i<num_tasks;i++){
if(i%2 == 0){
tasks.emplace_back(heteroflow.host(
[&counter]() { REQUIRE(counter == 0); counter += 1;}
));
}
else{
tasks.emplace_back(heteroflow.host(
[&counter]() { REQUIRE(counter == 1); counter -= 1;}
));
}
if(i>0){
tasks[i-1].precede(tasks[i]);
}
if(i==0) {
REQUIRE(tasks[i].num_dependents() == 0);
}
else {
REQUIRE(tasks[i].num_dependents() == 1);
}
}
executor.run(heteroflow).get();
}
}
SUBCASE("LinearCounter"){
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
hf::Heteroflow heteroflow;
std::atomic<int> counter {0};
std::vector<hf::HostTask> tasks;
for(size_t i=0;i<num_tasks;i++){
tasks.emplace_back(
heteroflow.host([&counter, i]() {
REQUIRE(counter == i); counter += 1;}
)
);
if(i>0){
tasks[i-1].precede(tasks[i]);
}
}
executor.run(heteroflow).get();
REQUIRE(counter == num_tasks);
REQUIRE(heteroflow.size() == num_tasks);
}
}
SUBCASE("Broadcast"){
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
hf::Heteroflow heteroflow;
std::atomic<int> counter {0};
std::vector<hf::HostTask> tasks;
auto src = heteroflow.host([&counter]() {counter -= 1;});
for(size_t i=1; i<num_tasks; i++){
auto tgt = heteroflow.host([&counter]() {REQUIRE(counter == -1);});
src.precede(tgt);
}
executor.run(heteroflow).get();
REQUIRE(counter == - 1);
REQUIRE(heteroflow.size() == num_tasks);
}
}
SUBCASE("Succeed"){
for(size_t W=1; W<=C; ++W) {
hf::Executor executor(W);
hf::Heteroflow heteroflow;
std::atomic<int> counter {0};
std::vector<hf::HostTask> tasks;
auto dst = heteroflow.host([&]() { REQUIRE(counter == num_tasks - 1);});
for(size_t i=1;i<num_tasks;i++){
auto src = heteroflow.host([&counter]() {counter += 1;});
dst.succeed(src);
}
executor.run(heteroflow).get();
REQUIRE(counter == num_tasks - 1);
REQUIRE(heteroflow.size() == num_tasks);
}
}
}
// --------------------------------------------------------
// Testcase: span
// --------------------------------------------------------
TEST_CASE("span" * doctest::timeout(300)) {
const size_t num_tasks = 4096;
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<num_tasks; ++i) {
auto bytes = ::rand()% 1024;
heteroflow.span(bytes);
}
executor.run(heteroflow).wait();
}
}
}
// --------------------------------------------------------
// Testcase: memset
// --------------------------------------------------------
TEST_CASE("memset" * doctest::timeout(300)) {
const size_t num_tasks = 100;
SUBCASE("span-fill") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<num_tasks; ++i) {
auto ndata= ::rand()%4096 + 1;
auto ptr = new char[ndata];
auto span = heteroflow.span(ndata);
auto fill = heteroflow.fill(span, ndata, 'z');
auto push = heteroflow.copy(ptr, span, ndata);
auto host = heteroflow.host([=](){
for(auto j=0; j<ndata; j++) {
REQUIRE(ptr[j] == 'z');
}
delete [] ptr;
});
fill.succeed(span).precede(push);
push.precede(host);
}
executor.run(heteroflow).wait();
}
}
}
SUBCASE("span-fill-offset") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<num_tasks; ++i) {
auto ndata = ::rand()%4096 + 1;
auto offset = ::rand()%ndata;
auto ptr = new char[ndata];
auto span = heteroflow.span(ndata);
auto fill1 = heteroflow.fill(span, offset, ndata-offset, 'z');
auto fill2 = heteroflow.fill(span, offset, 'a');
auto push = heteroflow.copy(ptr, span, ndata);
auto host = heteroflow.host([=](){
for(auto j=0; j<offset; j++) {
REQUIRE(ptr[j] == 'a');
}
for(auto j=offset; j<ndata; j++) {
REQUIRE(ptr[j] == 'z');
}
delete [] ptr;
});
fill1.succeed(span).precede(push);
fill2.succeed(span).precede(push);
push.precede(host);
}
executor.run(heteroflow).wait();
}
}
}
SUBCASE("kernel") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<num_tasks; ++i) {
auto ndata= ::rand()%4096 + 1;
auto ptr = new char[ndata];
auto span = heteroflow.span(ndata);
auto mset = heteroflow.kernel(
(ndata+255)/256, 256, 0, k_set<char>, span, ndata, 'z'
);
auto push = heteroflow.copy(ptr, span, ndata);
auto host = heteroflow.host([=](){
for(auto j=0; j<ndata; j++) {
REQUIRE(ptr[j] == 'z');
}
delete [] ptr;
});
span.precede(mset);
mset.precede(push);
push.precede(host);
}
executor.run(heteroflow).wait();
}
}
}
SUBCASE("span-fill-kernel") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<num_tasks; ++i) {
auto ndata= ::rand()%4096 + 1;
auto ptr = new char[ndata];
auto span = heteroflow.span(ndata);
auto fill = heteroflow.fill(span, ndata, 'a');
auto mset = heteroflow.kernel(
(ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1
);
auto push = heteroflow.copy(ptr, span, ndata);
auto host = heteroflow.host([=](){
for(auto j=0; j<ndata; j++) {
REQUIRE(ptr[j] == 'b');
}
delete [] ptr;
});
span.precede(fill);
fill.precede(mset);
mset.precede(push);
push.precede(host);
}
executor.run(heteroflow).wait();
}
}
}
SUBCASE("pull-kernel-push") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<num_tasks; ++i) {
auto ndata= ::rand()%4096 + 1;
auto ofset= ::rand()%ndata;
auto ptr = new char[ndata];
std::fill_n(ptr, ndata, 'z');
auto span = heteroflow.span(ndata);
auto fill = heteroflow.fill(span, ndata, 'a');
auto mset = heteroflow.kernel(
(ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1
);
auto push = heteroflow.copy(ptr, span, ofset, ndata-ofset);
auto host = heteroflow.host([=](){
for(auto j=0; j<ndata-ofset; j++) {
REQUIRE(ptr[j] == 'b');
}
for(auto j=ndata-ofset; j<ndata; j++) {
REQUIRE(ptr[j] == 'z');
}
delete [] ptr;
});
span.precede(fill);
fill.precede(mset);
mset.precede(push);
push.precede(host);
}
executor.run(heteroflow).wait();
}
}
}
SUBCASE("from-host") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<num_tasks; ++i) {
auto ndata= ::rand()%4096 + 1;
auto ptr = new char[ndata];
std::fill_n(ptr, ndata, 'a');
auto span = heteroflow.span(ptr, ndata);
auto madd = heteroflow.kernel(
(ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1
);
auto push = heteroflow.copy(ptr, span, ndata);
auto host = heteroflow.host([=](){
for(auto j=0; j<ndata; j++) {
REQUIRE(ptr[j] == 'b');
}
delete [] ptr;
});
span.precede(madd);
madd.precede(push);
push.precede(host);
}
executor.run(heteroflow).wait();
}
}
}
}
// --------------------------------------------------------
// Testcase: h2d
// --------------------------------------------------------
TEST_CASE("h2d" * doctest::timeout(300)) {
const size_t N = 1000;
const size_t S = 64;
std::vector<std::vector<char>> res(S);
for(auto& v : res) {
v.resize(N);
}
std::vector<char> vec(N);
for(size_t i=0; i<N; ++i) {
vec[i] = ::rand()%40;
}
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow hf;
for(size_t s=0; s<S; ++s) {
std::fill_n(res[s].begin(), N, 'z');
auto span = hf.span(vec.size());
auto back = hf.copy(res[s].data(), span, N);
for(size_t i=0; i<vec.size(); ++i) {
auto copy = hf.copy(span, i, &(vec[i]), 1);
copy.succeed(span)
.precede(back);
}
}
executor.run(hf).wait();
for(size_t s=0; s<S; ++s) {
REQUIRE(vec == res[s]);
}
}
}
}
// --------------------------------------------------------
// Testcase: d2h
// --------------------------------------------------------
TEST_CASE("d2h" * doctest::timeout(300)) {
const size_t N = 1000;
const size_t S = 64;
std::vector<std::vector<char>> res(S);
for(auto& v : res) {
v.resize(N);
}
std::vector<char> vec(N);
for(size_t i=0; i<N; ++i) {
vec[i] = ::rand()%40;
}
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow hf;
for(size_t s=0; s<S; ++s) {
std::fill_n(res[s].begin(), N, 'z');
auto span = hf.span(vec.data(), N);
for(size_t i=0; i<N; ++i) {
hf.copy(&(res[s][i]), span, i, 1)
.succeed(span);
}
}
executor.run(hf).wait();
for(size_t s=0; s<S; ++s) {
REQUIRE(vec == res[s]);
}
}
}
}
// --------------------------------------------------------
// Testcase: h2d2h
// --------------------------------------------------------
TEST_CASE("h2d2h" * doctest::timeout(300)) {
const size_t N = 1000;
const size_t S = 64;
std::vector<std::vector<char>> res(S);
for(auto& v : res) {
v.resize(N);
}
std::vector<char> vec(N);
for(size_t i=0; i<N; ++i) {
vec[i] = ::rand()%40;
}
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow hf;
for(size_t s=0; s<S; ++s) {
std::fill_n(res[s].begin(), N, 'z');
auto span = hf.span(vec.size());
for(size_t i=0; i<vec.size(); ++i) {
auto h2d = hf.copy(span, i, &(vec[i]), 1);
auto d2h = hf.copy(&(res[s][i]), span, i, 1);
h2d.precede(d2h).succeed(span);
}
}
executor.run(hf).wait();
for(size_t s=0; s<S; ++s) {
REQUIRE(vec == res[s]);
}
}
}
}
// --------------------------------------------------------
// Testcase: d2d
// --------------------------------------------------------
TEST_CASE("d2d" * doctest::timeout(300)) {
SUBCASE("without-offset") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<100; ++i) {
auto ndata = ::rand()%4096 + 1;
auto data = new char[ndata];
auto span1 = heteroflow.span(ndata);
auto span2 = heteroflow.span(ndata);
auto fill1 = heteroflow.fill(span1, ndata, 'a');
auto fill2 = heteroflow.fill(span2, ndata, 'b');
auto kadd1 = heteroflow.kernel(
(ndata + 255)/256, 256, 0, k_add<char>, span1, ndata, 1
);
auto kadd2 = heteroflow.kernel(
(ndata + 255)/256, 256, 0, k_add<char>, span2, ndata, 1
);
auto trans = heteroflow.copy(
span1, span2, ndata
);
auto push1 = heteroflow.copy(data, span1, ndata);
auto test1 = heteroflow.host([data, ndata](){
for(int i=0; i<ndata; ++i) {
REQUIRE(data[i] == 'c');
}
delete [] data;
});
span1.precede(fill1);
span2.precede(fill2);
fill1.precede(kadd1);
fill2.precede(kadd2);
trans.succeed(kadd1, kadd2)
.precede(push1);
push1.precede(test1);
}
executor.run(heteroflow).wait();
}
}
}
SUBCASE("with-offset") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
for(size_t i=0; i<1024; ++i) {
auto ndata = ::rand()%4096 + 1;
auto offs1 = ::rand()%ndata;
auto offs2 = ::rand()%ndata;
auto togo = std::min(ndata-offs1, ndata-offs2);
auto data = new char[ndata];
auto span1 = heteroflow.span(ndata);
auto span2 = heteroflow.span(ndata);
auto fill1 = heteroflow.fill(span1, ndata, 'a');
auto fill2 = heteroflow.fill(span2, ndata, 'b');
auto kadd1 = heteroflow.kernel(
(ndata + 255)/256, 256, 0, k_add<char>, span1, ndata, 1
);
auto kadd2 = heteroflow.kernel(
(ndata + 255)/256, 256, 0, k_add<char>, span2, ndata, 1
);
auto trans = heteroflow.copy(
span1, offs1, span2, offs2, togo
);
auto push1 = heteroflow.copy(data, span1, ndata);
auto test1 = heteroflow.host([=](){
for(int i=0; i<offs1; ++i) {
REQUIRE(data[i] == 'b');
}
for(int i=offs1; i<offs1+togo; ++i) {
REQUIRE(data[i] == 'c');
}
for(int i=offs1+togo; i<ndata; ++i) {
REQUIRE(data[i] == 'b');
}
delete [] data;
});
span1.precede(fill1);
span2.precede(fill2);
fill1.precede(kadd1);
fill2.precede(kadd2);
trans.succeed(kadd1, kadd2)
.precede(push1);
push1.precede(test1);
}
executor.run(heteroflow).wait();
}
}
}
}
// --------------------------------------------------------
// Testcase: h2d2d2h
// --------------------------------------------------------
TEST_CASE("h2d2d2h" * doctest::timeout(300)) {
const size_t N = 1000;
const size_t S = 64;
std::vector<std::vector<char>> res(S);
for(auto& v : res) {
v.resize(N);
}
std::vector<char> vec(N);
for(size_t i=0; i<N; ++i) {
vec[i] = ::rand()%40;
}
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow hf;
for(size_t s=0; s<S; ++s) {
std::fill_n(res[s].begin(), N, 'z');
auto span1 = hf.span(vec.size());
auto span2 = hf.span(vec.size());
for(size_t i=0; i<vec.size(); ++i) {
auto h2d = hf.copy(span1, i, &(vec[i]), 1);
auto d2d = hf.copy(span2, i, span1, i, 1);
auto d2h = hf.copy(&(res[s][i]), span2, i, 1);
span1.precede(h2d);
span2.precede(d2d);
h2d.precede(d2d);
d2d.precede(d2h);
}
}
executor.run(hf).wait();
for(size_t s=0; s<S; ++s) {
REQUIRE(vec == res[s]);
}
}
}
}
// --------------------------------------------------------
// Testcase: dependent-copies
// --------------------------------------------------------
TEST_CASE("dependent-copies" * doctest::timeout(300)) {
using namespace std::literals::string_literals;
const size_t N = 1<<10;
const size_t S = 32;
std::vector<std::vector<char>> in(S);
std::vector<std::vector<char>> out(S);
std::vector<hf::CopyTask> h2d(N);
std::vector<hf::CopyTask> d2d(N);
std::vector<hf::CopyTask> d2h(N);
// randomize the in/out data
for(size_t s=0; s<S; ++s) {
in[s].resize(N);
out[s].resize(N);
for(size_t i=0; i<N; ++i) {
in[s][i] = ::rand()%26 + 'a';
out[s][i] = ::rand()%26 + 'a';
}
}
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow hf;
for(size_t s=0; s<S; ++s) {
auto span1 = hf.span(N).name("span1");
auto span2 = hf.span(N).name("span2");
// inter-tree dependency
for(size_t i=1; i<N; i++) {
h2d[i] = hf.copy(span1, i, &(in[s][i]), 1)
.name("h2d["s + std::to_string(i) + ']');
d2d[i] = hf.copy(span2, i, span1, i, 1)
.name("d2d["s + std::to_string(i) + ']');
d2h[i] = hf.copy(&(out[s][i]), span2, i, 1)
.name("d2h["s + std::to_string(i) + ']');
h2d[i].precede(d2d[i]);
d2d[i].precede(d2h[i]);
}
// tree dependency
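// The copies form a binary heap over the index i: task i precedes its children
// 2*i and 2*i+1 (slot 0 is unused), so every copy also waits for the matching
// copy of its parent node.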
span1.precede(h2d[1]);
span2.precede(h2d[1]);
for(size_t i=1; i<N; ++i) {
size_t l = i*2;
size_t r = i*2 + 1;
if(l < N) {
h2d[i].precede(h2d[l]);
d2d[i].precede(d2d[l]);
d2h[i].precede(d2h[l]);
}
if(r < N) {
h2d[i].precede(h2d[r]);
d2d[i].precede(d2d[r]);
d2h[i].precede(d2h[r]);
}
}
}
executor.run(hf).wait();
for(size_t s=0; s<S; ++s) {
for(size_t i=1; i<N; ++i) {
REQUIRE(in[s][i] == out[s][i]);
}
}
}
}
}
// --------------------------------------------------------
// Testcase: chained-kernels
// --------------------------------------------------------
TEST_CASE("chained-kernels" * doctest::timeout(300)) {
const size_t N = 1000;
const size_t S = 64;
const size_t L = 1000;
std::vector<int> vec(N, 0);
std::vector<std::vector<int>> res(S);
for(auto& v : res) {
v.resize(N);
}
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow hf;
for(size_t s=0; s<S; ++s) {
auto span = hf.span(vec.data(), N*sizeof(int));
auto copy = hf.copy(res[s].data(), span, N*sizeof(int)).name("copy");
hf::KernelTask prev, curr;
for(size_t x=0; x<L; ++x) {
curr = hf.kernel((N+16-1)/16, 16, 0, k_add<int>, span, N, 1)
.name(std::to_string(x) + "-kernel");
if(x==0) {
span.precede(curr);
}
else {
prev.precede(curr);
}
prev = curr;
}
curr.precede(copy);
auto test = hf.host([&vec=res[s]](){
for(auto item : vec) {
REQUIRE(item == L);
}
}).name("test");
copy.precede(test);
}
executor.run(hf).wait();
}
}
}
// --------------------------------------------------------
// Testcase: dependent-kernels
// --------------------------------------------------------
TEST_CASE("dependent-kernels" * doctest::timeout(300)) {
using namespace std::literals::string_literals;
const size_t N = 1<<2;
const size_t S = 1;
std::vector<std::vector<char>> in(S);
std::vector<std::vector<char>> out(S);
std::vector<hf::CopyTask> h2d(N);
std::vector<hf::CopyTask> d2d(N);
std::vector<hf::CopyTask> d2h(N);
// randomize the in/out data
for(size_t s=0; s<S; ++s) {
in[s].resize(N);
out[s].resize(N);
for(size_t i=0; i<N; ++i) {
in[s][i] = ::rand()%26 + 'a';
out[s][i] = ::rand()%26 + 'a';
}
}
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow hf;
for(size_t s=0; s<S; ++s) {
auto span1 = hf.span(N).name("span1");
auto span2 = hf.span(N).name("span2");
// inter-tree dependency
for(size_t i=1; i<N; i++) {
h2d[i] = hf.copy(span1, i, &(in[s][i]), 1)
.name("h2d["s + std::to_string(i) + ']');
d2d[i] = hf.copy(span2, i, span1, i, 1)
.name("d2d["s + std::to_string(i) + ']');
d2h[i] = hf.copy(&(out[s][i]), span2, i, 1)
.name("d2h["s + std::to_string(i) + ']');
auto k1 = hf.kernel(1, 1, 0,
k_single_add<char>, span1, N, i, 1
).name("k1["s + std::to_string(i) + ']');
auto k2 = hf.kernel(1, 1, 0,
k_single_add<char>, span2, N, i, 1
).name("k2["s + std::to_string(i) + ']');
h2d[i].precede(k1);
k1.precede(d2d[i]);
d2d[i].precede(k2);
k2.precede(d2h[i]);
}
// tree dependency
span1.precede(h2d[1]);
span2.precede(h2d[1]);
for(size_t i=1; i<N; ++i) {
size_t l = i*2;
size_t r = i*2 + 1;
if(l < N) {
h2d[i].precede(h2d[l]);
d2d[i].precede(d2d[l]);
d2h[i].precede(d2h[l]);
}
if(r < N) {
h2d[i].precede(h2d[r]);
d2d[i].precede(d2d[r]);
d2h[i].precede(d2h[r]);
}
}
}
executor.run(hf).wait();
for(size_t s=0; s<S; ++s) {
for(size_t i=1; i<N; ++i) {
REQUIRE(in[s][i] + 2 == out[s][i]);
}
}
}
}
}
// --------------------------------------------------------
// Testcase: state-transition
// --------------------------------------------------------
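// The graph below wires its span, kernel, and copy tasks through std::ref, so
// sizes, pointers, and launch dimensions are read when the tasks execute rather
// than when the graph is built; the leading host task is what fills them in.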
TEST_CASE("statefulness" * doctest::timeout(300)) {
SUBCASE("linear-chain") {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
std::vector<char> vec;
size_t size = 0;
char* data = nullptr;
dim3 grid, block;
auto host = heteroflow.host([&](){
size = 1234567;
vec.resize(size, 'a');
data = vec.data();
grid = (size+255)/256;
block = 256;
});
auto span = heteroflow.span(std::ref(data), std::ref(size));
auto kadd = heteroflow.kernel(
std::ref(grid), std::ref(block), 0, k_add<char>, span, std::ref(size), 1
);
auto push = heteroflow.copy(std::ref(data), span, std::ref(size));
auto test = heteroflow.host([&](){
REQUIRE(size == vec.size());
REQUIRE(data == vec.data());
REQUIRE(grid.x == (size+255)/256);
REQUIRE(block.x == 256);
for(auto i : vec) {
REQUIRE(i == 'b');
}
});
host.precede(span);
span.precede(kadd);
kadd.precede(push);
push.precede(test);
executor.run(heteroflow).wait();
}
}
}
}
// --------------------------------------------------------
// Testcase: run_n
// --------------------------------------------------------
TEST_CASE("run_n" * doctest::timeout(300)) {
for(size_t c=1; c<=C; ++c) {
for(size_t g=1; g<=G; ++g) {
std::atomic<size_t> counter{0};
hf::Executor executor(c, g);
hf::Heteroflow heteroflow;
const size_t ndata = 5000;
for(size_t n=0; n<2*G; ++n) {
std::vector<char> vec(ndata);
auto data = vec.data();
auto host = heteroflow.host([vec=std::move(vec)]() mutable {
for(auto& c : vec) c = 0;
});
auto span = heteroflow.span(data, ndata);
auto kadd = heteroflow.kernel(
(ndata + 255)/256, 256, 0, k_add<char>, span, ndata, 1
);
auto push = heteroflow.copy(data, span, ndata);
auto combine = heteroflow.host([&counter, data, ndata] () {
for(size_t i=0; i<ndata; ++i) {
counter += data[i];
}
});
host.precede(span);
span.precede(kadd);
kadd.precede(push);
push.precede(combine);
}
auto res = 0;
for(size_t s=0; s<25; ++s){
auto r = ::rand() % 5;
res += r;
executor.run_n(heteroflow, r).wait();
REQUIRE(counter == res*ndata*2*G);
}
}
}
}
|
the_stack
|
/*--
Sort Transform is patented by Michael Schindler under US patent 6,199,064.
However for research purposes this algorithm is included in this software.
So if you are of the type who should worry about this (making money) worry away.
The author shall have no liability with respect to the infringement of
copyrights, trade secrets or any patents by this software. In no event will
the author be liable for any lost revenue or profits or other special,
indirect and consequential damages.
Sort Transform is disabled by default and can be enabled by defining the
preprocessor macro LIBBSC_SORT_TRANSFORM_SUPPORT at compile time.
--*/
#if defined(LIBBSC_SORT_TRANSFORM_SUPPORT) && defined(LIBBSC_CUDA_SUPPORT)
#if defined(_MSC_VER)
#pragma warning(disable : 4267)
#endif
#include <stdlib.h>
#include <memory.h>
#include "st.cuh"
#include "../libbsc.h"
#include "../platform/platform.h"
#include <cuda_runtime_api.h>
#include <device_functions.h>
#include "b40c/radix_sort/enactor.cuh"
#include "b40c/util/multi_buffer.cuh"
#ifdef LIBBSC_OPENMP
omp_lock_t cuda_lock;
int bsc_st_cuda_init(int features)
{
omp_init_lock(&cuda_lock);
return LIBBSC_NO_ERROR;
}
#else
int bsc_st_cuda_init(int features)
{
return LIBBSC_NO_ERROR;
}
#endif
#ifndef __CUDA_ARCH__
#define CUDA_DEVICE_ARCH 0
#else
#define CUDA_DEVICE_ARCH __CUDA_ARCH__
#endif
#define CUDA_DEVICE_PADDING 1024
#define CUDA_NUM_THREADS_IN_BLOCK 192
#define CUDA_CTA_OCCUPANCY_SM30 10
#define CUDA_CTA_OCCUPANCY_SM20 8
#define CUDA_CTA_OCCUPANCY_SM12 5
#define CUDA_CTA_OCCUPANCY_SM10 4
#define CUDA_CTA_OCCUPANCY(v) (v >= 300 ? CUDA_CTA_OCCUPANCY_SM30 : v >= 200 ? CUDA_CTA_OCCUPANCY_SM20 : v >= 120 ? CUDA_CTA_OCCUPANCY_SM12 : CUDA_CTA_OCCUPANCY_SM10)
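// Example sizing: on an SM 3.0 GPU with 8 multiprocessors, bsc_st_encode_cuda
// launches CUDA_CTA_OCCUPANCY(300) * 8 = 10 * 8 = 80 blocks of
// CUDA_NUM_THREADS_IN_BLOCK (192) threads, and never more blocks than are
// needed to cover the n input positions.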
cudaError_t bsc_cuda_safe_call(const char * filename, int line, cudaError_t result, cudaError_t status = cudaSuccess)
{
if (result != cudaSuccess)
{
fprintf(stderr, "\n%s(%d): bsc_cuda_safe_call failed %d: '%s'.", filename, line, result, cudaGetErrorString(result));
fflush(stderr);
}
return result != cudaSuccess ? result : status;
}
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK, CUDA_CTA_OCCUPANCY(CUDA_DEVICE_ARCH))
void bsc_st567_encode_cuda_presort(unsigned char * RESTRICT T_device, unsigned long long * RESTRICT K_device, int n)
{
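// Each thread builds the 64-bit sort key for position `index`: the byte that
// precedes it followed by the next seven input bytes (T[index-1 .. index+6]).
// Reads past either end of the data land in the cyclic padding that
// bsc_st_encode_cuda copies around the buffer.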
__shared__ unsigned int staging[1 + CUDA_NUM_THREADS_IN_BLOCK + 7];
unsigned int * RESTRICT thread_staging = &staging[threadIdx.x];
for (int grid_size = gridDim.x * CUDA_NUM_THREADS_IN_BLOCK, block_start = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK; block_start < n; block_start += grid_size)
{
int index = block_start + threadIdx.x;
{
thread_staging[1 ] = T_device[index ];
if (threadIdx.x < 7 ) thread_staging[1 + CUDA_NUM_THREADS_IN_BLOCK] = T_device[index + CUDA_NUM_THREADS_IN_BLOCK]; else
if (threadIdx.x == 7) thread_staging[-7 ] = T_device[index - 8 ];
syncthreads();
}
{
#if CUDA_DEVICE_ARCH >= 200
unsigned int lo = __byte_perm(thread_staging[4], thread_staging[5], 0x0411) | __byte_perm(thread_staging[6], thread_staging[7], 0x1104);
unsigned int hi = __byte_perm(thread_staging[0], thread_staging[1], 0x0411) | __byte_perm(thread_staging[2], thread_staging[3], 0x1104);
#else
unsigned int lo = (thread_staging[4] << 24) | (thread_staging[5] << 16) | (thread_staging[6] << 8) | thread_staging[7];
unsigned int hi = (thread_staging[0] << 24) | (thread_staging[1] << 16) | (thread_staging[2] << 8) | thread_staging[3];
#endif
K_device[index] = (((unsigned long long)hi) << 32) | ((unsigned long long)lo);
syncthreads();
}
}
}
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK, CUDA_CTA_OCCUPANCY(CUDA_DEVICE_ARCH))
void bsc_st8_encode_cuda_presort(unsigned char * RESTRICT T_device, unsigned long long * RESTRICT K_device, unsigned char * RESTRICT V_device, int n)
{
__shared__ unsigned int staging[1 + CUDA_NUM_THREADS_IN_BLOCK + 8];
unsigned int * RESTRICT thread_staging = &staging[threadIdx.x];
for (int grid_size = gridDim.x * CUDA_NUM_THREADS_IN_BLOCK, block_start = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK; block_start < n; block_start += grid_size)
{
int index = block_start + threadIdx.x;
{
thread_staging[1 ] = T_device[index ];
if (threadIdx.x < 8 ) thread_staging[1 + CUDA_NUM_THREADS_IN_BLOCK] = T_device[index + CUDA_NUM_THREADS_IN_BLOCK]; else
if (threadIdx.x == 8) thread_staging[-8 ] = T_device[index - 9 ];
syncthreads();
}
{
#if CUDA_DEVICE_ARCH >= 200
unsigned int lo = __byte_perm(thread_staging[5], thread_staging[6], 0x0411) | __byte_perm(thread_staging[7], thread_staging[8], 0x1104);
unsigned int hi = __byte_perm(thread_staging[1], thread_staging[2], 0x0411) | __byte_perm(thread_staging[3], thread_staging[4], 0x1104);
#else
unsigned int lo = (thread_staging[5] << 24) | (thread_staging[6] << 16) | (thread_staging[7] << 8) | thread_staging[8];
unsigned int hi = (thread_staging[1] << 24) | (thread_staging[2] << 16) | (thread_staging[3] << 8) | thread_staging[4];
#endif
K_device[index] = (((unsigned long long)hi) << 32) | ((unsigned long long)lo); V_device[index] = thread_staging[0];
syncthreads();
}
}
}
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK, CUDA_CTA_OCCUPANCY(CUDA_DEVICE_ARCH))
void bsc_st567_encode_cuda_postsort(unsigned char * RESTRICT T_device, unsigned long long * RESTRICT K_device, int n, unsigned long long lookup, int * RESTRICT I_device)
{
int min_index = n;
for (int grid_size = gridDim.x * CUDA_NUM_THREADS_IN_BLOCK, block_start = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK; block_start < n; block_start += grid_size)
{
int index = block_start + threadIdx.x;
{
unsigned long long value = K_device[index];
{
if (value == lookup && index < min_index) min_index = index;
T_device[index] = (unsigned char)(value >> 56);
}
}
}
syncthreads(); if (min_index != n) atomicMin(I_device, min_index);
}
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK, CUDA_CTA_OCCUPANCY(CUDA_DEVICE_ARCH))
void bsc_st8_encode_cuda_postsort(unsigned long long * RESTRICT K_device, int n, unsigned long long lookup, int * RESTRICT I_device)
{
int min_index = n;
for (int grid_size = gridDim.x * CUDA_NUM_THREADS_IN_BLOCK, block_start = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK; block_start < n; block_start += grid_size)
{
int index = block_start + threadIdx.x;
{
if (K_device[index] == lookup && index < min_index) min_index = index;
}
}
syncthreads(); if (min_index != n) atomicMin(I_device, min_index);
}
int bsc_st567_encode_cuda(unsigned char * T, unsigned char * T_device, int n, int num_blocks, int k)
{
int index = LIBBSC_GPU_NOT_ENOUGH_MEMORY;
{
unsigned long long * K_device = NULL;
unsigned long long * K_device_sorted = NULL;
if (bsc_cuda_safe_call(__FILE__, __LINE__, cudaMalloc((void **)&K_device, 2 * (n + 2 * CUDA_DEVICE_PADDING) * sizeof(unsigned long long))) == cudaSuccess)
{
index = LIBBSC_GPU_ERROR;
bsc_st567_encode_cuda_presort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK>>>(T_device, K_device, n);
cudaError_t status = cudaSuccess;
{
b40c::util::DoubleBuffer<unsigned long long> storage;
{
storage.d_keys[storage.selector ^ 0] = K_device;
storage.d_keys[storage.selector ^ 1] = K_device + ((n + 2 * CUDA_DEVICE_PADDING) / CUDA_DEVICE_PADDING) * CUDA_DEVICE_PADDING;
}
{
b40c::radix_sort::Enactor enactor;
if (k == 5) status = enactor.Sort<b40c::radix_sort::LARGE_PROBLEM, 40, 16>(storage, n);
if (k == 6) status = enactor.Sort<b40c::radix_sort::LARGE_PROBLEM, 48, 8>(storage, n);
if (k == 7) status = enactor.Sort<b40c::radix_sort::LARGE_PROBLEM, 56, 0>(storage, n);
K_device_sorted = storage.d_keys[storage.selector];
}
}
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
unsigned long long lookup;
{
unsigned int lo = (T[3 ] << 24) | (T[4] << 16) | (T[5] << 8) | T[6];
unsigned int hi = (T[n - 1] << 24) | (T[0] << 16) | (T[1] << 8) | T[2];
lookup = (((unsigned long long)hi) << 32) | ((unsigned long long)lo);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(T_device - sizeof(int), &n, sizeof(int), cudaMemcpyHostToDevice), status);
}
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
bsc_st567_encode_cuda_postsort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK>>>(T_device, K_device_sorted, n, lookup, (int *)(T_device - sizeof(int)));
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFree(K_device), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(T_device + n, T_device - sizeof(int), sizeof(int), cudaMemcpyDeviceToDevice), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(T, T_device, n + sizeof(int), cudaMemcpyDeviceToHost), status);
}
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
index = *(int *)(T + n);
}
return index;
}
}
cudaFree(K_device);
}
}
return index;
}
int bsc_st8_encode_cuda(unsigned char * T, unsigned char * T_device, int n, int num_blocks)
{
int index = LIBBSC_GPU_NOT_ENOUGH_MEMORY;
{
unsigned char * V_device = NULL;
unsigned char * V_device_sorted = NULL;
if (bsc_cuda_safe_call(__FILE__, __LINE__, cudaMalloc((void **)&V_device, 2 * (n + 2 * CUDA_DEVICE_PADDING) * sizeof(unsigned char))) == cudaSuccess)
{
unsigned long long * K_device = NULL;
unsigned long long * K_device_sorted = NULL;
if (bsc_cuda_safe_call(__FILE__, __LINE__, cudaMalloc((void **)&K_device, 2 * (n + 2 * CUDA_DEVICE_PADDING) * sizeof(unsigned long long))) == cudaSuccess)
{
index = LIBBSC_GPU_ERROR;
bsc_st8_encode_cuda_presort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK>>>(T_device, K_device, V_device, n);
cudaError_t status = cudaSuccess;
{
b40c::util::DoubleBuffer<unsigned long long, unsigned char> storage;
{
storage.d_keys [storage.selector ^ 0] = K_device;
storage.d_keys [storage.selector ^ 1] = K_device + ((n + 2 * CUDA_DEVICE_PADDING) / CUDA_DEVICE_PADDING) * CUDA_DEVICE_PADDING;
storage.d_values[storage.selector ^ 0] = V_device;
storage.d_values[storage.selector ^ 1] = V_device + ((n + 2 * CUDA_DEVICE_PADDING) / CUDA_DEVICE_PADDING) * CUDA_DEVICE_PADDING;
}
{
b40c::radix_sort::Enactor enactor;
status = enactor.Sort(storage, n);
K_device_sorted = storage.d_keys[storage.selector];
V_device_sorted = storage.d_values[storage.selector];
}
}
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
unsigned long long lookup;
{
unsigned int lo = (T[4] << 24) | (T[5] << 16) | (T[6] << 8) | T[7];
unsigned int hi = (T[0] << 24) | (T[1] << 16) | (T[2] << 8) | T[3];
lookup = (((unsigned long long)hi) << 32) | ((unsigned long long)lo);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(V_device_sorted + ((n + sizeof(int) - 1) / sizeof(int)) * sizeof(int), &n, sizeof(int), cudaMemcpyHostToDevice), status);
}
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
bsc_st8_encode_cuda_postsort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK>>>(K_device_sorted, n, lookup, (int *)(V_device_sorted + ((n + sizeof(int) - 1) / sizeof(int)) * sizeof(int)));
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(T, V_device_sorted, n + 2 * sizeof(int), cudaMemcpyDeviceToHost), status);
}
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFree(K_device), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFree(V_device), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
index = *(int *)(T + ((n + sizeof(int) - 1) / sizeof(int)) * sizeof(int));
}
return index;
}
}
cudaFree(K_device);
}
cudaFree(V_device);
}
}
return index;
}
int bsc_st_encode_cuda(unsigned char * T, int n, int k, int features)
{
if ((T == NULL) || (n < 0)) return LIBBSC_BAD_PARAMETER;
if ((k < 5) || (k > 8)) return LIBBSC_BAD_PARAMETER;
if (n <= 1) return 0;
int num_blocks = 1;
{
cudaDeviceProp deviceProperties;
{
int deviceId; if (cudaGetDevice(&deviceId) != cudaSuccess || cudaGetDeviceProperties(&deviceProperties, deviceId) != cudaSuccess)
{
return LIBBSC_GPU_NOT_SUPPORTED;
}
}
if (deviceProperties.major * 10 + deviceProperties.minor <= 10) return LIBBSC_GPU_NOT_SUPPORTED;
num_blocks = CUDA_CTA_OCCUPANCY(deviceProperties.major * 100 + deviceProperties.minor * 10) * deviceProperties.multiProcessorCount;
if (num_blocks > ((n + CUDA_NUM_THREADS_IN_BLOCK - 1) / CUDA_NUM_THREADS_IN_BLOCK)) num_blocks = (n + CUDA_NUM_THREADS_IN_BLOCK - 1) / CUDA_NUM_THREADS_IN_BLOCK;
if (num_blocks <= 0) num_blocks = 1;
}
#ifdef LIBBSC_OPENMP
omp_set_lock(&cuda_lock);
#endif
int index = LIBBSC_GPU_NOT_ENOUGH_MEMORY;
{
unsigned char * T_device = NULL;
if (cudaMalloc((void **)&T_device, n + 2 * CUDA_DEVICE_PADDING) == cudaSuccess)
{
index = LIBBSC_GPU_ERROR;
cudaError_t status = cudaSuccess;
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(T_device + CUDA_DEVICE_PADDING , T , n , cudaMemcpyHostToDevice ), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(T_device + CUDA_DEVICE_PADDING + n, T_device + CUDA_DEVICE_PADDING, CUDA_DEVICE_PADDING, cudaMemcpyDeviceToDevice), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpy(T_device , T_device + n , CUDA_DEVICE_PADDING, cudaMemcpyDeviceToDevice), status);
if (status == cudaSuccess)
{
if (k >= 5 && k <= 7) index = bsc_st567_encode_cuda(T, T_device + CUDA_DEVICE_PADDING, n, num_blocks, k);
if (k == 8) index = bsc_st8_encode_cuda (T, T_device + CUDA_DEVICE_PADDING, n, num_blocks );
}
cudaFree(T_device);
}
}
#ifdef LIBBSC_OPENMP
omp_unset_lock(&cuda_lock);
#endif
return index;
}
#endif
/*-----------------------------------------------------------*/
/* End st.cu */
/*-----------------------------------------------------------*/
|
the_stack
|
// Description
//======================================================================================================================================================150
// USE
//======================================================================================================================================================150
// EXAMPLE:
// ./b+tree file ./input/mil.txt command ./command.txt
// ...then enter any of the following commands after the prompt > :
// f <x> -- Find the value under key <x>
// p <x> -- Print the path from the root to key <x> and its associated value
// t -- Print the B+ tree
// l -- Print the keys of the leaves (bottom row of the tree)
// v -- Toggle output of pointer addresses ("verbose") in tree and leaves.
// k <x> -- Run <x> bundled queries on the CPU and GPU (B+Tree) (Selects random values for each search)
// j <x> <y> -- Run a range search of <x> bundled queries on the CPU and GPU (B+Tree) with the range of each search of size <y>
// x <z> -- Run a single search for value z on the GPU and CPU
// y <a> <b> -- Run a single range search for range a-b on the GPU and CPU
// q -- Quit. (Or use Ctrl-D.)
//======================================================================================================================================================150
// END
//======================================================================================================================================================150
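// EXAMPLE SESSION (illustrative; the key values below are hypothetical):
// $ ./b+tree file ./input/mil.txt command ./command.txt
// > f 12345 finds the record stored under key 12345, if any
// > k 1000 runs 1000 bundled random queries on the CPU and the GPU
// > j 1000 6 runs 1000 bundled range queries, each spanning 6 keys
// > q quits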
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <stdio.h> // (in directory known to compiler) needed by printf, stderr
#include <limits.h> // (in directory known to compiler) needed by INT_MIN, INT_MAX
#include <math.h> // (in directory known to compiler) needed by log, pow
#include <string.h> // (in directory known to compiler) needed by memset
#include <sys/time.h> // (in directory known to compiler) needed by gettimeofday
#include <hip/hip_runtime.h>
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "./common.h" // (in directory provided here)
//======================================================================================================================================================150
// DEFINE
//======================================================================================================================================================150
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "./util/timer/timer.h" // (in directory provided here)
#include "./util/num/num.h" // (in directory provided here)
//======================================================================================================================================================150
// KERNEL HEADERS
//======================================================================================================================================================150
#include "./kernel/kernel_wrapper.h" // (in directory provided here)
#include "./kernel/kernel2_wrapper.h" // (in directory provided here)
//======================================================================================================================================================150
// HEADER
//======================================================================================================================================================150
#include "./main.h" // (in directory provided here)
//======================================================================================================================================================150
// END
//======================================================================================================================================================150
//========================================================================================================================================================================================================200
// VARIABLES
//========================================================================================================================================================================================================200
// general variables
knode *knodes;
record *krecords;
char *mem;
long freeptr;
long malloc_size;
long size;
long maxheight;
/* The order determines the maximum and minimum
* number of entries (keys and pointers) in any
* node. Every node has at most order - 1 keys and
* at least (roughly speaking) half that number.
* Every leaf has as many pointers to data as keys,
* and every internal node has one more pointer
* to a subtree than the number of keys.
* This global variable is initialized to the
* default value.
*/
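/* Illustration (hypothetical order of 4): every node then holds at most
 * 3 keys; an internal node with k keys has k + 1 child pointers (so at
 * most 4 children), and a leaf with k keys points to k data records.
 */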
int order = DEFAULT_ORDER;
/* The queue is used to print the tree in
* level order, starting from the root
* printing each entire rank on a separate
* line, finishing with the leaves.
*/
node *queue = NULL;
/* The user can toggle on and off the "verbose"
* property, which causes the pointer addresses
* to be printed out in hexadecimal notation
* next to their corresponding keys.
*/
bool verbose_output = false;
//========================================================================================================================================================================================================200
// FUNCTIONS
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// Components
//======================================================================================================================================================150
void
list_init( list_t *l,
int32_t (*compare)(const void *key,
const void *with),
void (*datum_delete)(void *))
{
l->head = l->tail = NULL;
l->length = 0;
l->compare = compare;
l->datum_delete = datum_delete;
}
void
list_delete(list_t *l)
{
list_item_t *li, *del;
for (li = l->head; li;) {
del = li;
li = li->next;
list_item_delete(del, l->datum_delete);
}
l->head = l->tail = NULL;
l->length = 0;
}
void
list_reset(list_t *l)
{
list_delete(l);
}
void
list_insert_item_head( list_t *l,
list_item_t *i)
{
if (l->head) {
i->next = l->head;
l->head->pred = i;
l->head = i;
l->head->pred = NULL;
} else {
l->head = l->tail = i;
i->next = i->pred = NULL;
}
l->length++;
}
void
list_insert_item_tail( list_t *l,
list_item_t *i)
{
if (l->head) {
l->tail->next = i;
i->pred = l->tail;
i->next = NULL;
l->tail = i;
} else {
l->head = l->tail = i;
i->next = i->pred = NULL;
}
l->length++;
}
void
list_insert_item_before(list_t *l,
list_item_t *next,
list_item_t *i)
{
/* Assume next is actually in the list! */
/* If it's not, we may lose the list. */
if (l->head == next) {
i->next = next;
i->pred = NULL;
l->head = i;
next->pred = i;
} else {
i->next = next;
i->pred = next->pred;
next->pred->next = i;
next->pred = i;
}
l->length++;
}
void
list_insert_item_after( list_t *l,
list_item_t *pred,
list_item_t *i)
{
/* Assume pred is actually in the list! */
/* If it's not, we may lose the list. */
if (l->tail == pred) {
i->pred = pred;
i->next = NULL;
l->tail = i;
pred->next = i;
} else {
i->pred = pred;
i->next = pred->next;
pred->next->pred = i;
pred->next = i;
}
l->length++;
}
void
list_insert_item_sorted(list_t *l,
list_item_t *i)
{
list_item_t *itr;
if (l->head) {
for (itr = l->head; itr && l->compare(list_item_get_datum(i),
list_item_get_datum(itr)) < 0;
itr = itr->next)
;
if (itr) {
i->next = itr;
i->pred = itr->pred;
itr->pred = i;
i->pred->next = i;
} else {
l->tail->next = i;
i->pred = l->tail;
i->next = NULL;
l->tail = i;
}
} else {
l->head = l->tail = i;
i->pred = i->next = NULL;
}
l->length++;
}
void
list_insert_head( list_t *l,
void *v)
{
list_item_t *i;
i = (list_item_t *)malloc(sizeof (*i));
list_item_init(i, v);
if (l->head) {
i->next = l->head;
l->head->pred = i;
l->head = i;
l->head->pred = NULL;
} else {
l->head = l->tail = i;
i->next = i->pred = NULL;
}
l->length++;
}
void
list_insert_tail( list_t *l,
void *v)
{
list_item_t *i;
i = (list_item_t *)malloc(sizeof (*i));
list_item_init(i, v);
if (l->head) {
l->tail->next = i;
i->pred = l->tail;
i->next = NULL;
l->tail = i;
} else {
l->head = l->tail = i;
i->next = i->pred = NULL;
}
l->length++;
}
void
list_insert_before( list_t *l,
list_item_t *next,
void *v)
{
list_item_t *i;
i = (list_item_t *)malloc(sizeof (*i));
list_item_init(i, v);
/* Assume next is actually in the list! */
/* If it's not, we may lose the list. */
if (l->head == next) {
i->next = next;
i->pred = NULL;
l->head = i;
next->pred = i;
} else {
i->next = next;
i->pred = next->pred;
next->pred->next = i;
next->pred = i;
}
l->length++;
}
void
list_insert_after( list_t *l,
list_item_t *pred,
void *v)
{
list_item_t *i;
i = (list_item_t *)malloc(sizeof (*i));
list_item_init(i, v);
/* Assume pred is actually in the list! */
/* If it's not, we may lose the list. */
if (l->tail == pred) {
i->pred = pred;
i->next = NULL;
l->tail = i;
pred->next = i;
} else {
i->pred = pred;
i->next = pred->next;
pred->next->pred = i;
pred->next = i;
}
l->length++;
}
void
list_insert_sorted( list_t *l,
void *v)
{
list_item_t *itr;
list_item_t *i;
i = (list_item_t *)malloc(sizeof (*i));
list_item_init(i, v);
if (l->head) {
for (itr = l->head; itr && l->compare(list_item_get_datum(i),
list_item_get_datum(itr)) < 0;
itr = itr->next)
;
if (itr) {
i->next = itr;
i->pred = itr->pred;
itr->pred = i;
i->pred->next = i;
} else {
l->tail->next = i;
i->pred = l->tail;
i->next = NULL;
l->tail = i;
}
} else {
l->head = l->tail = i;
i->pred = i->next = NULL;
}
l->length++;
}
void
list_remove_item( list_t *l,
list_item_t *i)
{
if (i == l->head) {
l->head = l->head->next;
if (l->head)
l->head->pred = NULL;
else
l->tail = NULL;
} else if (i == l->tail) {
l->tail = l->tail->pred;
l->tail->next = NULL;
} else {
i->pred->next = i->next;
i->next->pred = i->pred;
}
l->length--;
list_item_delete(i, l->datum_delete);
}
void
list_remove_head(list_t *l)
{
list_remove_item(l, l->head);
}
void
list_remove_tail(list_t *l)
{
list_remove_item(l, l->tail);
}
list_item_t*
list_find_item( list_t *l,
void *datum)
{
list_item_t *li;
for (li = l->head; li && l->compare(datum, list_item_get_datum(li));
li = li->next)
;
return li;
}
list_item_t*
list_get_head_item(list_t *l)
{
return l->head;
}
list_item_t*
list_get_tail_item(list_t *l)
{
return l->tail;
}
void*
list_find( list_t *l,
void *datum)
{
list_item_t *li;
for (li = l->head; li && l->compare(datum, list_item_get_datum(li));
li = li->next)
;
return li ? li->datum : NULL;
}
void*
list_get_head(list_t *l)
{
return l->head ? l->head->datum : NULL;
}
void*
list_get_tail(list_t *l)
{
return l->tail ? l->tail->datum : NULL;
}
uint32_t
list_get_length(list_t *l)
{
return l->length;
}
bool
list_is_empty(list_t *l)
{
return (l->length == 0);
}
bool
list_not_empty(list_t *l)
{
return (l->length != 0);
}
void
list_visit_items( list_t *l,
void (*visitor)(void *v))
{
list_item_t *li;
for (li = l->head; li; li = li->next)
visitor(list_item_get_datum(li));
}
void
list_item_init( list_item_t *li,
void *datum)
{
li->pred = li->next = NULL;
li->datum = datum;
}
void
list_item_delete( list_item_t *li,
void (*datum_delete)(void *datum))
{
if (datum_delete) {
datum_delete(li->datum);
}
free(li);
}
void *
list_item_get_datum(list_item_t *li)
{
return li->datum;
}
void
list_iterator_init( list_t *l,
list_iterator_t *li)
{
*li = l ? l->head : NULL;
}
void
list_iterator_delete(list_iterator_t *li)
{
*li = NULL;
}
void
list_iterator_next(list_iterator_t *li)
{
if (*li)
*li = (*li)->next;
}
void
list_iterator_prev(list_iterator_t *li)
{
if (*li)
*li = (*li)->pred;
}
void *
list_iterator_get_datum(list_iterator_t *li)
{
return *li ? (*li)->datum : NULL;
}
bool
list_iterator_is_valid(list_iterator_t *li)
{
return (*li != NULL);
}
void
list_reverse_iterator_init( list_t *l,
list_reverse_iterator_t *li)
{
*li = l ? l->tail : NULL;
}
void
list_reverse_iterator_delete(list_reverse_iterator_t *li)
{
*li = NULL;
}
void
list_reverse_iterator_next(list_reverse_iterator_t *li)
{
if (*li)
*li = (*li)->pred;
}
void
list_reverse_iterator_prev(list_reverse_iterator_t *li)
{
if (*li)
*li = (*li)->next;
}
void *
list_reverse_iterator_get_datum(list_reverse_iterator_t *li)
{
return *li ? (*li)->datum : NULL;
}
bool
list_reverse_iterator_is_valid(list_reverse_iterator_t *li)
{
return (*li != NULL); // dereference the iterator handle, matching list_iterator_is_valid
}
//======================================================================================================================================================150
// OUTPUT AND UTILITIES
//======================================================================================================================================================150
/* */
void *
kmalloc(int size)
{
//printf("size: %d, current offset: %p\n",size,freeptr);
void * r = (void *)freeptr;
freeptr+=size;
if(freeptr > malloc_size+(long)mem){
printf("Memory Overflow\n");
exit(1);
}
return r;
}
//transforms the current B+ Tree into a single, contiguous block of memory to be used on the GPU
long
transform_to_cuda( node * root,
bool verbose)
{
struct timeval one,two;
double time;
gettimeofday (&one, NULL);
long max_nodes = (long)(pow(order,log(size)/log(order/2.0)-1) + 1);
malloc_size = size*sizeof(record) + max_nodes*sizeof(knode);
mem = (char*)malloc(malloc_size);
if(mem==NULL){
printf("Initial malloc error\n");
exit(1);
}
freeptr = (long)mem;
krecords = (record * )kmalloc(size*sizeof(record));
// printf("%d records\n", size);
knodes = (knode *)kmalloc(max_nodes*sizeof(knode));
// printf("%d knodes\n", max_nodes);
queue = NULL;
enqueue(root);
node * n;
knode * k;
int i;
long nodeindex = 0;
long recordindex = 0;
long queueindex = 0;
knodes[0].location = nodeindex++;
while( queue != NULL ) {
n = dequeue();
k = &knodes[queueindex];
k->location = queueindex++;
k->is_leaf = n->is_leaf;
k->num_keys = n->num_keys+2;
//start at 1 because 0 is set to INT_MIN
k->keys[0]=INT_MIN;
k->keys[k->num_keys-1]=INT_MAX;
for(i=k->num_keys; i < order; i++)k->keys[i]=INT_MAX;
if(!k->is_leaf){
k->indices[0]=nodeindex++;
// if(k->indices[0]>3953){
// printf("ERROR: %d\n", k->indices[0]);
// }
for(i=1;i<k->num_keys-1;i++){
k->keys[i] = n->keys[i-1];
enqueue((node * )n->pointers[i-1]);
k->indices[i] = nodeindex++;
// if(k->indices[i]>3953){
// printf("ERROR 1: %d\n", k->indices[i]);
// }
//knodes[nodeindex].location = nodeindex++;
}
//enqueue the final pointer of n
enqueue((node * )n->pointers[i-1]);
}
else{
k->indices[0]=0;
for(i=1;i<k->num_keys-1;i++){
k->keys[i] = n->keys[i-1];
krecords[recordindex].value=((record *)n->pointers[i-1])->value;
k->indices[i] = recordindex++;
// if(k->indices[i]>3953){
// printf("ERROR 2: %d\n", k->indices[i]);
// }
}
}
k->indices[k->num_keys-1]=queueindex;
// if(k->indices[k->num_keys-1]>3953){
// printf("ERROR 3: %d\n", k->indices[k->num_keys-1]);
// }
if(verbose){
printf("Successfully created knode with index %d\n", k->location);
printf("Is Leaf: %d, Num Keys: %d\n", k->is_leaf, k->num_keys);
printf("Pointers: ");
for(i=0;i<k->num_keys;i++)
printf("%d | ", k->indices[i]);
printf("\nKeys: ");
for(i=0;i<k->num_keys;i++)
printf("%d | ", k->keys[i]);
printf("\n\n");
}
}
long mem_used = size*sizeof(record)+(nodeindex)*sizeof(knode);
if(verbose){
for(i = 0; i < size; i++)
printf("%d ", krecords[i].value);
printf("\nNumber of records = %ld, sizeof(record)=%lu, total=%lu\n",size,sizeof(record),size*sizeof(record));
printf("Number of knodes = %ld, sizeof(knode)=%lu, total=%lu\n",nodeindex,sizeof(knode),(nodeindex)*sizeof(knode));
printf("\nDone Transformation. Mem used: %ld\n", mem_used);
}
gettimeofday (&two, NULL);
double oneD = one.tv_sec + (double)one.tv_usec * .000001;
double twoD = two.tv_sec + (double)two.tv_usec * .000001;
time = twoD-oneD;
printf("Tree transformation took %f\n", time);
return mem_used;
}
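/* Layout note (summary of the function above): after transform_to_cuda() the
 * flat buffer `mem` holds all records first, followed by all knodes:
 *   mem: [ record[0] .. record[size-1] | knode[0] .. knode[nodeindex-1] ]
 * main() later recovers the two regions from the byte offset of `knodes`
 * within `mem` (rootLoc) and the returned mem_used value. */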
/* */
list_t *
findRange( node * root,
int start,
int end)
{
int i;
node * c = find_leaf( root, start, false );
if (c == NULL) return NULL;
list_t * retList = (list_t *)malloc(sizeof(list_t));
list_init(retList,NULL,NULL);
int counter = 0;
bool done = false;
while(!done && c!=NULL){
for(i = 0;i < c->num_keys;i++){
if(c->keys[i] > end){
done = true;
break;
}
if(c->keys[i] >= start){
list_insert_tail(retList,(record *)c->pointers[i]);
counter++;
}
}
c = (node *)c->pointers[order-1];
}
return retList;
}
/* First message to the user. */
void
usage_1( void )
{
printf("B+ Tree of Order %d.\n", order);
printf("\tAmittai Aviram -- amittai.aviram@yale.edu Version %s\n", Version);
printf("\tfollowing Silberschatz, Korth, Sidarshan, Database Concepts, 5th ed.\n\n");
printf("To build a B+ tree of a different order, start again and enter the order\n");
printf("as an integer argument: bpt <order>. ");
printf("3 <= order <=20\n");
printf("To start with input from a file of newline-delimited integers, start again and enter\n");
printf("the order followed by the filename: bpt <order> <inputfile>.\n");
}
/* Second message to the user. */
void
usage_2( void )
{
printf("Enter any of the following commands after the prompt > :\n");
printf("\ti <k> -- Insert <k> (an integer) as both key and value).\n");
printf("\tf <k> -- Find the value under key <k>.\n");
printf("\tp <k> -- Print the path from the root to key k and its associated value.\n");
printf("\td <k> -- Delete key <k> and its associated value.\n");
printf("\tx -- Destroy the whole tree. Start again with an empty tree of the same order.\n");
printf("\tt -- Print the B+ tree.\n");
printf("\tl -- Print the keys of the leaves (bottom row of the tree).\n");
printf("\tv -- Toggle output of pointer addresses (\"verbose\") in tree and leaves.\n");
printf("\tq -- Quit. (Or use Ctl-D.)\n");
printf("\t? -- Print this help message.\n");
}
/* Helper function for printing the tree out. See print_tree. */
void
enqueue( node* new_node )
{
node * c;
if (queue == NULL) {
queue = new_node;
queue->next = NULL;
}
else {
c = queue;
while(c->next != NULL) {
c = c->next;
}
c->next = new_node;
new_node->next = NULL;
}
}
/* Helper function for printing the tree out. See print_tree. */
node *
dequeue( void )
{
node * n = queue;
queue = queue->next;
n->next = NULL;
return n;
}
/* Prints the bottom row of keys of the tree (with their respective pointers, if the verbose_output flag is set). */
void
print_leaves( node* root )
{
int i;
node * c = root;
if (root == NULL) {
printf("Empty tree.\n");
return;
}
while (!c->is_leaf)
c = (node *) c->pointers[0];
while (true) {
for (i = 0; i < c->num_keys; i++) {
if (verbose_output)
printf("%p ", c->pointers[i]);
printf("%d ", c->keys[i]);
}
if (verbose_output)
printf("%p ", c->pointers[order - 1]);
if (c->pointers[order - 1] != NULL) {
printf(" | ");
c = (node *) c->pointers[order - 1];
}
else
break;
}
printf("\n");
}
/* Utility function to give the height of the tree, i.e. the length, in number of edges, of the path from the root to any leaf. */
int
height( node* root )
{
int h = 0;
node * c = root;
while (!c->is_leaf) {
c = (node *) c->pointers[0];
h++;
}
return h;
}
/* Utility function to give the length in edges of the path from any node to the root. */
int
path_to_root( node* root, node* child )
{
int length = 0;
node * c = child;
while (c != root) {
c = c->parent;
length++;
}
return length;
}
/* Prints the B+ tree in the command line in level (rank) order, with the keys in each node and the '|' symbol to separate nodes. With the verbose_output flag set, the values of the pointers corresponding to the keys also appear next to their respective keys, in hexadecimal notation. */
void
print_tree( node* root )
{
node * n = NULL;
int i = 0;
int rank = 0;
int new_rank = 0;
if (root == NULL) {
printf("Empty tree.\n");
return;
}
queue = NULL;
enqueue(root);
while( queue != NULL ) {
n = dequeue();
if (n->parent != NULL && n == n->parent->pointers[0]) {
new_rank = path_to_root( root, n );
if (new_rank != rank) {
rank = new_rank;
printf("\n");
}
}
if (verbose_output)
printf("(%p)", (void *)n);
for (i = 0; i < n->num_keys; i++) {
if (verbose_output)
printf("%p ", n->pointers[i]);
printf("%d ", n->keys[i]);
}
if (!n->is_leaf)
for (i = 0; i <= n->num_keys; i++)
enqueue((node *) n->pointers[i]);
if (verbose_output) {
if (n->is_leaf)
printf("%p ", n->pointers[order - 1]);
else
printf("%p ", n->pointers[n->num_keys]);
}
printf("| ");
}
printf("\n");
}
/* Traces the path from the root to a leaf, searching by key. Displays information about the path if the verbose flag is set. Returns the leaf containing the given key. */
node *
find_leaf( node* root, int key, bool verbose )
{
int i = 0;
node * c = root;
if (c == NULL) {
if (verbose)
printf("Empty tree.\n");
return c;
}
while (!c->is_leaf) {
if (verbose) {
printf("[");
for (i = 0; i < c->num_keys - 1; i++)
printf("%d ", c->keys[i]);
printf("%d] ", c->keys[i]);
}
i = 0;
while (i < c->num_keys) {
if (key >= c->keys[i])
i++;
else
break;
}
if (verbose)
printf("%d ->\n", i);
c = (node *)c->pointers[i];
}
if (verbose) {
printf("Leaf [");
for (i = 0; i < c->num_keys - 1; i++)
printf("%d ", c->keys[i]);
printf("%d] ->\n", c->keys[i]);
}
return c;
}
/* Finds and returns the record to which a key refers. */
record *
find( node* root, int key, bool verbose )
{
int i = 0;
node * c = find_leaf( root, key, verbose );
if (c == NULL)
return NULL;
for (i = 0; i < c->num_keys; i++)
if (c->keys[i] == key)
break;
if (i == c->num_keys)
return NULL;
else
return (record *)c->pointers[i];
}
/* Finds the appropriate place to split a node that is too big into two. */
int
cut( int length )
{
if (length % 2 == 0)
return length/2;
else
return length/2 + 1;
}
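/* Example: cut(4) == 2 and cut(5) == 3, i.e. the split point is the ceiling of
 * length/2, so after a split the left node keeps at least as many entries as
 * the right one. */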
//======================================================================================================================================================150
// INSERTION
//======================================================================================================================================================150
/* Creates a new record to hold the value to which a key refers. */
record *
make_record(int value)
{
record * new_record = (record *)malloc(sizeof(record));
if (new_record == NULL) {
perror("Record creation.");
exit(EXIT_FAILURE);
}
else {
new_record->value = value;
}
return new_record;
}
/* Creates a new general node, which can be adapted to serve as either a leaf or an internal node. */
node *
make_node( void )
{
node * new_node;
new_node = (node *) malloc(sizeof(node));
if (new_node == NULL) {
perror("Node creation.");
exit(EXIT_FAILURE);
}
new_node->keys = (int *) malloc( (order - 1) * sizeof(int) );
if (new_node->keys == NULL) {
perror("New node keys array.");
exit(EXIT_FAILURE);
}
new_node->pointers = (void **) malloc( order * sizeof(void *) );
if (new_node->pointers == NULL) {
perror("New node pointers array.");
exit(EXIT_FAILURE);
}
new_node->is_leaf = false;
new_node->num_keys = 0;
new_node->parent = NULL;
new_node->next = NULL;
return new_node;
}
/* Creates a new leaf by creating a node and then adapting it appropriately. */
node *
make_leaf( void )
{
node* leaf = make_node();
leaf->is_leaf = true;
return leaf;
}
/* Helper function used in insert_into_parent to find the index of the parent's pointer to the node to the left of the key to be inserted. */
int
get_left_index(node* parent, node* left)
{
int left_index = 0;
while (left_index <= parent->num_keys &&
parent->pointers[left_index] != left)
left_index++;
return left_index;
}
/* Inserts a new pointer to a record and its corresponding key into a leaf. Returns the altered leaf. */
node *
insert_into_leaf( node* leaf, int key, record* pointer )
{
int i, insertion_point;
insertion_point = 0;
while (insertion_point < leaf->num_keys && leaf->keys[insertion_point] < key)
insertion_point++;
for (i = leaf->num_keys; i > insertion_point; i--) {
leaf->keys[i] = leaf->keys[i - 1];
leaf->pointers[i] = leaf->pointers[i - 1];
}
leaf->keys[insertion_point] = key;
leaf->pointers[insertion_point] = pointer;
leaf->num_keys++;
return leaf;
}
/* Inserts a new key and pointer to a new record into a leaf so as to exceed the tree's order, causing the leaf to be split in half. */
node *
insert_into_leaf_after_splitting( node* root,
node* leaf,
int key,
record* pointer)
{
node * new_leaf;
int * temp_keys;
void ** temp_pointers;
int insertion_index, split, new_key, i, j;
new_leaf = make_leaf();
temp_keys = (int *) malloc( order * sizeof(int) );
if (temp_keys == NULL) {
perror("Temporary keys array.");
exit(EXIT_FAILURE);
}
temp_pointers = (void **) malloc( order * sizeof(void *) );
if (temp_pointers == NULL) {
perror("Temporary pointers array.");
exit(EXIT_FAILURE);
}
insertion_index = 0;
while (insertion_index < order - 1 && leaf->keys[insertion_index] < key)
insertion_index++;
for (i = 0, j = 0; i < leaf->num_keys; i++, j++) {
if (j == insertion_index) j++;
temp_keys[j] = leaf->keys[i];
temp_pointers[j] = leaf->pointers[i];
}
temp_keys[insertion_index] = key;
temp_pointers[insertion_index] = pointer;
leaf->num_keys = 0;
split = cut(order - 1);
for (i = 0; i < split; i++) {
leaf->pointers[i] = temp_pointers[i];
leaf->keys[i] = temp_keys[i];
leaf->num_keys++;
}
for (i = split, j = 0; i < order; i++, j++) {
new_leaf->pointers[j] = temp_pointers[i];
new_leaf->keys[j] = temp_keys[i];
new_leaf->num_keys++;
}
free(temp_pointers);
free(temp_keys);
new_leaf->pointers[order - 1] = leaf->pointers[order - 1];
leaf->pointers[order - 1] = new_leaf;
for (i = leaf->num_keys; i < order - 1; i++)
leaf->pointers[i] = NULL;
for (i = new_leaf->num_keys; i < order - 1; i++)
new_leaf->pointers[i] = NULL;
new_leaf->parent = leaf->parent;
new_key = new_leaf->keys[0];
return insert_into_parent(root, leaf, new_key, new_leaf);
}
/* Inserts a new key and pointer to a node into a node into which these can fit without violating the B+ tree properties. */
node *
insert_into_node( node* root,
node* n,
int left_index,
int key,
node* right)
{
int i;
for (i = n->num_keys; i > left_index; i--) {
n->pointers[i + 1] = n->pointers[i];
n->keys[i] = n->keys[i - 1];
}
n->pointers[left_index + 1] = right;
n->keys[left_index] = key;
n->num_keys++;
return root;
}
/* Inserts a new key and pointer to a node into a node, causing the node's size to exceed the order, and causing the node to split into two. */
node *
insert_into_node_after_splitting( node* root,
node* old_node,
int left_index,
int key,
node * right)
{
int i, j, split, k_prime;
node * new_node, * child;
int * temp_keys;
node ** temp_pointers;
/* First create a temporary set of keys and pointers
* to hold everything in order, including
* the new key and pointer, inserted in their
* correct places.
* Then create a new node and copy half of the
* keys and pointers to the old node and
* the other half to the new.
*/
temp_pointers = (node **) malloc( (order + 1) * sizeof(node *) );
if (temp_pointers == NULL) {
perror("Temporary pointers array for splitting nodes.");
exit(EXIT_FAILURE);
}
temp_keys = (int *) malloc( order * sizeof(int) );
if (temp_keys == NULL) {
perror("Temporary keys array for splitting nodes.");
exit(EXIT_FAILURE);
}
for (i = 0, j = 0; i < old_node->num_keys + 1; i++, j++) {
if (j == left_index + 1) j++;
temp_pointers[j] = (node *) old_node->pointers[i];
}
for (i = 0, j = 0; i < old_node->num_keys; i++, j++) {
if (j == left_index) j++;
temp_keys[j] = old_node->keys[i];
}
temp_pointers[left_index + 1] = right;
temp_keys[left_index] = key;
/* Create the new node and copy
* half the keys and pointers to the
* old and half to the new.
*/
split = cut(order);
new_node = make_node();
old_node->num_keys = 0;
for (i = 0; i < split - 1; i++) {
old_node->pointers[i] = temp_pointers[i];
old_node->keys[i] = temp_keys[i];
old_node->num_keys++;
}
old_node->pointers[i] = temp_pointers[i];
k_prime = temp_keys[split - 1];
for (++i, j = 0; i < order; i++, j++) {
new_node->pointers[j] = temp_pointers[i];
new_node->keys[j] = temp_keys[i];
new_node->num_keys++;
}
new_node->pointers[j] = temp_pointers[i];
free(temp_pointers);
free(temp_keys);
new_node->parent = old_node->parent;
for (i = 0; i <= new_node->num_keys; i++) {
child = (node *) new_node->pointers[i];
child->parent = new_node;
}
/* Insert a new key into the parent of the two
* nodes resulting from the split, with
* the old node to the left and the new to the right.
*/
return insert_into_parent(root, old_node, k_prime, new_node);
}
/* Inserts a new node (leaf or internal node) into the B+ tree. Returns the root of the tree after insertion. */
node *
insert_into_parent( node* root,
node* left,
int key,
node* right)
{
int left_index;
node * parent;
parent = left->parent;
/* Case: new root. */
if (parent == NULL)
return insert_into_new_root(left, key, right);
/* Case: leaf or node. (Remainder of
* function body.)
*/
/* Find the parent's pointer to the left
* node.
*/
left_index = get_left_index(parent, left);
/* Simple case: the new key fits into the node.
*/
if (parent->num_keys < order - 1)
return insert_into_node(root, parent, left_index, key, right);
/* Harder case: split a node in order
* to preserve the B+ tree properties.
*/
return insert_into_node_after_splitting(root, parent, left_index, key, right);
}
/* Creates a new root for two subtrees and inserts the appropriate key into the new root. */
node *
insert_into_new_root( node* left,
int key,
node* right)
{
node * root = make_node();
root->keys[0] = key;
root->pointers[0] = left;
root->pointers[1] = right;
root->num_keys++;
root->parent = NULL;
left->parent = root;
right->parent = root;
return root;
}
/* First insertion: start a new tree. */
node *
start_new_tree( int key,
record* pointer)
{
node * root = make_leaf();
root->keys[0] = key;
root->pointers[0] = pointer;
root->pointers[order - 1] = NULL;
root->parent = NULL;
root->num_keys++;
return root;
}
/* Master insertion function. Inserts a key and an associated value into the B+ tree, causing the tree to be adjusted however necessary to maintain the B+ tree properties. */
node *
insert( node* root,
int key,
int value )
{
record* pointer;
node* leaf;
/* The current implementation ignores duplicates. */
if (find(root, key, false) != NULL)
return root;
/* Create a new record for the value. */
pointer = make_record(value);
/* Case: the tree does not exist yet. Start a new tree. */
if (root == NULL)
return start_new_tree(key, pointer);
/* Case: the tree already exists. (Rest of function body.) */
leaf = find_leaf(root, key, false);
/* Case: leaf has room for key and pointer. */
if (leaf->num_keys < order - 1) {
leaf = insert_into_leaf(leaf, key, pointer);
return root;
}
/* Case: leaf must be split. */
return insert_into_leaf_after_splitting(root, leaf, key, pointer);
}
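/* Usage sketch (illustrative only): building a small tree with insert() and
 * querying it with find(). It assumes `order` has already been set, as main()
 * does before its own insertion loop. */
static node *
example_build_small_tree( void )
{
	node * root = NULL;
	record * r;
	int k;
	for (k = 1; k <= 10; k++)
		root = insert(root, k, k * 100);
	r = find(root, 7, false);
	if (r != NULL)
		printf("key 7 -> value %d\n", r->value);
	return root;
}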
//======================================================================================================================================================150
// DELETION
//======================================================================================================================================================150
/* Utility function for deletion. Retrieves the index of a node's nearest neighbor (sibling) to the left if one exists. If not (the node is the leftmost child), returns -1 to signify this special case. */
int
get_neighbor_index( node* n )
{
int i;
/* Return the index of the key to the left
* of the pointer in the parent pointing
* to n.
* If n is the leftmost child, this means
* return -1.
*/
for (i = 0; i <= n->parent->num_keys; i++)
if (n->parent->pointers[i] == n)
return i - 1;
// Error state.
printf("Search for nonexistent pointer to node in parent.\n");
//printf("Node: %#x\n", (unsigned int)n);
exit(EXIT_FAILURE);
}
/* */
node*
remove_entry_from_node( node* n,
int key,
node * pointer)
{
int i, num_pointers;
// Remove the key and shift other keys accordingly.
i = 0;
while (n->keys[i] != key)
i++;
for (++i; i < n->num_keys; i++)
n->keys[i - 1] = n->keys[i];
// Remove the pointer and shift other pointers accordingly.
// First determine number of pointers.
num_pointers = n->is_leaf ? n->num_keys : n->num_keys + 1;
i = 0;
while (n->pointers[i] != pointer)
i++;
for (++i; i < num_pointers; i++)
n->pointers[i - 1] = n->pointers[i];
// One key fewer.
n->num_keys--;
// Set the other pointers to NULL for tidiness.
// A leaf uses the last pointer to point to the next leaf.
if (n->is_leaf)
for (i = n->num_keys; i < order - 1; i++)
n->pointers[i] = NULL;
else
for (i = n->num_keys + 1; i < order; i++)
n->pointers[i] = NULL;
return n;
}
/* */
node*
adjust_root(node* root)
{
node * new_root;
/* Case: nonempty root.
* Key and pointer have already been deleted,
* so nothing to be done.
*/
if (root->num_keys > 0)
return root;
/* Case: empty root.
*/
// If it has a child, promote
// the first (only) child
// as the new root.
if (!root->is_leaf) {
new_root = (node *) root->pointers[0];
new_root->parent = NULL;
}
// If it is a leaf (has no children),
// then the whole tree is empty.
else
new_root = NULL;
free(root->keys);
free(root->pointers);
free(root);
return new_root;
}
/* Coalesces a node that has become too small after deletion with a neighboring node that can accept the additional entries without exceeding the maximum. */
node*
coalesce_nodes( node* root,
node* n,
node* neighbor,
int neighbor_index,
int k_prime)
{
int i, j, neighbor_insertion_index, n_start, n_end, new_k_prime;
node * tmp;
bool split;
/* Swap neighbor with node if node is on the
* extreme left and neighbor is to its right.
*/
if (neighbor_index == -1) {
tmp = n;
n = neighbor;
neighbor = tmp;
}
/* Starting point in the neighbor for copying
* keys and pointers from n.
* Recall that n and neighbor have swapped places
* in the special case of n being a leftmost child.
*/
neighbor_insertion_index = neighbor->num_keys;
/*
* Nonleaf nodes may sometimes need to remain split,
* if the insertion of k_prime would cause the resulting
* single coalesced node to exceed the limit order - 1.
* The variable split is always false for leaf nodes
* and only sometimes set to true for nonleaf nodes.
*/
split = false;
/* Case: nonleaf node.
* Append k_prime and the following pointer.
* If there is room in the neighbor, append
* all pointers and keys from the neighbor.
* Otherwise, append only cut(order) - 2 keys and
* cut(order) - 1 pointers.
*/
if (!n->is_leaf) {
/* Append k_prime.
*/
neighbor->keys[neighbor_insertion_index] = k_prime;
neighbor->num_keys++;
/* Case (default): there is room for all of n's keys and pointers
* in the neighbor after appending k_prime.
*/
n_end = n->num_keys;
/* Case (special): k cannot fit with all the other keys and pointers
* into one coalesced node.
*/
n_start = 0; // Only used in this special case.
if (n->num_keys + neighbor->num_keys >= order) {
split = true;
n_end = cut(order) - 2;
}
for (i = neighbor_insertion_index + 1, j = 0; j < n_end; i++, j++) {
neighbor->keys[i] = n->keys[j];
neighbor->pointers[i] = n->pointers[j];
neighbor->num_keys++;
n->num_keys--;
n_start++;
}
/* The number of pointers is always
* one more than the number of keys.
*/
neighbor->pointers[i] = n->pointers[j];
/* If the nodes are still split, remove the first key from
* n.
*/
if (split) {
new_k_prime = n->keys[n_start];
for (i = 0, j = n_start + 1; i < n->num_keys; i++, j++) {
n->keys[i] = n->keys[j];
n->pointers[i] = n->pointers[j];
}
n->pointers[i] = n->pointers[j];
n->num_keys--;
}
/* All children must now point up to the same parent.
*/
for (i = 0; i < neighbor->num_keys + 1; i++) {
tmp = (node *)neighbor->pointers[i];
tmp->parent = neighbor;
}
}
/* In a leaf, append the keys and pointers of
* n to the neighbor.
* Set the neighbor's last pointer to point to
* what had been n's right neighbor.
*/
else {
for (i = neighbor_insertion_index, j = 0; j < n->num_keys; i++, j++) {
neighbor->keys[i] = n->keys[j];
neighbor->pointers[i] = n->pointers[j];
neighbor->num_keys++;
}
neighbor->pointers[order - 1] = n->pointers[order - 1];
}
if (!split) {
root = delete_entry(root, n->parent, k_prime, n);
free(n->keys);
free(n->pointers);
free(n);
}
else
for (i = 0; i < n->parent->num_keys; i++)
if (n->parent->pointers[i + 1] == n) {
n->parent->keys[i] = new_k_prime;
break;
}
return root;
}
/* Redistributes entries between two nodes when one has become too small after deletion but its neighbor is too big to append the small node's entries without exceeding the maximum */
node*
redistribute_nodes( node* root,
node* n,
node* neighbor,
int neighbor_index,
int k_prime_index,
int k_prime)
{
int i;
node * tmp;
/* Case: n has a neighbor to the left.
* Pull the neighbor's last key-pointer pair over
* from the neighbor's right end to n's left end.
*/
if (neighbor_index != -1) {
if (!n->is_leaf)
n->pointers[n->num_keys + 1] = n->pointers[n->num_keys];
for (i = n->num_keys; i > 0; i--) {
n->keys[i] = n->keys[i - 1];
n->pointers[i] = n->pointers[i - 1];
}
if (!n->is_leaf) {
n->pointers[0] = neighbor->pointers[neighbor->num_keys];
tmp = (node *)n->pointers[0];
tmp->parent = n;
neighbor->pointers[neighbor->num_keys] = NULL;
n->keys[0] = k_prime;
n->parent->keys[k_prime_index] = neighbor->keys[neighbor->num_keys - 1];
}
else {
n->pointers[0] = neighbor->pointers[neighbor->num_keys - 1];
neighbor->pointers[neighbor->num_keys - 1] = NULL;
n->keys[0] = neighbor->keys[neighbor->num_keys - 1];
n->parent->keys[k_prime_index] = n->keys[0];
}
}
/* Case: n is the leftmost child.
* Take a key-pointer pair from the neighbor to the right.
* Move the neighbor's leftmost key-pointer pair
* to n's rightmost position.
*/
else {
if (n->is_leaf) {
n->keys[n->num_keys] = neighbor->keys[0];
n->pointers[n->num_keys] = neighbor->pointers[0];
n->parent->keys[k_prime_index] = neighbor->keys[1];
}
else {
n->keys[n->num_keys] = k_prime;
n->pointers[n->num_keys + 1] = neighbor->pointers[0];
tmp = (node *)n->pointers[n->num_keys + 1];
tmp->parent = n;
n->parent->keys[k_prime_index] = neighbor->keys[0];
}
for (i = 0; i < neighbor->num_keys; i++) {
neighbor->keys[i] = neighbor->keys[i + 1];
neighbor->pointers[i] = neighbor->pointers[i + 1];
}
if (!n->is_leaf)
neighbor->pointers[i] = neighbor->pointers[i + 1];
}
/* n now has one more key and one more pointer;
* the neighbor has one fewer of each.
*/
n->num_keys++;
neighbor->num_keys--;
return root;
}
/* Deletes an entry from the B+ tree. Removes the record and its key and pointer from the leaf, and then makes all appropriate changes to preserve the B+ tree properties. */
node*
delete_entry( node* root,
node* n,
int key,
void* pointer )
{
int min_keys;
node * neighbor;
int neighbor_index;
int k_prime_index, k_prime;
int capacity;
// Remove key and pointer from node.
n = remove_entry_from_node(n, key, (node *) pointer);
/* Case: deletion from the root.
*/
if (n == root)
return adjust_root(root);
/* Case: deletion from a node below the root.
* (Rest of function body.)
*/
/* Determine minimum allowable size of node,
* to be preserved after deletion.
*/
min_keys = n->is_leaf ? cut(order - 1) : cut(order) - 1;
/* Case: node stays at or above minimum.
* (The simple case.)
*/
if (n->num_keys >= min_keys)
return root;
/* Case: node falls below minimum.
* Either coalescence or redistribution
* is needed.
*/
/* Find the appropriate neighbor node with which
* to coalesce.
* Also find the key (k_prime) in the parent
* between the pointer to node n and the pointer
* to the neighbor.
*/
neighbor_index = get_neighbor_index( n );
k_prime_index = neighbor_index == -1 ? 0 : neighbor_index;
k_prime = n->parent->keys[k_prime_index];
neighbor = neighbor_index == -1 ? (node *) n->parent->pointers[1] :
(node *)n->parent->pointers[neighbor_index];
capacity = n->is_leaf ? order : order - 1;
/* Coalescence. */
if (neighbor->num_keys + n->num_keys < capacity)
return coalesce_nodes(root, n, neighbor, neighbor_index, k_prime);
/* Redistribution. */
else
return redistribute_nodes(root, n, neighbor, neighbor_index, k_prime_index, k_prime);
}
/* Master deletion function. */
node*
deleteVal( node* root,
int key)
{
node * key_leaf;
record * key_record;
key_record = find(root, key, false);
key_leaf = find_leaf(root, key, false);
if (key_record != NULL && key_leaf != NULL) {
root = delete_entry(root, key_leaf, key, key_record);
free(key_record);
}
return root;
}
/* */
void
destroy_tree_nodes(node* root)
{
int i;
if (root->is_leaf)
for (i = 0; i < root->num_keys; i++)
free(root->pointers[i]);
else
for (i = 0; i < root->num_keys + 1; i++)
destroy_tree_nodes((node *) root->pointers[i]);
free(root->pointers);
free(root->keys);
free(root);
}
/* */
node*
destroy_tree(node* root)
{
destroy_tree_nodes(root);
return NULL;
}
//======================================================================================================================================================150
// END
//======================================================================================================================================================150
//========================================================================================================================================================================================================200
// MAIN FUNCTION
//========================================================================================================================================================================================================200
int
main( int argc,
char** argv )
{
srand(2);
printf("WG size of kernel 1 & 2 = %d \n", DEFAULT_ORDER);
// ------------------------------------------------------------60
// figure out and display whether 32-bit or 64-bit architecture
// ------------------------------------------------------------60
// if(sizeof(int *)==8){
// printf("64 bit machine\n");
// }
// else if(sizeof(int *)==4){
// printf("32 bit machine\n");
// }
// ------------------------------------------------------------60
// set GPU
// ------------------------------------------------------------60
int device = 0;
hipSetDevice(device);
printf("Selecting device %d\n", device);
// ------------------------------------------------------------60
// read inputs
// ------------------------------------------------------------60
// assign default values
int cur_arg;
char *input_file = NULL;
char *command_file = NULL;
const char *output="output.txt";
FILE * pFile;
// go through arguments
for(cur_arg=1; cur_arg<argc; cur_arg++){
// check if -file
if(strcmp(argv[cur_arg], "file")==0){
// check if value provided
if(argc > cur_arg+1){
input_file = argv[cur_arg+1];
cur_arg = cur_arg+1;
}
// value not provided
else{
printf("ERROR: Missing value for the file parameter\n");
return -1;
}
}
else if(strcmp(argv[cur_arg], "command")==0){
// check if value provided
if(argc > cur_arg+1){
command_file = argv[cur_arg+1];
cur_arg = cur_arg+1;
}
// value not provided
else{
printf("ERROR: Missing value for the command parameter\n");
return -1;
}
}
}
// Print configuration
if((input_file==NULL)||(command_file==NULL)){
printf("Usage: ./b+tree file <input_file> command <command_list>\n");
return -1;
}
// For debug
printf("Input File: %s \n", input_file);
printf("Command File: %s \n", command_file);
FILE * commandFile;
long lSize;
char * commandBuffer;
size_t result;
commandFile = fopen ( command_file, "rb" );
if (commandFile==NULL) {fputs ("Command File error",stderr); exit (1);}
// obtain file size:
fseek (commandFile , 0 , SEEK_END);
lSize = ftell (commandFile);
rewind (commandFile);
// allocate memory to contain the whole file, plus a terminating NUL:
commandBuffer = (char*) malloc (sizeof(char)*(lSize+1));
if (commandBuffer == NULL) {fputs ("Command Buffer memory error",stderr); exit (2);}
// copy the file into the buffer and terminate it:
result = fread (commandBuffer,1,lSize,commandFile);
if (result != (size_t)lSize) {fputs ("Command file reading error",stderr); exit (3);}
commandBuffer[lSize] = '\0';
/* the whole file is now loaded in the memory buffer. */
// terminate
fclose (commandFile);
// For Debug
//char *sPointer=commandBuffer;
printf("Command Buffer: \n");
printf("%s",commandBuffer);
//
pFile = fopen (output,"w+");
if (pFile==NULL)
printf ("Fail to open %s !\n",output);
fprintf(pFile,"******starting******\n");
fclose(pFile);
// ------------------------------------------------------------60
// general variables
// ------------------------------------------------------------60
FILE *file_pointer;
node *root;
root = NULL;
record *r;
int input;
char instruction;
order = DEFAULT_ORDER;
verbose_output = false;
//usage_1();
//usage_2();
// ------------------------------------------------------------60
// get input from file, if file provided
// ------------------------------------------------------------60
if (input_file != NULL) {
printf("Getting input from file %s...\n", input_file);
// open input file
file_pointer = fopen(input_file, "r");
if (file_pointer == NULL) {
perror("Failure to open input file.");
exit(EXIT_FAILURE);
}
// get # of numbers in the file
fscanf(file_pointer, "%d\n", &input);
size = input;
// save all numbers
while (fscanf(file_pointer, "%d\n", &input) == 1) {
root = insert(root, input, input);
}
// close file
fclose(file_pointer);
//print_tree(root);
//printf("Height of tree = %d\n", height(root));
}
else{
printf("ERROR: Argument -file missing\n");
return 0;
}
// ------------------------------------------------------------60
// get tree statistics
// ------------------------------------------------------------60
printf("Transforming data to a GPU suitable structure...\n");
long mem_used = transform_to_cuda(root,0);
maxheight = height(root);
long rootLoc = (long)knodes - (long)mem;
// ------------------------------------------------------------60
// process commands
// ------------------------------------------------------------60
char *commandPointer=commandBuffer;
printf("Waiting for command\n");
printf("> ");
while (sscanf(commandPointer, "%c", &instruction) != EOF) {
commandPointer++;
switch (instruction) {
// ----------------------------------------40
// Insert
// ----------------------------------------40
case 'i':
{
scanf("%d", &input);
while (getchar() != (int)'\n');
root = insert(root, input, input);
print_tree(root);
break;
}
// ----------------------------------------40
// find (intentional fall-through to 'p')
// ----------------------------------------40
case 'f':
{
}
// ----------------------------------------40
// find
// ----------------------------------------40
case 'p':
{
scanf("%d", &input);
while (getchar() != (int)'\n');
r = find(root, input, instruction == 'p');
if (r == NULL)
printf("Record not found under key %d.\n", input);
else
printf("Record found: %d\n",r->value);
break;
}
// ----------------------------------------40
// delete value
// ----------------------------------------40
case 'd':
{
scanf("%d", &input);
while (getchar() != (int)'\n');
root = (node *) deleteVal(root, input);
print_tree(root);
break;
}
// ----------------------------------------40
// destroy tree
// ----------------------------------------40
case 'x':
{
while (getchar() != (int)'\n');
root = destroy_tree(root);
print_tree(root);
break;
}
// ----------------------------------------40
// print leaves
// ----------------------------------------40
case 'l':
{
while (getchar() != (int)'\n');
print_leaves(root);
break;
}
// ----------------------------------------40
// print tree
// ----------------------------------------40
case 't':
{
while (getchar() != (int)'\n');
print_tree(root);
break;
}
// ----------------------------------------40
// toggle verbose output
// ----------------------------------------40
case 'v':
{
while (getchar() != (int)'\n');
verbose_output = !verbose_output;
break;
}
// ----------------------------------------40
// quit
// ----------------------------------------40
case 'q':
{
while (getchar() != (int)'\n');
return EXIT_SUCCESS;
}
// ----------------------------------------40
// [GPU] find K (initK, findK)
// ----------------------------------------40
case 'k':
{
// get # of queries from user
int count;
sscanf(commandPointer, "%d", &count);
while(*commandPointer!=' ' && *commandPointer!='\n' && *commandPointer!='\0')
commandPointer++;
printf("\n ******command: k count=%d \n",count);
if(count > 65535){
printf("ERROR: Number of requested querries should be 65,535 at most. (limited by # of CUDA blocks)\n");
exit(0);
}
// INPUT: records CPU allocation (setting pointer in mem variable)
record *records = (record *)mem;
long records_elem = (long)rootLoc / sizeof(record);
long records_mem = (long)rootLoc;
printf("records_elem=%d, records_unit_mem=%d, records_mem=%d\n", (int)records_elem, (int)sizeof(record), (int)records_mem);
// INPUT: knodes CPU allocation (setting pointer in mem variable)
knode *knodes = (knode *)((long)mem + (long)rootLoc);
long knodes_elem = ((long)(mem_used) - (long)rootLoc) / sizeof(knode);
long knodes_mem = (long)(mem_used) - (long)rootLoc;
printf("knodes_elem=%d, knodes_unit_mem=%d, knodes_mem=%d\n", (int)knodes_elem, (int)sizeof(knode), (int)knodes_mem);
// INPUT: currKnode CPU allocation
long *currKnode;
currKnode = (long *)malloc(count*sizeof(long));
// INPUT: offset CPU initialization
memset(currKnode, 0, count*sizeof(long));
// INPUT: offset CPU allocation
long *offset;
offset = (long *)malloc(count*sizeof(long));
// INPUT: offset CPU initialization
memset(offset, 0, count*sizeof(long));
// INPUT: keys CPU allocation
int *keys;
keys = (int *)malloc(count*sizeof(int));
// INPUT: keys CPU initialization
int i;
for(i = 0; i < count; i++){
keys[i] = (rand()/(float)RAND_MAX)*size;
}
// OUTPUT: ans CPU allocation
record *ans = (record *)malloc(sizeof(record)*count);
// OUTPUT: ans CPU initialization
for(i = 0; i < count; i++){
ans[i].value = -1;
}
// CUDA kernel
kernel_wrapper(records,
records_mem,
knodes,
knodes_elem,
knodes_mem,
order,
maxheight,
count,
currKnode,
offset,
keys,
ans);
/* printf("ans: \n"); */
/* for(i = 0; i < count; i++){ */
/* printf("%d ",ans[i].value); */
/* } */
/* printf(" \n"); */
pFile = fopen (output,"aw+");
if (pFile==NULL)
{
printf ("Fail to open %s !\n",output);
}
fprintf(pFile,"\n ******command: k count=%d \n",count);
for(i = 0; i < count; i++){
fprintf(pFile, "%d %d\n",i, ans[i].value);
}
fprintf(pFile, " \n");
fclose(pFile);
// free memory
free(currKnode);
free(offset);
free(keys);
free(ans);
// break out of case
break;
}
// ----------------------------------------40
// find range
// ----------------------------------------40
case 'r':
{
int start, end;
scanf("%d", &start);
scanf("%d", &end);
if(start > end){
input = start;
start = end;
end = input;
}
printf("For range %d to %d, ",start,end);
list_t * ansList;
ansList = findRange(root, start, end);
printf("%d records found\n", list_get_length(ansList));
//list_iterator_t iter;
free(ansList);
break;
}
// ----------------------------------------40
// [GPU] find Range K (initK, findRangeK)
// ----------------------------------------40
case 'j':
{
// get # of queries from user
int count;
sscanf(commandPointer, "%d", &count);
while(*commandPointer!=' ' && *commandPointer!='\n' && *commandPointer!='\0')
commandPointer++;
int rSize;
sscanf(commandPointer, "%d", &rSize);
while(*commandPointer!=32 && *commandPointer!='\n')
commandPointer++;
printf("\n******command: j count=%d, rSize=%d \n",count, rSize);
if(rSize > size || rSize < 0) {
printf("ERROR: Search range size must be between 0 and the data set size (%d).\n", (int)size);
exit(0);
}
// INPUT: knodes CPU allocation (setting pointer in mem variable)
knode *knodes = (knode *)((long)mem + (long)rootLoc);
long knodes_elem = ((long)(mem_used) - (long)rootLoc) / sizeof(knode);
long knodes_mem = (long)(mem_used) - (long)rootLoc;
printf("knodes_elem=%d, knodes_unit_mem=%d, knodes_mem=%d\n", (int)knodes_elem, (int)sizeof(knode), (int)knodes_mem);
// INPUT: currKnode CPU allocation
long *currKnode;
currKnode = (long *)malloc(count*sizeof(long));
// INPUT: offset CPU initialization
memset (currKnode, 0, count*sizeof(long));
// INPUT: offset CPU allocation
long *offset;
offset = (long *)malloc(count*sizeof(long));
// INPUT: offset CPU initialization
memset (offset, 0, count*sizeof(long));
// INPUT: lastKnode CPU allocation
long *lastKnode;
lastKnode = (long *)malloc(count*sizeof(long));
// INPUT: offset CPU initialization
memset (lastKnode, 0, count*sizeof(long));
// INPUT: offset_2 CPU allocation
long *offset_2;
offset_2 = (long *)malloc(count*sizeof(long));
// INPUT: offset CPU initialization
memset (offset_2, 0, count*sizeof(long));
// INPUT: start, end CPU allocation
int *start;
start = (int *)malloc(count*sizeof(int));
int *end;
end = (int *)malloc(count*sizeof(int));
// INPUT: start, end CPU initialization
int i;
for(i = 0; i < count; i++){
start[i] = (rand()/(float)RAND_MAX)*size;
end[i] = start[i]+rSize;
if(end[i] >= size){
start[i] = start[i] - (end[i] - size);
end[i]= size-1;
}
}
// INPUT: recstart, reclength CPU allocation
int *recstart;
recstart = (int *)malloc(count*sizeof(int));
int *reclength;
reclength = (int *)malloc(count*sizeof(int));
// OUTPUT: ans CPU initialization
for(i = 0; i < count; i++){
recstart[i] = 0;
reclength[i] = 0;
}
// CUDA kernel
kernel2_wrapper( knodes,
knodes_elem,
knodes_mem,
order,
maxheight,
count,
currKnode,
offset,
lastKnode,
offset_2,
start,
end,
recstart,
reclength);
pFile = fopen (output,"aw+");
if (pFile==NULL)
{
printf ("Fail to open %s !\n",output);
}
fprintf(pFile,"\n******command: j count=%d, rSize=%d \n",count, rSize);
for(i = 0; i < count; i++){
fprintf(pFile, "%d %d %d\n",i, recstart[i],reclength[i]);
}
fprintf(pFile, " \n");
fclose(pFile);
// free memory
free(currKnode);
free(offset);
free(lastKnode);
free(offset_2);
free(start);
free(end);
free(recstart);
free(reclength);
// break out of case
break;
}
// ----------------------------------------40
// default
// ----------------------------------------40
default:
{
//usage_2();
break;
}
}
printf("> ");
}
printf("\n");
// ------------------------------------------------------------60
// free remaining memory and exit
// ------------------------------------------------------------60
free(mem);
free(commandBuffer);
return EXIT_SUCCESS;
}
#include "libhmsbeagle/GPU/GPUImplDefs.h"
#include "libhmsbeagle/GPU/kernels/kernelsAll.cu" // This file includes the non-state-count specific kernels
extern "C" {
#endif
///////////////////////////////////////////////////////////////////////////////
// kernel macros CPU
#define DETERMINE_INDICES_X_CPU()\
int state = KW_LOCAL_ID_0;\
int patIdx = get_global_id(1);\
int pattern = __umul24(KW_GROUP_ID_0,PATTERN_BLOCK_SIZE) + patIdx;\
int matrix = KW_GROUP_ID_2;\
int patternCount = totalPatterns;\
int deltaPartialsByState = pattern * PADDED_STATE_COUNT;\
int deltaPartialsByMatrix = matrix * PADDED_STATE_COUNT * patternCount;\
int deltaMatrix = matrix * PADDED_STATE_COUNT * PADDED_STATE_COUNT;\
int u = state + deltaPartialsByState + deltaPartialsByMatrix;
#define SUM_PARTIALS_PARTIALS_X_CPU()\
REAL sum1 = 0, sum2 = 0;\
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;\
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix1 = matrices1 + deltaMatrix;\
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix2 = matrices2 + deltaMatrix;\
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials1 = partials1 + deltaPartials;\
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials2 = partials2 + deltaPartials;\
for(int i = 0; i < PADDED_STATE_COUNT; i++) {\
FMA(sMatrix1[i * PADDED_STATE_COUNT + state], sPartials1[i], sum1);\
FMA(sMatrix2[i * PADDED_STATE_COUNT + state], sPartials2[i], sum2);\
}
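/* Note on the macro above: for a fixed output state, sum1 and sum2 are the
 * matrix-vector products sum_i sMatrix1[i][state] * sPartials1[i] and
 * sum_i sMatrix2[i][state] * sPartials2[i], i.e. the per-child contributions
 * that the kernels below combine (e.g. partials3[u] = sum1 * sum2). */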
#define SUM_STATES_PARTIALS_X_CPU()\
REAL sum1 = 0, sum2 = 0;\
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;\
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix1 = matrices1 + deltaMatrix;\
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix2 = matrices2 + deltaMatrix;\
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials2 = partials2 + deltaPartials;\
int state1 = states1[pattern];\
if (state1 < PADDED_STATE_COUNT)\
sum1 = sMatrix1[state1 * PADDED_STATE_COUNT + state];\
else\
sum1 = 1.0;\
for(int i = 0; i < PADDED_STATE_COUNT; i++) {\
FMA(sMatrix2[i * PADDED_STATE_COUNT + state], sPartials2[i], sum2);\
}
#define FIND_MAX_PARTIALS_X_CPU()\
int patIdx = KW_LOCAL_ID_0;\
int pattern = KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE + patIdx;\
int deltaPartialsByState = pattern * PADDED_STATE_COUNT;\
REAL max = 0;\
for(int m = 0; m < matrixCount; m++) {\
int deltaPartialsByMatrix = m * PADDED_STATE_COUNT * PATTERN_BLOCK_SIZE * KW_NUM_GROUPS_0;\
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;\
for(int i = 0; i < PADDED_STATE_COUNT; i++) {\
REAL iPartial = allPartials[deltaPartials + i];\
if (iPartial > max)\
max = iPartial;\
}\
}
#define SCALE_PARTIALS_X_CPU()\
for(int m = 0; m < matrixCount; m++) {\
int deltaPartialsByMatrix = m * PADDED_STATE_COUNT * PATTERN_BLOCK_SIZE * KW_NUM_GROUPS_0;\
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;\
for(int i = 0; i < PADDED_STATE_COUNT; i++) {\
allPartials[deltaPartials + i] /= max;\
}\
}
#define INTEGRATE_PARTIALS_X_CPU()\
int pattern = KW_GROUP_ID_0;\
int u = pattern * PADDED_STATE_COUNT;\
int delta = patternCount * PADDED_STATE_COUNT;\
REAL sumTotal = 0;\
for (int i = 0; i < PADDED_STATE_COUNT; i++) {\
REAL sumState = dRootPartials[i + u] * dWeights[0];\
for(int r = 1; r < matrixCount; r++) {\
FMA(dRootPartials[i + u + delta * r], dWeights[r], sumState);\
}\
sumState *= dFrequencies[i];\
sumTotal += sumState;\
}
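/* Note on the macro above: sumTotal is the site likelihood for one pattern,
 * sum_i dFrequencies[i] * sum_r dWeights[r] * dRootPartials[r][pattern][i],
 * i.e. the root partials integrated over rate categories and then states. */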
#define INTEGRATE_PARTIALS_DERIV_X_CPU()\
int pattern = KW_GROUP_ID_0;\
int u = pattern * PADDED_STATE_COUNT;\
int delta = patternCount * PADDED_STATE_COUNT;\
REAL sumTotal = 0, sumTotalD1 = 0, sumTotalD2 = 0;\
REAL tmpLogLike, tmpFirstDeriv;\
for (int i = 0; i < PADDED_STATE_COUNT; i++) {\
REAL sumState = dRootPartials[ i + u] * dWeights[0];\
REAL sumD1 = dRootFirstDeriv[ i + u] * dWeights[0];\
REAL sumD2 = dRootSecondDeriv[i + u] * dWeights[0];\
for(int r = 1; r < matrixCount; r++) {\
FMA(dRootPartials[ i + u + delta * r], dWeights[r], sumState);\
FMA(dRootFirstDeriv[ i + u + delta * r], dWeights[r], sumD1);\
FMA(dRootSecondDeriv[i + u + delta * r], dWeights[r], sumD2);\
}\
sumState *= dFrequencies[i];\
sumD1 *= dFrequencies[i];\
sumD2 *= dFrequencies[i];\
sumTotal += sumState;\
sumTotalD1 += sumD1;\
sumTotalD2 += sumD2;\
}
///////////////////////////////////////////////////////////////////////////////
// kernel macros GPU
#define DETERMINE_INDICES_X_GPU()\
int state = KW_LOCAL_ID_0;\
int patIdx = KW_LOCAL_ID_1;\
int pattern = __umul24(KW_GROUP_ID_0,PATTERN_BLOCK_SIZE) + patIdx;\
int matrix = KW_GROUP_ID_1;\
int patternCount = totalPatterns;\
int deltaPartialsByState = pattern * PADDED_STATE_COUNT;\
int deltaPartialsByMatrix = matrix * PADDED_STATE_COUNT * patternCount;\
int deltaMatrix = matrix * PADDED_STATE_COUNT * PADDED_STATE_COUNT;\
int u = state + deltaPartialsByState + deltaPartialsByMatrix;
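/* Note on the macro above: each GPU thread handles one (matrix, pattern, state)
 * triple. Partials buffers are laid out matrix-major, then pattern, then state,
 * so
 *   u = matrix * PADDED_STATE_COUNT * patternCount
 *     + pattern * PADDED_STATE_COUNT
 *     + state
 * is the flat index of this thread's entry in such a buffer. */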
#define LOAD_SCALING_X_GPU()\
KW_LOCAL_MEM REAL fixedScalingFactors[PATTERN_BLOCK_SIZE];\
if (patIdx == 0 && state < PATTERN_BLOCK_SIZE ) {\
/* TODO: If PATTERN_BLOCK_SIZE > PADDED_STATE_COUNT, there is a bug here */\
fixedScalingFactors[state] = scalingFactors[KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE + state];\
}
#define SUM_PARTIALS_PARTIALS_X_GPU()\
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix; /* Points to *this* matrix */\
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix;\
/* Load values into shared memory */\
KW_LOCAL_MEM REAL sMatrix1[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL sMatrix2[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];\
int y = deltaPartialsByState + deltaPartialsByMatrix;\
/* copy a PADDED_STATE_COUNT*PATTERN_BLOCK_SIZE block of partials */\
/* These are all coherent global memory reads; checked in Profiler */\
if (pattern < totalPatterns) {\
sPartials1[patIdx][state] = partials1[y + state];\
sPartials2[patIdx][state] = partials2[y + state];\
} else {\
sPartials1[patIdx][state] = 0;\
sPartials2[patIdx][state] = 0;\
}\
REAL sum1 = 0, sum2 = 0;\
for (int i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {\
/* load one row of matrices */\
if (patIdx < BLOCK_PEELING_SIZE) {\
/* These are all coherent global memory reads. */\
sMatrix1[patIdx][state] = matrix1[patIdx * PADDED_STATE_COUNT + state];\
sMatrix2[patIdx][state] = matrix2[patIdx * PADDED_STATE_COUNT + state];\
/* sMatrix now filled with starting in state and ending in i */\
matrix1 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;\
matrix2 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;\
}\
KW_LOCAL_FENCE;\
for(int j = 0; j < BLOCK_PEELING_SIZE; j++) {\
FMA(sMatrix1[j][state], sPartials1[patIdx][i + j], sum1);\
FMA(sMatrix2[j][state], sPartials2[patIdx][i + j], sum2);\
}\
KW_LOCAL_FENCE;\
}
#define SUM_STATES_PARTIALS_X_GPU()\
KW_LOCAL_MEM REAL sMatrix2[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];\
int y = deltaPartialsByState + deltaPartialsByMatrix;\
if (pattern < totalPatterns) {\
sPartials2[patIdx][state] = partials2[y + state];\
} else {\
sPartials2[patIdx][state] = 0;\
}\
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix;\
REAL sum1 = 0, sum2 = 0;\
if (pattern < totalPatterns) {\
int state1 = states1[pattern]; /* Coalesced; no need to share */\
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix + state1 * PADDED_STATE_COUNT;\
if (state1 < PADDED_STATE_COUNT)\
sum1 = matrix1[state];\
else\
sum1 = 1.0;\
}\
for (int i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {\
if (patIdx < BLOCK_PEELING_SIZE) {\
sMatrix2[patIdx][state] = matrix2[patIdx * PADDED_STATE_COUNT + state];\
matrix2 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;\
}\
KW_LOCAL_FENCE;\
for(int j = 0; j < BLOCK_PEELING_SIZE; j++) {\
FMA(sMatrix2[j][state], sPartials2[patIdx][i + j], sum2);\
}\
KW_LOCAL_FENCE;\
}
#define LOAD_PARTIALS_SCALING_X_GPU()\
int state = KW_LOCAL_ID_0;\
int matrix = KW_LOCAL_ID_1;\
int pattern = KW_GROUP_ID_0;\
int patternCount = KW_NUM_GROUPS_0;\
int offsetPartials = matrix * patternCount * PADDED_STATE_COUNT\
+ pattern * PADDED_STATE_COUNT + state;\
/* TODO: Currently assumes MATRIX_BLOCK_SIZE > matrixCount; FIX!!! */\
KW_LOCAL_MEM REAL partials[MATRIX_BLOCK_SIZE][PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL storedPartials[MATRIX_BLOCK_SIZE][PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL max;\
if (matrix < matrixCount)\
partials[matrix][state] = allPartials[offsetPartials];\
else\
partials[matrix][state] = 0;\
storedPartials[matrix][state] = partials[matrix][state];\
KW_LOCAL_FENCE;
#define FIND_MAX_PARTIALS_STATE_POWER_OF_TWO_X_GPU()\
/* parallelized reduction, only works for powers-of-2 */\
for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) {\
if (state < i) {\
REAL compare1 = partials[matrix][state];\
REAL compare2 = partials[matrix][state + i];\
if (compare2 > compare1)\
partials[matrix][state] = compare2;\
}\
KW_LOCAL_FENCE;\
}
#define FIND_MAX_PARTIALS_STATE_X_GPU()\
/* not power-of-2 */\
for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) {\
if (state < i && state + i < PADDED_STATE_COUNT ) {\
REAL compare1 = partials[matrix][state];\
REAL compare2 = partials[matrix][state + i];\
if (compare2 > compare1)\
partials[matrix][state] = compare2;\
}\
KW_LOCAL_FENCE;\
}
#define FIND_MAX_PARTIALS_MATRIX_X_GPU()\
max = 0;\
for(int m = 0; m < matrixCount; m++) {\
if (partials[m][0] > max)\
max = partials[m][0];\
}
#define SCALE_PARTIALS_X_GPU()\
KW_LOCAL_FENCE;\
if (matrix < matrixCount)\
allPartials[offsetPartials] = storedPartials[matrix][state] / max;
#define INTEGRATE_PARTIALS_X_GPU()\
int state = KW_LOCAL_ID_0;\
int pattern = KW_GROUP_ID_0;\
KW_LOCAL_MEM REAL stateFreq[PADDED_STATE_COUNT];\
/* TODO: Currently assumes MATRIX_BLOCK_SIZE >> matrixCount */\
KW_LOCAL_MEM REAL matrixProp[MATRIX_BLOCK_SIZE];\
KW_LOCAL_MEM REAL sum[PADDED_STATE_COUNT];\
/* Load shared memory */\
stateFreq[state] = dFrequencies[state];\
sum[state] = 0;\
for(int matrixEdge = 0; matrixEdge < matrixCount; matrixEdge += PADDED_STATE_COUNT) {\
int x = matrixEdge + state;\
if (x < matrixCount)\
matrixProp[x] = dWeights[x];\
}\
KW_LOCAL_FENCE;\
int u = state + pattern * PADDED_STATE_COUNT;\
int delta = patternCount * PADDED_STATE_COUNT;\
for(int r = 0; r < matrixCount; r++) {\
FMA(dRootPartials[u + delta * r], matrixProp[r], sum[state]);\
}\
sum[state] *= stateFreq[state];\
KW_LOCAL_FENCE;
#define INTEGRATE_PARTIALS_DERIV_X_GPU()\
int state = KW_LOCAL_ID_0;\
int pattern = KW_GROUP_ID_0;\
REAL tmpLogLike, tmpFirstDeriv;\
KW_LOCAL_MEM REAL stateFreq[PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL matrixProp[MATRIX_BLOCK_SIZE];\
KW_LOCAL_MEM REAL sum[PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL sumD1[PADDED_STATE_COUNT];\
KW_LOCAL_MEM REAL sumD2[PADDED_STATE_COUNT];\
stateFreq[state] = dFrequencies[state];\
sum[state] = 0;\
sumD1[state] = 0;\
sumD2[state] = 0;\
for(int matrixEdge = 0; matrixEdge < matrixCount; matrixEdge += PADDED_STATE_COUNT) {\
int x = matrixEdge + state;\
if (x < matrixCount)\
matrixProp[x] = dWeights[x];\
}\
KW_LOCAL_FENCE;\
int u = state + pattern * PADDED_STATE_COUNT;\
int delta = patternCount * PADDED_STATE_COUNT;\
for(int r = 0; r < matrixCount; r++) {\
FMA(dRootPartials[ u + delta * r], matrixProp[r], sum[state] );\
FMA(dRootFirstDeriv[ u + delta * r], matrixProp[r], sumD1[state]);\
FMA(dRootSecondDeriv[u + delta * r], matrixProp[r], sumD2[state]);\
}\
sum[state] *= stateFreq[state];\
sumD1[state] *= stateFreq[state];\
sumD2[state] *= stateFreq[state];\
KW_LOCAL_FENCE;
#define SUM_STATES_POWER_OF_TWO_X_GPU()\
/* parallelized reduction, only works for powers-of-2 */\
for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) {\
if (state < i) {\
sum[state] += sum[state + i];\
}\
KW_LOCAL_FENCE;\
}
#define SUM_STATES_X_GPU()\
/* not power-of-2 */\
for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) {\
if (state < i && state + i < PADDED_STATE_COUNT ) {\
sum[state] += sum[state + i];\
}\
KW_LOCAL_FENCE;\
}
#define SUM_STATES_DERIVS_POWER_OF_TWO_X_GPU()\
for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) {\
if (state < i) {\
sum[state] += sum[state + i];\
sumD1[state] += sumD1[state + i];\
sumD2[state] += sumD2[state + i];\
}\
KW_LOCAL_FENCE;\
}
#define SUM_STATES_DERIVS_X_GPU()\
for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) {\
if (state < i && state + i < PADDED_STATE_COUNT ) {\
sum[state] += sum[state + i];\
sumD1[state] += sumD1[state + i];\
sumD2[state] += sumD2[state + i];\
}\
KW_LOCAL_FENCE;\
}
///////////////////////////////////////////////////////////////////////////////
//KW_GLOBAL_KERNEL void kernelPartialsPartialsEdgeFirstDerivatives(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
// KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
// KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
// int totalPatterns, int categoryCount) {
//#ifdef FW_OPENCL_CPU // CPU/MIC implementation
// // Not implemented
//#else // GPU implementation
//// DETERMINE_INDICES_X_GPU();
////
//// KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix; /* Points to *this* matrix */
//// KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix;
////
//// /* Load values into shared memory */
//// KW_LOCAL_MEM REAL sMatrix1[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
//// KW_LOCAL_MEM REAL sMatrix2[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
////
//// KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
//// KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
////
//// int y = deltaPartialsByState + deltaPartialsByMatrix;
////
//// /* copy PADDED_STATE_COUNT*PATTERN_BLOCK_SIZE lengthed partials */
//// /* These are all coherent global memory reads; checked in Profiler */
//// if (pattern < totalPatterns) {
//// sPartials1[patIdx][state] = partials1[y + state];
//// sPartials2[patIdx][state] = partials2[y + state];
//// } else {
//// sPartials1[patIdx][state] = 0;
//// sPartials2[patIdx][state] = 0;
//// }
////
//// REAL sum2 = 0;
//// for (int i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {
//// /* load one row of matrices */
//// if (patIdx < BLOCK_PEELING_SIZE) {
//// /* These are all coherent global memory reads. */
//// sMatrix2[patIdx][state] = matrix2[patIdx * PADDED_STATE_COUNT + state];
//// /* sMatrix now filled with starting in state and ending in i */
//// matrix2 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
//// }
////
//// KW_LOCAL_FENCE;
////
//// for(int j = 0; j < BLOCK_PEELING_SIZE; j++) {
//// FMA(sMatrix2[j][state], sPartials2[patIdx][i + j], sum2);
//// }
////
//// KW_LOCAL_FENCE;
//// }
////
//// sPartials1[patIdx][state] *= sum2;
////
//// KW_LOCAL_FENCE; // TODO Remove?
////
//// REAL sum1 = 0;
//// for (int i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {
//// /* load one row of matrices */
//// if (patIdx < BLOCK_PEELING_SIZE) {
//// /* These are all coherent global memory reads. */
//// sMatrix1[patIdx][state] = matrix1[patIdx * PADDED_STATE_COUNT + state];
//// /* sMatrix now filled with starting in state and ending in i */
//// matrix1 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
//// }
////
//// KW_LOCAL_FENCE;
////
//// for(int j = 0; j < BLOCK_PEELING_SIZE; j++) {
//// FMA(sMatrix1[j][state], sPartials1[patIdx][i + j], sum1);
//// }
////
//// KW_LOCAL_FENCE;
//// }
////
//// if (pattern < totalPatterns) {
//// partials3[u] = sum1;
//// }
//
//#endif
//}
KW_GLOBAL_KERNEL void kernelPartialsPartialsNoScale(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
SUM_PARTIALS_PARTIALS_X_CPU();
partials3[u] = sum1 * sum2;
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
SUM_PARTIALS_PARTIALS_X_GPU();
if (pattern < totalPatterns)
partials3[u] = sum1 * sum2;
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelPartialsPartialsFixedScale(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
KW_GLOBAL_VAR REAL* KW_RESTRICT scalingFactors,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
SUM_PARTIALS_PARTIALS_X_CPU();
partials3[u] = sum1 * sum2 / scalingFactors[pattern];
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
LOAD_SCALING_X_GPU();
SUM_PARTIALS_PARTIALS_X_GPU();
if (pattern < totalPatterns)
partials3[u] = sum1 * sum2 / fixedScalingFactors[patIdx];
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelStatesPartialsNoScale(KW_GLOBAL_VAR int* KW_RESTRICT states1,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
SUM_STATES_PARTIALS_X_CPU();
partials3[u] = sum1 * sum2;
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
SUM_STATES_PARTIALS_X_GPU();
if (pattern < totalPatterns)
partials3[u] = sum1 * sum2;
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelStatesPartialsFixedScale(KW_GLOBAL_VAR int* KW_RESTRICT states1,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
KW_GLOBAL_VAR REAL* KW_RESTRICT scalingFactors,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
SUM_STATES_PARTIALS_X_CPU();
partials3[u] = sum1 * sum2 / scalingFactors[pattern];
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
LOAD_SCALING_X_GPU();
SUM_STATES_PARTIALS_X_GPU();
if (pattern < totalPatterns)
partials3[u] = sum1 * sum2 / fixedScalingFactors[patIdx];
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelStatesStatesNoScale(KW_GLOBAL_VAR int* KW_RESTRICT states1,
KW_GLOBAL_VAR int* KW_RESTRICT states2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
int state1 = states1[pattern];
int state2 = states2[pattern];
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix + state1 * PADDED_STATE_COUNT;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix + state2 * PADDED_STATE_COUNT;
if (state1 < PADDED_STATE_COUNT && state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state] * matrix2[state];
} else if (state1 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state];
} else if (state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix2[state];
} else {
partials3[u] = 1.0;
}
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
int state1 = states1[pattern];
int state2 = states2[pattern];
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix + state1 * PADDED_STATE_COUNT;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix + state2 * PADDED_STATE_COUNT;
if (pattern < totalPatterns) {
if (state1 < PADDED_STATE_COUNT && state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state] * matrix2[state];
} else if (state1 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state];
} else if (state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix2[state];
} else {
partials3[u] = 1.0;
}
}
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelStatesStatesFixedScale(KW_GLOBAL_VAR int* KW_RESTRICT states1,
KW_GLOBAL_VAR int* KW_RESTRICT states2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
KW_GLOBAL_VAR REAL* KW_RESTRICT scalingFactors,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
int state1 = states1[pattern];
int state2 = states2[pattern];
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix + state1 * PADDED_STATE_COUNT;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix + state2 * PADDED_STATE_COUNT;
if (state1 < PADDED_STATE_COUNT && state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state] * matrix2[state] / scalingFactors[pattern];
} else if (state1 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state] / scalingFactors[pattern];
} else if (state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix2[state] / scalingFactors[pattern];
} else {
partials3[u] = 1.0 / scalingFactors[pattern];
}
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
int state1 = states1[pattern];
int state2 = states2[pattern];
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix + state1 * PADDED_STATE_COUNT;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix + state2 * PADDED_STATE_COUNT;
LOAD_SCALING_X_GPU();
KW_LOCAL_FENCE;
if (pattern < totalPatterns) {
if (state1 < PADDED_STATE_COUNT && state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state] * matrix2[state] / fixedScalingFactors[patIdx];
} else if (state1 < PADDED_STATE_COUNT) {
partials3[u] = matrix1[state] / fixedScalingFactors[patIdx];
} else if (state2 < PADDED_STATE_COUNT) {
partials3[u] = matrix2[state] / fixedScalingFactors[patIdx];
} else {
partials3[u] = 1.0 / fixedScalingFactors[patIdx];
}
}
#endif // FW_OPENCL_CPU
}
// Find a scaling factor for each pattern
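// The dynamic-scaling kernels below locate, for every site pattern, the largest
// partial likelihood across all states and rate categories and record it in
// scalingFactors[pattern]. SCALE_PARTIALS_X_* (defined elsewhere) presumably divides
// the partials by that factor so they stay in a numerically safe range; the stored
// (or log-stored) factors are added back when the root likelihood is integrated.
// A zero maximum is replaced by 1.0 so all-zero patterns are left unscaled rather
// than producing a division by zero.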
KW_GLOBAL_KERNEL void kernelPartialsDynamicScaling(KW_GLOBAL_VAR REAL* KW_RESTRICT allPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT scalingFactors,
int matrixCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
FIND_MAX_PARTIALS_X_CPU();
if (max == 0)
max = 1.0;
scalingFactors[pattern] = max;
SCALE_PARTIALS_X_CPU();
#else // GPU implementation
LOAD_PARTIALS_SCALING_X_GPU();
#ifdef IS_POWER_OF_TWO
FIND_MAX_PARTIALS_STATE_POWER_OF_TWO_X_GPU();
#else // not power-of-2
FIND_MAX_PARTIALS_STATE_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0 && matrix == 0) {
FIND_MAX_PARTIALS_MATRIX_X_GPU();
if (max == 0)
max = 1.0;
scalingFactors[pattern] = max; // TODO: These are incoherent memory writes!!!
}
SCALE_PARTIALS_X_GPU();
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelPartialsDynamicScalingScalersLog(KW_GLOBAL_VAR REAL* KW_RESTRICT allPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT scalingFactors,
int matrixCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
FIND_MAX_PARTIALS_X_CPU();
if (max == 0) {
max = 1.0;
scalingFactors[pattern] = 0.0;
} else {
scalingFactors[pattern] = log(max);
}
SCALE_PARTIALS_X_CPU();
#else // GPU implementation
LOAD_PARTIALS_SCALING_X_GPU();
#ifdef IS_POWER_OF_TWO
FIND_MAX_PARTIALS_STATE_POWER_OF_TWO_X_GPU();
#else // not power-of-2
FIND_MAX_PARTIALS_STATE_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0 && matrix == 0) {
FIND_MAX_PARTIALS_MATRIX_X_GPU();
if (max == 0) {
max = 1.0;
scalingFactors[pattern] = 0.0;
} else {
scalingFactors[pattern] = log(max);
}
}
SCALE_PARTIALS_X_GPU();
#endif // FW_OPENCL_CPU
}
// Find a scaling factor for each pattern and accumulate into buffer
KW_GLOBAL_KERNEL void kernelPartialsDynamicScalingAccumulate(KW_GLOBAL_VAR REAL* KW_RESTRICT allPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT scalingFactors,
KW_GLOBAL_VAR REAL* KW_RESTRICT cumulativeScaling,
int matrixCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
FIND_MAX_PARTIALS_X_CPU();
if (max == 0)
max = 1.0;
scalingFactors[pattern] = max;
cumulativeScaling[pattern] += log(max);
SCALE_PARTIALS_X_CPU();
#else // GPU implementation
LOAD_PARTIALS_SCALING_X_GPU();
#ifdef IS_POWER_OF_TWO
FIND_MAX_PARTIALS_STATE_POWER_OF_TWO_X_GPU();
#else // not power-of-2
FIND_MAX_PARTIALS_STATE_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0 && matrix == 0) {
FIND_MAX_PARTIALS_MATRIX_X_GPU();
if (max == 0)
max = 1.0;
scalingFactors[pattern] = max;
#ifdef CUDA
atomicAdd(&cumulativeScaling[pattern], log(max));
#else
cumulativeScaling[pattern] += log(max);
#endif
}
SCALE_PARTIALS_X_GPU();
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelPartialsDynamicScalingAccumulateScalersLog(KW_GLOBAL_VAR REAL* KW_RESTRICT allPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT scalingFactors,
KW_GLOBAL_VAR REAL* KW_RESTRICT cumulativeScaling,
int matrixCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
FIND_MAX_PARTIALS_X_CPU();
if (max == 0) {
max = 1.0;
scalingFactors[pattern] = 0.0;
} else {
REAL logMax = log(max);
scalingFactors[pattern] = logMax;
cumulativeScaling[pattern] += logMax;
}
SCALE_PARTIALS_X_CPU();
#else // GPU implementation
LOAD_PARTIALS_SCALING_X_GPU();
#ifdef IS_POWER_OF_TWO
FIND_MAX_PARTIALS_STATE_POWER_OF_TWO_X_GPU();
#else // not power-of-2
FIND_MAX_PARTIALS_STATE_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0 && matrix == 0) {
FIND_MAX_PARTIALS_MATRIX_X_GPU();
if (max == 0) {
max = 1.0;
scalingFactors[pattern] = 0.0;
} else {
REAL logMax = log(max);
scalingFactors[pattern] = logMax;
#ifdef CUDA
atomicAdd(&cumulativeScaling[pattern], logMax);
#else
cumulativeScaling[pattern] += logMax;
#endif
}
}
SCALE_PARTIALS_X_GPU();
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelIntegrateLikelihoods(KW_GLOBAL_VAR REAL* KW_RESTRICT dResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dWeights,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFrequencies,
int matrixCount,
int patternCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
INTEGRATE_PARTIALS_X_CPU();
dResult[pattern] = log(sumTotal);
#else // GPU implementation
INTEGRATE_PARTIALS_X_GPU();
#ifdef IS_POWER_OF_TWO
SUM_STATES_POWER_OF_TWO_X_GPU();
#else // not power-of-2
SUM_STATES_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0)
dResult[pattern] = log(sum[state]);
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelIntegrateLikelihoodsFixedScale(KW_GLOBAL_VAR REAL* KW_RESTRICT dResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dWeights,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFrequencies,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootScalingFactors,
int matrixCount,
int patternCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
INTEGRATE_PARTIALS_X_CPU();
dResult[pattern] = log(sumTotal) + dRootScalingFactors[pattern];
#else // GPU implementation
INTEGRATE_PARTIALS_X_GPU();
#ifdef IS_POWER_OF_TWO
SUM_STATES_POWER_OF_TWO_X_GPU();
#else // not power-of-2
SUM_STATES_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0)
dResult[pattern] = log(sum[state]) + dRootScalingFactors[pattern];
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelIntegrateLikelihoodsMulti(KW_GLOBAL_VAR REAL* KW_RESTRICT dResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dWeights,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFrequencies,
int matrixCount,
int patternCount,
int takeLog) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
INTEGRATE_PARTIALS_X_CPU();
if (takeLog == 0)
dResult[pattern] = sumTotal;
else if (takeLog == 1)
dResult[pattern] = log(dResult[pattern] + sumTotal);
else
dResult[pattern] += sumTotal;
#else // GPU implementation
INTEGRATE_PARTIALS_X_GPU();
#ifdef IS_POWER_OF_TWO
SUM_STATES_POWER_OF_TWO_X_GPU();
#else // not power-of-2
SUM_STATES_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0) {
if (takeLog == 0)
dResult[pattern] = sum[state];
else if (takeLog == 1)
dResult[pattern] = log(dResult[pattern] + sum[state]);
else
dResult[pattern] += sum[state];
}
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelIntegrateLikelihoodsFixedScaleMulti(KW_GLOBAL_VAR REAL* KW_RESTRICT dResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dWeights,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFrequencies,
KW_GLOBAL_VAR REAL* KW_RESTRICT dScalingFactors,
KW_GLOBAL_VAR unsigned int* KW_RESTRICT dPtrQueue,
KW_GLOBAL_VAR REAL* KW_RESTRICT dMaxScalingFactors,
KW_GLOBAL_VAR unsigned int* KW_RESTRICT dIndexMaxScalingFactors,
int matrixCount,
int patternCount,
int subsetCount,
int subsetIndex) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
INTEGRATE_PARTIALS_X_CPU();
REAL cumulativeScalingFactor = (dScalingFactors + dPtrQueue[subsetIndex])[pattern];
if (subsetIndex == 0) {
int indexMaxScalingFactor = 0;
REAL maxScalingFactor = cumulativeScalingFactor;
for (int j = 1; j < subsetCount; j++) {
REAL tmpScalingFactor = (dScalingFactors + dPtrQueue[j])[pattern];
if (tmpScalingFactor > maxScalingFactor) {
indexMaxScalingFactor = j;
maxScalingFactor = tmpScalingFactor;
}
}
dIndexMaxScalingFactors[pattern] = indexMaxScalingFactor;
dMaxScalingFactors[pattern] = maxScalingFactor;
if (indexMaxScalingFactor != 0)
sumTotal *= exp((REAL)(cumulativeScalingFactor - maxScalingFactor));
dResult[pattern] = sumTotal;
} else {
if (subsetIndex != dIndexMaxScalingFactors[pattern])
sumTotal *= exp((REAL)(cumulativeScalingFactor - dMaxScalingFactors[pattern]));
if (subsetIndex == subsetCount - 1)
dResult[pattern] = (log(dResult[pattern] + sumTotal) + dMaxScalingFactors[pattern]);
else
dResult[pattern] += sumTotal;
}
#else // GPU implementation
INTEGRATE_PARTIALS_X_GPU();
#ifdef IS_POWER_OF_TWO
SUM_STATES_POWER_OF_TWO_X_GPU();
#else // not power-of-2
SUM_STATES_X_GPU();
#endif // IS_POWER_OF_TWO
REAL cumulativeScalingFactor = (dScalingFactors + dPtrQueue[subsetIndex])[pattern];
if (subsetIndex == 0) {
int indexMaxScalingFactor = 0;
REAL maxScalingFactor = cumulativeScalingFactor;
for (int j = 1; j < subsetCount; j++) {
REAL tmpScalingFactor = (dScalingFactors + dPtrQueue[j])[pattern];
if (tmpScalingFactor > maxScalingFactor) {
indexMaxScalingFactor = j;
maxScalingFactor = tmpScalingFactor;
}
}
dIndexMaxScalingFactors[pattern] = indexMaxScalingFactor;
dMaxScalingFactors[pattern] = maxScalingFactor;
if (indexMaxScalingFactor != 0)
sum[state] *= exp((REAL)(cumulativeScalingFactor - maxScalingFactor));
if (state == 0)
dResult[pattern] = sum[state];
KW_LOCAL_FENCE;
} else {
if (subsetIndex != dIndexMaxScalingFactors[pattern])
sum[state] *= exp((REAL)(cumulativeScalingFactor - dMaxScalingFactors[pattern]));
if (state == 0) {
if (subsetIndex == subsetCount - 1)
dResult[pattern] = (log(dResult[pattern] + sum[state]) + dMaxScalingFactors[pattern]);
else
dResult[pattern] += sum[state];
}
}
#endif // FW_OPENCL_CPU
}
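// Note on the multi-subset fixed-scale integration above: each subset carries its own
// cumulative (log) scaling factor, so the per-subset site likelihoods live on different
// scales. The kernel uses a log-sum-exp style stabilisation: the first subset records
// the maximum scaler per pattern, each subset's contribution is rescaled by
// exp(scaler - maxScaler) unless it holds the maximum, the rescaled contributions are
// accumulated in dResult, and the final subset converts the running sum back with
// log(sum) + maxScalingFactor.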
////////////////////////////////////////////////////////////////////////////////////////////////
// edge and deriv kernels
KW_GLOBAL_KERNEL void kernelPartialsPartialsEdgeLikelihoods(KW_GLOBAL_VAR REAL* KW_RESTRICT dPartialsTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dParentPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dChildParials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dTransMatrix,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix1 = dTransMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials1 = dParentPartials + deltaPartials;
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials2 = dChildParials + deltaPartials;
REAL sum1 = 0;
for(int i = 0; i < PADDED_STATE_COUNT; i++) {
FMA(sMatrix1[i * PADDED_STATE_COUNT + state], sPartials1[i], sum1);
}
dPartialsTmp[u] = sum1 * sPartials2[state];
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = dTransMatrix + deltaMatrix;
int y = deltaPartialsByState + deltaPartialsByMatrix;
KW_LOCAL_MEM REAL sMatrix1[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
if (pattern < totalPatterns) {
sPartials1[patIdx][state] = dParentPartials[y + state];
sPartials2[patIdx][state] = dChildParials[y + state];
} else {
sPartials1[patIdx][state] = 0;
sPartials2[patIdx][state] = 0;
}
REAL sum1 = 0;
int i;
for (i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {
if (patIdx < BLOCK_PEELING_SIZE) {
sMatrix1[patIdx][state] = matrix1[patIdx * PADDED_STATE_COUNT + state];
matrix1 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
int j;
for(j = 0; j < BLOCK_PEELING_SIZE; j++) {
FMA(sMatrix1[j][state], sPartials1[patIdx][i + j], sum1);
}
KW_LOCAL_FENCE;
}
if (pattern < totalPatterns)
dPartialsTmp[u] = sum1 * sPartials2[patIdx][state];
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void
#ifdef CUDA
__launch_bounds__(PATTERN_BLOCK_SIZE * PADDED_STATE_COUNT)
#endif
kernelPartialsPartialsEdgeLikelihoodsSecondDeriv(KW_GLOBAL_VAR REAL* KW_RESTRICT dPartialsTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFirstDerivTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dSecondDerivTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dParentPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dChildParials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dTransMatrix,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFirstDerivMatrix,
KW_GLOBAL_VAR REAL* KW_RESTRICT dSecondDerivMatrix,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix1 = dTransMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrixFirstDeriv = dFirstDerivMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrixSecondDeriv = dSecondDerivMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials1 = dParentPartials + deltaPartials;
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials2 = dChildParials + deltaPartials;
REAL sum1 = 0;
REAL sumFirstDeriv = 0;
REAL sumSecondDeriv = 0;
for(int i = 0; i < PADDED_STATE_COUNT; i++) {
FMA(sMatrix1[ i * PADDED_STATE_COUNT + state], sPartials1[i], sum1);
FMA(sMatrixFirstDeriv[ i * PADDED_STATE_COUNT + state], sPartials1[i], sumFirstDeriv);
FMA(sMatrixSecondDeriv[i * PADDED_STATE_COUNT + state], sPartials1[i], sumSecondDeriv);
}
dPartialsTmp[u] = sum1 * sPartials2[state];
dFirstDerivTmp[u] = sumFirstDeriv * sPartials2[state];
dSecondDerivTmp[u] = sumSecondDeriv * sPartials2[state];
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = dTransMatrix + deltaMatrix; // Points to *this* matrix
KW_GLOBAL_VAR REAL* KW_RESTRICT matrixFirstDeriv = dFirstDerivMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrixSecondDeriv = dSecondDerivMatrix + deltaMatrix;
int y = deltaPartialsByState + deltaPartialsByMatrix;
KW_LOCAL_MEM REAL sMatrix1[BLOCK_PEELING_SIZE/2][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sMatrixFirstDeriv[BLOCK_PEELING_SIZE/2][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sMatrixSecondDeriv[BLOCK_PEELING_SIZE/2][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
if (pattern < totalPatterns) {
sPartials1[patIdx][state] = dParentPartials[y + state];
sPartials2[patIdx][state] = dChildParials[y + state];
} else {
sPartials1[patIdx][state] = 0;
sPartials2[patIdx][state] = 0;
}
REAL sum1 = 0;
REAL sumFirstDeriv = 0;
REAL sumSecondDeriv = 0;
int i;
for (i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE/2) {
if (patIdx < BLOCK_PEELING_SIZE/2) {
sMatrix1[patIdx][state] = matrix1[patIdx * PADDED_STATE_COUNT + state];
sMatrixFirstDeriv[patIdx][state] = matrixFirstDeriv[patIdx * PADDED_STATE_COUNT + state];
sMatrixSecondDeriv[patIdx][state] = matrixSecondDeriv[patIdx * PADDED_STATE_COUNT + state];
matrix1 += BLOCK_PEELING_SIZE/2 * PADDED_STATE_COUNT;
matrixFirstDeriv += BLOCK_PEELING_SIZE/2 * PADDED_STATE_COUNT;
matrixSecondDeriv += BLOCK_PEELING_SIZE/2 * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
int j;
for(j = 0; j < BLOCK_PEELING_SIZE/2; j++) {
FMA(sMatrix1[j][state] , sPartials1[patIdx][i + j], sum1 );
FMA(sMatrixFirstDeriv[j][state] , sPartials1[patIdx][i + j], sumFirstDeriv );
FMA(sMatrixSecondDeriv[j][state], sPartials1[patIdx][i + j], sumSecondDeriv);
}
KW_LOCAL_FENCE;
}
if (pattern < totalPatterns) {
dPartialsTmp[u] = sum1 * sPartials2[patIdx][state];
dFirstDerivTmp[u] = sumFirstDeriv * sPartials2[patIdx][state];
dSecondDerivTmp[u] = sumSecondDeriv * sPartials2[patIdx][state];
}
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelStatesPartialsEdgeLikelihoods(KW_GLOBAL_VAR REAL* KW_RESTRICT dPartialsTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dParentPartials,
KW_GLOBAL_VAR int* KW_RESTRICT dChildStates,
KW_GLOBAL_VAR REAL* KW_RESTRICT dTransMatrix,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix1 = dTransMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials2 = dParentPartials + deltaPartials;
REAL sum1 = 0;
int state1 = dChildStates[pattern];
if (state1 < PADDED_STATE_COUNT)
sum1 = sMatrix1[state1 * PADDED_STATE_COUNT + state];
else
sum1 = 1.0;
dPartialsTmp[u] = sum1 * sPartials2[state];
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
int y = deltaPartialsByState + deltaPartialsByMatrix;
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
if (pattern < totalPatterns) {
sPartials2[patIdx][state] = dParentPartials[y + state];
} else {
sPartials2[patIdx][state] = 0;
}
REAL sum1 = 0;
if (pattern < totalPatterns) {
int state1 = dChildStates[pattern];
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = dTransMatrix + deltaMatrix + state1 * PADDED_STATE_COUNT;
if (state1 < PADDED_STATE_COUNT)
sum1 = matrix1[state];
else
sum1 = 1.0;
}
if (pattern < totalPatterns)
dPartialsTmp[u] = sum1 * sPartials2[patIdx][state];
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelStatesPartialsEdgeLikelihoodsSecondDeriv(KW_GLOBAL_VAR REAL* KW_RESTRICT dPartialsTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFirstDerivTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dSecondDerivTmp,
KW_GLOBAL_VAR REAL* KW_RESTRICT dParentPartials,
KW_GLOBAL_VAR int* KW_RESTRICT dChildStates,
KW_GLOBAL_VAR REAL* KW_RESTRICT dTransMatrix,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFirstDerivMatrix,
KW_GLOBAL_VAR REAL* KW_RESTRICT dSecondDerivMatrix,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
DETERMINE_INDICES_X_CPU();
int deltaPartials = deltaPartialsByMatrix + deltaPartialsByState;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrix1 = dTransMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrixFirstDeriv = dFirstDerivMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sMatrixSecondDeriv = dSecondDerivMatrix + deltaMatrix;
KW_GLOBAL_VAR REAL* KW_RESTRICT sPartials2 = dParentPartials + deltaPartials;
REAL sum1 = 0;
REAL sumFirstDeriv = 0;
REAL sumSecondDeriv = 0;
int state1 = dChildStates[pattern];
if (state1 < PADDED_STATE_COUNT) {
sum1 = sMatrix1[ state1 * PADDED_STATE_COUNT + state];
sumFirstDeriv = sMatrixFirstDeriv[ state1 * PADDED_STATE_COUNT + state];
sumSecondDeriv = sMatrixSecondDeriv[state1 * PADDED_STATE_COUNT + state];
} else {
sum1 = 1.0;
}
dPartialsTmp[u] = sum1 * sPartials2[state];
dFirstDerivTmp[u] = sumFirstDeriv * sPartials2[state];
dSecondDerivTmp[u] = sumSecondDeriv * sPartials2[state];
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
int y = deltaPartialsByState + deltaPartialsByMatrix;
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
if (pattern < totalPatterns) {
sPartials2[patIdx][state] = dParentPartials[y + state];
} else {
sPartials2[patIdx][state] = 0;
}
REAL sum1 = 0;
REAL sumFirstDeriv = 0;
REAL sumSecondDeriv = 0;
if (pattern < totalPatterns) {
int state1 = dChildStates[pattern]; // Coalesced; no need to share
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = dTransMatrix + deltaMatrix + state1 * PADDED_STATE_COUNT;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrixFirstDeriv = dFirstDerivMatrix + deltaMatrix + state1 * PADDED_STATE_COUNT;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrixSecondDeriv = dSecondDerivMatrix + deltaMatrix + state1 * PADDED_STATE_COUNT;
if (state1 < PADDED_STATE_COUNT) {
sum1 = matrix1[state];
sumFirstDeriv = matrixFirstDeriv[state];
sumSecondDeriv = matrixSecondDeriv[state];
} else {
sum1 = 1.0;
sumFirstDeriv = 0.0;
sumSecondDeriv = 0.0;
}
}
if (pattern < totalPatterns) {
dPartialsTmp[u] = sum1 * sPartials2[patIdx][state];
dFirstDerivTmp[u] = sumFirstDeriv * sPartials2[patIdx][state];
dSecondDerivTmp[u] = sumSecondDeriv * sPartials2[patIdx][state];
}
#endif // FW_OPENCL_CPU
}
KW_GLOBAL_KERNEL void kernelIntegrateLikelihoodsSecondDeriv(KW_GLOBAL_VAR REAL* KW_RESTRICT dResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFirstDerivResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dSecondDerivResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootFirstDeriv,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootSecondDeriv,
KW_GLOBAL_VAR REAL* KW_RESTRICT dWeights,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFrequencies,
int matrixCount,
int patternCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
INTEGRATE_PARTIALS_DERIV_X_CPU();
tmpLogLike = sumTotal;
dResult[pattern] = log(tmpLogLike);
tmpFirstDeriv = sumTotalD1 / tmpLogLike;
dFirstDerivResult[pattern] = tmpFirstDeriv;
dSecondDerivResult[pattern] = (sumTotalD2 / tmpLogLike - tmpFirstDeriv * tmpFirstDeriv);
#else // GPU implementation
INTEGRATE_PARTIALS_DERIV_X_GPU();
#ifdef IS_POWER_OF_TWO
SUM_STATES_DERIVS_POWER_OF_TWO_X_GPU();
#else // not power-of-2
SUM_STATES_DERIVS_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0) {
tmpLogLike = sum[state];
dResult[pattern] = log(tmpLogLike);
tmpFirstDeriv = sumD1[state] / tmpLogLike;
dFirstDerivResult[pattern] = tmpFirstDeriv;
dSecondDerivResult[pattern] = (sumD2[state] / tmpLogLike - tmpFirstDeriv * tmpFirstDeriv);
}
#endif // FW_OPENCL_CPU
}
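// The derivative bookkeeping above follows directly from differentiating the log site
// likelihood: with L = sum[state], L' = sumD1[state] and L'' = sumD2[state],
//   d/dt   log L = L'/L
//   d2/dt2 log L = L''/L - (L'/L)^2
// which is exactly what is written to dFirstDerivResult and dSecondDerivResult.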
KW_GLOBAL_KERNEL void kernelIntegrateLikelihoodsFixedScaleSecondDeriv(KW_GLOBAL_VAR REAL* KW_RESTRICT dResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFirstDerivResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dSecondDerivResult,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootPartials,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootFirstDeriv,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootSecondDeriv,
KW_GLOBAL_VAR REAL* KW_RESTRICT dWeights,
KW_GLOBAL_VAR REAL* KW_RESTRICT dFrequencies,
KW_GLOBAL_VAR REAL* KW_RESTRICT dRootScalingFactors,
int matrixCount,
int patternCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
INTEGRATE_PARTIALS_DERIV_X_CPU();
tmpLogLike = sumTotal;
dResult[pattern] = log(tmpLogLike) + dRootScalingFactors[pattern];
tmpFirstDeriv = sumTotalD1 / tmpLogLike;
dFirstDerivResult[pattern] = tmpFirstDeriv;
dSecondDerivResult[pattern] = (sumTotalD2 / tmpLogLike - tmpFirstDeriv * tmpFirstDeriv);
#else // GPU implementation
INTEGRATE_PARTIALS_DERIV_X_GPU();
#ifdef IS_POWER_OF_TWO
SUM_STATES_DERIVS_POWER_OF_TWO_X_GPU();
#else // not power-of-2
SUM_STATES_DERIVS_X_GPU();
#endif // IS_POWER_OF_TWO
if (state == 0) {
tmpLogLike = sum[state];
dResult[pattern] = log(tmpLogLike) + dRootScalingFactors[pattern];
tmpFirstDeriv = sumD1[state] / tmpLogLike;
dFirstDerivResult[pattern] = tmpFirstDeriv;
dSecondDerivResult[pattern] = (sumD2[state] / tmpLogLike - tmpFirstDeriv * tmpFirstDeriv);
}
#endif // FW_OPENCL_CPU
}
////////////////////////////////////////////////////////////////////////////////////////////////
// scaling experiments kernels
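// kernelPartialsPartialsAutoScale below rescales on the fly: each new partial is split
// with frexp() into significand * 2^exponent, and if |exponent| exceeds
// SCALING_EXPONENT_THRESHOLD the whole pattern block performs a max-reduction over the
// unscaled values, stores the maximum's exponent as a signed char scaler, and writes the
// partials rescaled by ldexp(sig, exp - maxExp). The unrolled reduction appears to
// assume a small PADDED_STATE_COUNT (it only compares indices 0..3).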
KW_GLOBAL_KERNEL void kernelPartialsPartialsAutoScale(KW_GLOBAL_VAR REAL* partials1,
KW_GLOBAL_VAR REAL* partials2,
KW_GLOBAL_VAR REAL* partials3,
KW_GLOBAL_VAR REAL* matrices1,
KW_GLOBAL_VAR REAL* matrices2,
KW_GLOBAL_VAR signed char* scalingFactors,
int totalPatterns) {
REAL sum1 = 0;
REAL sum2 = 0;
int i;
DETERMINE_INDICES_X_GPU();
KW_GLOBAL_VAR REAL* matrix1 = matrices1 + deltaMatrix; // Points to *this* matrix
KW_GLOBAL_VAR REAL* matrix2 = matrices2 + deltaMatrix;
int y = deltaPartialsByState + deltaPartialsByMatrix;
// Load values into shared memory
KW_LOCAL_MEM REAL sMatrix1[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sMatrix2[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
    // copy a PADDED_STATE_COUNT*PATTERN_BLOCK_SIZE block of partials into shared memory
if (pattern < totalPatterns) {
// These are all coherent global memory reads; checked in Profiler
sPartials1[patIdx][state] = partials1[y + state];
sPartials2[patIdx][state] = partials2[y + state];
} else {
sPartials1[patIdx][state] = 0;
sPartials2[patIdx][state] = 0;
}
for (i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {
// load one row of matrices
if (patIdx < BLOCK_PEELING_SIZE) {
// These are all coherent global memory reads.
sMatrix1[patIdx][state] = matrix1[patIdx * PADDED_STATE_COUNT + state];
sMatrix2[patIdx][state] = matrix2[patIdx * PADDED_STATE_COUNT + state];
// sMatrix now filled with starting in state and ending in i
matrix1 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
matrix2 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
int j;
for(j = 0; j < BLOCK_PEELING_SIZE; j++) {
sum1 += sMatrix1[j][state] * sPartials1[patIdx][i + j];
sum2 += sMatrix2[j][state] * sPartials2[patIdx][i + j];
}
KW_LOCAL_FENCE; // GTX280 FIX HERE
}
REAL tmpPartial = sum1 * sum2;
int expTmp;
REAL sigTmp = frexp(tmpPartial, &expTmp);
if (pattern < totalPatterns) {
if (abs(expTmp) > SCALING_EXPONENT_THRESHOLD) {
// now using sPartials2 to hold scaling trigger boolean
sPartials2[patIdx][0] = 1;
} else {
partials3[u] = tmpPartial;
sPartials2[patIdx][0] = 0;
sPartials1[patIdx][0] = 0;
}
}
KW_LOCAL_FENCE;
int scalingActive = sPartials2[patIdx][0];
if (scalingActive) {
// now using sPartials1 to store max unscaled partials3
sPartials1[patIdx][state] = tmpPartial;
}
KW_LOCAL_FENCE;
// Unrolled parallel max-reduction
if (scalingActive && state < 2) {
REAL compare = sPartials1[patIdx][state + 2];
if (compare > sPartials1[patIdx][state])
sPartials1[patIdx][state] = compare;
}
KW_LOCAL_FENCE;
if (scalingActive && state < 1) {
REAL maxPartial = sPartials1[patIdx][1];
if (maxPartial < sPartials1[patIdx][0])
maxPartial = sPartials1[patIdx][0];
int expMax;
frexp(maxPartial, &expMax);
sPartials1[patIdx][0] = expMax;
}
KW_LOCAL_FENCE;
if (scalingActive)
partials3[u] = ldexp(sigTmp, expTmp - sPartials1[patIdx][0]);
int myIdx = (patIdx * PADDED_STATE_COUNT) + state; // threadId in block
if ((myIdx < PATTERN_BLOCK_SIZE) && (myIdx + (KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE) < totalPatterns))
scalingFactors[(KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE) + (matrix * totalPatterns) + myIdx] = sPartials1[myIdx][0];
}
KW_GLOBAL_KERNEL void kernelIntegrateLikelihoodsAutoScaling(KW_GLOBAL_VAR REAL* dResult,
KW_GLOBAL_VAR REAL* dRootPartials,
KW_GLOBAL_VAR REAL* dWeights,
KW_GLOBAL_VAR REAL* dFrequencies,
KW_GLOBAL_VAR int* dRootScalingFactors,
int matrixCount,
int patternCount) {
int state = KW_LOCAL_ID_0;
int pattern = KW_GROUP_ID_0;
// int patternCount = KW_NUM_GROUPS_0;
KW_LOCAL_MEM REAL stateFreq[PADDED_STATE_COUNT];
// TODO: Currently assumes MATRIX_BLOCK_SIZE >> matrixCount
KW_LOCAL_MEM REAL matrixProp[MATRIX_BLOCK_SIZE];
KW_LOCAL_MEM REAL matrixScalers[MATRIX_BLOCK_SIZE];
KW_LOCAL_MEM REAL sum[PADDED_STATE_COUNT];
// Load shared memory
stateFreq[state] = dFrequencies[state];
sum[state] = 0;
for(int matrixEdge = 0; matrixEdge < matrixCount; matrixEdge += PADDED_STATE_COUNT) {
int x = matrixEdge + state;
if (x < matrixCount) {
matrixProp[x] = dWeights[x];
matrixScalers[x] = dRootScalingFactors[pattern + (x * patternCount)];
}
}
KW_LOCAL_FENCE;
int u = state + pattern * PADDED_STATE_COUNT;
int delta = patternCount * PADDED_STATE_COUNT;
short maxScaleFactor = matrixScalers[0];
for(int r = 1; r < matrixCount; r++) {
int tmpFactor = matrixScalers[r];
if (tmpFactor > maxScaleFactor)
maxScaleFactor = tmpFactor;
}
for(int r = 0; r < matrixCount; r++) {
int tmpFactor = matrixScalers[r];
if (tmpFactor != maxScaleFactor) {
int expTmp;
sum[state] += ldexp(frexp(dRootPartials[u + delta * r], &expTmp), expTmp + (tmpFactor - maxScaleFactor)) * matrixProp[r];
} else {
sum[state] += dRootPartials[u + delta * r] * matrixProp[r];
}
}
sum[state] *= stateFreq[state];
KW_LOCAL_FENCE;
#ifdef IS_POWER_OF_TWO
// parallelized reduction *** only works for powers-of-2 ****
for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) {
if (state < i) {
#else
for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) {
if (state < i && state + i < PADDED_STATE_COUNT ) {
#endif // IS_POWER_OF_TWO
sum[state] += sum[state + i];
}
KW_LOCAL_FENCE;
}
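    // The auto-scalers are integer base-2 exponents (produced via frexp above), so
    // converting the rescaled likelihood back to a natural log adds
    // maxScaleFactor * ln(2), i.e. the M_LN2 term below.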
if (state == 0)
dResult[pattern] = (log(sum[state]) + (M_LN2 * maxScaleFactor));
}
#ifdef CUDA
#include "kernelsXDerivatives.cu"
#endif // CUDA
#ifdef CUDA
} // extern "C"
#endif //CUDA
#include "nvblox/core/blox.h"
#include "nvblox/core/cuda/error_check.cuh"
#include "nvblox/core/interpolation_2d.h"
namespace nvblox {
namespace test_utils {
__global__ void transformPointsOnGPU(const Eigen::Matrix3f* R_B_A_matrix_ptr,
const Eigen::Vector3f* t_B_A_matrix_ptr,
const float* vecs_A_ptr,
const int num_vecs, float* vecs_B_ptr) {
  // We first load the transform into shared memory for use by all threads in
  // the block. The rotation matrix has 3x3=9 elements and the translation
  // vector 3, so the first 12 threads of each block perform the load.
__shared__ Eigen::Matrix3f R_B_A;
if (threadIdx.x < 9) {
R_B_A.data()[threadIdx.x] = R_B_A_matrix_ptr->data()[threadIdx.x];
}
__shared__ Eigen::Vector3f t_B_A;
if (threadIdx.x >= 9 && threadIdx.x < 12) {
t_B_A.data()[threadIdx.x - 9] = t_B_A_matrix_ptr->data()[threadIdx.x - 9];
}
__syncthreads();
// Now perform transformation of the vectors.
const int vec_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (vec_idx < num_vecs) {
// Mapping the vecs
const Eigen::Map<const Eigen::Matrix3Xf> vecs_A(vecs_A_ptr, 3, num_vecs);
Eigen::Map<Eigen::Matrix3Xf> vecs_B(vecs_B_ptr, 3, num_vecs);
// Transformation
vecs_B.col(vec_idx) = R_B_A * vecs_A.col(vec_idx) + t_B_A;
}
}
__global__ void projectBlocksToCamera(
const Index3D* block_indices_device_ptr, const Camera* camera_device_ptr,
const Eigen::Matrix3f* R_C_L_device_ptr,
const Eigen::Vector3f* t_C_L_device_ptr, const float block_size,
BlockProjectionResult* block_projection_results_device_ptr) {
// Linear index of thread within block
const int thread_index_linear =
threadIdx.z + blockDim.z * (threadIdx.y + (blockDim.y * threadIdx.x));
// Get data needed by all threads into shared memory
__shared__ Eigen::Matrix3f R_C_L;
if (thread_index_linear < 9) {
R_C_L.data()[thread_index_linear] =
R_C_L_device_ptr->data()[thread_index_linear];
}
__shared__ Eigen::Vector3f t_C_L;
if (thread_index_linear >= 9 && thread_index_linear < 12) {
t_C_L.data()[thread_index_linear - 9] =
t_C_L_device_ptr->data()[thread_index_linear - 9];
}
__syncthreads();
// The indices of the voxel this thread will work on
// blockIdx.x - The index of the block we're working on (blockIdx.y/z
// should be zero)
// threadIdx.x/y/z - The indices of the voxel within the block (we
// expect the threadBlockDims == voxelBlockDims)
const Index3D block_idx = block_indices_device_ptr[blockIdx.x];
const Index3D voxel_idx(threadIdx.x, threadIdx.y, threadIdx.z);
// Voxel center point
const Vector3f p_voxel_center_L = getCenterPostionFromBlockIndexAndVoxelIndex(
block_size, block_idx, voxel_idx);
// To camera frame
const Vector3f p_voxel_center_C = R_C_L * p_voxel_center_L + t_C_L;
// Project to image plane
Eigen::Vector2f u_px;
if (!camera_device_ptr->project(p_voxel_center_C, &u_px)) {
return;
}
// Map outputs
BlockProjectionResult* result_ptr =
&(block_projection_results_device_ptr[blockIdx.x]);
result_ptr->row(thread_index_linear) = u_px;
}
__global__ void interpolate(const float* depth_frame,
const float* u_px_vec_device_ptr, const int rows,
const int cols, const int num_points,
float* interpolated_value_device_ptr) {
// Map the interpolation locations
const Eigen::Map<const Eigen::MatrixX2f> u_px_vec(u_px_vec_device_ptr,
num_points, 2);
// Interpolate one of the points
const int u_px_vec_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (u_px_vec_idx < num_points) {
interpolation::interpolate2DLinear(
depth_frame, u_px_vec.row(u_px_vec_idx), rows, cols,
&interpolated_value_device_ptr[u_px_vec_idx]);
}
}
__global__ void setVoxelBlock(VoxelBlock<TsdfVoxel>** block_device_ptrs) {
// The VoxelBlock that this ThreadBlock is working on
VoxelBlock<TsdfVoxel>* block_ptr = block_device_ptrs[blockIdx.x];
block_ptr->voxels[threadIdx.z][threadIdx.y][threadIdx.x].distance =
threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
}
void setVoxelBlockOnGPU(TsdfLayer* layer) {
// Get a list of blocks to be modified on the CPU
const std::vector<Index3D> block_indices = layer->getAllBlockIndices();
std::vector<VoxelBlock<TsdfVoxel>*> block_ptrs;
block_ptrs.reserve(block_indices.size());
for (const Index3D& block_index : block_indices) {
block_ptrs.push_back(layer->getBlockAtIndex(block_index).get());
}
// Move the list to the GPU
VoxelBlock<TsdfVoxel>** block_device_ptrs;
checkCudaErrors(
cudaMalloc(&block_device_ptrs,
block_ptrs.size() * sizeof(VoxelBlock<TsdfVoxel>*)));
checkCudaErrors(
cudaMemcpy(block_device_ptrs, block_ptrs.data(),
block_ptrs.size() * sizeof(VoxelBlock<TsdfVoxel>*),
cudaMemcpyHostToDevice));
  // Kernel - one thread block per voxel block
constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;
const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide);
const int num_blocks = block_indices.size();
setVoxelBlock<<<num_blocks, kThreadsPerBlock>>>(block_device_ptrs);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
cudaFree(block_device_ptrs);
}
Eigen::VectorXf interpolatePointsOnGPU(const DepthImage& depth_frame,
const Eigen::MatrixX2f& u_px_vec) {
// Transfer data to the GPU
float* depth_frame_device_ptr;
checkCudaErrors(
cudaMalloc(&depth_frame_device_ptr, depth_frame.numel() * sizeof(float)));
checkCudaErrors(cudaMemcpy(depth_frame_device_ptr, depth_frame.dataConstPtr(),
depth_frame.numel() * sizeof(float),
cudaMemcpyHostToDevice));
float* u_px_vec_device_ptr;
checkCudaErrors(
cudaMalloc(&u_px_vec_device_ptr, u_px_vec.rows() * 2 * sizeof(float)));
checkCudaErrors(cudaMemcpy(u_px_vec_device_ptr, u_px_vec.data(),
u_px_vec.rows() * 2 * sizeof(float),
cudaMemcpyHostToDevice));
// Output location
float* interpolated_values_device_ptr;
checkCudaErrors(cudaMalloc(&interpolated_values_device_ptr,
u_px_vec.rows() * sizeof(float)));
// Kernel - interpolation
const int num_points = u_px_vec.rows();
constexpr int threadsPerBlock = 512;
const int blocksInGrid = (num_points / threadsPerBlock) + 1;
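  // Grid size is rounded up so every query point gets a thread; the kernel's
  // bounds check against num_points discards the overshoot in the last block.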
interpolate<<<blocksInGrid, threadsPerBlock>>>(
depth_frame_device_ptr, u_px_vec_device_ptr, depth_frame.rows(), depth_frame.cols(),
u_px_vec.rows(), interpolated_values_device_ptr);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Return the result
Eigen::VectorXf results(u_px_vec.rows());
checkCudaErrors(cudaMemcpy(results.data(), interpolated_values_device_ptr,
u_px_vec.rows() * sizeof(float),
cudaMemcpyDeviceToHost));
cudaFree(depth_frame_device_ptr);
cudaFree(u_px_vec_device_ptr);
cudaFree(interpolated_values_device_ptr);
return results;
}
std::vector<BlockProjectionResult> projectBlocksOnGPU(
const std::vector<Index3D>& block_indices, const Camera& camera,
const Transform& T_C_L, TsdfLayer* distance_layer_ptr) {
// Camera
Camera* camera_device_ptr;
checkCudaErrors(cudaMalloc(&camera_device_ptr, sizeof(Camera)));
checkCudaErrors(cudaMemcpy(camera_device_ptr, &camera, sizeof(Camera),
cudaMemcpyHostToDevice));
// Transformation
// NOTE(alexmillane): For some reason I only got things to work by separating
// the Eigen::Affine3f into the rotation matrix and translation vector... I
// cannot explain why it didn't work, but I spent hours trying to get it and I
// couldn't.
const Eigen::Matrix3f R_C_L = T_C_L.rotation();
const Eigen::Vector3f t_C_L = T_C_L.translation();
Eigen::Matrix3f* R_C_L_device_ptr;
Eigen::Vector3f* t_C_L_device_ptr;
checkCudaErrors(cudaMalloc(&R_C_L_device_ptr, sizeof(Eigen::Matrix3f)));
checkCudaErrors(cudaMemcpy(R_C_L_device_ptr, R_C_L.data(),
sizeof(Eigen::Matrix3f), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&t_C_L_device_ptr, sizeof(Eigen::Vector3f)));
checkCudaErrors(cudaMemcpy(t_C_L_device_ptr, t_C_L.data(),
sizeof(Eigen::Vector3f), cudaMemcpyHostToDevice));
// Copy the block indices to the GPU for projection
Index3D* block_indices_device_ptr;
checkCudaErrors(cudaMalloc(&block_indices_device_ptr,
block_indices.size() * sizeof(Index3D)));
checkCudaErrors(cudaMemcpy(block_indices_device_ptr, block_indices.data(),
block_indices.size() * sizeof(Index3D),
cudaMemcpyHostToDevice));
// Output space
BlockProjectionResult* block_projection_results_device_ptr;
checkCudaErrors(
cudaMalloc(&block_projection_results_device_ptr,
block_indices.size() * sizeof(BlockProjectionResult)));
// TODO: CURRENTLY ASSUMES WE CAN LAUNCH AN INFINITE NUMBER OF THREAD BLOX
constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;
const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide);
const int num_blocks = block_indices.size();
projectBlocksToCamera<<<num_blocks, kThreadsPerBlock>>>(
block_indices_device_ptr, camera_device_ptr, R_C_L_device_ptr,
t_C_L_device_ptr, distance_layer_ptr->block_size(),
block_projection_results_device_ptr);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Copy over results
std::vector<BlockProjectionResult> projection_results;
projection_results.resize(block_indices.size());
checkCudaErrors(
cudaMemcpy(projection_results.data(), block_projection_results_device_ptr,
block_indices.size() * sizeof(BlockProjectionResult),
cudaMemcpyDeviceToHost));
// Free
cudaFree(R_C_L_device_ptr);
cudaFree(t_C_L_device_ptr);
cudaFree(block_indices_device_ptr);
cudaFree(block_projection_results_device_ptr);
return projection_results;
}
Eigen::Matrix3Xf transformPointsOnGPU(const Transform& T_B_A,
const Eigen::Matrix3Xf& vecs_A) {
// Move inputs
float* vecs_A_device_ptr;
const int num_elements = vecs_A.rows() * vecs_A.cols();
checkCudaErrors(cudaMalloc(&vecs_A_device_ptr, num_elements * sizeof(float)));
checkCudaErrors(cudaMemcpy(vecs_A_device_ptr, vecs_A.data(),
num_elements * sizeof(float),
cudaMemcpyHostToDevice));
// Transformation
// NOTE(alexmillane): For some reason I only got things to work by separating
// the Eigen::Affine3f into the rotation matrix and translation vector... I
// cannot explain why it didn't work, but I spent hours trying to get it and I
// couldn't.
const Eigen::Matrix3f R_B_A = T_B_A.rotation();
const Eigen::Vector3f t_B_A = T_B_A.translation();
  Eigen::Matrix3f* R_B_A_device_ptr;
  Eigen::Vector3f* t_B_A_device_ptr;
  checkCudaErrors(cudaMalloc(&R_B_A_device_ptr, sizeof(Eigen::Matrix3f)));
  checkCudaErrors(cudaMemcpy(R_B_A_device_ptr, R_B_A.data(),
                             sizeof(Eigen::Matrix3f), cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMalloc(&t_B_A_device_ptr, sizeof(Eigen::Vector3f)));
  checkCudaErrors(cudaMemcpy(t_B_A_device_ptr, t_B_A.data(),
                             sizeof(Eigen::Vector3f), cudaMemcpyHostToDevice));
// Output space
float* vecs_B_device;
checkCudaErrors(cudaMalloc(&vecs_B_device, num_elements * sizeof(float)));
// Kernel
const int num_vecs = vecs_A.cols();
constexpr int threadsPerBlock = 512;
const int blocksInGrid = (num_vecs / threadsPerBlock) + 1;
transformPointsOnGPU<<<blocksInGrid, threadsPerBlock>>>(
      R_B_A_device_ptr, t_B_A_device_ptr, vecs_A_device_ptr, num_vecs,
vecs_B_device);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Retrieve output
Eigen::Matrix3Xf vecs_B(3, num_vecs);
checkCudaErrors(cudaMemcpy(vecs_B.data(), vecs_B_device,
num_elements * sizeof(float),
cudaMemcpyDeviceToHost));
cudaFree(vecs_A_device_ptr);
  cudaFree(R_B_A_device_ptr);
  cudaFree(t_B_A_device_ptr);
cudaFree(vecs_B_device);
return vecs_B;
}
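// Usage sketch, assuming Transform is an Eigen rigid-body transform type exposing
// rotation() and translation() as used above (names below are illustrative only):
//
//   Eigen::Matrix3Xf points_A = Eigen::Matrix3Xf::Random(3, 1000);
//   Transform T_B_A = Transform::Identity();
//   T_B_A.translation() = Eigen::Vector3f(1.0f, 0.0f, 0.0f);
//   Eigen::Matrix3Xf points_B = test_utils::transformPointsOnGPU(T_B_A, points_A);
//   // points_B.col(i) should match T_B_A * points_A.col(i) for each column i.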
} // namespace test_utils
} // namespace nvblox
#include "kernel_common.h"
#include "geometry/grid_3d.h"
#include "geometry/SE3.h"
#include "optimization/optimization.h"
#include "util/mirrored_memory.h"
namespace dart {
static const float truncVal = 1000.0;
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
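// Result buffer convention shared by the gpu_normEqnsModToObs* kernels below:
//   result[0 .. dims)                    accumulates J^T r,
//   result[dims .. dims + JTJSize(dims)) accumulates J^T J stored as a packed lower
//                                        triangle (index (i*(i+1))/2 + j, j <= i),
//   result[dims + JTJSize(dims)]         accumulates the scalar error 0.5 * r^2,
// so JTJSize(dims) is presumably dims*(dims+1)/2. Accumulation across image pixels
// is done with atomicAdd (see the commented-out loops and the truncated variant).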
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObs(const int dims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
float * J = &s[tid*dims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
// const float3 sdfGrad_m = SE3Rotate(T_mc,sdfGrad_m);
getErrorJacobianOfModelPoint(J,make_float4(predV_m,1),predFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
atomicAdd(numPredictions,1);
float * JTr = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
// //#pragma unroll
// for (int i=0; i<dims; i++) {
// if( J[i] == 0.0f) continue;
// float v = residual*J[i];
// atomicAdd(&JTr[i],v);
// //#pragma unroll
// for (int j=0; j<=i; j++) {
// float v2 = J[i]*J[j];
// atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
// }
// }
// atomicAdd(e,0.5*residual*residual);
computeSquaredLossResult(dims,residual,J,e,JTr,JTJ);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsTruncated(const int dims,
const float4 * labeledPredVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float truncationDist,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float err = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
// make sure we're in the truncation region and violating free space
if (err >= truncationDist || err < 0) {
return;
}
if (dbgErr) { debugError[index] = err; }
// const float4 predV_m = T_mc*make_float4(predV_c.x,predV_c.y,predV_c.z,1);
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
float * J = &s[tid*dims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
// const float3 sdfGrad_m = SE3Rotate(T_mc,sdfGrad_m);
getErrorJacobianOfModelPoint(J,make_float4(predV_m,1),predFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
atomicAdd(numPredictions,1);
float * eJ = result;
float * JTJ = &result[dims];
float * e = &result[dims + JTJSize(dims)];
//#pragma unroll
for (int i=0; i<dims; i++) {
if( J[i] == 0.0f) continue;
float v = err*J[i];
atomicAdd(&eJ[i],v);
//#pragma unroll
for (int j=0; j<=i; j++) {
float v2 = J[i]*J[j];
atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
}
}
atomicAdd(e,0.5*err*err);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsReduced(const int fullDims,
const int redDims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const float * dtheta_dalpha,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
atomicAdd(numPredictions,1);
getErrorJacobianOfModelPoint(de_dtheta,make_float4(predV_m,1),predFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doPoseGradientReduction(J,de_dtheta,dtheta_dalpha,fullDims,redDims);
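    // doPoseGradientReduction appears to apply the chain rule J = de_dtheta * dtheta_dalpha,
    // projecting the fullDims-dimensional pose gradient onto the redDims reduced parameters.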
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
//#pragma unroll
// for (int i=0; i<redDims; i++) {
// if( J[i]==0.0f) continue;
// float v = residual*J[i];
// atomicAdd(&JTr[i],v);
// //#pragma unroll
// for (int j=0; j<=i; j++) {
// float v2 = J[i]*J[j];
// atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2);
// }
// }
// atomicAdd(e,0.5*residual*residual);
computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ);
}
template <bool dbgDA, bool dbgErr, bool dbgNorm>
__global__ void gpu_normEqnsModToObsParamMap(const int fullDims,
const int redDims,
const float4 * labeledPredictedVertMap,
const int width,
const int height,
const int modelNum,
const SE3 T_mc,
const SE3 * T_fms,
const SE3 * T_mfs,
const Grid3D<float> * obsSdf,
const int * labelFrames,
const int * dependencies,
const JointType * jointTypes,
const float3 * jointAxes,
const int * dMapping,
float * result,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
extern __shared__ float s[];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// overflow
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
if (dbgDA) { if (modelNum == 0) { debugDataAssociation[index] = -1; } }
if (dbgErr) { if (modelNum == 0) { debugError[index] = NAN; } }
if (dbgNorm) { debugNorm[index] = make_float4(0); }
const float4 & predV_c = labeledPredictedVertMap[index];
// no prediction
if (predV_c.z == 0) { return; }
const float3 predV_m = SE3Transform(T_mc,make_float3(predV_c));
const float3 predVGrid = obsSdf->getGridCoords(predV_m);
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float residual = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
if (dbgErr) { debugError[index] = residual; }
const int label = round(predV_c.w);
const int model = label >> 16;
const int sdf = label & 65535;
if (model != modelNum) {
return;
}
if (dbgDA) { debugDataAssociation[index] = label; }
const int predFrame = labelFrames[sdf];
// array declarations
float * de_dtheta = &s[tid*(fullDims+redDims)];
float * J = &s[tid*(fullDims+redDims) + fullDims];
const float3 sdfGrad_m = obsSdf->getGradientInterpolated(predVGrid);
if (dbgNorm) { debugNorm[index] = make_float4(sdfGrad_m,1); }
atomicAdd(numPredictions,1);
getErrorJacobianOfModelPoint(de_dtheta,make_float4(predV_m,1),predFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs);
doParamMapping(J,de_dtheta,dMapping,fullDims,redDims);
float * JTr = result;
float * JTJ = &result[redDims];
float * e = &result[redDims + JTJSize(redDims)];
computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ);
}
__global__ void gpu_splatObsSdf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_cm,
const Grid3D<float> * dObsSdf,
const float focalLength) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
const float3 & o = dObsSdf->offset;
const float & resolution = dObsSdf->resolution;
// TODO: think about this
// const float3 center = o + resolution*make_float3( x + 0.5, y + 0.5, z + 0.5);
const float3 center = SE3Transform(T_cm,o + resolution*make_float3( x , y , z ));
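    // Pinhole projection of the voxel centre into the depth image; the principal
    // point is taken to be the image centre (width>>1, height>>1).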
const int u = round( (focalLength/center.z)*center.x + (width>>1) );
const int v = round( (focalLength/center.z)*center.y + (height>>1) );
float & splatVal = dObsSdf->data[x + dObsSdf->dim.x*(y + dObsSdf->dim.y*z)];
if (u < 0 || u >= width || v < 0 || v >= height) {
splatVal = truncVal;
} else if (dObsVertMap[u + v*width].w == 0 || dObsVertMap[u + v*width].z == 0) {
splatVal = 0.5*truncVal; // TODO: think about this
// } else {
// float sdfWorld = (dObsVertMap[u + v*width].z - center.z);
// float sdf = (sdfWorld)/dObsSdf->resolution;
// splatVal = fmaxf(0, fminf(truncVal, sdf));
// }
} else if (dObsVertMap[u + v*width].z < center.z) {
splatVal = 0;
} else {
splatVal = truncVal;
}
}
__global__ void gpu_clearObsSdf(const Grid3D<float> * dObsSdf,
const float truncationDist) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
dObsSdf->data[x + dObsSdf->dim.x*(y + dObsSdf->dim.y*z)] = truncationDist;
}
__global__ void gpu_computeTruncatedObsDf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_mc,
const Grid3D<float> * dObsSdf,
const float truncationDist) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = threadIdx.z;
if (x >= width-1 || y >= height-1) { return; }
float4 pA;
float4 pB;
float4 pC;
if (z == 0) {
pA = dObsVertMap[x + y*width];
pB = dObsVertMap[x+1 + y*width];
pC = dObsVertMap[x+1 + (y+1)*width];
} else {
pA = dObsVertMap[x + y*width];
pC = dObsVertMap[x+1 + (y+1)*width];
pB = dObsVertMap[x + (y+1)*width];
}
if (pA.w != 0 && pB.w != 0 && pC.w != 0 ) {
//printf("%d, %d\n",x,y);
const float3 pAg = dObsSdf->getGridCoords(make_float3(T_mc*pA));
const float3 pBg = dObsSdf->getGridCoords(make_float3(T_mc*pB));
const float3 pCg = dObsSdf->getGridCoords(make_float3(T_mc*pC));
const float3 minPoint = fminf(pAg,fminf(pBg,pCg));
const float3 maxPoint = fmaxf(pAg,fmaxf(pBg,pCg));
const float3 E0 = pAg - pBg;
const float3 E1 = pCg - pBg;
float a = dot(E0,E0);
float b = dot(E0,E1);
float c = dot(E1,E1);
float det = a*c-b*b;
for (int gz=max(0,(int)floor(minPoint.z-truncationDist)); gz< min((int)ceil(maxPoint.z+truncationDist),dObsSdf->dim.z); ++gz) {
for (int gy=max(0,(int)floor(minPoint.y-truncationDist)); gy< min((int)ceil(maxPoint.y+truncationDist),dObsSdf->dim.y); ++gy) {
for (int gx=max(0,(int)floor(minPoint.x-truncationDist)); gx< min((int)ceil(maxPoint.x+truncationDist),dObsSdf->dim.x); ++gx) {
//printf("> %d, %d, %d\n",gx,gy,gz);
float & sdfVal = dObsSdf->data[gx + dObsSdf->dim.x*(gy + dObsSdf->dim.y*gz)];
const float3 P = make_float3(gx+0.5,gy+0.5,gz+0.5);
const float3 D = pBg - P;
float d = dot(E0,D);
float e = dot(E1,D);
float f = dot(D,D);
float s = b*e - c*d;
float t = b*d - a*e;
int region;
if ( s+t <= det) {
if ( s < 0 ) {
if ( t < 0 ) {
region = 4;
} else {
region = 3;
}
} else if ( t < 0 ) {
region = 5;
} else {
region = 0;
}
} else {
if ( s < 0 ) {
region = 2;
} else if ( t < 0) {
region = 6;
} else {
region = 1;
}
}
switch (region) {
case 0:
{
float invDet = 1/det;
s*= invDet;
t*= invDet;
}
break;
case 1:
{
float numer = c + e - b - d;
if (numer <= 0) {
s = 0;
} else {
float denom = a - 2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
}
t = 1-s;
}
break;
case 2:
{
float tmp0 = b+d;
float tmp1 = c+e;
if ( tmp1 > tmp0 ) { // min on edge s+1=1
float numer = tmp1 - tmp0;
float denom = a - 2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
t = 1-s;
} else { // min on edge s=0
s = 0;
t = ( tmp1 <= 0 ? 1 : ( e >= 0 ? 0 : -e/c ) );
}
}
break;
case 3:
s = 0;
t = ( e >= 0 ? 0 :
( -e >= c ? 1 : -e/c ) );
break;
case 4:
if ( d < 0 ) { // min on edge t=0
t = 0;
s = ( d >= 0 ? 0 :
( -d >= a ? 1 : -d/a ) );
} else { // min on edge s = 0
s = 0;
t = ( e >= 0 ? 0 :
( -e >= c ? 1 : -e/c ) );
}
break;
case 5:
t = 0;
s = ( d >= 0 ? 0 :
( -d >= a ? 1 : -d/a ) );
break;
case 6:
{
float tmp0 = a+d;
float tmp1 = b+e;
if (tmp0 > tmp1) { // min on edge s+1=1
float numer = c + e - b - d;
float denom = a -2*b + c;
s = ( numer >= denom ? 1 : numer/denom );
t = 1-s;
} else { // min on edge t=1
t = 0;
s = ( tmp0 <= 0 ? 1 : ( d >= 0 ? 0 : -d/a ));
}
}
break;
}
const float3 closestPoint = pBg + s*E0 + t*E1;
const float3 v = closestPoint-P;
float dist = length(v);
float3 unscaledNorm = cross(pAg-pBg,pCg-pBg);
if (dot(v,unscaledNorm) < 0) { dist = -dist; }
//atomicMin(&sdfVal,length);
// TODO
//sdfVal = min(sdfVal,list);
// keep the signed distance with the smallest magnitude seen so far; note this
// read-modify-write is not atomic, so concurrent threads touching the same voxel may race
if (fabs(dist) < fabs(sdfVal)) { sdfVal = dist; }
//printf("%f\n",sdfVal);
}
}
}
}
}
__global__ void gpu_signTruncatedObsDf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 T_cm,
const Grid3D<float> * dObsSdf,
const float focalLength) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width-1 || y >= height-1) { return; }
if (dObsVertMap[x + y*width].w != 0 && dObsVertMap[x+1 + y*width].w != 0 && dObsVertMap[x+1 + (y+1)*width].w != 0 ) {
// body currently empty; the sign is applied in gpu_computeTruncatedObsDf via the triangle-normal test
}
}
__global__ void gpu_errorModToObs(const float4 * labeledPredVertMap,
const int width,
const int height,
const Grid3D<float> * obsSdf,
float* result) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// out of image bounds
if (x >= width || y >= height) {
return;
}
const int index = x + y*width;
const float4 & predV = labeledPredVertMap[index];
// no prediction
if (predV.z == 0) { return; }
const float3 predVGrid = obsSdf->getGridCoords(make_float3(predV));
if (!obsSdf->isInBoundsGradientInterp(predVGrid)) {
return;
}
const float err = obsSdf->getValueInterpolated(predVGrid)*obsSdf->resolution;
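// getValueInterpolated() returns the SDF value in grid (voxel) units; multiplying by
// resolution converts it to metric units. The atomicAdd below accumulates 0.5*err^2 over
// all predicted vertices, i.e. the total model-to-observation SDF energy.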
//atomicAdd(numPredictions,1);
atomicAdd(result,0.5*err*err);
}
__global__ void gpu_cullUnobservable(float4 * predVertMap,
const int predWidth,
const int predHeight,
const float4 * obsVertMap,
const int obsWidth,
const int obsHeight) {
const int predX = blockIdx.x*blockDim.x + threadIdx.x;
const int predY = blockIdx.y*blockDim.y + threadIdx.y;
if (predX >= predWidth || predY >= predHeight) { return; }
const int predIndex = predX + predY*predWidth;
const int obsX = predX*obsWidth/predWidth;
const int obsY = predY*obsHeight/predHeight;
const int obsIndex = obsX + obsY*obsWidth;
if (obsVertMap[obsIndex].w <= 0 || //obsVertMap[obsIndex].z == 0 ||
obsVertMap[obsIndex+1].w <= 0 || //obsVertMap[obsIndex+1].z == 0 ||
obsVertMap[obsIndex+obsWidth].w <= 0 || //obsVertMap[obsIndex+obsWidth].z == 0 ||
obsVertMap[obsIndex+obsWidth+1].w <= 0 //|| obsVertMap[obsIndex+obsWidth+1].z == 0
) {
predVertMap[predIndex].z = 0;
}
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
void normEqnsModToObs(const int dimensions,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
const SE3 T_gc,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(dimensions + JTJSize(dimensions) + 1)*sizeof(float));
cudaMemset(numPredictions,0,sizeof(int));
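// The nested branches below pick a compile-time specialization of gpu_normEqnsModToObs on
// the three optional debug outputs, so the non-debug path carries no per-pixel branching.
// The dynamic shared memory request, 64*dimensions*sizeof(float), appears sized as one float
// per thread per parameter dimension (both block shapes above contain 64 threads).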
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObs<false,false,false><<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<false,false,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObs<false,true,false><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<false,true,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObs<true,false,false><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<true,false,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObs<true,true,false><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObs<true,true,true><<<grid,block,64*dimensions*sizeof(float)>>> (dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), T_gc, model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObs: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsTruncated(const int dimensions,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
const float truncationDistance,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(dimensions + JTJSize(dimensions) + 1)*sizeof(float));
cudaMemset(numPredictions,0,sizeof(int));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<false,false,false><<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<false,false,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<false,true,false> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<false,true,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<true,false,false> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<true,false,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsTruncated<true,true,false> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsTruncated<true,true,true> <<<grid,block,64*dimensions*sizeof(float)>>>(dimensions, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), truncationDistance, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObs: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsReduced(const int dims,
const int reductionDims,
const float * d_dtheta_dalpha,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<false,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<false,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<false,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<false,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<true,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<true,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsReduced<true,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsReduced<true,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), d_dtheta_dalpha, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObsReduced: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void normEqnsModToObsParamMap(const int dims,
const int reductionDims,
const int * dMapping,
const float4 * dLabeledPredictedVertMap,
const int width,
const int height,
const MirroredModel & model,
float * dResult,
int * numPredictions,
int * debugDataAssociation,
float * debugError,
float4 * debugNorm) {
dim3 block;
if (height == 1) {
block.x = 64; block.y = block.z = 1;
}
else {
block.x = 8; block.y = 8; block.z = 1;
}
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float));
{
if (debugDataAssociation == 0) {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<false,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<false,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<false,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<false,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
else {
if (debugError == 0) {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<true,false,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<true,false,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
else {
if (debugNorm == 0) {
gpu_normEqnsModToObsParamMap<true,true,false><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
} else {
gpu_normEqnsModToObsParamMap<true,true,true><<<grid,block,64*(dims+reductionDims)*sizeof(float)>>> (dims, reductionDims, dLabeledPredictedVertMap, width, height, model.getModelID(), model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceObsSdf(), model.getDeviceSdfFrames(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, dResult, numPredictions, debugDataAssociation, debugError, debugNorm);
}
}
}
#ifdef CUDA_ERR_CHECK
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("normEqnsModToObsReduced: %s\n",cudaGetErrorString(err));
}
#endif
}
}
void splatObsSdfZeros(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 & T_cm,
const Grid3D<float> * dObsSdf,
const uint3 sdfDim,
const float focalLength) {
dim3 block(8,8,4);
dim3 grid(sdfDim.x / block.x, sdfDim.y / block.y, sdfDim.z / block.z );
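// NOTE: the integer division above assumes sdfDim is a multiple of (8,8,4); any remainder
// voxels would simply not be covered by the launch. The image-space wrappers further down
// round up with ceil() instead.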
gpu_splatObsSdf<<<grid,block>>>(dObsVertMap,
width,
height,
T_cm,
dObsSdf,
focalLength);
}
void computeTruncatedObsSdf(const float4 * dObsVertMap,
const int width,
const int height,
const SE3 & T_mc,
const Grid3D<float> * dObsSdf,
const uint3 sdfDim,
const float truncationDist) {
dim3 block(8,8,4);
dim3 grid(sdfDim.x / block.x, sdfDim.y / block.y, sdfDim.z / block.z );
gpu_clearObsSdf<<<grid,block>>>(dObsSdf, truncationDist);
block = dim3(16,8,2);
grid = dim3( ceil( width / (float)block.x), ceil(height / (float)block.y ), 1);
gpu_computeTruncatedObsDf<<<grid,block>>>(dObsVertMap,width,height,T_mc,dObsSdf,truncationDist);
//gpu_signTruncatedObsDf<<<grid,block>>>(dObsVertMap,width,height,T_cm,dObsSdf,focalLength);
}
float errorModToObs(const float4 *dLabeledPredictedVertMap,
const int width,
const int height,
const Grid3D<float> *dObsSdf) {
dim3 block(16,8);
dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y ));
static MirroredVector<float> error(1);
cudaMemset(error.devicePtr(),0,sizeof(float));
gpu_errorModToObs<<<grid,block>>>(dLabeledPredictedVertMap,width,height,dObsSdf,error.devicePtr());
error.syncDeviceToHost();
return error.hostPtr()[0];
}
void cullUnobservable_(float4 * predVertMap,
const int predWidth,
const int predHeight,
const float4 * obsVertMap,
const int obsWidth,
const int obsHeight,
const cudaStream_t stream) {
dim3 block(8,8,1);
dim3 grid( ceil( predWidth / (float)block.x), ceil(predHeight / (float)block.y ));
gpu_cullUnobservable<<<grid,block,0,stream>>>(predVertMap,predWidth,predHeight,
obsVertMap,obsWidth,obsHeight);
}
}
namespace nvbio {
namespace priv {
struct pack_flags_functor
{
// constructor
pack_flags_functor(
const uint32 _n,
const uint8* _flags,
uint32* _comp_flags)
: n( _n ), flags( _flags ), comp_flags( _comp_flags ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void operator() (const uint32 thread_id) const
{
const uint32 idx = 32 * thread_id;
if (idx >= n)
return;
// initialize the output word
uint32 f = 0u;
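// Each thread packs 32 byte-flags into a single 32-bit word: byte (idx + j) of 'flags' maps
// to bit j of 'f', and a bit is set iff the corresponding byte is non-zero. The two uint4
// loads below fetch 16 flag bytes each; every byte is tested with an 8-bit mask inside its
// 32-bit lane.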
#pragma unroll
for (uint32 i = 0; i < 2; ++i)
{
// fetch and process 16-bytes in one go
const uint4 flag = ((const uint4*)flags)[idx/16u + i];
if (flag.x & (255u << 0)) f |= 1u << (i*16 + 0);
if (flag.x & (255u << 8)) f |= 1u << (i*16 + 1);
if (flag.x & (255u << 16)) f |= 1u << (i*16 + 2);
if (flag.x & (255u << 24)) f |= 1u << (i*16 + 3);
if (flag.y & (255u << 0)) f |= 1u << (i*16 + 4);
if (flag.y & (255u << 8)) f |= 1u << (i*16 + 5);
if (flag.y & (255u << 16)) f |= 1u << (i*16 + 6);
if (flag.y & (255u << 24)) f |= 1u << (i*16 + 7);
if (flag.z & (255u << 0)) f |= 1u << (i*16 + 8);
if (flag.z & (255u << 8)) f |= 1u << (i*16 + 9);
if (flag.z & (255u << 16)) f |= 1u << (i*16 + 10);
if (flag.z & (255u << 24)) f |= 1u << (i*16 + 11);
if (flag.w & (255u << 0)) f |= 1u << (i*16 + 12);
if (flag.w & (255u << 8)) f |= 1u << (i*16 + 13);
if (flag.w & (255u << 16)) f |= 1u << (i*16 + 14);
if (flag.w & (255u << 24)) f |= 1u << (i*16 + 15);
}
// write the output word
comp_flags[thread_id] = f;
}
const uint32 n;
const uint8* flags;
uint32* comp_flags;
};
template <typename T>
struct build_head_flags_functor;
template <>
struct build_head_flags_functor<uint32>
{
// constructor
build_head_flags_functor(
const uint32 _n,
const uint32* _keys,
uint8* _flags)
: n( _n ), keys( _keys ), flags( _flags ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void operator() (const uint32 thread_id) const
{
const uint32 idx = 4 * thread_id;
if (idx >= n)
return;
// load the previous key
const uint32 key_p = idx ? keys[idx-1] : 0xFFFFFFFF;
// load the next 4 keys
const uint4 key = ((const uint4*)keys)[thread_id];
const uchar4 flag = ((const uchar4*)flags)[thread_id];
// and write the corresponding 4 flags
((uchar4*)flags)[thread_id] = make_uchar4(
(key.x != key_p) ? 1u : flag.x,
(key.y != key.x) ? 1u : flag.y,
(key.z != key.y) ? 1u : flag.z,
(key.w != key.z) ? 1u : flag.w );
}
const uint32 n;
const uint32* keys;
uint8* flags;
};
template <>
struct build_head_flags_functor<uint64>
{
// constructor
build_head_flags_functor(
const uint32 _n,
const uint64* _keys,
uint8* _flags)
: n( _n ), keys( _keys ), flags( _flags ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void operator() (const uint32 thread_id) const
{
const uint32 idx = thread_id;
if (idx >= n)
return;
// load the previous key
const uint64 key_p = idx ? keys[idx-1] : 0xFFFFFFFF;
// load the next key
const uint64 key = keys[thread_id];
const uint8 flag = flags[thread_id];
// and write the corresponding flag out
flags[thread_id] = (key != key_p) ? 1u : flag;
}
const uint32 n;
const uint64* keys;
uint8* flags;
};
// pack a set of head flags into a bit-packed array
//
void pack_flags(
const uint32 n,
const uint8* flags,
uint32* comp_flags)
{
const uint32 n_words = util::divide_ri( n, 32u );
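// divide_ri rounds the division up, so e.g. n = 100 flags yields n_words = 4 packed words.
// Note that pack_flags_functor always reads a full 32-byte window per output word, so the
// flag buffer is presumably expected to be padded up to a multiple of 32 bytes.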
// use a for_each to automate support for older compute capabilities with limited grid sizes
nvbio::for_each<device_tag>(
n_words,
thrust::make_counting_iterator<uint32>(0u),
pack_flags_functor( n, flags, comp_flags ) );
}
// build a set of head flags looking at adjacent keys
//
void build_head_flags(
const uint32 n,
const uint32* keys,
uint8* flags)
{
const uint32 n_quads = util::divide_ri( n, 4u );
// use a for_each to automate support for older compute capabilities with limited grid sizes
nvbio::for_each<device_tag>(
n_quads,
thrust::make_counting_iterator<uint32>(0u),
build_head_flags_functor<uint32>( n, keys, flags ) );
}
// build a set of head flags looking at adjacent keys
//
void build_head_flags(
const uint32 n,
const uint64* keys,
uint8* flags)
{
// use a for_each to automate support for older compute capabilities with limited grid sizes
nvbio::for_each<device_tag>(
n,
thrust::make_counting_iterator<uint32>(0u),
build_head_flags_functor<uint64>( n, keys, flags ) );
}
uint32 extract_radix_16(
const priv::string_set_2bit_be& string_set,
const uint2 suffix,
const uint32 word_idx)
{
priv::local_set_suffix_word_functor<2u,16u,4u,priv::string_set_2bit_be,uint32> word_functor( string_set, word_idx );
return word_functor( suffix );
}
uint32 extract_radix_32(
const priv::string_set_2bit_be& string_set,
const uint2 suffix,
const uint32 word_idx)
{
priv::local_set_suffix_word_functor<2u,32u,4u,priv::string_set_2bit_be,uint32> word_functor( string_set, word_idx );
return word_functor( suffix );
}
uint32 extract_radix_64(
const priv::string_set_2bit_u64_be& string_set,
const uint2 suffix,
const uint32 word_idx)
{
priv::local_set_suffix_word_functor<2u,64u,5u,priv::string_set_2bit_u64_be,uint32> word_functor( string_set, word_idx );
return word_functor( suffix );
}
void extract_radices_16(
const priv::string_set_2bit_be string_set,
const uint32 n_suffixes,
const uint32 word_begin,
const uint32 word_end,
const uint2* h_suffixes,
uint32* h_radices,
uint8* h_symbols)
{
if (word_begin+1 == word_end)
{
#pragma omp parallel for
for (int32 i = 0; i < int32( n_suffixes ); ++i)
{
const uint2 suffix = h_suffixes[ i ];
h_radices[i] = extract_radix_16( string_set, suffix, word_begin );
}
}
else
{
#pragma omp parallel for
for (int32 i = 0; i < int32( n_suffixes ); ++i)
{
const uint2 local_suffix_idx = h_suffixes[ i ];
const uint32 string_idx = local_suffix_idx.y;
const uint32 suffix_idx = local_suffix_idx.x;
const uint64 string_off = string_set.offsets()[ string_idx ];
const uint64 string_end = string_set.offsets()[ string_idx+1u ];
const uint64 string_len = uint32( string_end - string_off );
const uint32* base_words = string_set.base_string().stream();
if (h_symbols != NULL)
h_symbols[i] = suffix_idx ? string_set.base_string()[ string_off + suffix_idx-1u ] : 255u;
extract_word_packed<16u,4u,2u>(
base_words,
string_len,
string_off,
suffix_idx,
word_begin,
word_end,
strided_iterator<uint32*>( h_radices + i, n_suffixes ) );
}
}
}
void extract_radices_32(
const priv::string_set_2bit_be string_set,
const uint32 n_suffixes,
const uint32 word_begin,
const uint32 word_end,
const uint2* h_suffixes,
uint32* h_radices,
uint8* h_symbols)
{
if (word_begin+1 == word_end)
{
#pragma omp parallel for
for (int32 i = 0; i < int32( n_suffixes ); ++i)
{
const uint2 suffix = h_suffixes[ i ];
h_radices[i] = extract_radix_32( string_set, suffix, word_begin );
}
}
else
{
#pragma omp parallel for
for (int32 i = 0; i < int32( n_suffixes ); ++i)
{
const uint2 local_suffix_idx = h_suffixes[ i ];
const uint32 string_idx = local_suffix_idx.y;
const uint32 suffix_idx = local_suffix_idx.x;
const uint64 string_off = string_set.offsets()[ string_idx ];
const uint64 string_end = string_set.offsets()[ string_idx+1u ];
const uint64 string_len = uint32( string_end - string_off );
const uint32* base_words = string_set.base_string().stream();
if (h_symbols != NULL)
h_symbols[i] = suffix_idx ? string_set.base_string()[ string_off + suffix_idx-1u ] : 255u;
extract_word_packed<32u,4u,2u>(
base_words,
string_len,
string_off,
suffix_idx,
word_begin,
word_end,
strided_iterator<uint32*>( h_radices + i, n_suffixes ) );
}
}
}
void extract_radices_64(
const priv::string_set_2bit_u64_be string_set,
const uint32 n_suffixes,
const uint32 word_begin,
const uint32 word_end,
const uint2* h_suffixes,
uint64* h_radices,
uint8* h_symbols)
{
if (word_begin+1 == word_end)
{
#pragma omp parallel for
for (int32 i = 0; i < int32( n_suffixes ); ++i)
{
const uint2 suffix = h_suffixes[ i ];
h_radices[i] = extract_radix_64( string_set, suffix, word_begin );
}
}
else
{
#pragma omp parallel for
for (int32 i = 0; i < int32( n_suffixes ); ++i)
{
const uint2 local_suffix_idx = h_suffixes[ i ];
const uint32 string_idx = local_suffix_idx.y;
const uint32 suffix_idx = local_suffix_idx.x;
const uint64 string_off = string_set.offsets()[ string_idx ];
const uint64 string_end = string_set.offsets()[ string_idx+1u ];
const uint64 string_len = uint32( string_end - string_off );
const uint64* base_words = string_set.base_string().stream();
if (h_symbols != NULL)
h_symbols[i] = suffix_idx ? string_set.base_string()[ string_off + suffix_idx - 1u ] : 255u;
extract_word_packed<64u,5u,2u>(
base_words,
string_len,
string_off,
suffix_idx,
word_begin,
word_end,
strided_iterator<uint64*>( h_radices + i, n_suffixes ) );
}
}
}
void extract_radices(
const priv::string_set_2bit_be string_set,
const uint32 n_suffixes,
const uint32 word_begin,
const uint32 word_end,
const uint32 word_bits,
const uint2* h_suffixes,
uint32* h_radices,
uint8* h_symbols)
{
if (word_bits == 16)
{
extract_radices_16(
string_set,
n_suffixes,
word_begin,
word_end,
h_suffixes,
h_radices,
h_symbols );
}
else if (word_bits == 32)
{
extract_radices_32(
string_set,
n_suffixes,
word_begin,
word_end,
h_suffixes,
h_radices,
h_symbols );
}
else
{
log_error(stderr,"extract_radices(): unsupported number of bits\n");
exit(1);
}
}
void extract_radices(
const priv::string_set_2bit_u64_be string_set,
const uint32 n_suffixes,
const uint32 word_begin,
const uint32 word_end,
const uint32 word_bits,
const uint2* h_suffixes,
uint64* h_radices,
uint8* h_symbols)
{
if (word_bits == 64)
{
extract_radices_64(
string_set,
n_suffixes,
word_begin,
word_end,
h_suffixes,
h_radices,
h_symbols );
}
else
{
log_error(stderr,"extract_radices(): unsupported number of bits\n");
exit(1);
}
}
/// process a batch of BWT symbols
///
uint32 DollarExtractor::extract(
const uint32 n_suffixes,
const uint8* h_bwt,
const uint8* d_bwt,
const uint2* h_suffixes,
const uint2* d_suffixes,
const uint32* d_indices)
{
if (h_suffixes != NULL && // these are NULL for the empty suffixes
d_suffixes != NULL)
{
priv::alloc_storage( d_dollar_ranks, n_suffixes );
priv::alloc_storage( d_dollars, n_suffixes );
priv::alloc_storage( h_dollar_ranks, n_suffixes );
priv::alloc_storage( h_dollars, n_suffixes );
uint32 n_found_dollars = 0;
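// BWT symbols equal to 255 mark the per-string terminators ('$'). The copy_flagged calls
// below compact the global ranks (and, when d_indices is given, the remapped positions) of
// those symbols; the string id of each dollar is then recovered from d_suffixes.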
if (d_indices != NULL)
{
priv::alloc_storage( d_dollar_indices, n_suffixes );
// find the dollar signs
n_found_dollars = cuda::copy_flagged(
n_suffixes,
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_counting_iterator<uint64>(0) + offset,
thrust::device_ptr<const uint32>( d_indices ) ) ),
thrust::make_transform_iterator( thrust::device_ptr<const uint8>( d_bwt ), equal_to_functor<uint8>(255u) ),
thrust::make_zip_iterator(
thrust::make_tuple(
d_dollar_ranks.begin(),
d_dollar_indices.begin() ) ),
d_temp_storage );
// gather their indices
thrust::gather(
d_dollar_indices.begin(),
d_dollar_indices.begin() + n_found_dollars,
thrust::make_transform_iterator( thrust::device_ptr<const uint2>( d_suffixes ), priv::suffix_component_functor<priv::STRING_ID>() ),
d_dollars.begin() );
}
else
{
// find the dollar signs
n_found_dollars = cuda::copy_flagged(
n_suffixes,
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_counting_iterator<uint64>(0) + offset,
thrust::make_transform_iterator( thrust::device_ptr<const uint2>( d_suffixes ), priv::suffix_component_functor<priv::STRING_ID>() ) ) ),
thrust::make_transform_iterator( thrust::device_ptr<const uint8>( d_bwt ), equal_to_functor<uint8>(255u) ),
thrust::make_zip_iterator(
thrust::make_tuple(
d_dollar_ranks.begin(),
d_dollars.begin() ) ),
d_temp_storage );
}
// and copy them back to the host
thrust::copy(
d_dollar_ranks.begin(),
d_dollar_ranks.begin() + n_found_dollars,
h_dollar_ranks.begin() );
// and copy them back to the host
thrust::copy(
d_dollars.begin(),
d_dollars.begin() + n_found_dollars,
h_dollars.begin() );
offset += n_suffixes;
n_dollars += n_found_dollars;
return n_found_dollars;
}
else
{
offset += n_suffixes;
return 0;
}
}
} // namespace priv
} // namespace nvbio
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficient saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "main.h"
#include "extract_kernel.cu"
#include "prepare_kernel.cu"
#include "reduce_kernel.cu"
#include "srad_kernel.cu"
#include "srad2_kernel.cu"
#include "compress_kernel.cu"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int main(int argc, char *argv []){
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
long long time11;
long long time12;
time0 = get_time();
// original input image and its parameters
fp* image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
// resized input image and its dimensions
fp* image; // input image
int Nr,Nc; // number of image rows/columns
long Ne; // number of image elements
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1,r2,c1,c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// surrounding pixel indices
int *iN,*iS,*jE,*jW;
// counters
int iter; // primary loop
long i,j; // image row/col
// memory sizes
int mem_size_i;
int mem_size_j;
int mem_size_single;
//================================================================================80
// GPU VARIABLES
//================================================================================80
// CUDA kernel execution parameters
dim3 threads;
int blocks_x;
dim3 blocks;
dim3 blocks2;
// memory sizes
int mem_size; // matrix memory size
// HOST
int no;
int mul;
fp total;
fp total2;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// DEVICE
fp* d_sums; // partial sum
fp* d_sums2;
int* d_iN;
int* d_iS;
int* d_jE;
int* d_jW;
fp* d_dN;
fp* d_dS;
fp* d_dW;
fp* d_dE;
fp* d_I; // input IMAGE on DEVICE
fp* d_c;
time1 = get_time();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if(argc != 5){
printf("ERROR: wrong number of arguments\n");
printf("Usage: %s <niter> <lambda> <Nr> <Nc>\n", argv[0]);
return -1;
}
else{
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
}
time2 = get_time();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem);
const char* input_image_path = "../data/srad/image.pgm";
if ( !read_graphics(input_image_path, image_ori, image_ori_rows, image_ori_cols, 1) ) {
printf("ERROR: failed to read input image at %s\n", input_image_path);
if (image_ori != NULL) free(image_ori);
return -1;
}
time3 = get_time();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_ori)
//================================================================================80
Ne = Nr*Nc;
image = (fp*)malloc(sizeof(fp) * Ne);
resize( image_ori,
image_ori_rows,
image_ori_cols,
image,
Nr,
Nc,
1);
time4 = get_time();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
mem_size_i = sizeof(int) * Nr; //
iN = (int *)malloc(mem_size_i) ; // north surrounding element
iS = (int *)malloc(mem_size_i) ; // south surrounding element
mem_size_j = sizeof(int) * Nc; //
jW = (int *)malloc(mem_size_j) ; // west surrounding element
jE = (int *)malloc(mem_size_j) ; // east surrounding element
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
for (i=0; i<Nr; i++) {
iN[i] = i-1; // holds index of IMAGE row above
iS[i] = i+1; // holds index of IMAGE row below
}
for (j=0; j<Nc; j++) {
jW[j] = j-1; // holds index of IMAGE column on the left
jE[j] = j+1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of image
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr-1] = Nr-1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc-1] = Nc-1; // changes IMAGE rightmost column index from Nc to Nc-1
//================================================================================80
// GPU SETUP
//================================================================================80
// allocate memory for entire IMAGE on DEVICE
mem_size = sizeof(fp) * Ne; // get the size of float representation of input IMAGE
hipMalloc((void **)&d_I, mem_size); //
// allocate memory for coordinates on DEVICE
hipMalloc((void **)&d_iN, mem_size_i); //
hipMemcpy(d_iN, iN, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_iS, mem_size_i); //
hipMemcpy(d_iS, iS, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jE, mem_size_j); //
hipMemcpy(d_jE, jE, mem_size_j, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jW, mem_size_j); //
hipMemcpy(d_jW, jW, mem_size_j, hipMemcpyHostToDevice); //
// allocate memory for partial sums on DEVICE
hipMalloc((void **)&d_sums, mem_size); //
hipMalloc((void **)&d_sums2, mem_size); //
// allocate memory for derivatives
hipMalloc((void **)&d_dN, mem_size); //
hipMalloc((void **)&d_dS, mem_size); //
hipMalloc((void **)&d_dW, mem_size); //
hipMalloc((void **)&d_dE, mem_size); //
// allocate memory for coefficient on DEVICE
hipMalloc((void **)&d_c, mem_size); //
//checkCUDAError("setup");
//================================================================================80
// KERNEL EXECUTION PARAMETERS
//================================================================================80
// all kernels operating on entire matrix
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = Ne/threads.x;
if (Ne % threads.x != 0){ // compensate for division remainder above by adding one block
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
time5 = get_time();
//================================================================================80
// COPY INPUT IMAGE TO GPU
//================================================================================80
hipMemcpy(d_I, image, mem_size, hipMemcpyHostToDevice);
time6 = get_time();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
hipLaunchKernelGGL(extract, blocks, threads, 0, 0, Ne, d_I);
//checkCUDAError("extract");
time7 = get_time();
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
// execute main loop
for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter
// printf("%d ", iter);
// fflush(NULL);
// execute square kernel
hipLaunchKernelGGL(prepare, blocks, threads, 0, 0, Ne,
d_I,
d_sums,
d_sums2);
//checkCUDAError("prepare");
// performs subsequent reductions of sums
blocks2.x = blocks.x; // original number of blocks
blocks2.y = blocks.y;
no = Ne; // original number of sum elements
mul = 1; // original multiplier
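// Iterative tree reduction: each pass folds 'no' partial sums down to blocks2.x values and
// relaunches on the smaller array until a single block remains. For example, if NUMBER_THREADS
// were 512 (its actual value comes from main.h) and the image were 502x458 (Ne = 229916),
// the data would shrink as 229916 -> 450 -> 1. 'mul' is the stride between surviving partial
// sums inside d_sums/d_sums2.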
while(blocks2.x != 0){
//checkCUDAError("before reduce");
// run kernel
hipLaunchKernelGGL(reduce, blocks2, threads, 0, 0, Ne,
no,
mul,
d_sums,
d_sums2);
//checkCUDAError("reduce");
// update execution parameters
no = blocks2.x; // get current number of elements
if(blocks2.x == 1){
blocks2.x = 0;
}
else{
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2.x/threads.x; // number of blocks
if (blocks2.x % threads.x != 0){ // compensate for division remainder above by adding one block
blocks_x = blocks_x + 1;
}
blocks2.x = blocks_x;
blocks2.y = 1;
}
//checkCUDAError("after reduce");
}
//checkCUDAError("before copy sum");
// copy the final sums from device to host
mem_size_single = sizeof(fp) * 1;
hipMemcpy(&total, d_sums, mem_size_single, hipMemcpyDeviceToHost);
hipMemcpy(&total2, d_sums2, mem_size_single, hipMemcpyDeviceToHost);
//checkCUDAError("copy sum");
// calculate statistics
meanROI = total / fp(NeROI); // mean value of the elements in the ROI
meanROI2 = meanROI * meanROI; // squared mean
varROI = (total2 / fp(NeROI)) - meanROI2; // variance of the ROI
q0sqr = varROI / meanROI2; // normalized variance of the ROI (squared coefficient of variation), the SRAD speckle scale q0^2
// execute srad kernel
hipLaunchKernelGGL(srad, blocks, threads, 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
q0sqr, // standard deviation of ROI
d_c, // diffusion coefficient
d_I); // output image
//checkCUDAError("srad");
// execute srad2 kernel
hipLaunchKernelGGL(srad2, blocks, threads, 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
d_c, // diffusion coefficient
d_I); // output image
//checkCUDAError("srad2");
}
// printf("\n");
time8 = get_time();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
hipLaunchKernelGGL(compress, blocks, threads, 0, 0, Ne, d_I);
//checkCUDAError("compress");
time9 = get_time();
//================================================================================80
// COPY RESULTS BACK TO CPU
//================================================================================80
hipMemcpy(image, d_I, mem_size, hipMemcpyDeviceToHost);
//checkCUDAError("copy back");
time10 = get_time();
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics(
"image_out.pgm",
image,
Nr,
Nc,
1,
255);
time11 = get_time();
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(image);
free(iN);
free(iS);
free(jW);
free(jE);
hipFree(d_I);
hipFree(d_c);
hipFree(d_iN);
hipFree(d_iS);
hipFree(d_jE);
hipFree(d_jW);
hipFree(d_dN);
hipFree(d_dS);
hipFree(d_dE);
hipFree(d_dW);
hipFree(d_sums);
hipFree(d_sums2);
time12 = get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%15.12f s, %15.12f %% : SETUP VARIABLES\n",
(float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : READ COMMAND LINE PARAMETERS\n",
(float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : READ IMAGE FROM FILE\n",
(float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : RESIZE IMAGE\n",
(float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n",
(float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : COPY DATA TO CPU->GPU\n",
(float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : EXTRACT IMAGE\n",
(float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : COMPUTE\n",
(float) (time8-time7) / 1000000, (float) (time8-time7) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : COMPRESS IMAGE\n",
(float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : COPY DATA TO GPU->CPU\n",
(float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : SAVE IMAGE INTO FILE\n",
(float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f %% : FREE MEMORY\n",
(float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time12-time0) / 1000000);
}
//====================================================================================================100
// END OF FILE
//====================================================================================================100
|
the_stack
|
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
/*! \file NeighborListGPU.cu
\brief Defines GPU kernel code for neighbor list processing on the GPU
*/
#include "NeighborListGPU.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#pragma GCC diagnostic pop
/*! \param d_result Device pointer to a single uint. Will be set to 1 if an update is needed
\param d_last_pos Particle positions at the time the nlist was last updated
\param d_pos Current particle positions
\param nwork Number of particles this GPU processes
\param box Box dimensions
\param d_rcut_max The maximum rcut(i,j) that any particle of type i participates in
\param r_buff The buffer size that particles can move in
\param ntypes The number of particle types
\param lambda_min Minimum contraction of deformation tensor
\param lambda Diagonal deformation tensor (for orthorhombic boundaries)
\param checkn
gpu_nlist_needs_update_check_new_kernel() executes one thread per particle. Every particle's
current position is compared to its last position. If the particle has moved a distance more than
the buffer width, then *d_result is set to \a checkn.
*/
__global__ void gpu_nlist_needs_update_check_new_kernel(unsigned int* d_result,
const Scalar4* d_last_pos,
const Scalar4* d_pos,
const unsigned int nwork,
const BoxDim box,
const Scalar* d_rcut_max,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar lambda_min,
const Scalar3 lambda,
const unsigned int checkn,
const unsigned int offset)
{
// cache delta max into shared memory
// shared data for per type pair parameters
HIP_DYNAMIC_SHARED(unsigned char, s_data)
// pointer to the per-type maximum allowed shift squared (s_maxshiftsq) in shared memory
Scalar* s_maxshiftsq = (Scalar*)(&s_data[0]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < ntypes; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < ntypes)
{
const Scalar rmin = d_rcut_max[cur_offset + threadIdx.x];
const Scalar rmax = rmin + r_buff;
const Scalar delta_max = (rmax * lambda_min - rmin) / Scalar(2.0);
s_maxshiftsq[cur_offset + threadIdx.x] = (delta_max > 0) ? delta_max * delta_max : 0.0f;
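// delta_max is half the effective skin for this type: a particle may drift up to
// (r_list*lambda_min - r_cut)/2 before a rebuild is needed, where lambda_min accounts for the
// worst-case box contraction since the last build. If that slack is negative, s_maxshiftsq is
// clamped to zero and every particle triggers a rebuild.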
}
}
__syncthreads();
// each thread compares its current position against its old position to see if the list needs updating
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nwork)
{
// get particle index
idx += offset;
Scalar4 cur_postype = d_pos[idx];
Scalar3 cur_pos = make_scalar3(cur_postype.x, cur_postype.y, cur_postype.z);
const unsigned int cur_type = __scalar_as_int(cur_postype.w);
Scalar4 last_postype = d_last_pos[idx];
Scalar3 last_pos = make_scalar3(last_postype.x, last_postype.y, last_postype.z);
Scalar3 dx = cur_pos - lambda * last_pos;
dx = box.minImage(dx);
if (dot(dx, dx) >= s_maxshiftsq[cur_type])
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(d_result, checkn);
#else
atomicMax(d_result, checkn);
#endif
}
}
hipError_t gpu_nlist_needs_update_check_new(unsigned int* d_result,
const Scalar4* d_last_pos,
const Scalar4* d_pos,
const unsigned int N,
const BoxDim& box,
const Scalar* d_rcut_max,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar lambda_min,
const Scalar3 lambda,
const unsigned int checkn,
const GPUPartition& gpu_partition)
{
const size_t shared_bytes = sizeof(Scalar) * ntypes;
unsigned int block_size = 128;
// iterate over active GPUs in reverse order
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
int n_blocks = nwork / block_size + 1;
hipLaunchKernelGGL((gpu_nlist_needs_update_check_new_kernel),
dim3(n_blocks),
dim3(block_size),
shared_bytes,
0,
d_result,
d_last_pos,
d_pos,
nwork,
box,
d_rcut_max,
r_buff,
ntypes,
lambda_min,
lambda,
checkn,
range.first);
}
return hipSuccess;
}
//! Number of elements of the exclusion list to process in each batch
const unsigned int FILTER_BATCH_SIZE = 4;
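// Example: a particle with 10 exclusions is filtered in three passes of the kernel below
// (ex_start = 0, 4, 8); the gpu_nlist_filter() driver keeps launching batches until all
// exclusions for every particle have been processed.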
/*! \param d_n_neigh Number of neighbors for each particle (read/write)
\param d_nlist Neighbor list for each particle (read/write)
\param nli Indexer for indexing into d_nlist
\param d_n_ex Number of exclusions for each particle
\param d_ex_list List of exclusions for each particle
\param exli Indexer for indexing into d_ex_list
\param N Number of particles
\param ex_start Start filtering the nlist from exclusion number \a ex_start
gpu_nlist_filter_kernel() processes the neighbor list \a d_nlist and removes any entries that
    are excluded. To allow for an arbitrarily large number of exclusions, these are processed in batch
sizes of FILTER_BATCH_SIZE. The kernel must be called multiple times in order to fully remove all
exclusions from the nlist.
    \note The driver gpu_nlist_filter() makes as many kernel calls as are necessary; it only needs
    to be called once.
\b Implementation
One thread is run for each particle. Exclusions \a ex_start, \a ex_start + 1, ... are loaded in
for that particle (or the thread returns if there are no exclusions past that point). The thread
then loops over the neighbor list, comparing each entry to the list of exclusions. If the entry
is not excluded, it is written back out. \a d_n_neigh is updated to reflect the current number of
particles in the list at the end of the kernel call.
*/
__global__ void gpu_nlist_filter_kernel(unsigned int* d_n_neigh,
unsigned int* d_nlist,
const unsigned int* d_head_list,
const unsigned int* d_n_ex,
const unsigned int* d_ex_list,
const Index2D exli,
const unsigned int N,
const unsigned int ex_start)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the particle list
if (idx >= N)
return;
const unsigned int n_neigh = d_n_neigh[idx];
const unsigned int n_ex = d_n_ex[idx];
unsigned int new_n_neigh = 0;
// quit now if the ex_start flag is past the end of n_ex
if (ex_start >= n_ex)
return;
// count the number of exclusions to process in this thread
const unsigned int n_ex_process = n_ex - ex_start;
// load the exclusion list into "local" memory - fully unrolled loops should dump this into
// registers
unsigned int l_ex_list[FILTER_BATCH_SIZE];
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_ex_idx < n_ex_process)
l_ex_list[cur_ex_idx] = d_ex_list[exli(idx, cur_ex_idx + ex_start)];
else
l_ex_list[cur_ex_idx] = 0xffffffff;
}
// loop over the list, regenerating it as we go
const unsigned int my_head = d_head_list[idx];
for (unsigned int cur_neigh_idx = 0; cur_neigh_idx < n_neigh; cur_neigh_idx++)
{
unsigned int cur_neigh = d_nlist[my_head + cur_neigh_idx];
// test if excluded
bool excluded = false;
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_neigh == l_ex_list[cur_ex_idx])
excluded = true;
}
// add it back to the list if it is not excluded
if (!excluded)
{
if (new_n_neigh != cur_neigh_idx)
d_nlist[my_head + new_n_neigh] = cur_neigh;
new_n_neigh++;
}
}
// update the number of neighbors
d_n_neigh[idx] = new_n_neigh;
}
hipError_t gpu_nlist_filter(unsigned int* d_n_neigh,
unsigned int* d_nlist,
const unsigned int* d_head_list,
const unsigned int* d_n_ex,
const unsigned int* d_ex_list,
const Index2D& exli,
const unsigned int N,
const unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_nlist_filter_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
// determine parameters for kernel launch
int n_blocks = N / run_block_size + 1;
// split the processing of the full exclusion list up into a number of batches
unsigned int n_batches = (unsigned int)ceil(double(exli.getH()) / double(FILTER_BATCH_SIZE));
unsigned int ex_start = 0;
for (unsigned int batch = 0; batch < n_batches; batch++)
{
hipLaunchKernelGGL((gpu_nlist_filter_kernel),
dim3(n_blocks),
dim3(run_block_size),
0,
0,
d_n_neigh,
d_nlist,
d_head_list,
d_n_ex,
d_ex_list,
exli,
N,
ex_start);
ex_start += FILTER_BATCH_SIZE;
}
return hipSuccess;
}
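// Example of the batching above (illustrative only): if the exclusion list indexer has
// exli.getH() == 10 slots per particle and FILTER_BATCH_SIZE == 4, then
// n_batches == ceil(10 / 4) == 3 and gpu_nlist_filter_kernel() is launched with
// ex_start == 0, 4 and 8, so every exclusion slot is visited exactly once.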
//! GPU kernel to update the exclusions list
__global__ void gpu_update_exclusion_list_kernel(const unsigned int* tags,
const unsigned int* rtags,
const unsigned int* n_ex_tag,
const unsigned int* ex_list_tag,
const Index2D ex_list_tag_indexer,
unsigned int* n_ex_idx,
unsigned int* ex_list_idx,
const Index2D ex_list_indexer,
const unsigned int N)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = tags[idx];
unsigned int n = n_ex_tag[tag];
// copy over number of exclusions
n_ex_idx[idx] = n;
for (unsigned int offset = 0; offset < n; offset++)
{
unsigned int ex_tag = ex_list_tag[ex_list_tag_indexer(tag, offset)];
unsigned int ex_idx = rtags[ex_tag];
ex_list_idx[ex_list_indexer(idx, offset)] = ex_idx;
}
}
//! GPU function to update the exclusion list on the device
/*! \param d_tag Array of particle tags
\param d_rtag Array of reverse-lookup tag->idx
\param d_n_ex_tag List of number of exclusions per tag
\param d_ex_list_tag 2D Exclusion list per tag
\param ex_list_tag_indexer Indexer for per-tag exclusion list
\param d_n_ex_idx List of number of exclusions per idx
\param d_ex_list_idx Exclusion list per idx
\param ex_list_indexer Indexer for per-idx exclusion list
\param N number of particles
*/
hipError_t gpu_update_exclusion_list(const unsigned int* d_tag,
const unsigned int* d_rtag,
const unsigned int* d_n_ex_tag,
const unsigned int* d_ex_list_tag,
const Index2D& ex_list_tag_indexer,
unsigned int* d_n_ex_idx,
unsigned int* d_ex_list_idx,
const Index2D& ex_list_indexer,
const unsigned int N)
{
unsigned int block_size = 256;
hipLaunchKernelGGL((gpu_update_exclusion_list_kernel),
dim3(N / block_size + 1),
dim3(block_size),
0,
0,
d_tag,
d_rtag,
d_n_ex_tag,
d_ex_list_tag,
ex_list_tag_indexer,
d_n_ex_idx,
d_ex_list_idx,
ex_list_indexer,
N);
return hipSuccess;
}
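// Illustrative example (not from the original source): if one of a particle's exclusions is
// the particle carrying tag 3, and that particle currently sits at index 42 (d_rtag[3] == 42),
// then the kernel above writes 42 into d_ex_list_idx at (idx, offset), turning the tag-based
// exclusion table into an index-based one for the current particle ordering.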
//! GPU kernel to do a preliminary sizing on particles
/*!
* \param d_head_list The head list of indexes to overwrite
* \param d_req_size_nlist Flag for the required size of the neighbor list to overwrite
* \param d_Nmax The number of neighbors to size per particle type
* \param d_pos Particle positions and types
* \param N the number of particles on this rank
* \param ntypes the number of types in the system
*
* This kernel initializes the head list with the number of neighbors that each type expects from
* d_Nmax. A prefix sum is then performed in gpu_nlist_build_head_list() to accumulate starting
* indices.
*/
__global__ void gpu_nlist_init_head_list_kernel(unsigned int* d_head_list,
unsigned int* d_req_size_nlist,
const unsigned int* d_Nmax,
const Scalar4* d_pos,
const unsigned int N,
const unsigned int ntypes)
{
// cache the d_Nmax into shared memory for faster reads
HIP_DYNAMIC_SHARED(unsigned char, sh)
unsigned int* s_Nmax = (unsigned int*)(&sh[0]);
for (unsigned int cur_offset = 0; cur_offset < ntypes; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
}
}
__syncthreads();
// particle index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N)
return;
const Scalar4 postype_i = d_pos[idx];
const unsigned int type_i = __scalar_as_int(postype_i.w);
const unsigned int Nmax_i = s_Nmax[type_i];
d_head_list[idx] = Nmax_i;
    // the last thread also seeds the memory requirement with its own Nmax
if (idx == (N - 1))
{
*d_req_size_nlist = Nmax_i;
}
}
/*!
* \param d_req_size_nlist Flag for the total size of the neighbor list
* \param d_head_list The complete particle head list
* \param N the number of particles on this rank
*
* A single thread on the device is needed to complete the exclusive scan and find the size of the
* neighbor list. Because gpu_nlist_init_head_list_kernel() already set the number of neighbors for
* the last particle in d_req_size_nlist, the head index of the last particle is added to this
* number to get the total size.
*/
__global__ void gpu_nlist_get_nlist_size_kernel(unsigned int* d_req_size_nlist,
const unsigned int* d_head_list,
const unsigned int N)
{
*d_req_size_nlist += d_head_list[N - 1];
}
/*!
* \param d_head_list The head list of indexes to compute for reading the neighbor list
* \param d_req_size_nlist Flag for the total size of the neighbor list
* \param d_Nmax The number of neighbors to size per particle type
* \param d_pos Particle positions and types
* \param N the number of particles on this rank
* \param ntypes the number of types in the system
* \param block_size Number of threads per block for gpu_nlist_init_head_list_kernel()
*
* \return hipSuccess on completion
*
* \b Implementation
* \a d_head_list is filled with the number of neighbors per particle. An exclusive prefix sum is
 * performed in place on \a d_head_list using the Thrust library, and a single thread is used to
 * compute the total size of the neighbor list while still on the device.
*/
hipError_t gpu_nlist_build_head_list(unsigned int* d_head_list,
unsigned int* d_req_size_nlist,
const unsigned int* d_Nmax,
const Scalar4* d_pos,
const unsigned int N,
const unsigned int ntypes,
const unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_nlist_init_head_list_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
const size_t shared_bytes = ntypes * sizeof(unsigned int);
// initialize each particle with its number of neighbors
hipLaunchKernelGGL((gpu_nlist_init_head_list_kernel),
dim3(N / run_block_size + 1),
dim3(run_block_size),
shared_bytes,
0,
d_head_list,
d_req_size_nlist,
d_Nmax,
d_pos,
N,
ntypes);
thrust::device_ptr<unsigned int> t_head_list = thrust::device_pointer_cast(d_head_list);
thrust::exclusive_scan(t_head_list, t_head_list + N, t_head_list);
hipLaunchKernelGGL((gpu_nlist_get_nlist_size_kernel),
dim3(1),
dim3(1),
0,
0,
d_req_size_nlist,
d_head_list,
N);
return hipSuccess;
}
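// Worked example (illustrative numbers only): if the per-type capacities give per-particle
// counts of [8, 8, 4, 8], gpu_nlist_init_head_list_kernel() first writes those counts into
// d_head_list and stores the last count (8) in *d_req_size_nlist. The exclusive scan then
// turns d_head_list into [0, 8, 16, 20], and gpu_nlist_get_nlist_size_kernel() adds the last
// head index (20), giving a required neighbor list size of 28 entries.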
// CUDA kernel
#include "kernel.h"
void
kernel_gpu_wrapper( params_common common,
int* endoRow,
int* endoCol,
int* tEndoRowLoc,
int* tEndoColLoc,
int* epiRow,
int* epiCol,
int* tEpiRowLoc,
int* tEpiColLoc,
avi_t* frames)
{
// common
//printf("tSize is %d, sSize is %d\n", common.tSize, common.sSize);
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
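	// e.g. a template half-size of tSize = 25 gives a (2*25 + 1) x (2*25 + 1) = 51 x 51
	// template window, i.e. in_elem = 2601 fp values per point (illustrative numbers only)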
//==================================================50
// endo points templates
//==================================================50
fp* d_endoT;
cudaMalloc((void**)&d_endoT, common.in_mem * common.endoPoints);
//printf("%d\n", common.in_elem * common.endoPoints);
//==================================================50
// epi points templates
//==================================================50
fp* d_epiT;
cudaMalloc((void**)&d_epiT, common.in_mem * common.epiPoints);
//====================================================================================================100
// AREA AROUND POINT FROM FRAME (LOCAL)
//====================================================================================================100
// common
common.in2_rows = common.sSize + 1 + common.sSize;
common.in2_cols = common.in2_rows;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(fp) * common.in2_elem;
fp* d_in2;
cudaMalloc((void**)&d_in2, common.in2_mem * common.allPoints);
//printf("%d\n", common.in2_elem * common.allPoints);
//====================================================================================================100
// CONVOLUTION (LOCAL)
//====================================================================================================100
// common
common.conv_rows = common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(fp) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
// unique
fp* d_conv;
cudaMalloc((void**)&d_conv, common.conv_mem * common.allPoints);
//====================================================================================================100
// CUMULATIVE SUM (LOCAL)
//====================================================================================================100
//==================================================50
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//==================================================50
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(fp) * common.in2_pad_cumv_elem;
// unique
//buffer<fp,1> d_in2_pad_cumv(common.in2_pad_cumv_elem * common.allPoints);
//printf("%d\n", common.in2_pad_cumv_elem * common.allPoints);
fp* d_in2_pad_cumv;
cudaMalloc((void**)&d_in2_pad_cumv, common.in2_pad_cumv_mem * common.allPoints);
//==================================================50
// SELECTION
//==================================================50
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(fp) * common.in2_pad_cumv_sel_elem;
// unique
//buffer<fp,1> d_in2_pad_cumv_sel(common.in2_pad_cumv_sel_elem * common.allPoints);
//printf("%d\n", common.in2_pad_cumv_sel_elem * common.allPoints);
fp* d_in2_pad_cumv_sel;
cudaMalloc((void**)&d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//==================================================50
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(fp) * common.in2_sub_cumh_elem;
// unique
//buffer<fp,1> d_in2_sub_cumh(common.in2_sub_cumh_elem * common.allPoints);
//printf("%d\n", common.in2_sub_cumh_elem * common.allPoints);
fp* d_in2_sub_cumh;
cudaMalloc((void**)&d_in2_sub_cumh, common.in2_sub_cumh_mem * common.allPoints);
//==================================================50
// SELECTION
//==================================================50
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(fp) * common.in2_sub_cumh_sel_elem;
// unique
//buffer<fp,1> d_in2_sub_cumh_sel(common.in2_sub_cumh_sel_elem * common.allPoints);
//printf("%d\n", common.in2_sub_cumh_sel_elem * common.allPoints);
fp* d_in2_sub_cumh_sel;
cudaMalloc((void**)&d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION
//==================================================50
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(fp) * common.in2_sub2_elem;
// unique
//buffer<fp,1> d_in2_sub2(common.in2_sub2_elem * common.allPoints);
//printf("%d\n", common.in2_sub2_elem * common.allPoints);
fp* d_in2_sub2;
cudaMalloc((void**)&d_in2_sub2, common.in2_sub2_mem * common.allPoints);
//====================================================================================================100
// CUMULATIVE SUM 2 (LOCAL)
//====================================================================================================100
//==================================================50
// MULTIPLICATION
//==================================================50
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// unique
//buffer<fp,1> d_in2_sqr(common.in2_elem * common.allPoints);
//printf("%d\n", common.in2_elem * common.allPoints);
fp* d_in2_sqr;
cudaMalloc((void**)&d_in2_sqr, common.in2_sqr_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION
//==================================================50
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// unique
//buffer<fp,1> d_in2_sqr_sub2(common.in2_sub2_elem * common.allPoints);
//printf("%d\n", common.in2_sub2_elem * common.allPoints);
fp* d_in2_sqr_sub2;
cudaMalloc((void**)&d_in2_sqr_sub2, common.in2_sqr_sub2_mem * common.allPoints);
//====================================================================================================100
// FINAL (LOCAL)
//====================================================================================================100
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// unique
//buffer<fp,1> d_in_sqr(common.in_elem * common.allPoints);
//printf("%d\n", common.in_elem * common.allPoints);
fp* d_in_sqr;
cudaMalloc((void**)&d_in_sqr, common.in_sqr_mem * common.allPoints);
//====================================================================================================100
// TEMPLATE MASK CREATE (LOCAL)
//====================================================================================================100
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(fp) * common.tMask_elem;
// unique
//buffer<fp,1> d_tMask(common.tMask_elem * common.allPoints);
//printf("%d\n", common.tMask_elem * common.allPoints);
fp* d_tMask;
cudaMalloc((void**)&d_tMask, common.tMask_mem * common.allPoints);
//====================================================================================================100
// POINT MASK INITIALIZE (LOCAL)
//====================================================================================================100
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(fp) * common.mask_elem;
//====================================================================================================100
// MASK CONVOLUTION (LOCAL)
//====================================================================================================100
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(fp) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
//printf("common.endPoints=%d\n", common.endoPoints); // 20
//printf("common.epiPoints=%d\n", common.epiPoints); // 31
//printf("common.in_elem=%d\n", common.in_elem);
//printf("common.endo_mem=%d\n", common.endo_mem); // 80
//printf("common.epi_mem=%d\n", common.epi_mem); // 124
//
//buffer<params_common,1> d_common(&common, 1, props); // range is 1 ?
//buffer<int,1> d_endoRow(endoRow, common.endoPoints, props);
//d_endoRow.set_final_data(nullptr);
//buffer<int,1> d_endoCol(endoCol, common.endoPoints, props);
//d_endoCol.set_final_data(nullptr);
//buffer<int,1> d_tEndoRowLoc(tEndoRowLoc, common.endoPoints * common.no_frames, props);
//buffer<int,1> d_tEndoColLoc(tEndoColLoc, common.endoPoints * common.no_frames, props);
//buffer<int,1> d_epiRow(epiRow, common.epiPoints, props);
//d_epiRow.set_final_data(nullptr);
//buffer<int,1> d_epiCol(epiCol, common.epiPoints, props);
//d_epiCol.set_final_data(nullptr);
//buffer<int,1> d_tEpiRowLoc(tEpiRowLoc, common.epiPoints * common.no_frames, props);
//buffer<int,1> d_tEpiColLoc(tEpiColLoc, common.epiPoints * common.no_frames, props);
int* d_endoRow;
cudaMalloc((void**)&d_endoRow, common.endo_mem);
cudaMemcpy(d_endoRow, endoRow, common.endo_mem, cudaMemcpyHostToDevice);
int* d_endoCol;
cudaMalloc((void**)&d_endoCol, common.endo_mem);
cudaMemcpy(d_endoCol, endoCol, common.endo_mem, cudaMemcpyHostToDevice);
int* d_tEndoRowLoc;
int* d_tEndoColLoc;
cudaMalloc((void**)&d_tEndoRowLoc, common.endo_mem*common.no_frames);
cudaMemcpy(d_tEndoRowLoc, tEndoRowLoc, common.endo_mem*common.no_frames, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_tEndoColLoc, common.endo_mem*common.no_frames);
cudaMemcpy(d_tEndoColLoc, tEndoColLoc, common.endo_mem*common.no_frames, cudaMemcpyHostToDevice);
int* d_epiRow;
int* d_epiCol;
cudaMalloc((void**)&d_epiRow, common.epi_mem);
cudaMemcpy(d_epiRow, epiRow, common.epi_mem, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_epiCol, common.epi_mem);
cudaMemcpy(d_epiCol, epiCol, common.epi_mem, cudaMemcpyHostToDevice);
int* d_tEpiRowLoc;
int* d_tEpiColLoc;
cudaMalloc((void**)&d_tEpiRowLoc, common.epi_mem*common.no_frames);
cudaMemcpy(d_tEpiRowLoc, tEpiRowLoc, common.epi_mem*common.no_frames, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_tEpiColLoc, common.epi_mem*common.no_frames);
cudaMemcpy(d_tEpiColLoc, tEpiColLoc, common.epi_mem*common.no_frames, cudaMemcpyHostToDevice);
//buffer<fp,1> d_mask_conv(common.mask_conv_elem * common.allPoints);
//d_mask_conv.set_final_data(nullptr);
fp* d_mask_conv;
cudaMalloc((void**)&d_mask_conv, common.mask_conv_mem * common.allPoints);
//printf("%d\n", common.mask_conv_elem * common.allPoints);
//buffer<fp,1> d_in_mod_temp(common.in_elem * common.allPoints);
//d_in_mod_temp.set_final_data(nullptr);
fp* d_in_mod_temp;
cudaMalloc((void**)&d_in_mod_temp, common.in_mem * common.allPoints);
//printf("%d\n", common.in_elem * common.allPoints);
//buffer<fp,1> d_in_partial_sum(common.in_cols * common.allPoints);
//d_in_partial_sum.set_final_data(nullptr);
fp* d_in_partial_sum;
cudaMalloc((void**)&d_in_partial_sum, sizeof(fp)*common.in_cols * common.allPoints);
//printf("%d\n", common.in_cols * common.allPoints);
//buffer<fp,1> d_in_sqr_partial_sum(common.in_sqr_rows * common.allPoints);
//d_in_sqr_partial_sum.set_final_data(nullptr);
fp* d_in_sqr_partial_sum;
cudaMalloc((void**)&d_in_sqr_partial_sum, sizeof(fp)*common.in_sqr_rows * common.allPoints);
//printf("%d\n", common.in_sqr_rows * common.allPoints);
//buffer<fp,1> d_par_max_val(common.mask_conv_rows * common.allPoints);
//d_par_max_val.set_final_data(nullptr);
fp* d_par_max_val;
cudaMalloc((void**)&d_par_max_val, sizeof(fp)*common.mask_conv_rows * common.allPoints);
//printf("%d\n", common.mask_conv_rows * common.allPoints);
//buffer<int,1> d_par_max_coo( common.mask_conv_rows * common.allPoints);
//d_par_max_coo.set_final_data(nullptr);
fp* d_par_max_coo;
cudaMalloc((void**)&d_par_max_coo, sizeof(fp)*common.mask_conv_rows * common.allPoints);
//buffer<fp,1> d_in_final_sum(common.allPoints);
//d_in_final_sum.set_final_data(nullptr);
fp* d_in_final_sum;
cudaMalloc((void**)&d_in_final_sum, sizeof(fp)*common.allPoints);
//buffer<fp,1> d_in_sqr_final_sum(common.allPoints);
//d_in_sqr_final_sum.set_final_data(nullptr);
fp* d_in_sqr_final_sum;
cudaMalloc((void**)&d_in_sqr_final_sum, sizeof(fp)*common.allPoints);
//buffer<fp,1> d_denomT(common.allPoints);
//d_denomT.set_final_data(nullptr);
fp* d_denomT;
cudaMalloc((void**)&d_denomT, sizeof(fp)*common.allPoints);
#ifdef TEST_CHECKSUM
//buffer<fp,1> d_checksum(CHECK);
//d_checksum.set_final_data(nullptr);
//printf("%d\n", CHECK);
fp* checksum = (fp*) malloc (sizeof(fp)*CHECK);
fp* d_checksum;
cudaMalloc((void**)&d_checksum, sizeof(fp)*CHECK);
#endif
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
	// All operations within the kernel use the same maximum number of threads. The block size is set to the size appropriate for the largest operation (on the padded matrix); the other operations use subsets of that.
dim3 threads(NUMBER_THREADS);
dim3 grids(common.allPoints);
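	// Illustrative example: with the 20 endo and 31 epi points noted in the comments above,
	// allPoints = 51, so 51 blocks of NUMBER_THREADS threads are launched, one block per
	// tracked point.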
printf("frame progress: ");
fflush(NULL);
//====================================================================================================100
// LAUNCH
//====================================================================================================100
// variables
fp* frame;
int frame_no;
//buffer<fp,1> d_frame(common.frame_elem);
fp* d_frame;
cudaMalloc((void**)&d_frame, sizeof(fp)*common.frame_elem);
for(frame_no=0; frame_no<common.frames_processed; frame_no++) {
//==================================================50
// get and write current frame to GPU buffer
//==================================================50
		// Extract frame frame_no from the video file (not cropped, not scaled, converted)
frame = get_frame( frames, // pointer to video file
frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
cudaMemcpy(d_frame, frame, sizeof(fp)*common.frame_elem, cudaMemcpyHostToDevice);
//==================================================50
// launch kernel
//==================================================50
hw<<<grids, threads>>>(
frame_no,
common,
d_frame,
d_endoRow,
d_endoCol,
d_tEndoRowLoc,
d_tEndoColLoc,
d_epiRow,
d_epiCol,
d_tEpiRowLoc,
d_tEpiColLoc,
d_endoT,
d_epiT,
d_in2,
d_conv,
d_in2_pad_cumv,
d_in2_pad_cumv_sel,
d_in2_sub_cumh,
d_in2_sub_cumh_sel,
d_in2_sub2,
d_in2_sqr,
d_in2_sqr_sub2,
d_in_sqr,
d_tMask,
d_mask_conv,
d_in_mod_temp,
d_in_partial_sum,
d_in_sqr_partial_sum,
d_par_max_val,
d_par_max_coo,
d_in_final_sum,
d_in_sqr_final_sum,
d_denomT
#ifdef TEST_CHECKSUM
,d_checksum
#endif
);
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
//==================================================50
// print frame progress
//==================================================50
printf("%d ", frame_no);
fflush(NULL);
//==================================================50
// DISPLAY CHECKSUM (TESTING)
//==================================================50
#ifdef TEST_CHECKSUM
cudaMemcpy(checksum, d_checksum, sizeof(fp)*CHECK, cudaMemcpyDeviceToHost);
printf("CHECKSUM:\n");
for(int i=0; i<CHECK; i++){
printf("i=%d checksum=%f\n", i, checksum[i]);
}
printf("\n\n");
#endif
}
cudaMemcpy(tEndoRowLoc, d_tEndoRowLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(tEndoColLoc, d_tEndoColLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(tEpiRowLoc, d_tEpiRowLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(tEpiColLoc, d_tEpiColLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
//====================================================================================================100
// PRINT FRAME PROGRESS END
//====================================================================================================100
#ifdef TEST_CHECKSUM
free(checksum);
cudaFree(d_checksum);
#endif
	cudaFree(d_endoT);
	cudaFree(d_epiT);
cudaFree(d_in2);
cudaFree(d_conv);
cudaFree(d_in2_pad_cumv);
cudaFree(d_in2_pad_cumv_sel);
cudaFree(d_in2_sub_cumh);
cudaFree(d_in2_sub_cumh_sel);
cudaFree(d_in2_sub2);
cudaFree(d_in2_sqr);
cudaFree(d_in2_sqr_sub2);
cudaFree(d_in_sqr);
cudaFree(d_tMask);
cudaFree(d_endoRow);
cudaFree(d_endoCol);
cudaFree(d_tEndoRowLoc);
cudaFree(d_tEndoColLoc);
cudaFree(d_epiRow);
cudaFree(d_epiCol);
cudaFree(d_tEpiRowLoc);
cudaFree(d_tEpiColLoc);
cudaFree(d_mask_conv);
cudaFree(d_in_mod_temp);
cudaFree(d_in_partial_sum);
cudaFree(d_in_sqr_partial_sum);
cudaFree(d_par_max_val);
cudaFree(d_par_max_coo);
cudaFree(d_in_final_sum);
cudaFree(d_in_sqr_final_sum);
cudaFree(d_denomT);
cudaFree(d_frame);
printf("\n");
fflush(NULL);
}
/**
 * Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* * This file has been modified by Megvii ("Megvii Modifications").
* * All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights
* reserved.
* --------------------------------------------------------------------------
*/
#include "cudaconv2.cuh"
#include "img_acts/img_act_templates.cuh"
#include "nvmatrix.cuh"
#ifdef _WIN32
#define _Pragma(x)
#endif
namespace megdnn {
namespace cuda {
/*
* New Titan-optimized stuff.
*/
__device__ __forceinline__ void
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(
const int my, const int mx, const int numModulesX, const int paddingStart,
const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX,
const int filterSize, int& moduleIdx, int& pxIdxInFilter) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
moduleIdx = my * numModulesX + mx; // out
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out
}
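// Illustrative example (numbers not from the original source): with paddingStart = -2,
// moduleStride = 4 and filterSize = 8, the module at (my, mx) = (1, 2) starts at image
// row 2 and column 6, so a block pixel at (blockPixelIdxY, blockPixelIdxX) = (5, 9) maps
// to pxIdxInFilter = (5 - 2) * 8 + (9 - 6) = 27, and moduleIdx = 1 * numModulesX + 2.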
#define IA_PRELOAD_LOOP(w, offset) \
_Pragma("unroll") for (int i = 0; i < imgsPerThread; i++) { \
_Pragma("unroll") for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w) + (offset)] * \
shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
}
/*
* Same loop as above but inverted.
*/
#define IA_PRELOAD_LOOP2(w, offset) \
_Pragma("unroll") for (int c = 0; c < colorsPerThread; c++) { \
_Pragma("unroll") for (int i = 0; i < imgsPerThread; i++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w) + (offset)] * \
shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
}
#define IA_PRELOAD_LOOP3(i, offset) \
_Pragma("unroll") for (int w = 0; w < filterCacheH; w++) { \
_Pragma("unroll") for (int c = 0; c < colorsPerThread; c++) { \
prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w) + (offset)] * \
shHidActs[w][threadIdx.x * imgsPerThread + i]; \
} \
}
#define IA_PRELOAD_W(z) \
wPreload[z] = fLoad[(z)*B_X * B_Y / filterCacheF * filterPixels * numFilters];
#define IA_PRELOAD_W_TX(z) \
wPreload[z] = tex1Dfetch<float>( \
filters, filtersLoadOffset + (z)*B_X * B_Y / filterCacheF * filterPixels * \
numFilters);
#define IA_PRELOAD_H(y, x) \
if (!checkCaseBounds || myCaseIdx + (x)*B_X < numImages) { \
hPreload[y][x] = hLoad[(y)*B_Y * numModules * numImages + (x)*B_X]; \
}
#define IA_PRELOAD_H_TX(y, x) \
if (!checkCaseBounds || myCaseIdx + (x)*B_X < numImages) { \
hPreload[y][x] = tex1Dfetch<float>( \
hidActs, \
hidActsLoadOffset + (y)*B_Y * numModules * numImages + (x)*B_X); \
}
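// For example, IA_PRELOAD_LOOP(z, 0) expands into the fully unrolled update
//     prod[c][i] += shFilters[c * B_Y + threadIdx.y][z] *
//                   shHidActs[z][threadIdx.x * imgsPerThread + i];
// for every (i, c) pair, i.e. the z-th column of the shared filter tile is folded into the
// per-thread accumulators while IA_PRELOAD_W_TX / IA_PRELOAD_H_TX prefetch the next filter
// and hidden-activation values into registers.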
template <
int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF,
int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void __launch_bounds__(
256, 2) // 256 threads per block, 2 blocks per multiprocessor
// These launch bounds ensure 25% occupancy (128 registers used)
        // as opposed to 13% (130 registers) achieved by the defaults.
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(
cudaTextureObject_t hidActs, cudaTextureObject_t filters,
float* targets, const int numModulesY, const int numModulesX,
const int numImages, const int numFilters, const int filterSize,
const int imgSizeY, const int imgSizeX, const int paddingStart,
const int moduleStride, const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread * B_Y][filterCacheF];
__shared__ float shHidActs[filterCacheH][B_X * imgsPerThread];
fill_shared_mem<float>((float*)shFilters, sizeof(shFilters) / sizeof(float), 0);
fill_shared_mem<float>((float*)shHidActs, sizeof(shHidActs) / sizeof(float), 0);
__syncthreads();
const int numImgBlocks = DIVUP(numImages, B_X * imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int myCaseIdx = blockCaseIdx + threadIdx.x;
const int imgColorIdx =
(blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
// const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
// const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx %
// (B_X*imgsPerThread);
const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
// nvcc is behaving idiotically again, these useless declarations save registers
// const int outputY = threadIdx.y, outputX = threadIdx.x;
// const int ty = threadIdx.y, tx = threadIdx.x;
const int numModules = numModulesY * numModulesX;
const int hidActsOffset =
(blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
const int filtersOffset =
blockFilterIdx +
(filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
// hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules +
// myCaseIdx; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) *
// filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages +
blockPixelIdx * numImages + myCaseIdx;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][i] = 0;
}
}
const int startY =
blockPixelIdxY - paddingStart < filterSize
? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY =
min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX =
blockPixelIdxX - paddingStart < filterSize
? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX =
min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
// const bool noFLoop = filterCacheF == filterCacheH;
/*
* Initial preload
*/
float hPreload[filterCacheH / B_Y][imgsPerThread]; // [2][4]
float wPreload[filterCacheF * colorsPerThread / B_X]; // [8]
int moduleIdx, pxIdxInFilter;
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(
startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
// const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
// : &filters[moduleIdx * numFilterColors * filterPixels
// * numFilters + pxIdxInFilter * numFilters + 0];
int filtersLoadOffset =
filtersOffset +
(conv ? pxIdxInFilter * numFilters + 0
: moduleIdx * numFilterColors * filterPixels * numFilters +
pxIdxInFilter * numFilters);
#pragma unroll
for (int i = 0; i < colorsPerThread * B_Y; i += B_X * B_Y / filterCacheF) {
if ((colorsPerThread * B_Y) % (B_X * B_Y / filterCacheF) == 0 ||
i + filtersLoadY < colorsPerThread * B_Y) {
wPreload[i * filterCacheF / (B_X * B_Y)] = tex1Dfetch<float>(
filters, filtersLoadOffset + i * filterPixels * numFilters);
}
}
// const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages];
int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages;
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
hPreload[j / B_Y][i] = tex1Dfetch<float>(
hidActs,
hidActsLoadOffset + j * numModules * numImages + i * B_X);
}
}
}
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
const bool lastModule = my == endY - 1 && mx == endX - 1;
if (!lastModule) {
mxNext = mx + 1 == endX ? startX : mx + 1;
myNext = my + (mx + 1 == endX);
}
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(
myNext, mxNext, numModulesX, paddingStart, moduleStride,
blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext,
pxIdxInFilterNext);
for (int f = 0; f < numFiltersPerGroup;
f += filterCacheF) { // multiply with filterCacheF filters at a time
#pragma unroll
for (int i = 0; i < colorsPerThread * B_Y;
i += B_X * B_Y / filterCacheF) {
if ((colorsPerThread * B_Y) % (B_X * B_Y / filterCacheF) == 0 ||
i + filtersLoadY < colorsPerThread * B_Y) {
shFilterLoad[i * filterCacheF] =
wPreload[i * filterCacheF / (B_X * B_Y)];
}
}
filtersLoadOffset =
filtersOffset +
(conv ? pxIdxInFilter * numFilters + f + filterCacheF
: moduleIdx * numFilterColors * filterPixels *
numFilters +
pxIdxInFilter * numFilters + f + filterCacheF);
if (f == numFiltersPerGroup - filterCacheF) {
filtersLoadOffset =
filtersOffset +
(conv ? pxIdxInFilterNext * numFilters
: moduleIdxNext * numFilterColors * filterPixels *
numFilters +
pxIdxInFilterNext * numFilters);
}
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] =
hPreload[j / B_Y][i];
}
}
}
}
__syncthreads();
hidActsLoadOffset =
hidActsOffset +
(moduleIdx + (f + filterCacheH) * numModules) * numImages;
#pragma unroll
for (int z = 0; z < 4; ++z) {
IA_PRELOAD_LOOP(z, 0);
IA_PRELOAD_W_TX(z);
}
#pragma unroll
for (int z = 4; z < 12; ++z) {
IA_PRELOAD_LOOP(z, 0);
IA_PRELOAD_H_TX((z - 4) / 4, z % 4);
}
#pragma unroll
for (int z = 12; z < 16; ++z) {
IA_PRELOAD_LOOP(z, 0);
}
__syncthreads();
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] =
hPreload[j / B_Y][i];
}
}
}
}
__syncthreads();
hidActsLoadOffset =
hidActsOffset +
(moduleIdx + (f + filterCacheF) * numModules) * numImages;
if (f == numFiltersPerGroup - filterCacheF) {
hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
}
#pragma unroll
for (int z = 0; z < 4; ++z) {
IA_PRELOAD_LOOP(z, filterCacheH);
IA_PRELOAD_W_TX(z + 4);
}
#pragma unroll
for (int z = 4; z < 12; ++z) {
IA_PRELOAD_LOOP(z, filterCacheH);
IA_PRELOAD_H_TX((z - 4) / 4, z % 4);
}
#pragma unroll
for (int z = 12; z < 16; ++z) {
IA_PRELOAD_LOOP(z, filterCacheH);
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] =
scaleTargets *
targets[c * B_Y * imgPixels * numImages + i * B_X] +
scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] =
scaleOutputs * prod[c][i];
}
}
}
}
}
template <
int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF,
int filterCacheH, bool scale, bool checkCaseBounds, bool conv>
__global__ void
//__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(
cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages,
const int numFilters, const int filterSize, const int imgSizeY,
const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups, const float scaleTargets,
const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread * B_Y][filterCacheF];
__shared__ float shHidActs[filterCacheH][B_X * imgsPerThread];
fill_shared_mem<float>((float*)shFilters, sizeof(shFilters) / sizeof(float), 0);
fill_shared_mem<float>((float*)shHidActs, sizeof(shHidActs) / sizeof(float), 0);
__syncthreads();
const int numImgBlocks = DIVUP(numImages, B_X * imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int myCaseIdx = blockCaseIdx + threadIdx.x;
const int imgColorIdx =
(blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
// const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X;
// const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx %
// (B_X*imgsPerThread);
const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF;
// nvcc is behaving idiotically again, these useless declarations save registers
// const int outputY = threadIdx.y, outputX = threadIdx.x;
// const int ty = threadIdx.y, tx = threadIdx.x;
const int numModules = numModulesY * numModulesX;
const int hidActsOffset =
(blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx;
const int filtersOffset =
blockFilterIdx +
(filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
// hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules +
// myCaseIdx; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) *
// filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages +
blockPixelIdx * numImages + myCaseIdx;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][i] = 0;
}
}
const int startY =
blockPixelIdxY - paddingStart < filterSize
? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY =
min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX =
blockPixelIdxX - paddingStart < filterSize
? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX =
min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread];
// const bool noFLoop = filterCacheF == filterCacheH;
/*
* Initial preload
*/
float hPreload[filterCacheH / B_Y][imgsPerThread]; // [4][4]
float wPreload[filterCacheF * colorsPerThread / B_X]; // [6]
int moduleIdx, pxIdxInFilter;
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(
startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY,
blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter);
// const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0]
// : &filters[moduleIdx * numFilterColors * filterPixels
// * numFilters + pxIdxInFilter * numFilters + 0];
int filtersLoadOffset =
filtersOffset +
(conv ? pxIdxInFilter * numFilters
: moduleIdx * numFilterColors * filterPixels * numFilters +
pxIdxInFilter * numFilters);
#pragma unroll
for (int i = 0; i < colorsPerThread * B_Y; i += B_X * B_Y / filterCacheF) {
if ((colorsPerThread * B_Y) % (B_X * B_Y / filterCacheF) == 0 ||
i + filtersLoadY < colorsPerThread * B_Y) {
wPreload[i * filterCacheF / (B_X * B_Y)] = tex1Dfetch<float>(
filters, filtersLoadOffset + i * filterPixels * numFilters);
}
}
// const float* hLoad = &hidActs[moduleIdx * numImages];
int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages;
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
hPreload[j / B_Y][i] = tex1Dfetch<float>(
hidActs,
hidActsLoadOffset + j * numModules * numImages + i * B_X);
}
}
}
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext;
const bool lastModule = my == endY - 1 && mx == endX - 1;
if (!lastModule) {
mxNext = mx + 1 == endX ? startX : mx + 1;
myNext = my + (mx + 1 == endX);
}
conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(
myNext, mxNext, numModulesX, paddingStart, moduleStride,
blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext,
pxIdxInFilterNext);
for (int f = 0; f < numFiltersPerGroup;
f += filterCacheF) { // multiply with filterCacheF filters at a time
#pragma unroll
for (int i = 0; i < colorsPerThread * B_Y;
i += B_X * B_Y / filterCacheF) {
if ((colorsPerThread * B_Y) % (B_X * B_Y / filterCacheF) == 0 ||
i + filtersLoadY < colorsPerThread * B_Y) {
shFilterLoad[i * filterCacheF] =
wPreload[i * filterCacheF / (B_X * B_Y)];
}
}
filtersLoadOffset =
filtersOffset +
(conv ? pxIdxInFilter * numFilters + f + filterCacheF
: moduleIdx * numFilterColors * filterPixels *
numFilters +
pxIdxInFilter * numFilters + f + filterCacheF);
if (f == numFiltersPerGroup - filterCacheF) {
filtersLoadOffset =
filtersOffset +
(conv ? pxIdxInFilterNext * numFilters
: moduleIdxNext * numFilterColors * filterPixels *
numFilters +
pxIdxInFilterNext * numFilters);
}
#pragma unroll
for (int j = 0; j < filterCacheH; j += B_Y) {
if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
// NOTE: bank conflicts here!
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
shHidActLoad[j * B_X * imgsPerThread + i] =
hPreload[j / B_Y][i];
}
}
}
}
hidActsLoadOffset =
hidActsOffset +
(moduleIdx + (f + filterCacheF) * numModules) * numImages;
if (f == numFiltersPerGroup - filterCacheF) {
hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
}
__syncthreads();
// It seems that there is no point explicitly interleaving loads
// and computations because the scheduler does that anyway.
IA_PRELOAD_LOOP2(0, 0);
IA_PRELOAD_LOOP2(1, 0);
IA_PRELOAD_LOOP2(2, 0);
IA_PRELOAD_LOOP2(3, 0);
IA_PRELOAD_LOOP2(4, 0);
IA_PRELOAD_LOOP2(5, 0);
IA_PRELOAD_LOOP2(6, 0);
IA_PRELOAD_LOOP2(7, 0);
IA_PRELOAD_LOOP2(8, 0);
IA_PRELOAD_LOOP2(9, 0);
IA_PRELOAD_LOOP2(10, 0);
IA_PRELOAD_LOOP2(11, 0);
IA_PRELOAD_LOOP2(12, 0);
IA_PRELOAD_LOOP2(13, 0);
IA_PRELOAD_LOOP2(14, 0);
IA_PRELOAD_LOOP2(15, 0);
IA_PRELOAD_W_TX(0);
IA_PRELOAD_W_TX(1);
IA_PRELOAD_W_TX(2);
IA_PRELOAD_W_TX(3);
IA_PRELOAD_W_TX(4);
IA_PRELOAD_W_TX(5);
IA_PRELOAD_H_TX(0, 0);
IA_PRELOAD_H_TX(0, 1);
IA_PRELOAD_H_TX(0, 2);
IA_PRELOAD_H_TX(0, 3);
IA_PRELOAD_H_TX(1, 0);
IA_PRELOAD_H_TX(1, 1);
IA_PRELOAD_H_TX(1, 2);
IA_PRELOAD_H_TX(1, 3);
IA_PRELOAD_H_TX(2, 0);
IA_PRELOAD_H_TX(2, 1);
IA_PRELOAD_H_TX(2, 2);
IA_PRELOAD_H_TX(2, 3);
IA_PRELOAD_H_TX(3, 0);
IA_PRELOAD_H_TX(3, 1);
IA_PRELOAD_H_TX(3, 2);
IA_PRELOAD_H_TX(3, 3);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] =
scaleTargets *
targets[c * B_Y * imgPixels * numImages + i * B_X] +
scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
targets[c * B_Y * imgPixels * numImages + i * B_X] =
scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
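// Illustrative example (not part of the original comment): with numFilters = 64,
// numModulesY = numModulesX = 12 and numImages = 128, hidActs holds a (64, 144, 128)
// tensor with the image index varying fastest, which is why the kernels above address it
// as hidActs[(filter * numModules + module) * numImages + image].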
void _imgActs(
cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput,
bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
megdnn_assert_internal(numImgColors % numGroups == 0);
// megdnn_assert_internal(numFilters % (16*numGroups) == 0); // TODO: insisting on
// 32 filters due to bug in calling code below. fix that.
bool previous_limit = (numFilters % (16 * numGroups)) == 0;
megdnn_assert_internal(
numGroups > 1 ||
(numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
megdnn_assert_internal(numGroups == 1 || numFilterColors % 4 == 0);
megdnn_assert_internal(filterPixels == filterSize * filterSize);
megdnn_assert_internal(hidActs.getNumRows() == numModules * numFilters);
megdnn_assert_internal(
filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
megdnn_assert_internal(numModules == numModulesY * numModulesX);
megdnn_assert_internal(hidActs.isContiguous());
megdnn_assert_internal(filters.isContiguous());
megdnn_assert_internal(!hidActs.isTrans());
megdnn_assert_internal(!filters.isTrans());
megdnn_assert_internal(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in
// the convolution
megdnn_assert_internal(paddingStart <= 0);
megdnn_assert_internal(
paddingStart + (numModulesX - 1) * moduleStride + filterSize >= imgSizeX);
megdnn_assert_internal(
paddingStart + (numModulesY - 1) * moduleStride + filterSize >= imgSizeY);
megdnn_assert_internal(moduleStride <= filterSize);
megdnn_assert_internal(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread = 0, imgsPerThread = 0;
if (numFilterColors % 8 == 0) {
threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 12
: numFilterColors % 32 == 0 ? 8
: numFilterColors % 16 == 0 ? 4
: 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
megdnn_assert_internal(numFilterColors % (threads.y * colorsPerThread) == 0);
// previous_limit = numFilterColors % (threads.y * colorsPerThread) == 0;
blocks =
dim3(DIVUP(numImages, threads.x * imgsPerThread) *
(numImgColors / (threads.y * colorsPerThread)),
imgPixels);
// NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels %
// 64 != 0 has not been optimized!!
} else if (numFilterColors > 3) {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks =
dim3(DIVUP(numImages, threads.x * imgsPerThread) *
(numImgColors / colorsPerThread),
DIVUP(imgSizeY, 4) * DIVUP(imgSizeX, 4));
} else {
// NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
blocks =
dim3(DIVUP(numImages, threads.x * imgsPerThread),
DIVUP(imgSizeY, 4) * DIVUP(imgSizeX, 4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors * imgPixels, numImages);
} else {
megdnn_assert_internal(targets.getNumRows() == numImgColors * imgPixels);
megdnn_assert_internal(targets.getNumCols() == numImages);
}
const bool scale = scaleTargets != 0;
// cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16<
// 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared);
// conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12,
// 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(
// hidActs.getTextureObject(), filters.getTextureObject(),
// targets.getDevData(), numModulesY, numModulesX, numImages, numFilters,
// filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride,
// numImgColors, numGroups, scaleTargets, scaleOutput);
// return;
// printf("conv: %d\n", conv);
// printf("scale: %d\n", scale);
// printf("checkCaseBounds: %d\n", checkCaseBounds);
// printf("numFilterColors: %d\n", numFilterColors);
// printf("numImages: %d\n", numImages);
// cudaStream_t stream = NVMatrix::getDefaultStream();
if (conv == false) {
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
if (previous_limit) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex<
8, 32, 4, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex<
8, 32, 4, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getTextureObject(),
filters.getTextureObject(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 4, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 4, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups,
scaleTargets, scaleOutput);
}
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 2, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 2, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
} else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 4, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 4, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 2, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 2, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
if (previous_limit) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16<
4, 32, 4, 12, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16<
4, 32, 4, 12, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getTextureObject(),
filters.getTextureObject(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 4, 12, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 4, 12, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups,
scaleTargets, scaleOutput);
}
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 2, 12, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 2, 12, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 12, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 12, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 12, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 12, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 4, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 4, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 2, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 2, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 32, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 32, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
} else if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 4, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 4, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 2, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 2, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 4, 4, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 4, 4, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 2, 4, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 2, 4, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 4, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 4, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 4, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 4, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 4, 2, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 4, 2, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 2, 2, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 2, 2, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 2, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 2, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 2, 16, 16, false, false,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 2, 16, 16, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
img_acts_mediumcolor<8, 4, false, false, false>,
cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
img_acts_mediumcolor<4, 4, false, false, false>,
cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
img_acts_mediumcolor<2, 4, false, false, false>,
cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
img_acts_mediumcolor<2, 4, false, false, false>,
cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<8, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<4, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<4, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<8, 3, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<4, 3, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<4, 3, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 3, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 3, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 3, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 3, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<8, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<4, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<4, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 2, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 2, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 128 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<8, 1, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 64 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<4, 1, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<4, 1, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 32 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 1, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 1, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
} else if (numImages % 16 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 1, false, false, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 1, false, false, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
}
}
}
}
} else if (checkCaseBounds == true) {
if (numFilterColors % 8 == 0) {
if (numFilterColors % 64 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 32, 16, false, true,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 32, 16, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
} else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 16, 16, false, true,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
8, 32, 1, 8, 16, 16, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 48 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 12, 16, 16, false, true,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 12, 16, 16, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 32 == 0) {
if (numFilters % 32 == 0) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 32, 16, false, true,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 32, 16, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
} else if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 16, 16, false, true,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 8, 16, 16, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 16 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 4, 16, 16, false, true,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 4, 16, 16, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
} else if (numFilterColors % 8 == 0) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
conv_img_acts_manycolor_kepler<
4, 32, 1, 2, 16, 16, false, true,
false>,
cudaFuncCachePreferShared);
conv_img_acts_manycolor_kepler<
4, 32, 1, 2, 16, 16, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (numFilterColors == 4) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
img_acts_mediumcolor<2, 4, false, true, false>,
cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
numImgColors, numGroups, scaleTargets,
scaleOutput);
}
}
}
/*
else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false,
true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2,
false, true, false ><<<blocks, threads, 0,
stream>>>(hidActs.getDevData(), filters.getDevData(),
targets.getDevData(), numModulesY, numModulesX, numImages,
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart,
moduleStride, scaleTargets, scaleOutput);
}
}
}
*/
} else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 3, false, true, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 3, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 2, false, true, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 2, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 1) {
if ((numFilters % 1 == 0)) {
if (numImages % 1 == 0) {
cudaFuncSetCacheConfig(
img_acts_color<2, 1, false, true, false>,
cudaFuncCachePreferShared);
img_acts_color<2, 1, false, true, false>
<<<blocks, threads, 0, stream>>>(
hidActs.getDevData(),
filters.getDevData(),
targets.getDevData(), numModulesY,
numModulesX, numImages, numFilters,
filterSize, imgSizeY, imgSizeX,
paddingStart, moduleStride,
scaleTargets, scaleOutput);
}
}
}
}
}
}
}
getLastCudaError("imgActs: kernel execution failed");
}
void convImgActs(
cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
int numImgColors, int numGroups) {
_imgActs(
stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
void convImgActs(
cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_imgActs(
stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
paddingStart, moduleStride, numImgColors, numGroups, scaleTargets,
scaleOutput, true);
}
void localImgActs(
cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
int numImgColors, int numGroups) {
_imgActs(
stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
void localImgActs(
cudaStream_t stream, NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_imgActs(
stream, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
paddingStart, moduleStride, numImgColors, numGroups, scaleTargets,
scaleOutput, false);
}
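// Note: convImgActs() and localImgActs() differ only in the final `conv` flag they
// forward to _imgActs(); the overloads without scale arguments default to
// scaleTargets = 0 and scaleOutput = 1, i.e. the targets matrix is overwritten.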
} // namespace cuda
} // namespace megdnn
#include "common.h"
#include "texture.h"
//------------------------------------------------------------------------
// Memory access and math helpers.
static __device__ __forceinline__ void accum_from_mem(float* a, int s, float b, float c) { a[0] += b * c; }
static __device__ __forceinline__ void accum_from_mem(float* a, int s, float2 b, float c) { a[0] += b.x * c; a[s] += b.y * c; }
static __device__ __forceinline__ void accum_from_mem(float* a, int s, float4 b, float c) { a[0] += b.x * c; a[s] += b.y * c; a[2*s] += b.z * c; a[3*s] += b.w * c; }
static __device__ __forceinline__ void accum_to_mem(float& a, float* b, int s) { a += b[0]; }
static __device__ __forceinline__ void accum_to_mem(float2& a, float* b, int s) { float2 v = a; v.x += b[0]; v.y += b[s]; a = v; }
static __device__ __forceinline__ void accum_to_mem(float4& a, float* b, int s) { float4 v = a; v.x += b[0]; v.y += b[s]; v.z += b[2*s]; v.w += b[3*s]; a = v; }
static __device__ __forceinline__ bool isfinite_vec3(const float3& a) { return isfinite(a.x) && isfinite(a.y) && isfinite(a.z); }
static __device__ __forceinline__ bool isfinite_vec4(const float4& a) { return isfinite(a.x) && isfinite(a.y) && isfinite(a.z) && isfinite(a.w); }
template<class T> static __device__ __forceinline__ T lerp (const T& a, const T& b, float c) { return a + c * (b - a); }
template<class T> static __device__ __forceinline__ T bilerp(const T& a, const T& b, const T& c, const T& d, const float2& e) { return lerp(lerp(a, b, e.x), lerp(c, d, e.x), e.y); }
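// bilerp(a, b, c, d, e) above interpolates a texel quad laid out as a=(0,0),
// b=(1,0), c=(0,1), d=(1,1), with e.x as the horizontal and e.y as the vertical
// weight. The `s` parameter of the accum_* helpers is the element stride between
// the per-component slots they read from or write to.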
//------------------------------------------------------------------------
// Cube map wrapping for smooth filtering across edges and corners. At corners,
// one of the texture coordinates will be negative. For correct interpolation,
// the missing texel must take the average color of the other three.
static __constant__ uint32_t c_cubeWrapMask1[48] =
{
0x1530a440, 0x1133a550, 0x6103a110, 0x1515aa44, 0x6161aa11, 0x40154a04, 0x44115a05, 0x04611a01,
0x2630a440, 0x2233a550, 0x5203a110, 0x2626aa44, 0x5252aa11, 0x40264a04, 0x44225a05, 0x04521a01,
0x32608064, 0x3366a055, 0x13062091, 0x32328866, 0x13132299, 0x50320846, 0x55330a55, 0x05130219,
0x42508064, 0x4455a055, 0x14052091, 0x42428866, 0x14142299, 0x60420846, 0x66440a55, 0x06140219,
0x5230a044, 0x5533a055, 0x1503a011, 0x5252aa44, 0x1515aa11, 0x40520a44, 0x44550a55, 0x04150a11,
0x6130a044, 0x6633a055, 0x2603a011, 0x6161aa44, 0x2626aa11, 0x40610a44, 0x44660a55, 0x04260a11,
};
static __constant__ uint8_t c_cubeWrapMask2[48] =
{
0x26, 0x33, 0x11, 0x05, 0x00, 0x09, 0x0c, 0x04, 0x04, 0x00, 0x00, 0x05, 0x00, 0x81, 0xc0, 0x40,
0x02, 0x03, 0x09, 0x00, 0x0a, 0x00, 0x00, 0x02, 0x64, 0x30, 0x90, 0x55, 0xa0, 0x99, 0xcc, 0x64,
0x24, 0x30, 0x10, 0x05, 0x00, 0x01, 0x00, 0x00, 0x06, 0x03, 0x01, 0x05, 0x00, 0x89, 0xcc, 0x44,
};
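// Layout of the tables above, as decoded by wrapCubeMap() below: each 32-bit entry
// of c_cubeWrapMask1 packs, for texel corners k = 0..3, a 2-bit source selector for
// x_k (bits 2k+1..2k), a 2-bit source selector for y_k (bits 2k+9..2k+8), and the
// destination face index plus one in the nibble at bits 4k+19..4k+16. The matching
// byte of c_cubeWrapMask2 holds eight flip flags: bits 0-3 mirror x0..x3 and
// bits 4-7 mirror y0..y3 across the face.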
static __device__ __forceinline__ int4 wrapCubeMap(int face, int ix0, int ix1, int iy0, int iy1, int w)
{
// Calculate case number.
int cx = (ix0 < 0) ? 0 : (ix1 >= w) ? 2 : 1;
int cy = (iy0 < 0) ? 0 : (iy1 >= w) ? 6 : 3;
int c = cx + cy;
if (c >= 5)
c--;
c = (face << 3) + c;
// Compute coordinates and faces.
unsigned int m = c_cubeWrapMask1[c];
int x0 = (m >> 0) & 3; x0 = (x0 == 0) ? 0 : (x0 == 1) ? ix0 : iy0;
int x1 = (m >> 2) & 3; x1 = (x1 == 0) ? 0 : (x1 == 1) ? ix1 : iy0;
int x2 = (m >> 4) & 3; x2 = (x2 == 0) ? 0 : (x2 == 1) ? ix0 : iy1;
int x3 = (m >> 6) & 3; x3 = (x3 == 0) ? 0 : (x3 == 1) ? ix1 : iy1;
int y0 = (m >> 8) & 3; y0 = (y0 == 0) ? 0 : (y0 == 1) ? ix0 : iy0;
int y1 = (m >> 10) & 3; y1 = (y1 == 0) ? 0 : (y1 == 1) ? ix1 : iy0;
int y2 = (m >> 12) & 3; y2 = (y2 == 0) ? 0 : (y2 == 1) ? ix0 : iy1;
int y3 = (m >> 14) & 3; y3 = (y3 == 0) ? 0 : (y3 == 1) ? ix1 : iy1;
int f0 = ((m >> 16) & 15) - 1;
int f1 = ((m >> 20) & 15) - 1;
int f2 = ((m >> 24) & 15) - 1;
int f3 = ((m >> 28) ) - 1;
// Flips.
unsigned int f = c_cubeWrapMask2[c];
int w1 = w - 1;
if (f & 0x01) x0 = w1 - x0;
if (f & 0x02) x1 = w1 - x1;
if (f & 0x04) x2 = w1 - x2;
if (f & 0x08) x3 = w1 - x3;
if (f & 0x10) y0 = w1 - y0;
if (f & 0x20) y1 = w1 - y1;
if (f & 0x40) y2 = w1 - y2;
if (f & 0x80) y3 = w1 - y3;
// Done.
int4 tcOut;
tcOut.x = x0 + (y0 + f0 * w) * w;
tcOut.y = x1 + (y1 + f1 * w) * w;
tcOut.z = x2 + (y2 + f2 * w) * w;
tcOut.w = x3 + (y3 + f3 * w) * w;
return tcOut;
}
//------------------------------------------------------------------------
// Cube map indexing and gradient functions.
// Map a 3D lookup vector into (s,t) face coordinates (returned in the first
// two parameters) and a face index.
static __device__ __forceinline__ int indexCubeMap(float& x, float& y, float z)
{
float ax = fabsf(x);
float ay = fabsf(y);
float az = fabsf(z);
int idx;
float c;
if (az > fmaxf(ax, ay)) { idx = 4; c = z; }
else if (ay > ax) { idx = 2; c = y; y = z; }
else { idx = 0; c = x; x = z; }
if (c < 0.f) idx += 1;
float m = __frcp_rz(fabsf(c)) * .5;
float m0 = __uint_as_float(__float_as_uint(m) ^ ((0x21u >> idx) << 31));
float m1 = (idx != 2) ? -m : m;
x = x * m0 + .5;
y = y * m1 + .5;
if (!isfinite(x) || !isfinite(y))
return -1; // Invalid uv.
x = fminf(fmaxf(x, 0.f), 1.f);
y = fminf(fmaxf(y, 0.f), 1.f);
return idx;
}
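// The face index returned above follows the usual cube map order
// 0:+X, 1:-X, 2:+Y, 3:-Y, 4:+Z, 5:-Z: the dominant axis selects the face pair and
// the sign of that component selects the even/odd face within the pair.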
// Based on dA/d{s,t}, compute dA/d{x,y,z} at a given 3D lookup vector.
static __device__ __forceinline__ float3 indexCubeMapGrad(float3 uv, float gu, float gv)
{
float ax = fabsf(uv.x);
float ay = fabsf(uv.y);
float az = fabsf(uv.z);
int idx;
float c;
float c0 = gu;
float c1 = gv;
if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; c0 *= uv.x; c1 *= uv.y; }
else if (ay > ax) { idx = 0x04; c = uv.y; c0 *= uv.x; c1 *= uv.z; }
else { idx = 0x01; c = uv.x; c0 *= uv.z; c1 *= uv.y; }
if (c < 0.f) idx += idx;
float m = __frcp_rz(fabsf(c));
c0 = (idx & 0x34) ? -c0 : c0;
c1 = (idx & 0x2e) ? -c1 : c1;
float gl = (c0 + c1) * m;
float gx = (idx & 0x03) ? gl : (idx & 0x20) ? -gu : gu;
float gy = (idx & 0x0c) ? gl : -gv;
float gz = (idx & 0x30) ? gl : (idx & 0x03) ? gu : gv;
gz = (idx & 0x09) ? -gz : gz;
float3 res = make_float3(gx, gy, gz) * (m * .5f);
if (!isfinite_vec3(res))
return make_float3(0.f, 0.f, 0.f); // Invalid uv.
return res;
}
// Based on dL/d(d{s,t}/d{X,Y}), compute dL/d(d{x,y,z}/d{X,Y}). This is just two
// indexCubeMapGrad() functions rolled together.
static __device__ __forceinline__ void indexCubeMapGrad4(float3 uv, float4 dw, float3& g0, float3& g1)
{
float ax = fabsf(uv.x);
float ay = fabsf(uv.y);
float az = fabsf(uv.z);
int idx;
float c, c0, c1;
if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; c0 = uv.x; c1 = uv.y; }
else if (ay > ax) { idx = 0x04; c = uv.y; c0 = uv.x; c1 = uv.z; }
else { idx = 0x01; c = uv.x; c0 = uv.z; c1 = uv.y; }
if (c < 0.f) idx += idx;
float m = __frcp_rz(fabsf(c));
c0 = (idx & 0x34) ? -c0 : c0;
c1 = (idx & 0x2e) ? -c1 : c1;
float gl0 = (dw.x * c0 + dw.z * c1) * m;
float gl1 = (dw.y * c0 + dw.w * c1) * m;
float gx0 = (idx & 0x03) ? gl0 : (idx & 0x20) ? -dw.x : dw.x;
float gx1 = (idx & 0x03) ? gl1 : (idx & 0x20) ? -dw.y : dw.y;
float gy0 = (idx & 0x0c) ? gl0 : -dw.z;
float gy1 = (idx & 0x0c) ? gl1 : -dw.w;
float gz0 = (idx & 0x30) ? gl0 : (idx & 0x03) ? dw.x : dw.z;
float gz1 = (idx & 0x30) ? gl1 : (idx & 0x03) ? dw.y : dw.w;
if (idx & 0x09)
{
gz0 = -gz0;
gz1 = -gz1;
}
g0 = make_float3(gx0, gy0, gz0) * (m * .5f);
g1 = make_float3(gx1, gy1, gz1) * (m * .5f);
if (!isfinite_vec3(g0) || !isfinite_vec3(g1))
{
g0 = make_float3(0.f, 0.f, 0.f); // Invalid uv.
g1 = make_float3(0.f, 0.f, 0.f);
}
}
// Compute d{s,t}/d{X,Y} based on d{x,y,z}/d{X,Y} at a given 3D lookup vector.
// Result is (ds/dX, ds/dY, dt/dX, dt/dY).
static __device__ __forceinline__ float4 indexCubeMapGradST(float3 uv, float3 dvdX, float3 dvdY)
{
float ax = fabsf(uv.x);
float ay = fabsf(uv.y);
float az = fabsf(uv.z);
int idx;
float c, gu, gv;
if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; gu = uv.x; gv = uv.y; }
else if (ay > ax) { idx = 0x04; c = uv.y; gu = uv.x; gv = uv.z; }
else { idx = 0x01; c = uv.x; gu = uv.z; gv = uv.y; }
if (c < 0.f) idx += idx;
if (idx & 0x09)
{
dvdX.z = -dvdX.z;
dvdY.z = -dvdY.z;
}
float m = __frcp_rz(fabsf(c));
float dm = m * .5f;
float mm = m * dm;
gu *= (idx & 0x34) ? -mm : mm;
gv *= (idx & 0x2e) ? -mm : mm;
float4 res;
if (idx & 0x03)
{
res = make_float4(gu * dvdX.x + dm * dvdX.z,
gu * dvdY.x + dm * dvdY.z,
gv * dvdX.x - dm * dvdX.y,
gv * dvdY.x - dm * dvdY.y);
}
else if (idx & 0x0c)
{
res = make_float4(gu * dvdX.y + dm * dvdX.x,
gu * dvdY.y + dm * dvdY.x,
gv * dvdX.y + dm * dvdX.z,
gv * dvdY.y + dm * dvdY.z);
}
else // (idx & 0x30)
{
res = make_float4(gu * dvdX.z + copysignf(dm, c) * dvdX.x,
gu * dvdY.z + copysignf(dm, c) * dvdY.x,
gv * dvdX.z - dm * dvdX.y,
gv * dvdY.z - dm * dvdY.y);
}
if (!isfinite_vec4(res))
return make_float4(0.f, 0.f, 0.f, 0.f);
return res;
}
// Compute d(d{s,t}/d{X,Y})/d{x,y,z}, i.e., how the pixel derivatives of 2D face
// coordinates change w.r.t. 3D texture coordinate vector, returned as follows:
// | d(ds/dX)/dx d(ds/dY)/dx d(dt/dX)/dx d(dt/dY)/dx |
// | d(ds/dX)/dy d(ds/dY)/dy d(dt/dX)/dy d(dt/dY)/dy |
// | d(ds/dX)/dz d(ds/dY)/dz d(dt/dX)/dz d(dt/dY)/dz |
static __device__ __forceinline__ void indexCubeMapGrad2(float3 uv, float3 dvdX, float3 dvdY, float4& dx, float4& dy, float4& dz)
{
float ax = fabsf(uv.x);
float ay = fabsf(uv.y);
float az = fabsf(uv.z);
int idx;
float c, gu, gv;
if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; gu = uv.x; gv = uv.y; }
else if (ay > ax) { idx = 0x04; c = uv.y; gu = uv.x; gv = uv.z; }
else { idx = 0x01; c = uv.x; gu = uv.z; gv = uv.y; }
if (c < 0.f) idx += idx;
if (idx & 0x09)
{
dvdX.z = -dvdX.z;
dvdY.z = -dvdY.z;
}
float m = __frcp_rz(c);
float dm = -m * fabsf(m) * .5;
float mm = m * m * .5;
float mu = (idx & 0x34) ? -mm : mm;
float mv = (idx & 0x2e) ? -mm : mm;
gu *= -2.0 * m * mu;
gv *= -2.0 * m * mv;
if (idx & 0x03)
{
dx.x = gu * dvdX.x + dm * dvdX.z;
dx.y = gu * dvdY.x + dm * dvdY.z;
dx.z = gv * dvdX.x - dm * dvdX.y;
dx.w = gv * dvdY.x - dm * dvdY.y;
dy.x = 0.f;
dy.y = 0.f;
dy.z = mv * dvdX.x;
dy.w = mv * dvdY.x;
dz.x = mu * dvdX.x;
dz.y = mu * dvdY.x;
dz.z = 0.f;
dz.w = 0.f;
}
else if (idx & 0x0c)
{
dx.x = mu * dvdX.y;
dx.y = mu * dvdY.y;
dx.z = 0.f;
dx.w = 0.f;
dy.x = gu * dvdX.y + dm * dvdX.x;
dy.y = gu * dvdY.y + dm * dvdY.x;
dy.z = gv * dvdX.y + dm * dvdX.z;
dy.w = gv * dvdY.y + dm * dvdY.z;
dz.x = 0.f;
dz.y = 0.f;
dz.z = mv * dvdX.y;
dz.w = mv * dvdY.y;
}
else // (idx & 0x30)
{
dx.x = mu * dvdX.z;
dx.y = mu * dvdY.z;
dx.z = 0.f;
dx.w = 0.f;
dy.x = 0.f;
dy.y = 0.f;
dy.z = mv * dvdX.z;
dy.w = mv * dvdY.z;
dz.x = gu * dvdX.z - fabsf(dm) * dvdX.x;
dz.y = gu * dvdY.z - fabsf(dm) * dvdY.x;
dz.z = gv * dvdX.z - dm * dvdX.y;
dz.w = gv * dvdY.z - dm * dvdY.y;
}
}
//------------------------------------------------------------------------
// General texture indexing.
template <bool CUBE_MODE>
static __device__ __forceinline__ int indexTextureNearest(const TextureKernelParams& p, float3 uv, int tz)
{
int w = p.texWidth;
int h = p.texHeight;
float u = uv.x;
float v = uv.y;
// Cube map indexing.
if (CUBE_MODE)
{
// No wrap. Fold face index into tz right away.
int idx = indexCubeMap(u, v, uv.z); // Rewrites u, v.
if (idx < 0)
return -1; // Invalid uv.
tz = 6 * tz + idx;
}
else
{
// Handle boundary.
if (p.boundaryMode == TEX_BOUNDARY_MODE_WRAP)
{
u = u - (float)__float2int_rd(u);
v = v - (float)__float2int_rd(v);
}
}
u = u * (float)w;
v = v * (float)h;
int iu = __float2int_rd(u);
int iv = __float2int_rd(v);
// In zero boundary mode, return texture address -1.
if (!CUBE_MODE && p.boundaryMode == TEX_BOUNDARY_MODE_ZERO)
{
if (iu < 0 || iu >= w || iv < 0 || iv >= h)
return -1;
}
// Otherwise clamp and calculate the coordinate properly.
iu = min(max(iu, 0), w-1);
iv = min(max(iv, 0), h-1);
return iu + w * (iv + tz * h);
}
template <bool CUBE_MODE>
static __device__ __forceinline__ float2 indexTextureLinear(const TextureKernelParams& p, float3 uv, int tz, int4& tcOut, int level)
{
// Mip level size.
int2 sz = mipLevelSize(p, level);
int w = sz.x;
int h = sz.y;
// Compute texture-space u, v.
float u = uv.x;
float v = uv.y;
bool clampU = false;
bool clampV = false;
// Cube map indexing.
int face = 0;
if (CUBE_MODE)
{
        // Neither clamp nor wrap.
face = indexCubeMap(u, v, uv.z); // Rewrites u, v.
if (face < 0)
{
tcOut.x = tcOut.y = tcOut.z = tcOut.w = -1; // Invalid uv.
return make_float2(0.f, 0.f);
}
u = u * (float)w - 0.5f;
v = v * (float)h - 0.5f;
}
else
{
if (p.boundaryMode == TEX_BOUNDARY_MODE_WRAP)
{
// Wrap.
u = u - (float)__float2int_rd(u);
v = v - (float)__float2int_rd(v);
}
// Move to texel space.
u = u * (float)w - 0.5f;
v = v * (float)h - 0.5f;
if (p.boundaryMode == TEX_BOUNDARY_MODE_CLAMP)
{
// Clamp to center of edge texels.
u = fminf(fmaxf(u, 0.f), w - 1.f);
v = fminf(fmaxf(v, 0.f), h - 1.f);
clampU = (u == 0.f || u == w - 1.f);
clampV = (v == 0.f || v == h - 1.f);
}
}
// Compute texel coordinates and weights.
int iu0 = __float2int_rd(u);
int iv0 = __float2int_rd(v);
    int iu1 = iu0 + (clampU ? 0 : 1); // Ensure zero u/v gradients when coordinates are clamped.
int iv1 = iv0 + (clampV ? 0 : 1);
u -= (float)iu0;
v -= (float)iv0;
// Cube map wrapping.
bool cubeWrap = CUBE_MODE && (iu0 < 0 || iv0 < 0 || iu1 >= w || iv1 >= h);
if (cubeWrap)
{
tcOut = wrapCubeMap(face, iu0, iu1, iv0, iv1, w);
tcOut += 6 * tz * w * h; // Bring in tz.
return make_float2(u, v); // Done.
}
// Fold cube map face into tz.
if (CUBE_MODE)
tz = 6 * tz + face;
// Wrap overflowing texel indices.
if (!CUBE_MODE && p.boundaryMode == TEX_BOUNDARY_MODE_WRAP)
{
if (iu0 < 0) iu0 += w;
if (iv0 < 0) iv0 += h;
if (iu1 >= w) iu1 -= w;
if (iv1 >= h) iv1 -= h;
}
// Coordinates
iu0 += tz * w * h;
iu1 += tz * w * h;
tcOut.x = iu0 + w * iv0;
tcOut.y = iu1 + w * iv0;
tcOut.z = iu0 + w * iv1;
tcOut.w = iu1 + w * iv1;
// Invalidate texture addresses outside unit square if we are in zero mode.
if (!CUBE_MODE && p.boundaryMode == TEX_BOUNDARY_MODE_ZERO)
{
bool iu0_out = (iu0 < 0 || iu0 >= w);
bool iu1_out = (iu1 < 0 || iu1 >= w);
bool iv0_out = (iv0 < 0 || iv0 >= h);
bool iv1_out = (iv1 < 0 || iv1 >= h);
if (iu0_out || iv0_out) tcOut.x = -1;
if (iu1_out || iv0_out) tcOut.y = -1;
if (iu0_out || iv1_out) tcOut.z = -1;
if (iu1_out || iv1_out) tcOut.w = -1;
}
// All done.
return make_float2(u, v);
}
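// The (u, v) returned above is the fractional position inside the fetched 2x2
// texel quad; the forward kernel feeds it directly into bilerp() as the bilinear
// interpolation weights.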
//------------------------------------------------------------------------
// Mip level calculation.
template <bool CUBE_MODE, bool BIAS_ONLY, int FILTER_MODE>
static __device__ __forceinline__ void calculateMipLevel(int& level0, int& level1, float& flevel, const TextureKernelParams& p, int pidx, float3 uv, float4* pdw, float3* pdfdv)
{
// Do nothing if mips not in use.
if (FILTER_MODE == TEX_MODE_NEAREST || FILTER_MODE == TEX_MODE_LINEAR)
return;
// Determine mip level based on UV pixel derivatives. If no derivatives are given (mip level bias only), leave as zero.
if (!BIAS_ONLY)
{
// Get pixel derivatives of texture coordinates.
float4 uvDA;
float3 dvdX, dvdY; // Gradients use these later.
if (CUBE_MODE)
{
// Fetch.
float2 d0 = ((const float2*)p.uvDA)[3 * pidx + 0];
float2 d1 = ((const float2*)p.uvDA)[3 * pidx + 1];
float2 d2 = ((const float2*)p.uvDA)[3 * pidx + 2];
// Map d{x,y,z}/d{X,Y} into d{s,t}/d{X,Y}.
dvdX = make_float3(d0.x, d1.x, d2.x); // d{x,y,z}/dX
dvdY = make_float3(d0.y, d1.y, d2.y); // d{x,y,z}/dY
uvDA = indexCubeMapGradST(uv, dvdX, dvdY); // d{s,t}/d{X,Y}
}
else
{
// Fetch.
uvDA = ((const float4*)p.uvDA)[pidx];
}
// Scaling factors.
float uscl = p.texWidth;
float vscl = p.texHeight;
// d[s,t]/d[X,Y].
float dsdx = uvDA.x * uscl;
float dsdy = uvDA.y * uscl;
float dtdx = uvDA.z * vscl;
float dtdy = uvDA.w * vscl;
// Calculate footprint axis lengths.
float A = dsdx*dsdx + dtdx*dtdx;
float B = dsdy*dsdy + dtdy*dtdy;
float C = dsdx*dsdy + dtdx*dtdy;
float l2b = 0.5 * (A + B);
float l2n = 0.25 * (A-B)*(A-B) + C*C;
float l2a = sqrt(l2n);
float lenMinorSqr = fmaxf(0.0, l2b - l2a);
float lenMajorSqr = l2b + l2a;
// Footprint vs. mip level gradient.
if (pdw && FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR)
{
float dw = 0.72134752f / (l2n + l2a * l2b); // Constant is 0.5/ln(2).
float AB = dw * .5f * (A - B);
float Cw = dw * C;
float l2aw = dw * l2a;
float d_f_ddsdX = uscl * (dsdx * (l2aw + AB) + dsdy * Cw);
float d_f_ddsdY = uscl * (dsdy * (l2aw - AB) + dsdx * Cw);
float d_f_ddtdX = vscl * (dtdx * (l2aw + AB) + dtdy * Cw);
float d_f_ddtdY = vscl * (dtdy * (l2aw - AB) + dtdx * Cw);
float4 d_f_dw = make_float4(d_f_ddsdX, d_f_ddsdY, d_f_ddtdX, d_f_ddtdY);
if (!CUBE_MODE)
*pdw = isfinite_vec4(d_f_dw) ? d_f_dw : make_float4(0.f, 0.f, 0.f, 0.f);
            // In cube maps, there is also a texture coordinate vs. mip level gradient.
            // Only output nonzero vectors if both are free of inf/NaN garbage.
if (CUBE_MODE)
{
float4 dx, dy, dz;
indexCubeMapGrad2(uv, dvdX, dvdY, dx, dy, dz);
float3 d_dsdX_dv = make_float3(dx.x, dy.x, dz.x);
float3 d_dsdY_dv = make_float3(dx.y, dy.y, dz.y);
float3 d_dtdX_dv = make_float3(dx.z, dy.z, dz.z);
float3 d_dtdY_dv = make_float3(dx.w, dy.w, dz.w);
float3 d_f_dv = make_float3(0.f, 0.f, 0.f);
d_f_dv += d_dsdX_dv * d_f_ddsdX;
d_f_dv += d_dsdY_dv * d_f_ddsdY;
d_f_dv += d_dtdX_dv * d_f_ddtdX;
d_f_dv += d_dtdY_dv * d_f_ddtdY;
bool finite = isfinite_vec4(d_f_dw) && isfinite_vec3(d_f_dv);
*pdw = finite ? d_f_dw : make_float4(0.f, 0.f, 0.f, 0.f);
*pdfdv = finite ? d_f_dv : make_float3(0.f, 0.f, 0.f);
}
}
// Finally, calculate mip level.
flevel = .5f * __log2f(lenMajorSqr); // May be inf/NaN, but clamp fixes it.
}
// Bias the mip level and clamp.
if (p.mipLevelBias)
flevel += p.mipLevelBias[pidx];
flevel = fminf(fmaxf(flevel, 0.f), (float)p.mipLevelMax);
// Calculate levels depending on filter mode.
level0 = __float2int_rd(flevel);
// Leave everything else at zero if flevel == 0 (magnification) or when in linear-mipmap-nearest mode.
if (FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR && flevel > 0.f)
{
level1 = min(level0 + 1, p.mipLevelMax);
flevel -= level0; // Fractional part. Zero if clamped on last level.
}
}
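// Illustrative arithmetic for the level selection above: lenMajorSqr is the squared
// length, in texels, of the major axis of the pixel footprint, so
// flevel = .5f * log2(lenMajorSqr) = log2(lenMajor). A footprint whose major axis
// spans roughly 4 texels therefore lands at flevel ~= 2, which splits into
// level0 = 2 and a near-zero fractional blend toward level1.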
//------------------------------------------------------------------------
// Texel fetch and accumulator helpers that understand cube map corners.
template<class T>
static __device__ __forceinline__ void fetchQuad(T& a00, T& a10, T& a01, T& a11, const float* pIn, int4 tc, bool corner)
{
// For invalid cube map uv, tc will be all negative, and all texel values will be zero.
if (corner)
{
T avg = zero_value<T>();
if (tc.x >= 0) avg += (a00 = *((const T*)&pIn[tc.x]));
if (tc.y >= 0) avg += (a10 = *((const T*)&pIn[tc.y]));
if (tc.z >= 0) avg += (a01 = *((const T*)&pIn[tc.z]));
if (tc.w >= 0) avg += (a11 = *((const T*)&pIn[tc.w]));
avg *= 0.33333333f;
if (tc.x < 0) a00 = avg;
if (tc.y < 0) a10 = avg;
if (tc.z < 0) a01 = avg;
if (tc.w < 0) a11 = avg;
}
else
{
a00 = (tc.x >= 0) ? *((const T*)&pIn[tc.x]) : zero_value<T>();
a10 = (tc.y >= 0) ? *((const T*)&pIn[tc.y]) : zero_value<T>();
a01 = (tc.z >= 0) ? *((const T*)&pIn[tc.z]) : zero_value<T>();
a11 = (tc.w >= 0) ? *((const T*)&pIn[tc.w]) : zero_value<T>();
}
}
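// Example of the corner rule above (hypothetical values): if the quad straddles a
// cube map corner with a00 = 0.9, a10 = 0.6, a01 = 0.3 and a11 missing (tc.w < 0),
// the missing texel is synthesized as (0.9 + 0.6 + 0.3) / 3 = 0.6, so bilinear
// filtering stays continuous across the corner instead of blending toward an
// undefined texel.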
static __device__ __forceinline__ void accumQuad(float4 c, float* pOut, int level, int4 tc, bool corner, CA_TEMP_PARAM)
{
// For invalid cube map uv, tc will be all negative, and no accumulation will take place.
if (corner)
{
float cb;
if (tc.x < 0) cb = c.x;
if (tc.y < 0) cb = c.y;
if (tc.z < 0) cb = c.z;
if (tc.w < 0) cb = c.w;
cb *= 0.33333333f;
if (tc.x >= 0) caAtomicAddTexture(pOut, level, tc.x, c.x + cb);
if (tc.y >= 0) caAtomicAddTexture(pOut, level, tc.y, c.y + cb);
if (tc.z >= 0) caAtomicAddTexture(pOut, level, tc.z, c.z + cb);
if (tc.w >= 0) caAtomicAddTexture(pOut, level, tc.w, c.w + cb);
}
else
{
if (tc.x >= 0) caAtomicAddTexture(pOut, level, tc.x, c.x);
if (tc.y >= 0) caAtomicAddTexture(pOut, level, tc.y, c.y);
if (tc.z >= 0) caAtomicAddTexture(pOut, level, tc.z, c.z);
if (tc.w >= 0) caAtomicAddTexture(pOut, level, tc.w, c.w);
}
}
//------------------------------------------------------------------------
// Mip builder kernel.
template<class T, int C>
static __forceinline__ __device__ void MipBuildKernelTemplate(const TextureKernelParams p)
{
// Sizes.
int2 sz_in = mipLevelSize(p, p.mipLevelOut - 1);
int2 sz_out = mipLevelSize(p, p.mipLevelOut);
// Calculate pixel position.
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int pz = blockIdx.z;
if (px >= sz_out.x || py >= sz_out.y)
return;
// Pixel indices.
int pidx_in0 = p.channels * (((px + sz_in.x * py) << 1) + (pz * sz_in.x * sz_in.y));
int pidx_in1 = pidx_in0 + p.channels * sz_in.x; // Next pixel down.
int pidx_out = p.channels * (px + sz_out.x * (py + sz_out.y * pz));
// Input and output pointers.
const float* pin = p.tex[p.mipLevelOut - 1];
float* pout = (float*)p.tex[p.mipLevelOut];
// Special case: Input texture height or width is 1.
if (sz_in.x == 1 || sz_in.y == 1)
{
if (sz_in.y == 1)
pidx_in1 = pidx_in0 + p.channels; // Next pixel on the right.
for (int i=0; i < p.channels; i += C)
{
T v0 = *((const T*)&pin[pidx_in0 + i]);
T v1 = *((const T*)&pin[pidx_in1 + i]);
T avg = .5f * (v0 + v1);
#if TEX_DEBUG_MIP_RETAIN_VARIANCE
avg = (avg - .5f) * 1.41421356f + .5f;
#endif
*((T*)&pout[pidx_out + i]) = avg;
}
return;
}
for (int i=0; i < p.channels; i += C)
{
T v0 = *((const T*)&pin[pidx_in0 + i]);
T v1 = *((const T*)&pin[pidx_in0 + i + p.channels]);
T v2 = *((const T*)&pin[pidx_in1 + i]);
T v3 = *((const T*)&pin[pidx_in1 + i + p.channels]);
T avg = .25f * (v0 + v1 + v2 + v3);
#if TEX_DEBUG_MIP_RETAIN_VARIANCE
avg = (avg - .5f) * 2.f + .5f;
#endif
*((T*)&pout[pidx_out + i]) = avg;
}
}
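// Note on the TEX_DEBUG_MIP_RETAIN_VARIANCE branches above: averaging two texels
// divides the standard deviation of independent noise around 0.5 by sqrt(2), and
// averaging four divides it by 2, so rescaling (avg - .5f) by 1.41421356f (~sqrt(2))
// or 2.f restores the original variance. Judging by the macro name, this is a
// debug-only behavior.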
// Template specializations.
__global__ void MipBuildKernel1(const TextureKernelParams p) { MipBuildKernelTemplate<float, 1>(p); }
__global__ void MipBuildKernel2(const TextureKernelParams p) { MipBuildKernelTemplate<float2, 2>(p); }
__global__ void MipBuildKernel4(const TextureKernelParams p) { MipBuildKernelTemplate<float4, 4>(p); }
//------------------------------------------------------------------------
// Forward kernel.
template <class T, int C, bool CUBE_MODE, bool BIAS_ONLY, int FILTER_MODE>
static __forceinline__ __device__ void TextureFwdKernelTemplate(const TextureKernelParams p)
{
// Calculate pixel position.
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int pz = blockIdx.z;
int tz = (p.texDepth == 1) ? 0 : pz;
if (px >= p.imgWidth || py >= p.imgHeight || pz >= p.n)
return;
// Pixel index.
int pidx = px + p.imgWidth * (py + p.imgHeight * pz);
// Output ptr.
float* pOut = p.out + pidx * p.channels;
// Get UV.
float3 uv;
if (CUBE_MODE)
uv = ((const float3*)p.uv)[pidx];
else
uv = make_float3(((const float2*)p.uv)[pidx], 0.f);
// Nearest mode.
if (FILTER_MODE == TEX_MODE_NEAREST)
{
int tc = indexTextureNearest<CUBE_MODE>(p, uv, tz);
tc *= p.channels;
const float* pIn = p.tex[0];
// Copy if valid tc, otherwise output zero.
for (int i=0; i < p.channels; i += C)
*((T*)&pOut[i]) = (tc >= 0) ? *((const T*)&pIn[tc + i]) : zero_value<T>();
return; // Exit.
}
// Calculate mip level. In 'linear' mode these will all stay zero.
float flevel = 0.f; // Fractional level.
int level0 = 0; // Discrete level 0.
int level1 = 0; // Discrete level 1.
calculateMipLevel<CUBE_MODE, BIAS_ONLY, FILTER_MODE>(level0, level1, flevel, p, pidx, uv, 0, 0);
// Get texel indices and pointer for level 0.
int4 tc0 = make_int4(0, 0, 0, 0);
float2 uv0 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc0, level0);
const float* pIn0 = p.tex[level0];
bool corner0 = CUBE_MODE && ((tc0.x | tc0.y | tc0.z | tc0.w) < 0);
tc0 *= p.channels;
// Bilinear fetch.
if (FILTER_MODE == TEX_MODE_LINEAR || FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_NEAREST)
{
// Interpolate.
for (int i=0; i < p.channels; i += C, tc0 += C)
{
T a00, a10, a01, a11;
fetchQuad<T>(a00, a10, a01, a11, pIn0, tc0, corner0);
*((T*)&pOut[i]) = bilerp(a00, a10, a01, a11, uv0);
}
return; // Exit.
}
// Get texel indices and pointer for level 1.
int4 tc1 = make_int4(0, 0, 0, 0);
float2 uv1 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc1, level1);
const float* pIn1 = p.tex[level1];
bool corner1 = CUBE_MODE && ((tc1.x | tc1.y | tc1.z | tc1.w) < 0);
tc1 *= p.channels;
// Trilinear fetch.
for (int i=0; i < p.channels; i += C, tc0 += C, tc1 += C)
{
// First level.
T a00, a10, a01, a11;
fetchQuad<T>(a00, a10, a01, a11, pIn0, tc0, corner0);
T a = bilerp(a00, a10, a01, a11, uv0);
// Second level unless in magnification mode.
if (flevel > 0.f)
{
T b00, b10, b01, b11;
fetchQuad<T>(b00, b10, b01, b11, pIn1, tc1, corner1);
T b = bilerp(b00, b10, b01, b11, uv1);
a = lerp(a, b, flevel); // Interpolate between levels.
}
// Write.
*((T*)&pOut[i]) = a;
}
}
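// Minimal host-side launch sketch for one of the specializations below. The block
// size, stream, and parameter setup here are hypothetical; the real launch
// configuration lives in this library's host code and is not shown in this file:
//
//     dim3 blockSize(8, 8, 1);
//     dim3 gridSize((p.imgWidth  + blockSize.x - 1) / blockSize.x,
//                   (p.imgHeight + blockSize.y - 1) / blockSize.y,
//                   p.n);
//     TextureFwdKernelLinear4<<<gridSize, blockSize, 0, stream>>>(p);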
// Template specializations.
__global__ void TextureFwdKernelNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureFwdKernelNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureFwdKernelNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureFwdKernelLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureFwdKernelLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureFwdKernelLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureFwdKernelLinearMipmapNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelLinearMipmapNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelLinearMipmapNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelLinearMipmapLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelLinearMipmapLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelLinearMipmapLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelCubeNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureFwdKernelCubeNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureFwdKernelCubeNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureFwdKernelCubeLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelLinearMipmapNearestBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelLinearMipmapNearestBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelLinearMipmapNearestBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelLinearMipmapLinearBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelLinearMipmapLinearBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelLinearMipmapLinearBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapNearestBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapNearestBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapNearestBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapLinearBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapLinearBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureFwdKernelCubeLinearMipmapLinearBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
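//------------------------------------------------------------------------
// Illustrative host-side dispatch (sketch only): how a launcher might pick the widest
// vectorized variant of the nearest-filtering forward kernel and a launch shape for it.
// The helper name, the 8x8 block size, and the channel-based selection below are
// assumptions for illustration, not the library's actual launcher.
static void launchTextureFwdNearest(const TextureKernelParams& p, cudaStream_t stream)
{
    dim3 block(8, 8, 1);
    dim3 grid((p.imgWidth  + block.x - 1) / block.x,
              (p.imgHeight + block.y - 1) / block.y,
              p.n);
    if      ((p.channels & 3) == 0) TextureFwdKernelNearest4<<<grid, block, 0, stream>>>(p); // 4-wide loads
    else if ((p.channels & 1) == 0) TextureFwdKernelNearest2<<<grid, block, 0, stream>>>(p); // 2-wide loads
    else                            TextureFwdKernelNearest1<<<grid, block, 0, stream>>>(p); // scalar fallback
}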
//------------------------------------------------------------------------
// Gradient mip puller kernel.
template<class T, int C>
static __forceinline__ __device__ void MipGradKernelTemplate(const TextureKernelParams p)
{
// Calculate pixel position.
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int pz = blockIdx.z;
if (px >= p.texWidth || py >= p.texHeight)
return;
// Number of wide elements.
int c = p.channels;
if (C == 2) c >>= 1;
if (C == 4) c >>= 2;
// Dynamically allocated shared memory for holding a texel.
extern __shared__ float s_texelAccum[];
int sharedOfs = threadIdx.x + threadIdx.y * blockDim.x;
int sharedStride = blockDim.x * blockDim.y;
# define TEXEL_ACCUM(_i) (s_texelAccum + (sharedOfs + (_i) * sharedStride))
// Clear the texel.
for (int i=0; i < p.channels; i++)
*TEXEL_ACCUM(i) = 0.f;
// Track texel position and accumulation weight over the mip stack.
int x = px;
int y = py;
float w = 1.f;
// Pull gradients from all levels.
int2 sz = mipLevelSize(p, 0); // Previous level size.
for (int level=1; level <= p.mipLevelMax; level++)
{
// Weight decay depends on previous level size.
if (sz.x > 1) w *= .5f;
if (sz.y > 1) w *= .5f;
// Current level size and coordinates.
sz = mipLevelSize(p, level);
x >>= 1;
y >>= 1;
T* pIn = (T*)(p.gradTex[level] + (x + sz.x * (y + sz.y * pz)) * p.channels);
for (int i=0; i < c; i++)
accum_from_mem(TEXEL_ACCUM(i * C), sharedStride, pIn[i], w);
}
// Add to main texture gradients.
T* pOut = (T*)(p.gradTex[0] + (px + p.texWidth * (py + p.texHeight * pz)) * p.channels);
for (int i=0; i < c; i++)
accum_to_mem(pOut[i], TEXEL_ACCUM(i * C), sharedStride);
}
// Template specializations.
__global__ void MipGradKernel1(const TextureKernelParams p) { MipGradKernelTemplate<float, 1>(p); }
__global__ void MipGradKernel2(const TextureKernelParams p) { MipGradKernelTemplate<float2, 2>(p); }
__global__ void MipGradKernel4(const TextureKernelParams p) { MipGradKernelTemplate<float4, 4>(p); }
//------------------------------------------------------------------------
// Gradient kernel.
template <bool CUBE_MODE, bool BIAS_ONLY, int FILTER_MODE>
static __forceinline__ __device__ void TextureGradKernelTemplate(const TextureKernelParams p)
{
// Temporary space for coalesced atomics.
CA_DECLARE_TEMP(TEX_GRAD_MAX_KERNEL_BLOCK_WIDTH * TEX_GRAD_MAX_KERNEL_BLOCK_HEIGHT);
// Calculate pixel position.
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int pz = blockIdx.z;
int tz = (p.texDepth == 1) ? 0 : pz;
if (px >= p.imgWidth || py >= p.imgHeight || pz >= p.n)
return;
// Pixel index.
int pidx = px + p.imgWidth * (py + p.imgHeight * pz);
// Early exit if output gradients are zero.
const float* pDy = p.dy + pidx * p.channels;
unsigned int dmax = 0u;
if ((p.channels & 3) == 0)
{
for (int i=0; i < p.channels; i += 4)
{
uint4 dy = *((const uint4*)&pDy[i]);
dmax |= (dy.x | dy.y | dy.z | dy.w);
}
}
else
{
for (int i=0; i < p.channels; i++)
dmax |= __float_as_uint(pDy[i]);
}
// Store zeros and exit.
if (__uint_as_float(dmax) == 0.f)
{
if (CUBE_MODE)
{
if (FILTER_MODE != TEX_MODE_NEAREST)
((float3*)p.gradUV)[pidx] = make_float3(0.f, 0.f, 0.f);
if (FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR)
{
if (p.gradUVDA)
{
((float2*)p.gradUVDA)[3 * pidx + 0] = make_float2(0.f, 0.f);
((float2*)p.gradUVDA)[3 * pidx + 1] = make_float2(0.f, 0.f);
((float2*)p.gradUVDA)[3 * pidx + 2] = make_float2(0.f, 0.f);
}
if (p.gradMipLevelBias)
p.gradMipLevelBias[pidx] = 0.f;
}
}
else
{
if (FILTER_MODE != TEX_MODE_NEAREST)
((float2*)p.gradUV)[pidx] = make_float2(0.f, 0.f);
if (FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR)
{
if (p.gradUVDA)
((float4*)p.gradUVDA)[pidx] = make_float4(0.f, 0.f, 0.f, 0.f);
if (p.gradMipLevelBias)
p.gradMipLevelBias[pidx] = 0.f;
}
}
return;
}
// Get UV.
float3 uv;
if (CUBE_MODE)
uv = ((const float3*)p.uv)[pidx];
else
uv = make_float3(((const float2*)p.uv)[pidx], 0.f);
// Nearest mode - texture gradients only.
if (FILTER_MODE == TEX_MODE_NEAREST)
{
int tc = indexTextureNearest<CUBE_MODE>(p, uv, tz);
if (tc < 0)
return; // Outside texture.
tc *= p.channels;
float* pOut = p.gradTex[0];
// Accumulate texture gradients.
for (int i=0; i < p.channels; i++)
caAtomicAddTexture(pOut, 0, tc + i, pDy[i]);
return; // Exit.
}
// Calculate mip level. In 'linear' mode these will all stay zero.
float4 dw = make_float4(0.f, 0.f, 0.f, 0.f);
float3 dfdv = make_float3(0.f, 0.f, 0.f);
float flevel = 0.f; // Fractional level.
int level0 = 0; // Discrete level 0.
int level1 = 0; // Discrete level 1.
calculateMipLevel<CUBE_MODE, BIAS_ONLY, FILTER_MODE>(level0, level1, flevel, p, pidx, uv, &dw, &dfdv);
// UV gradient accumulators.
float gu = 0.f;
float gv = 0.f;
// Get texel indices and pointers for level 0.
int4 tc0 = make_int4(0, 0, 0, 0);
float2 uv0 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc0, level0);
const float* pIn0 = p.tex[level0];
float* pOut0 = p.gradTex[level0];
bool corner0 = CUBE_MODE && ((tc0.x | tc0.y | tc0.z | tc0.w) < 0);
tc0 *= p.channels;
// Texel weights.
float uv011 = uv0.x * uv0.y;
float uv010 = uv0.x - uv011;
float uv001 = uv0.y - uv011;
float uv000 = 1.f - uv0.x - uv001;
float4 tw0 = make_float4(uv000, uv010, uv001, uv011);
// Attribute weights.
int2 sz0 = mipLevelSize(p, level0);
float sclu0 = (float)sz0.x;
float sclv0 = (float)sz0.y;
// Bilinear mode - texture and uv gradients.
if (FILTER_MODE == TEX_MODE_LINEAR || FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_NEAREST)
{
for (int i=0; i < p.channels; i++, tc0 += 1)
{
float dy = pDy[i];
accumQuad(tw0 * dy, pOut0, level0, tc0, corner0, CA_TEMP);
float a00, a10, a01, a11;
fetchQuad<float>(a00, a10, a01, a11, pIn0, tc0, corner0);
float ad = (a11 + a00 - a10 - a01);
gu += dy * ((a10 - a00) + uv0.y * ad) * sclu0;
gv += dy * ((a01 - a00) + uv0.x * ad) * sclv0;
}
// Store UV gradients and exit.
if (CUBE_MODE)
((float3*)p.gradUV)[pidx] = indexCubeMapGrad(uv, gu, gv);
else
((float2*)p.gradUV)[pidx] = make_float2(gu, gv);
return;
}
// Accumulate fractional mip level gradient.
float df = 0; // dL/df.
// Get texel indices and pointers for level 1.
int4 tc1 = make_int4(0, 0, 0, 0);
float2 uv1 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc1, level1);
const float* pIn1 = p.tex[level1];
float* pOut1 = p.gradTex[level1];
bool corner1 = CUBE_MODE && ((tc1.x | tc1.y | tc1.z | tc1.w) < 0);
tc1 *= p.channels;
// Texel weights.
float uv111 = uv1.x * uv1.y;
float uv110 = uv1.x - uv111;
float uv101 = uv1.y - uv111;
float uv100 = 1.f - uv1.x - uv101;
float4 tw1 = make_float4(uv100, uv110, uv101, uv111);
// Attribute weights.
int2 sz1 = mipLevelSize(p, level1);
float sclu1 = (float)sz1.x;
float sclv1 = (float)sz1.y;
// Trilinear mode.
for (int i=0; i < p.channels; i++, tc0 += 1, tc1 += 1)
{
float dy = pDy[i];
float dy0 = (1.f - flevel) * dy;
accumQuad(tw0 * dy0, pOut0, level0, tc0, corner0, CA_TEMP);
// UV gradients for first level.
float a00, a10, a01, a11;
fetchQuad<float>(a00, a10, a01, a11, pIn0, tc0, corner0);
float ad = (a11 + a00 - a10 - a01);
gu += dy0 * ((a10 - a00) + uv0.y * ad) * sclu0;
gv += dy0 * ((a01 - a00) + uv0.x * ad) * sclv0;
// Second level unless in magnification mode.
if (flevel > 0.f)
{
// Texture gradients for second level.
float dy1 = flevel * dy;
accumQuad(tw1 * dy1, pOut1, level1, tc1, corner1, CA_TEMP);
// UV gradients for second level.
float b00, b10, b01, b11;
fetchQuad<float>(b00, b10, b01, b11, pIn1, tc1, corner1);
float bd = (b11 + b00 - b10 - b01);
gu += dy1 * ((b10 - b00) + uv1.y * bd) * sclu1;
gv += dy1 * ((b01 - b00) + uv1.x * bd) * sclv1;
// Mip level gradient.
float a = bilerp(a00, a10, a01, a11, uv0);
float b = bilerp(b00, b10, b01, b11, uv1);
df += (b-a) * dy;
}
}
// Store UV gradients.
if (CUBE_MODE)
((float3*)p.gradUV)[pidx] = indexCubeMapGrad(uv, gu, gv) + (dfdv * df);
else
((float2*)p.gradUV)[pidx] = make_float2(gu, gv);
// Store mip level bias gradient.
if (p.gradMipLevelBias)
p.gradMipLevelBias[pidx] = df;
// Store UV pixel differential gradients.
if (!BIAS_ONLY)
{
// Final gradients.
dw *= df; // dL/(d{s,y}/d{X,Y}) = df/(d{s,y}/d{X,Y}) * dL/df.
// Store them.
if (CUBE_MODE)
{
// Remap from dL/(d{s,t}/s{X,Y}) to dL/(d{x,y,z}/d{X,Y}).
float3 g0, g1;
indexCubeMapGrad4(uv, dw, g0, g1);
((float2*)p.gradUVDA)[3 * pidx + 0] = make_float2(g0.x, g1.x);
((float2*)p.gradUVDA)[3 * pidx + 1] = make_float2(g0.y, g1.y);
((float2*)p.gradUVDA)[3 * pidx + 2] = make_float2(g0.z, g1.z);
}
else
((float4*)p.gradUVDA)[pidx] = dw;
}
}
// Template specializations.
__global__ void TextureGradKernelNearest (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureGradKernelLinear (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureGradKernelLinearMipmapNearest (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureGradKernelLinearMipmapLinear (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureGradKernelCubeNearest (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_NEAREST>(p); }
__global__ void TextureGradKernelCubeLinear (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_LINEAR>(p); }
__global__ void TextureGradKernelCubeLinearMipmapNearest (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureGradKernelCubeLinearMipmapLinear (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureGradKernelLinearMipmapNearestBO (const TextureKernelParams p) { TextureGradKernelTemplate<false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureGradKernelLinearMipmapLinearBO (const TextureKernelParams p) { TextureGradKernelTemplate<false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
__global__ void TextureGradKernelCubeLinearMipmapNearestBO (const TextureKernelParams p) { TextureGradKernelTemplate<true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
__global__ void TextureGradKernelCubeLinearMipmapLinearBO (const TextureKernelParams p) { TextureGradKernelTemplate<true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
//------------------------------------------------------------------------
#include <cuda.h>
//#define DEBUG
// calculate the IoU of a single box against another box
__device__
float calc_single_iou(const float4 b1, const float4 b2) {
// (lt), (rb)
float l = max(b1.x, b2.x);
float t = max(b1.y, b2.y);
float r = min(b1.z, b2.z);
float b = min(b1.w, b2.w);
float first = (r - l);
first = (first < 0) ? 0 : first;
float second = (b - t);
second = (second < 0) ? 0 : second;
float intersection = first * second;
float area1 = (b1.w - b1.y) * (b1.z - b1.x);
float area2 = (b2.w - b2.y) * (b2.z - b2.x);
return intersection / (area1 + area2 - intersection);
}
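// Worked example (illustrative): for b1 = (0, 0, 2, 2) and b2 = (1, 1, 3, 3) in ltrb form,
// the overlap is the 1x1 square (1, 1)-(2, 2), so intersection = 1, area1 = 4, area2 = 4,
// and IoU = 1 / (4 + 4 - 1) = 1/7 ~= 0.143.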
__global__
// boxes1 : [N x 4]
// boxes2 : [M x 4]
// ious : [N x M]
void calc_ious_kernel(const int N_img, const float4 *box1, const int *box1_offsets,
const int M, const float4 *boxes2, float *ious) {
// launch N_img blocks
const int img = blockIdx.x;
  // each block i runs over its box1_N[i] source boxes and all M target boxes,
  // generating box1_N[i] x M outputs
// alias to start of boxes for this image
const float4 *b1 = &box1[box1_offsets[img]];
if (threadIdx.x == 0) {
//printf("offset for img %d : %d\n", img, box1_offsets[img]);
}
// number of boxes for this image from offsets
int N = box1_offsets[img+1] - box1_offsets[img];
for (int i = 0; i < N; ++i) {
// if (threadIdx.x == 0) printf("i : %d\n", i);
const float4 source = b1[i];
// for each source, loop over targets
for (int j = threadIdx.x; j < M; j += blockDim.x) {
const float4 target = boxes2[j];
float iou = calc_single_iou(source, target);
// store the calculated IoU in the correct spot
int out_idx = box1_offsets[img] * M + i * M + j;
ious[out_idx] = iou;
}
}
}
__device__
void reduce_val_idx(int N, volatile float *vals, volatile int *idx) {
// naive: single thread for now
if (threadIdx.x == 0) {
float max_val = vals[0];
int max_idx = idx[0];
for (int i = 1; i < N; ++i) {
if (vals[i] > max_val) {
max_val = vals[i];
max_idx = idx[i];
}
}
vals[0] = max_val;
idx[0] = max_idx;
}
}
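// A minimal alternative sketch (not used by the kernel below): the same (val, idx)
// reduction done as a shared-memory tree instead of a single-thread scan. Like
// reduce_val_idx, it must be called by every thread of the block; the function name
// is hypothetical.
__device__
void reduce_val_idx_tree(int N, volatile float *vals, volatile int *idx) {
  // round N up to the next power of two so odd sizes are covered by the bounds check
  int span = 1;
  while (span < N) span <<= 1;
  for (int stride = span >> 1; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride && threadIdx.x + stride < N) {
      if (vals[threadIdx.x + stride] > vals[threadIdx.x]) {
        vals[threadIdx.x] = vals[threadIdx.x + stride];
        idx[threadIdx.x] = idx[threadIdx.x + stride];
      }
    }
    __syncthreads();
  }
}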
/**
* perform remaining parts, storing temporary values in global workspace
* workspace needs N_img * M values, each of 8 bytes (float, int)
**/
template <int BLOCK_SIZE, int MAX_BBOXES_PER_BLOCK>
__global__
void encode(const int N_img, const float4 *bbox_in, const long *labels_in, const int *offsets,
const int M, const float4 *dboxes, // const float *ious,
const float criteria, uint8_t *workspace, float4 *bbox_out, long *label_out) {
// Each block will take a single image's IoU set
const int img = blockIdx.x;
// shared memory for intermediate results
__shared__ volatile float best_bbox_iou_tmp[BLOCK_SIZE];
__shared__ volatile int best_bbox_idx_tmp[BLOCK_SIZE];
// shared memory for final best_bbox_{iou, idx} values
__shared__ volatile float best_bbox_iou[MAX_BBOXES_PER_BLOCK];
__shared__ volatile int best_bbox_idx[MAX_BBOXES_PER_BLOCK];
// index into the global workspace - each image needs (float + int) * M values
volatile float *best_dbox_iou = (float *)&workspace[img * M * 8];
volatile int *best_dbox_idx = (int *)&workspace[img * M * 8 + M * 4];
// number of input bboxes for this image
const int N_rows = offsets[img+1] - offsets[img];
// Check for potential crash
assert(N_rows <= MAX_BBOXES_PER_BLOCK);
#ifdef DEBUG
if (threadIdx.x == 0)
printf("N rows: %d %d to %d (%p - %p)\n", N_rows, offsets[img], offsets[img+1], best_dbox_iou, best_dbox_idx);
#endif
for (int i = threadIdx.x; i < MAX_BBOXES_PER_BLOCK; i += blockDim.x) {
best_bbox_iou[i] = -FLT_MAX;
best_bbox_idx[i] = -1;
}
__syncthreads();
// loop serially over the rows of the IoU set that correspond to this image
int row_num = 0;
for (int i = offsets[img]; i < offsets[img+1]; ++i) {
// reset shmem tallies
best_bbox_iou_tmp[threadIdx.x] = -FLT_MAX;
best_bbox_idx_tmp[threadIdx.x] = -1;
// index into the input buffer
// const float *row = &ious[i * M];
const float4 input_bbox = bbox_in[i];
#ifdef DEBUG
if (threadIdx.x == 0)
printf("%d - %p\n", img, &input_bbox);
#endif
// loop by threads over the columns
for (int j = threadIdx.x; j < M; j += blockDim.x) {
// check and store new max if necessary
const float4 input_dbox = dboxes[j];
// float new_val = row[j];
float new_val = calc_single_iou(input_bbox, input_dbox);
// handle per-row max in shared memory
if (new_val > best_bbox_iou_tmp[threadIdx.x]) {
best_bbox_iou_tmp[threadIdx.x] = new_val;
best_bbox_idx_tmp[threadIdx.x] = j;
}
// handle per-col max in global workspace
if (new_val > best_dbox_iou[j]) {
best_dbox_iou[j] = new_val;
best_dbox_idx[j] = row_num;
#ifdef DEBUG
assert(best_dbox_idx[j] >= 0);
assert(best_dbox_idx[j] < N_rows);
#endif
}
}
// Now we have all the values for this row -- reduce
__syncthreads();
// reduce - output is in max_{val, idx}_row[0]
reduce_val_idx(blockDim.x, best_bbox_iou_tmp, best_bbox_idx_tmp);
#ifdef DEBUG
__syncthreads();
#endif
// store output for row i
if (threadIdx.x == 0) {
best_bbox_iou[row_num] = best_bbox_iou_tmp[0];
best_bbox_idx[row_num] = best_bbox_idx_tmp[0];
#ifdef DEBUG
assert(best_bbox_idx[row_num] >= 0);
assert(best_bbox_idx[row_num] < M);
#endif
}
__syncthreads();
// keep track of _local_ row
row_num++;
}
#ifdef DEBUG
if (threadIdx.x == 0) {
for (int i = 0; i < N_rows; ++i) {
printf("%d - row : %d : best bbox_idx: %d\n", img, i, best_bbox_idx[i]);
}
}
#endif
#ifdef DEBUG
// make sure all best_bbox_{iou, val} are seen by everyone
__syncthreads();
#endif
// At this point we have the maximum values & indices for both bbox and dbox
/*
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
*/
for (int i = threadIdx.x; i < N_rows; i += blockDim.x) {
int idx = best_bbox_idx[i];
#ifdef DEBUG
assert(idx < M);
assert(idx >= 0);
#endif
best_dbox_iou[idx] = 2.;
best_dbox_idx[idx] = i;
#ifdef DEBUG
printf("%d - set best dbox_idx[%d] to %d\n", img, best_bbox_idx[i], i);
#endif
}
/**
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
__syncthreads();
for (int i = threadIdx.x; i < M; i += blockDim.x) {
// offset into output arrays: M values per image
// int output_idx = offsets[img] * M + i;
int output_idx = img * M + i;
// reset output labels to background
// NOTE: bbox_out is already cloned from dbox outside of this kernel
label_out[output_idx] = 0;
// Filter IoU > 0.5
bool mask = best_dbox_iou[i] > criteria;
float4 bbox = bbox_out[output_idx];
// copy some labels and bboxes
if (mask) {
// copy label
#ifdef DEBUG
      printf("%d : label: local input idx: %d, value: %ld\n", i, best_dbox_idx[i], labels_in[offsets[img] + best_dbox_idx[i]]);
// printf("%d : label: local input idx: %d, value: %d\n", i, best_dbox_idx[i], labels_in[offsets[img] + i]);
#endif
label_out[output_idx] = labels_in[offsets[img] + best_dbox_idx[i]];
// grab original box
bbox = bbox_in[offsets[img] + best_dbox_idx[i]];
#ifdef DEBUG
printf("mask %d : %d : %f %f %f %f\n", i, best_dbox_idx[i], bbox.x, bbox.y, bbox.z, bbox.w);
#endif
}
// transfer to xywh
float4 bbox_tmp;
bbox_tmp.x = 0.5 * (bbox.x + bbox.z);
bbox_tmp.y = 0.5 * (bbox.y + bbox.w);
bbox_tmp.z = bbox.z - bbox.x;
bbox_tmp.w = bbox.w - bbox.y;
// write out
bbox_out[output_idx] = bbox_tmp;
}
}
/**
def encode(self, bboxes_in, labels_in, criteria = 0.5):
ious = calc_iou_tensor(bboxes_in, self.dboxes)
best_dbox_ious, best_dbox_idx = ious.max(dim=0)
best_bbox_ious, best_bbox_idx = ious.max(dim=1)
# set best ious 2.0
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
#print(maxloc.shape, labels_in.shape, labels_out.shape)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
**/
std::vector<at::Tensor> box_encoder(const int N_img,
const at::Tensor& bbox_input,
const at::Tensor& bbox_offsets,
const at::Tensor& labels_input,
const at::Tensor& dbox,
float criteria) {
// Check everything is on the device
AT_ASSERTM(bbox_input.is_cuda(), "bboxes must be a CUDA tensor");
AT_ASSERTM(bbox_offsets.is_cuda(), "bbox offsets must be a CUDA tensor");
AT_ASSERTM(labels_input.is_cuda(), "labels must be a CUDA tensor");
AT_ASSERTM(dbox.is_cuda(), "dboxes must be a CUDA tensor");
// Check at least offsets, bboxes and labels are consistent
// Note: offsets is N+1 vs. N for labels
AT_ASSERTM(N_img + 1 == bbox_offsets.numel(), "must have N_img+1 offsets");
auto num_bbox_total = bbox_offsets[bbox_offsets.numel()-1].item<int>();
#ifdef DEBUG
printf("%d : bboxes: %d\n", (int)bbox_offsets.numel(), num_bbox_total);
#endif
AT_ASSERTM(num_bbox_total <= 2048, "total num bboxes must be <= 2048");
AT_ASSERTM(bbox_input.size(0) == labels_input.size(0), "bbox and labels must have same leading dimension");
const int N = bbox_input.size(0);
const int M = dbox.size(0);
auto stream = at::cuda::getCurrentCUDAStream();
// allocate final outputs (known size)
#ifdef DEBUG
printf("%d x %d\n", N_img * M, 4);
// at::Tensor bbox_out = dbox.scalar_type().tensor({N_img * M, 4});
printf("allocating %lu bytes for output labels\n", N_img*M*sizeof(long));
#endif
at::Tensor labels_out = at::empty({N_img * M}, labels_input.options());
THCudaCheck(cudaGetLastError());
// copy default boxes to outputs
#ifdef DEBUG
printf("allocating %lu bytes for output bboxes\n", N_img*M*4*sizeof(float));
#endif
at::Tensor bbox_out = dbox.repeat({N_img, 1});
THCudaCheck(cudaGetLastError());
// need to allocate some workspace
#ifdef DEBUG
  printf("allocating %lu bytes for workspace\n", (unsigned long)(8 * M * N_img));
#endif
// at::Tensor workspace = at::CUDA(at::kByte).zeros({8 * M * N_img});
at::Tensor workspace = at::zeros({8 * M * N_img}, at::CUDA(at::kByte));
THCudaCheck(cudaGetLastError());
// Encode the inputs
const int THREADS_PER_BLOCK = 256;
encode<THREADS_PER_BLOCK, 256><<<N_img, THREADS_PER_BLOCK, 0, stream.stream()>>>(N_img,
(float4*)bbox_input.data_ptr<float>(),
labels_input.data_ptr<long>(),
bbox_offsets.data_ptr<int>(),
M,
(float4*)dbox.data_ptr<float>(),
criteria,
workspace.data_ptr<uint8_t>(),
(float4*)bbox_out.data_ptr<float>(),
labels_out.data_ptr<long>());
THCudaCheck(cudaGetLastError());
return {bbox_out, labels_out};
}
at::Tensor calc_ious(const int N_img,
const at::Tensor& boxes1,
const at::Tensor& boxes1_offsets,
const at::Tensor& boxes2) {
const int N = boxes1.size(0);
const int M = boxes2.size(0);
auto stream = at::cuda::getCurrentCUDAStream();
// at::Tensor ious = at::CUDA(at::kFloat).zeros({N, M});
// at::Tensor ious = at::ones(at::CUDA(at::kFloat), {N, M});
at::Tensor ious = at::empty({N, M}, boxes1.options());
// Get IoU of all source x default box pairs
calc_ious_kernel<<<N_img, 256, 0, stream.stream()>>>(
N_img,
(float4*)boxes1.data_ptr<float>(),
boxes1_offsets.data_ptr<int>(),
M,
(float4*)boxes2.data_ptr<float>(),
ious.data_ptr<float>());
THCudaCheck(cudaGetLastError());
return ious;
}
/**
 * Each block handles one image; its threads cooperate over all channels and pixels
**/
template <typename T>
__global__
void HorizFlipImagesAndBoxes(
const int N,
const int C,
const int H,
const int W,
const T* img_in,
float* bboxes,
const int* offsets,
const float p,
const float* flip,
T* img_out,
const bool nhwc) {
// early return if not flipping
if (flip[blockIdx.x] < p) return;
// pointer offset into images
const int img_offset = blockIdx.x * C * H * W;
const T* img = &img_in[img_offset];
T* img_o = &img_out[img_offset];
// flip bboxes
auto bbox_offset_begin = offsets[blockIdx.x];
auto bbox_offset_end = offsets[blockIdx.x + 1];
auto num_bboxes = bbox_offset_end - bbox_offset_begin;
const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x;
// bboxes in ltrb format, scaled to [0, 1]
for (int i = thread_idx; i < num_bboxes; i += blockDim.x * blockDim.y) {
      float *bbox = &bboxes[(bbox_offset_begin + i) * 4];
      // Could do this in place, but we're not register constrained, so use temporaries
auto bbox_0 = bbox[0];
auto bbox_2 = bbox[2];
bbox[0] = 1. - bbox_2;
bbox[2] = 1. - bbox_0;
}
if (nhwc) {
    // NHWC: each (h, w) position copies its C channel values to the mirrored column
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const T* img_hw = &img[h * W * C + w * C];
T * img_out_hw = &img_o[h * W * C + (W - 1 - w) * C];
for (int c = 0; c < C; ++c) {
img_out_hw[c] = img_hw[c];
}
}
}
} else {
// loop over channels
for (int c = 0; c < C; ++c) {
const T* img_c = &img[c * H * W];
T *img_out_c = &img_o[c * H * W];
// handle tiles of (h, w) at a time
for (int h = threadIdx.y; h < H; h += blockDim.y) {
for (int w = threadIdx.x; w < W; w += blockDim.x) {
const int input_idx = h * W + w;
const int output_idx = h * W + (W - 1 - w);
img_out_c[output_idx] = img_c[input_idx];
}
}
}
}
}
/**
* Take images and their bboxes, randomly flip on horizontal axis
 * In/Out: img: NCHW (or NHWC when nhwc is set) tensor of N C-channel images of constant (H, W)
* In/Out: bboxes: [N_i, 4] tensor of original bboxes in ltrb format
* In: bbox_offsets: [N] offset values into bboxes
* In: p \in [0, 1): probability of flipping each (img, bbox) pair
* In: nhwc: Tensor in NHWC format
* ----
 * Note: allocates a temporary image and returns the flipped copy; bboxes are modified in place
*/
std::vector<at::Tensor> random_horiz_flip(
at::Tensor& img,
at::Tensor& bboxes,
const at::Tensor& bbox_offsets,
const float p,
const bool nhwc) {
// dimensions
const int N = img.size(0);
int C, H, W;
if (nhwc) {
C = img.size(3);
H = img.size(1);
W = img.size(2);
} else {
C = img.size(1);
H = img.size(2);
W = img.size(3);
}
assert(img.is_cuda());
assert(bboxes.is_cuda());
assert(bbox_offsets.is_cuda());
// printf("%d %d %d %d\n", N, C, H, W);
// Need temp storage of size img
at::Tensor tmp_img = img.clone();
at::Tensor flip = at::zeros({N}, at::CUDA(at::kFloat)).uniform_(0., 1.);
auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
img.scalar_type(),
"HorizFlipImagesAndBoxes",
[&] {
HorizFlipImagesAndBoxes<scalar_t><<<N, dim3(16, 16), 0, stream.stream()>>>(
N,
C,
H,
W,
img.data_ptr<scalar_t>(),
bboxes.data_ptr<float>(),
bbox_offsets.data_ptr<int>(),
p,
flip.data_ptr<float>(),
tmp_img.data_ptr<scalar_t>(),
nhwc);
THCudaCheck(cudaGetLastError());
});
// copy tmp_img -> img
// img = tmp_img;
return {tmp_img, bboxes};
}
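/**
 * A minimal binding sketch, assuming this file is built as a PyTorch C++/CUDA extension;
 * the module registration below is hypothetical and only illustrates how these entry
 * points could be exposed to Python.
 *
 *   #include <torch/extension.h>
 *   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
 *     m.def("box_encoder",       &box_encoder,       "SSD box encoder (CUDA)");
 *     m.def("calc_ious",         &calc_ious,         "Pairwise box IoU (CUDA)");
 *     m.def("random_horiz_flip", &random_horiz_flip, "Random horizontal image/bbox flip (CUDA)");
 *   }
 **/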
#include <iostream>
#include <chrono>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#include "Common.cuh"
#define BBCU_BATCHNORM_FW_BLOCK_SIZE 128
#define BBCU_BATCHNORM_BW_BLOCK_SIZE 128
/*
//////////////////////////////
// common
//////////////////////////////
__device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf)
{
buf[threadIdx.x] = v;
__syncthreads();
    // reduce across threads in the block
int comb = 1;
while (comb < blockDim.x) {
int next = comb * 2;
int mask = next - 1;
if ((threadIdx.x & mask) == 0) {
buf[threadIdx.x] += buf[threadIdx.x + comb];
}
comb = next;
__syncthreads();
}
float sum = buf[0];
__syncthreads();
return sum;
}
*/
//////////////////////////////
// forward training
//////////////////////////////
__global__ void kernal_fp32_StochasticBatchNormalization_ForwardTraining(
const float *x_buf,
float *y_buf,
float *mean_buf,
float *rstd_buf,
float *running_mean_buf,
float *running_var_buf,
float gamma,
float beta,
float momentum,
float reciprocal_frame_size,
int frame_size,
int frame_stride
)
{
__shared__ float buf[BBCU_BATCHNORM_FW_BLOCK_SIZE];
    // initialization
int const node = blockIdx.x;
int const id = threadIdx.x;
int const id_step = blockDim.x;
const float* x_ptr = &x_buf[frame_stride * node];
    // Kahan summation algorithm (compensated summation)
float s1 = 0, c1 = 0, y1, t1;
float s2 = 0, c2 = 0, y2, t2;
for ( int frame = id; frame < frame_size; frame += id_step) {
float x = x_ptr[frame];
// printf("StochasticBatchNorm frame=%d node=%d x=%f\n", frame, node, x);
y1 = x - c1;
t1 = s1 + y1;
c1 = (t1 - s1) - y1;
s1 = t1;
y2 = (x * x) - c2;
t2 = s2 + y2;
c2 = (t2 - s2) - y2;
s2 = t2;
}
s1 = device_fp32_LocalSum(s1, buf);
s2 = device_fp32_LocalSum(s2, buf);
float mean = s1 * reciprocal_frame_size;
float var = max(1.0e-5f, (s2 * reciprocal_frame_size) - (mean * mean));
float rstd = rsqrt(var);
// if ( id == 0 ) {
// printf("[1] n=%3d s1=%10f s2=%10f mean=%10f var=%10f rstd=%10f\n", node, s1, s2, mean, var, rstd);
// printf("1\t%3d\t%.20e\t%.20e\t%.20e\t%.20e\t%.20e\n", node, s1, s2, mean, var, rstd);
// }
    // write back per-node statistics
if (id == 0) {
running_mean_buf[node] = running_mean_buf[node] * momentum + mean * (1.0f - momentum);
running_var_buf[node] = running_var_buf[node] * momentum + var * (1.0f - momentum);
mean_buf[node] = mean;
rstd_buf[node] = rstd;
// printf("[StochasticBatchNormalization] node=%d mean=%f rstd=%f\n", node, mean, rstd);
}
    // normalize
float* y_ptr = &y_buf[frame_stride * node];
for ( int frame = id; frame < frame_size; frame += id_step) {
float x = x_ptr[frame];
x = (x - mean) * rstd;
x = x * gamma + beta;
y_ptr[frame] = x;
// printf("[StochasticBatchNormalization] frame=%d node=%d y=%f\n", frame, node, x);
}
}
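// Host-side reference of the compensated (Kahan) accumulation used in the loop above.
// Illustrative only; the helper name is hypothetical and not part of bbcu.
static inline void kahan_add(float x, float &sum, float &comp)
{
    float y = x - comp;    // apply the running compensation
    float t = sum + y;     // low-order bits of y may be lost here...
    comp = (t - sum) - y;  // ...and are recovered into comp for the next addition
    sum = t;
}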
BBCU_DLL_EXPORT int bbcu_fp32_StochasticBatchNormalization_ForwardTraining
(
float const *dev_x_buf,
float *dev_y_buf,
float *dev_mean_buf,
float *dev_rstd_buf,
float *dev_running_mean_buf,
float *dev_running_var_buf,
float gamma,
float beta,
float momentum,
int node_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 grid(node_size);
dim3 block(BBCU_BATCHNORM_FW_BLOCK_SIZE);
kernal_fp32_StochasticBatchNormalization_ForwardTraining<<<grid, block, 0, streamId>>> (
dev_x_buf,
dev_y_buf,
dev_mean_buf,
dev_rstd_buf,
dev_running_mean_buf,
dev_running_var_buf,
gamma,
beta,
momentum,
1.0f/ frame_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// ReForward
//////////////////////////////
__global__ void kernal_fp32_StochasticBatchNormalization_ReForward(
const float *x_buf,
float *y_buf,
float const *mean_buf,
float const *rstd_buf,
float gamma,
float beta,
int frame_size,
int frame_stride
)
{
    // initialization
int const node = blockIdx.x;
int const id = threadIdx.x;
int const id_step = blockDim.x;
float mean = mean_buf[node];
float rstd = rstd_buf[node];
float const *x_ptr = &x_buf[frame_stride * node];
float *y_ptr = &y_buf[frame_stride * node];
for ( int frame = id; frame < frame_size; frame += id_step) {
float x = x_ptr[frame];
x = (x - mean) * rstd;
x = x * gamma + beta;
y_ptr[frame] = x;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_StochasticBatchNormalization_ReForward
(
float const *dev_x_buf,
float *dev_y_buf,
float const *dev_mean_buf,
float const *dev_rstd_buf,
float gamma,
float beta,
int node_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 grid(node_size);
dim3 block(BBCU_BATCHNORM_FW_BLOCK_SIZE);
kernal_fp32_StochasticBatchNormalization_ReForward<<<grid, block, 0, streamId>>>
(
dev_x_buf,
dev_y_buf,
dev_mean_buf,
dev_rstd_buf,
gamma,
beta,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// Forward Inference
//////////////////////////////
__global__ void kernal_fp32_StochasticBatchNormalization_ForwardInference(
const float *x_buf,
float *y_buf,
float const *running_mean_buf,
float const *running_var_buf,
float gamma,
float beta,
int node_size,
int frame_size,
int frame_stride
)
{
int node = blockDim.y * blockIdx.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
if ( node >= node_size) {
return;
}
float mean = running_mean_buf[node];
float var = running_var_buf[node];
float rstd = 1.0 / (sqrt(var) + 1.0e-7);
float const *x_ptr = &x_buf[frame_stride * node];
float *y_ptr = &y_buf[frame_stride * node];
for ( int frame = id; frame < frame_size; frame += id_step ) {
float x = x_ptr[frame];
y_ptr[frame] = ((x - mean) * rstd) * gamma + beta;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_StochasticBatchNormalization_ForwardInference
(
float const *dev_x_buf,
float *dev_y_buf,
float const *dev_running_mean_buf,
float const *dev_running_var_buf,
float gamma,
float beta,
int node_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block;
dim3 grid;
block.x = std::min(frame_size, 1024);
block.y = std::min(node_size, 1024);
while (block.y > 1 && block.x * block.y > 1024) {
block.y = (block.y + 1) / 2;
}
grid.x = 1;
grid.y = (node_size + (block.y - 1)) / block.y;
kernal_fp32_StochasticBatchNormalization_ForwardInference<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_running_mean_buf,
dev_running_var_buf,
gamma,
beta,
node_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
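// Worked example (illustrative): for frame_size = 4096 and node_size = 100,
// block.x = min(4096, 1024) = 1024 and block.y starts at min(100, 1024) = 100;
// the halving loop then shrinks block.y to 1 so that block.x * block.y <= 1024,
// giving block = (1024, 1) and grid = (1, 100), i.e. one block per node.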
//////////////////////////////
// Backward
//////////////////////////////
__global__ void kernal_fp32_StochasticBatchNormalization_Backward
(
float const *x_buf,
float const *dy_buf,
float *dx_buf,
float const *mean_buf,
float const *rstd_buf,
float gamma,
float reciprocal_frame_size,
int frame_size,
int frame_stride,
int x_frame_stride
)
{
__shared__ float buf[BBCU_BATCHNORM_BW_BLOCK_SIZE];
    // initialization
int const node = blockIdx.x;
int const id = threadIdx.x;
int const id_step = blockDim.x;
float mean = mean_buf[node];
float rstd = rstd_buf[node];
float dmeanx = 0;
float dstd = 0;
float rstd2 = rstd * rstd;
float const * const x_ptr = &x_buf[node * x_frame_stride];
float const * const dy_ptr = &dy_buf[node * frame_stride];
for ( int frame = id; frame < frame_size; frame += id_step) {
float x = x_ptr[frame];
float dy = dy_ptr[frame];
float xc = x - mean;
// float xn = xc * rstd;
float dxn = gamma * dy;
dstd += -(dxn * xc * rstd2);
dmeanx += -(dxn * rstd);
// printf("[StochasticBatchNormalization bw] frame=%d node=%d x=%f dy=%f\n", frame, node, x, dy);
}
dstd = device_fp32_LocalSum(dstd, buf);
dmeanx = device_fp32_LocalSum(dmeanx, buf);
float * const dx_ptr = &dx_buf[node * frame_stride];
float dvar = dstd * rstd;
float dmean = (dmeanx - (mean * dvar)) * reciprocal_frame_size;
for ( int frame = id; frame < frame_size; frame += id_step) {
float dy = dy_ptr[frame];
float x = x_ptr[frame];
float dxn = dy * gamma;
float dxc = dxn * rstd;
float dx = dxc + dmean + (x * dvar * reciprocal_frame_size);
dx_ptr[frame] = dx;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_StochasticBatchNormalization_Backward
(
const float *dev_x_buf,
const float *dev_dy_buf,
float *dev_dx_buf,
float const *dev_mean_buf,
float const *dev_rstd_buf,
float gamma,
float reciprocal_frame_size,
int node_size,
int frame_size,
int frame_stride,
int x_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 grid(node_size);
dim3 block(BBCU_BATCHNORM_BW_BLOCK_SIZE);
kernal_fp32_StochasticBatchNormalization_Backward << <grid, block, 0, streamId >> > (
dev_x_buf,
dev_dy_buf,
dev_dx_buf,
dev_mean_buf,
dev_rstd_buf,
gamma,
reciprocal_frame_size,
frame_size,
frame_stride,
x_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//-------------------------------------------------------------------------------------------------
// This file is based on Peter Shirley's book "Ray Tracing in One Weekend"
//
#include <cstdlib>
#include <iostream>
#include <ostream>
#include <memory>
#include <GL/glew.h>
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <Support/CmdLine.h>
#include <Support/CmdLineUtil.h>
#include <visionaray/detail/platform.h>
#include <visionaray/bvh.h>
#include <visionaray/pixel_unpack_buffer_rt.h>
#include <visionaray/generic_material.h>
#include <visionaray/kernels.h>
#include <visionaray/material.h>
#include <visionaray/scheduler.h>
#include <visionaray/thin_lens_camera.h>
#include <common/manip/arcball_manipulator.h>
#include <common/manip/pan_manipulator.h>
#include <common/manip/zoom_manipulator.h>
#include <common/viewer_glut.h>
#ifdef _WIN32
//-------------------------------------------------------------------------------------------------
// https://pubs.opengroup.org/onlinepubs/007908799/xsh/drand48.html
//
double drand48()
{
constexpr static uint64_t m = 1ULL<<48;
constexpr static uint64_t a = 0x5DEECE66DULL;
constexpr static uint64_t c = 0xBULL;
thread_local static uint64_t x = 0;
x = (a * x + c) & (m - 1ULL);
return static_cast<double>(x) / m;
}
#endif
using namespace visionaray;
using viewer_type = viewer_glut;
//-------------------------------------------------------------------------------------------------
//
//
struct renderer : viewer_type
{
using ray_type = basic_ray<float>;
renderer()
: viewer_type(512, 512, "Visionaray \"Ray Tracing in One Weekend with CUDA\" Example")
, bbox({ -3.0f, -3.0f, -3.0f }, { 3.0f, 3.0f, 3.0f })
, device_sched(16, 16)
{
using namespace support;
add_cmdline_option( cl::makeOption<unsigned&>(
cl::Parser<>(),
"spp",
            cl::Desc("Samples per pixel for path tracing"),
cl::ArgRequired,
cl::init(this->spp)
) );
random_scene();
set_background_color(vec3(0.5f, 0.7f, 1.0f));
}
aabb bbox;
thin_lens_camera cam;
pixel_unpack_buffer_rt<PF_RGBA32F, PF_UNSPECIFIED> device_rt;
cuda_sched<ray_type> device_sched;
unsigned frame_num = 0;
unsigned spp = 1;
// rendering data
index_bvh<basic_sphere<float>> sphere_bvh;
std::vector<basic_sphere<float>> list;
std::vector<generic_material<
glass<float>,
matte<float>,
mirror<float>
>> materials;
// copies that are located on the device
// (we build up the initial data structures on the host!)
cuda_index_bvh<basic_sphere<float>> device_bvh;
thrust::device_vector<generic_material<
glass<float>,
matte<float>,
mirror<float>
>> device_materials;
basic_sphere<float> make_sphere(vec3 center, float radius)
{
static int sphere_id = 0;
basic_sphere<float> sphere(center, radius);
sphere.prim_id = sphere_id;
sphere.geom_id = sphere_id;
++sphere_id;
return sphere;
}
glass<float> make_dielectric(float ior)
{
glass<float> mat;
mat.ct() = from_rgb(1.0f, 1.0f, 1.0f);
mat.kt() = 1.0f;
mat.cr() = from_rgb(1.0f, 1.0f, 1.0f);
mat.kr() = 1.0f;
mat.ior() = spectrum<float>(ior);
return mat;
}
matte<float> make_lambertian(vec3 cd)
{
matte<float> mat;
mat.ca() = from_rgb(0.0f, 0.0f, 0.0f);
mat.ka() = 0.0f;
mat.cd() = from_rgb(cd);
mat.kd() = 1.0f;
return mat;
}
mirror<float> make_metal(vec3 cr)
{
mirror<float> mat;
mat.cr() = from_rgb(cr);
mat.kr() = 1.0f;
mat.ior() = spectrum<float>(0.0);
mat.absorption() = spectrum<float>(0.0);
return mat;
}
void random_scene()
{
int n = 500;
list.resize(n + 1);
materials.resize(n + 1);
list[0] = make_sphere(vec3(0, -1000, 0), 1000);
materials[0] = make_lambertian(vec3(0.5f, 0.5f, 0.5f));
int i = 1;
for (int a = -11; a < 11; ++a)
{
for (int b = -11; b < 11; ++b)
{
float choose_mat = drand48();
vec3 center(a + 0.9 * drand48(), 0.2, b + 0.9 * drand48());
if (length(center - vec3(4, 0.2, 0)) > 0.9)
{
list[i] = make_sphere(center, 0.2);
if (choose_mat < 0.8) // diffuse
{
materials[i] = make_lambertian(vec3(
static_cast<float>(drand48() * drand48()),
static_cast<float>(drand48() * drand48()),
static_cast<float>(drand48() * drand48())
));
}
else if (choose_mat < 0.95) // metal
{
materials[i] = make_metal(vec3(
0.5f * (1.0f + static_cast<float>(drand48())),
0.5f * (1.0f + static_cast<float>(drand48())),
0.5f * (1.0f + static_cast<float>(drand48()))
));
}
else
{
materials[i] = make_dielectric(1.5f);
}
++i;
}
}
}
list[i] = make_sphere(vec3(0, 1, 0), 1.0);
materials[i] = make_dielectric(1.5f);
++i;
list[i] = make_sphere(vec3(-4, 1, 0), 1.0);
materials[i] = make_lambertian(vec3(0.4f, 0.2f, 0.1f));
++i;
list[i] = make_sphere(vec3(4, 1, 0), 1.0);
materials[i] = make_metal(vec3(0.7f, 0.6f, 0.5f));
++i;
binned_sah_builder builder;
builder.enable_spatial_splits(true);
sphere_bvh = builder.build(index_bvh<basic_sphere<float>>{}, list.data(), i);
// Copy data to GPU
device_bvh = cuda_index_bvh<basic_sphere<float>>(sphere_bvh);
device_materials = materials;
}
protected:
void on_display();
void on_mouse_move(visionaray::mouse_event const& event);
void on_space_mouse_move(visionaray::space_mouse_event const& event);
void on_resize(int w, int h);
};
//-------------------------------------------------------------------------------------------------
// Display function, contains the rendering kernel
//
void renderer::on_display()
{
// some setup
pixel_sampler::basic_jittered_blend_type<float> blend_params;
blend_params.spp = spp;
float alpha = 1.0f / ++frame_num;
blend_params.sfactor = alpha;
blend_params.dfactor = 1.0f - alpha;
auto sparams = make_sched_params(
blend_params,
cam,
device_rt
);
thrust::device_vector<cuda_index_bvh<basic_sphere<float>>::bvh_ref> device_primitives;
device_primitives.push_back(device_bvh.ref());
auto kparams = make_kernel_params(
thrust::raw_pointer_cast(device_primitives.data()),
thrust::raw_pointer_cast(device_primitives.data()) + device_primitives.size(),
thrust::raw_pointer_cast(device_materials.data()),
50,
1E-3f,
vec4(background_color(), 1.0f),
vec4(0.5f, 0.7f, 1.0f, 1.0f)
);
pathtracing::kernel<decltype(kparams)> kern;
kern.params = kparams;
device_sched.frame(kern, sparams);
// display the rendered image
auto bgcolor = background_color();
glClearColor(bgcolor.x, bgcolor.y, bgcolor.z, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_FRAMEBUFFER_SRGB);
device_rt.display_color_buffer();
}
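// Accumulation note (illustrative): with alpha = 1 / frame_num, blending
// new = alpha * current_frame + (1 - alpha) * accumulated keeps a running average of all
// frames rendered since the last camera move, which is what progressive path tracing needs.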
//-------------------------------------------------------------------------------------------------
// resize event
//
void renderer::on_resize(int w, int h)
{
cam.set_viewport(0, 0, w, h);
float aspect = w / static_cast<float>(h);
cam.perspective(45.0f * constants::degrees_to_radians<float>(), aspect, 0.001f, 1000.0f);
device_rt.resize(w, h);
device_rt.clear_color_buffer();
frame_num = 0;
viewer_type::on_resize(w, h);
}
//-------------------------------------------------------------------------------------------------
// mouse move event
//
void renderer::on_mouse_move(visionaray::mouse_event const& event)
{
if (event.buttons() != mouse::NoButton)
{
frame_num = 0;
device_rt.clear_color_buffer();
}
viewer_type::on_mouse_move(event);
}
void renderer::on_space_mouse_move(visionaray::space_mouse_event const& event)
{
frame_num = 0;
device_rt.clear_color_buffer();
viewer_type::on_space_mouse_move(event);
}
//-------------------------------------------------------------------------------------------------
// Main function, performs initialization
//
int main(int argc, char** argv)
{
renderer rend;
try
{
rend.init(argc, argv);
}
catch (std::exception& e)
{
std::cerr << e.what() << '\n';
return EXIT_FAILURE;
}
float aspect = rend.width() / static_cast<float>(rend.height());
rend.cam.perspective(45.0f * constants::degrees_to_radians<float>(), aspect, 0.001f, 1000.0f);
rend.cam.view_all( rend.bbox );
float aperture = 0.1f;
rend.cam.set_lens_radius(aperture / 2.0f);
rend.cam.set_focal_distance(10.0f);
rend.add_manipulator( std::make_shared<arcball_manipulator>(rend.cam, mouse::Left) );
rend.add_manipulator( std::make_shared<pan_manipulator>(rend.cam, mouse::Middle) );
// Additional "Alt + LMB" pan manipulator for setups w/o middle mouse button
rend.add_manipulator( std::make_shared<pan_manipulator>(rend.cam, mouse::Left, keyboard::Alt) );
rend.add_manipulator( std::make_shared<zoom_manipulator>(rend.cam, mouse::Right) );
rend.event_loop();
}
#include <thrust/iterator/transform_iterator.h>
template <typename FloatT, typename IdxType>
RepresentationsStorage<FloatT, IdxType>::RepresentationsStorage(
const size_t num_objects,
const size_t size,
Streams* const streams)
: reprs_(size, num_objects, streams->next()) {
PROFILE_FUNCTION();
DCHECK_GT(num_objects, 0);
DCHECK_GT(size, 0);
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// From http://stackoverflow.com/questions/16077464/atomicadd-for-double-on-gpu.
//
// This fallback lets the tests run in double precision on older GPUs:
// hardware double-precision atomicAdd requires compute capability 6.0 (Pascal) or newer.
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
// Optimizations for loading the gradient into shared memory inspired by
// https://devblogs.nvidia.com/parallelforall/how-access-global-memory-efficiently-cuda-c-kernels/
template <typename FloatT, typename IdxType>
__global__
void update_repr_kernel(const FloatT learning_rate,
const FloatT* const gradient,
const IdxType* const indices,
FloatT* const repr,
const FloatT* const repr_weights) {
const FloatT weight = repr_weights != nullptr ? repr_weights[blockIdx.x * gridDim.y + blockIdx.y] : 1.0;
// Update.
atomicAdd(&repr[indices[blockIdx.x * gridDim.y + blockIdx.y] * blockDim.x + threadIdx.x],
learning_rate * weight * gradient[blockIdx.x * blockDim.x + threadIdx.x]);
}
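// Launch layout used by RepresentationsStorage::update below:
//   grid  = dim3(num_grads, window_size) -> blockIdx.x selects the gradient column and
//                                           blockIdx.y the position within the window,
//   block = repr_size                    -> threadIdx.x selects the representation dimension,
// so each thread atomically adds one weighted gradient scalar into the representation row
// selected by indices[blockIdx.x * gridDim.y + blockIdx.y].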
template <typename FloatT, typename IdxType>
template <typename UpdateTransformOp, typename AggOp>
void RepresentationsStorage<FloatT, IdxType>::update(
const GradientType& gradient_descs,
const FloatT learning_rate,
const FloatT scaled_regularization_lambda,
Streams* const streams,
const UpdateTransformOp update_transform_op,
const AggOp agg_op) {
PROFILE_FUNCTION();
CHECK_GE(learning_rate, 0.0);
CHECK_GE(scaled_regularization_lambda, 0.0);
if (scaled_regularization_lambda > 0.0) {
reprs_.scale(reprs_.getStream(), 1.0 - (scaled_regularization_lambda * learning_rate));
}
for (const SingleGradientType& gradient_desc : gradient_descs) {
const device_matrix<FloatT>& repr_grad = std::get<0>(gradient_desc);
const device_matrix<IdxType>& indices = std::get<1>(gradient_desc);
const size_t window_size = std::get<2>(gradient_desc);
const device_matrix<FloatT>* const repr_weights = std::get<3>(gradient_desc);
if (repr_weights != nullptr) {
CHECK_DIMENSIONS(*repr_weights, 1, indices.getCols());
}
CHECK_EQ(indices.getRows(), 1);
// Indices should be a multiple of window_size.
CHECK_EQ(indices.size() % window_size, 0);
CHECK_DIMENSIONS(repr_grad,
repr_size(), indices.size() / window_size);
const size_t num_grads = repr_grad.getCols();
LAUNCH_KERNEL(
update_repr_kernel<<<dim3(num_grads, window_size), /* num_blocks */
repr_size(), /* threads_per_block */
0,
streams->next() /* stream */>>>(
learning_rate,
repr_grad.getData(), /* repr_grad */
indices.getData(), /* idx_input */
reprs_.getData(), /* output */
repr_weights != nullptr ? repr_weights->getData() : nullptr));
}
CHECK_MATRIX(reprs_);
}
template <typename FloatT, typename IdxType>
device_matrix<FloatT>* RepresentationsStorage<FloatT, IdxType>::get() {
return &reprs_;
}
template <typename FloatT, typename IdxType>
void RepresentationsStorage<FloatT, IdxType>::initialize_with_constant(const FloatT value) {
reprs_.fillwith(reprs_.getStream(), value);
}
template <typename FloatT, typename IdxType>
typename Storage<FloatT>::DataType RepresentationsStorage<FloatT, IdxType>::get_data() const {
PROFILE_FUNCTION();
return {
{"representations", &reprs_},
};
}
template <typename FloatT, typename IdxType>
void RepresentationsStorage<FloatT, IdxType>::increment_parameter(
const size_t idx, const FloatT epsilon) {
PROFILE_FUNCTION();
DCHECK_LT(idx, num_parameters());
increment_scalar(epsilon, raw_begin(reprs_) + idx);
}
template <typename FloatT, typename IdxType>
FloatT RepresentationsStorage<FloatT, IdxType>::get_parameter_gradient(
const GradientType& gradient_descs,
const size_t param_idx) const {
PROFILE_FUNCTION();
DCHECK_GE(param_idx, 0);
DCHECK_LT(param_idx, num_parameters());
FloatT grad_param = 0.0;
for (const SingleGradientType& gradient_desc : gradient_descs) {
const device_matrix<FloatT>& repr_grad = std::get<0>(gradient_desc);
const device_matrix<IdxType>& d_indices = std::get<1>(gradient_desc);
const thrust::host_vector<IdxType> indices(begin(d_indices), end(d_indices));
const size_t window_size = std::get<2>(gradient_desc);
const device_matrix<FloatT>* const repr_weights = std::get<3>(gradient_desc);
CHECK_DIMENSIONS(repr_grad,
repr_size(), indices.size() / window_size);
CHECK_EQ(indices.size() % window_size, 0);
// Compute the representation identifier.
const size_t repr_idx = param_idx / repr_size();
const size_t repr_param_idx = param_idx % repr_size();
size_t g_idx = 0;
for (const IdxType idx : indices) {
if (repr_idx == idx) {
const size_t grad_offset = (g_idx / window_size) * repr_size();
const FloatT* const scalar_ptr = raw_begin(repr_grad) + grad_offset + repr_param_idx;
const FloatT grad_param_weight_ = (
repr_weights != nullptr
? get_scalar(raw_begin(*repr_weights) + g_idx) : 1.0);
const FloatT grad_param_ = grad_param_weight_* get_scalar(scalar_ptr);
grad_param += grad_param_;
}
++g_idx;
}
CHECK_EQ(g_idx, repr_grad.getCols() * window_size);
}
return grad_param;
}
template <typename FloatT>
TransformStorage<FloatT>::TransformStorage(
const size_t word_repr_size,
const size_t entity_repr_size,
Streams* const streams)
: transform_(entity_repr_size, word_repr_size, streams->next()),
bias_(entity_repr_size, 1, streams->next()) {
PROFILE_FUNCTION();
CHECK_GT(word_repr_size, 0);
CHECK_GT(entity_repr_size, 0);
}
template <typename FloatT>
template <typename UpdateTransformOp, typename AggOp>
void TransformStorage<FloatT>::update(
const GradientType& gradient_desc,
const FloatT learning_rate,
const FloatT scaled_regularization_lambda,
Streams* const streams,
const UpdateTransformOp update_transform_op,
const AggOp agg_op) {
PROFILE_FUNCTION();
const device_matrix<FloatT>& grad_matrix = std::get<0>(gradient_desc);
const device_matrix<FloatT>& grad_bias = std::get<1>(gradient_desc);
CHECK_GE(learning_rate, 0.0);
CHECK_GE(scaled_regularization_lambda, 0.0);
// W + grad_W; element-wise addition.
update_dense(&transform_,
grad_matrix,
learning_rate,
scaled_regularization_lambda,
update_transform_op, agg_op);
// b + grad_b; element-wise addition.
update_dense(&bias_,
grad_bias,
learning_rate,
static_cast<FloatT>(0.0), /* bias should not be regularized */
update_transform_op, agg_op);
}
template <typename FloatT>
std::tuple<device_matrix<FloatT>*,
device_matrix<FloatT>*> TransformStorage<FloatT>::get() {
return std::make_tuple(&transform_, &bias_);
}
template <typename FloatT>
void TransformStorage<FloatT>::initialize_with_constant(const FloatT value) {
transform_.fillwith(transform_.getStream(), value);
bias_.fillwith(bias_.getStream(), value);
}
template <typename FloatT>
typename Storage<FloatT>::DataType TransformStorage<FloatT>::get_data() const {
PROFILE_FUNCTION();
return {
{"transform", &transform_},
{"bias", &bias_},
};
}
template <typename FloatT>
void TransformStorage<FloatT>::increment_parameter(
const size_t idx, const FloatT epsilon) {
PROFILE_FUNCTION();
DCHECK_GE(idx, 0);
DCHECK_LT(idx, num_parameters());
FloatT* const scalar_ptr = (idx < transform_.size()) ?
raw_begin(transform_) + idx :
raw_begin(bias_) + (idx - transform_.size());
increment_scalar(epsilon, scalar_ptr);
}
template <typename FloatT>
FloatT TransformStorage<FloatT>::get_parameter_gradient(
const GradientType& gradient_desc,
const size_t idx) const {
PROFILE_FUNCTION();
const device_matrix<FloatT>& grad_transform = std::get<0>(gradient_desc);
const device_matrix<FloatT>& grad_bias = std::get<1>(gradient_desc);
CHECK_DIMENSIONS_EQUAL(grad_transform, transform_);
CHECK_DIMENSIONS_EQUAL(grad_bias, bias_);
DCHECK_GE(idx, 0);
DCHECK_LT(idx, num_parameters());
const FloatT* const scalar_ptr = (idx < grad_transform.size()) ?
raw_begin(grad_transform) + idx :
raw_begin(grad_bias) + (idx - grad_transform.size());
return get_scalar(scalar_ptr);
}
// Explicit instantiations.
template class Storage<FLOATING_POINT_TYPE>;
template class TransformStorage<FLOATING_POINT_TYPE>;
#define INSTANTIATE_TransformStorage(FloatT, UpdateTransformOp, AggOp) \
template void TransformStorage<FloatT>::update<UpdateTransformOp<FloatT>, AggOp<FloatT>>( \
const TransformStorage<FloatT>::GradientType&, \
const FloatT, \
const FloatT, \
Streams* const, \
const UpdateTransformOp<FloatT>, \
const AggOp<FloatT>)
INSTANTIATE_TransformStorage(FLOATING_POINT_TYPE, func::identity, thrust::plus);
INSTANTIATE_TransformStorage(FLOATING_POINT_TYPE, func::square, thrust::plus);
#undef INSTANTIATE_TransformStorage
template class RepresentationsStorage<FLOATING_POINT_TYPE, int32>;
#define INSTANTIATE_RepresentationsStorage(FloatT, IdxType, UpdateTransformOp, AggOp) \
template void RepresentationsStorage<FloatT, IdxType>::update<UpdateTransformOp<FloatT>, AggOp<FloatT>>( \
const GradientType&, \
const FloatT, \
const FloatT, \
Streams* const, \
const UpdateTransformOp<FloatT>, \
const AggOp<FloatT>)
INSTANTIATE_RepresentationsStorage(FLOATING_POINT_TYPE, int32, func::identity, thrust::plus);
// INSTANTIATE_RepresentationsStorage(FLOATING_POINT_TYPE, int32, func::square, func::identity, thrust::plus);
#undef INSTANTIATE_RepresentationsStorage
|
the_stack
|
#ifndef INCLUDED_CURE_BIN_TILE_RASTERIZATION_STAGE
#define INCLUDED_CURE_BIN_TILE_RASTERIZATION_STAGE
#pragma once
#include <math/vector.h>
#include <math/matrix.h>
#include <utils.h>
#include <warp_ops.cuh>
#include "config.h"
#include "instrumentation.cuh"
#include "BinRasterizer.cuh"
#include "TileRasterizer.cuh"
#include "StampShading.cuh"
#include "TileRasterizerMask.cuh"
#include "viewport.cuh"
#include "work_assignment.cuh"
#include "bitmask.cuh"
#include "rasterization_stage.cuh"
template <unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS, class BinTileSpace>
class BinTileRasterizationStageCommon
{
protected:
template<unsigned int BITS_A, unsigned int BITS_B, unsigned int BITS_C>
struct TriplePack
{
//static constexpr unsigned int TRI_BITS = 10U;
//static constexpr unsigned int BIN_COORD_BITS = 11U;
static constexpr unsigned int OFFSET_A = BITS_B + BITS_C;
static constexpr unsigned int OFFSET_B = BITS_C;
static constexpr unsigned int OFFSET_C = 0;
static constexpr unsigned int MASK_C = (1U << BITS_C) - 1U;
static constexpr unsigned int MASK_B = (1U << BITS_B) - 1U;
static constexpr unsigned int MASK_BC = (1U << (BITS_B + BITS_C)) - 1U;
unsigned int value;
public:
TriplePack() = default;
__device__
TriplePack(unsigned int a, unsigned int b, unsigned int c)
: value((a << OFFSET_A) | (b << OFFSET_B) | (c << OFFSET_C))
{}
__device__ unsigned int a() const { return value >> OFFSET_A; }
__device__ unsigned int b() const { return (value >> OFFSET_B) & MASK_B; }
__device__ unsigned int c() const { return (value >> OFFSET_C) & MASK_C; }
__device__ unsigned int bc() const { return value & MASK_BC; }
};
typedef TriplePack<10, 11, 11> BinTrianglePack;
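// (Added illustration, based on the constructor and accessors above.)
// BinTrianglePack packs a 10-bit triangle slot 'a' and two 11-bit bin
// coordinates 'b' and 'c' into a single 32-bit word:
//   value = (a << 22) | (b << 11) | c
// e.g. TriplePack<10, 11, 11>(5, 3, 7).value == (5 << 22) | (3 << 11) | 7
//                                            == 20971520 + 6144 + 7 == 0x01401807,
// and a(), b(), c() recover 5, 3 and 7 again by shifting and masking.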
static_assert(NUM_WARPS <= WARP_SIZE, "Rasterization stage work assignment depends on NUM_WARPS being less than or equal to WARP_SIZE");
static constexpr int NUM_THREADS = NUM_WARPS * WARP_SIZE;
public:
struct SharedMemT
{
RasterizerQueue::SortQueueShared<NUM_THREADS> rasterizer_queue_shared;
};
static constexpr unsigned int MAX_TRIANGLE_REFERENCES = NUM_RASTERIZERS;
__device__
static unsigned int enqueueTriangle(unsigned int triangle_id, unsigned int primitive_id, const math::int4& bounds)
{
int2 start_bin = BinTileSpace::bin(bounds.x, bounds.y);
int2 end_bin = BinTileSpace::bin(bounds.z, bounds.w);
return BinTileSpace::traverseRasterizers(start_bin, end_bin, [triangle_id, primitive_id](int r)
{
rasterizer_queue.enqueue(r, triangle_id, primitive_id);
});
}
__device__
static void writeSufficientToRunNoSync(volatile int* shared_memory)
{
if (threadIdx.x == 0)
*shared_memory = rasterizer_queue.availableElements(BinTileSpace::MyQueue()) >= NUM_THREADS;
}
__device__
static int fillLevelNoCheck(int qId)
{
return rasterizer_queue.count(qId);
}
__device__
static void writeCanNotReceiveAllNoSync(volatile int* shared_memory)
{
if (threadIdx.x < BinTileSpace::num_rasterizers())
if (rasterizer_queue.index_queue.size(threadIdx.x) >= static_cast<int>(RASTERIZER_QUEUE_SIZE - RASTERIZATION_CONSUME_THRESHOLD))
{
//printf("neglecting due to filllevel on %d: %d\n", threadIdx.x, rasterizer_queue[threadIdx.x].size());
*shared_memory = 1;
}
}
__device__
static void writeIterateCanNotReceiveAllNoSync(volatile int* shared_memory)
{
for (int i = threadIdx.x; i < BinTileSpace::num_rasterizers(); i += NUM_THREADS)
if (rasterizer_queue.index_queue.size(i) >= static_cast<int>(RASTERIZER_QUEUE_SIZE - RASTERIZATION_CONSUME_THRESHOLD))
{
//printf("neglecting due to filllevel on %d: %d >= %d\n", i, rasterizer_queue.index_queue.size(i), static_cast<int>(RASTERIZER_QUEUE_SIZE - RASTERIZATION_CONSUME_THRESHOLD));
*shared_memory = 1;
}
}
__device__
static void completedPrimitive(unsigned int primitive_id)
{
rasterizer_queue.completedPrimitive(primitive_id);
}
__device__
static bool prepareRun(char* shared_memory_in, volatile int* sufficienttorun)
{
bool res = rasterizer_queue.sortQueue<NUM_THREADS>(BinTileSpace::MyQueue(), shared_memory_in, sufficienttorun);
return res;
}
};
template <unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS, TILE_ACCESS_MODE EXCLUSIVE_TILES_RASTERMODE, bool PRIMITIVE_ORDER, bool QUAD_SHADING, class BinTileSpace, class CoverageShader, class FragmentShader, class FrameBuffer, class BlendOp>
class BinTileRasterizationStage;
template <unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS, bool PRIMITIVE_ORDER, class BinTileSpace, class CoverageShader, class FragmentShader, class FrameBuffer, class BlendOp>
class BinTileRasterizationStage<NUM_RASTERIZERS, NUM_WARPS, TILE_ACCESS_MODE::WARP_EXCLUSIVE, PRIMITIVE_ORDER, false, BinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> : public BinTileRasterizationStageCommon<NUM_RASTERIZERS, NUM_WARPS, BinTileSpace>
{
typedef ::BinRasterizer<NUM_WARPS, BinTileSpace> BinRasterizer;
typedef ::TileRasterizer<NUM_WARPS, true, BinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> TileRasterizer;
typedef ::BlockWorkAssignmentOld<NUM_THREADS> BlockWorkAssignment;
typedef ::TileBitMask<BinTileSpace> TileBitMask;
typedef BinTileRasterizationStageCommon<NUM_RASTERIZERS, NUM_WARPS, BinTileSpace> Common;
public:
struct SharedMemT
{
union
{
struct
{
unsigned int tri_ids[NUM_THREADS];
BlockWorkAssignment::SharedMemT bin_work_assignment;
TileBitMask tile_bit_masks[NUM_THREADS + WARP_SIZE];
BinTrianglePack bin_triangle_pack[NUM_THREADS];
union
{
BlockWorkAssignment::SharedTempMemT blockwork_sharedtemp;
BinRasterizer::SharedMemT binraster;
TileRasterizer::SharedMemT tileraster;
};
volatile unsigned int bin_work_counter[2];
};
Common::SharedMemT commonSMem;
};
};
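// (Added note) The anonymous unions above deliberately overlap storage: the
// work-assignment, bin-raster and tile-raster temporaries appear to be used in
// disjoint phases of run(), and Common's queue-sorting storage is only needed
// in prepareRun(), so letting them alias the same shared memory keeps
// SHARED_MEMORY small.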
static constexpr size_t SHARED_MEMORY = sizeof(SharedMemT);
__device__
static bool run(char* shared_memory_in)
{
SharedMemT& shared_memory = *new(shared_memory_in) SharedMemT;
unsigned int triidin = 0xFFFFFFFFU;
int num_tris = rasterizer_queue.dequeueIndexBlock(BinTileSpace::MyQueue(), triidin, NUM_THREADS);
shared_memory.tri_ids[threadIdx.x] = triidin;
int wip = threadIdx.x / WARP_SIZE;
if (num_tris > 0)
{
Instrumentation::BlockObserver<4, 1> observer;
// clear the additional bin masks
shared_memory.tile_bit_masks[NUM_THREADS + laneid()] = TileBitMask::Empty();
{
Instrumentation::BlockObserver<14, 2> observer;
int num_bins = 0;
if (threadIdx.x < num_tris)
{
// compute num elements
math::int4 bounds = triangle_buffer.loadBounds(triidin);
int2 start_bin = BinTileSpace::bin(bounds.x, bounds.y);
int2 end_bin = BinTileSpace::bin(bounds.z - 1, bounds.w - 1);
num_bins = BinTileSpace::numHitBinsForMyRasterizer(start_bin, end_bin);
}
BlockWorkAssignment::prepare(shared_memory.bin_work_assignment, shared_memory.blockwork_sharedtemp, num_bins);
}
do
{
__syncthreads();
// process bin of triangle
int triangle, bin;
TileBitMask bitmask = TileBitMask::Empty();
// store num_bins in shared memory so we don't waste a register
shared_memory.bin_work_counter[1] = min(NUM_THREADS, BlockWorkAssignment::availableWork(shared_memory.bin_work_assignment));
if ([&]() -> bool
{
Instrumentation::BlockObserver<7, 2> observer;
return BlockWorkAssignment::pullWorkThreads(shared_memory.bin_work_assignment, shared_memory.blockwork_sharedtemp, triangle, bin);
}())
{
math::int4 bounds;
int2 start_bin, end_bin, binid;
int triangleId = shared_memory.tri_ids[triangle];
{
Instrumentation::BlockObserver<15, 2> observer;
// note that we could store the bin bounds in shared, but that actually does not make things faster...
bounds = triangle_buffer.loadBounds(triangleId);
start_bin = BinTileSpace::bin(bounds.x, bounds.y);
end_bin = BinTileSpace::bin(bounds.z - 1, bounds.w - 1);
binid = BinTileSpace::getHitBinForMyRasterizer(bin, start_bin, end_bin);
}
// store meta information
shared_memory.bin_triangle_pack[threadIdx.x] = BinTrianglePack(triangle, binid.x, binid.y);
BinRasterizer::run(shared_memory.binraster, bitmask, triangleId, binid, bounds);
//if (blockIdx.x == 16)
// printf("%d: %llx\n", threadIdx.x, bitmask.mask);
}
shared_memory.tile_bit_masks[threadIdx.x] = bitmask;
__syncthreads();
// every warp figures out if there are other warps working on the same tile
// updates its internal bit mask copies
// computes the work offsets
// and picks its own tile
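// (Added sketch of the scheme below.) Each lane loads the tile mask of one
// pending (triangle, bin) entry and, via warp shuffles, clears tiles that an
// earlier lane targeting the same bin already covers, so a tile is touched
// only once per bin. An inclusive warp scan over the surviving tile counts
// then lets warp 'wip' claim the first lane whose running total exceeds its
// warp index, so each of the NUM_WARPS warps picks at most one tile per
// iteration; the last warp also computes the next value of 'start'.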
int start = 0;
int lid = laneid();
while (start < shared_memory.bin_work_counter[1])
{
TileBitMask myMask;
unsigned int c, workon, mCounter;
{
Instrumentation::BlockObserver<8, 2> observer;
const unsigned int MaxPropagate = 15U;
myMask = shared_memory.tile_bit_masks[start + lid];
uint myBin = shared_memory.bin_triangle_pack[start + lid].bc();
{
Instrumentation::BlockObserver<9, 3> observer;
#pragma unroll
for (int i = 0; i < MaxPropagate; ++i)
{
TileBitMask otherMask = myMask.shfl(i);
if (i < laneid() && shared_memory.bin_triangle_pack[start + i].bc() == myBin)
myMask.unmark(otherMask);
}
}
mCounter = myMask.count();
if (MaxPropagate < 31U && lid > MaxPropagate)
mCounter = 0;
WarpScan<unsigned int>::InclusiveSum(mCounter, c, lid);
workon = __ffs(__ballot_sync(~0U, c > wip)) - 1;
if (wip == NUM_WARPS - 1)
{
bool changed = !(myMask == shared_memory.tile_bit_masks[start + lid]);
unsigned int nchanged = __ffs(__ballot_sync(~0U, changed)) - 1;
unsigned int nextstart = start + min(MaxPropagate + 1, min(nchanged, workon));
shared_memory.bin_work_counter[0] = nextstart;
}
__syncthreads();
}
if (workon != 0xFFFFFFFFU)
{
// distribute offset and bitmask
unsigned int bitoffsetid = wip + mCounter - c;
bitoffsetid = __shfl_sync(~0U, bitoffsetid, workon);
myMask = myMask.shfl(workon);
// find the bit
unsigned int bit = myMask.getSetBitWarp(bitoffsetid);
// if i am the last one to work on that bitmask, unset all of us
if (lid == workon && (wip == NUM_WARPS - 1 || bitoffsetid + 1 == mCounter))
{
myMask.andStride(0, bit + 1);
shared_memory.tile_bit_masks[start + lid].unmark(myMask);
}
BinTrianglePack& pack = shared_memory.bin_triangle_pack[start + workon];
//TODO: NUM_WARPS is not right here - we need to put in the number of active warps
TileRasterizer::run(shared_memory.tileraster, bit, shared_memory.tri_ids[pack.a()], pack.b(), pack.c(), NUM_WARPS);
}
start = shared_memory.bin_work_counter[0];
__syncthreads();
}
} while (BlockWorkAssignment::isWorkAvailable(shared_memory.bin_work_assignment));
//////////////////////////////////////////////////////////////////////////////
//// vis bounding box
//__syncthreads();
//int wip = threadIdx.x / WARP_SIZE;
//for (int i = wip; i < num_tris; i += NUM_WARPS)
//{
// math::int4 bounds = triangle_buffer.loadBounds(tri_ids[i]);
// int2 start_bin = BinTileSpace::bin(bounds.x, bounds.y);
// for (int x = bounds.x + laneid(); x < bounds.z; x += warpSize)
// {
// FrameBuffer::writeColor(x, bounds.y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(x, bounds.w, make_uchar4(255, 255, 255, 255));
// }
// for (int y = bounds.y + laneid(); y < bounds.w; y += warpSize)
// {
// FrameBuffer::writeColor(bounds.x, y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(bounds.z, y, make_uchar4(255, 255, 255, 255));
// }
//}
////////////////////////////////////////////////////////////////////////////////
__threadfence();
if (shared_memory.tri_ids[threadIdx.x] != 0xFFFFFFFFU)
{
triangle_buffer.release(shared_memory.tri_ids[threadIdx.x]);
}
return true;
}
return false;
}
};
template <unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS, bool PRIMITIVE_ORDER, class BinTileSpace, class CoverageShader, class FragmentShader, class FrameBuffer, class BlendOp>
class BinTileRasterizationStage<NUM_RASTERIZERS, NUM_WARPS, TILE_ACCESS_MODE::WARP_PER_FRAGMENT, PRIMITIVE_ORDER, false, BinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> : public BinTileRasterizationStageCommon<NUM_RASTERIZERS, NUM_WARPS, BinTileSpace>
{
typedef ::BinRasterizer<NUM_WARPS, BinTileSpace> BinRasterizer;
typedef ::TileRasterizer<NUM_WARPS, true, BinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> TileRasterizer;
typedef BlockWorkAssignment<NUM_WARPS, false> BinWorkAssignment;
typedef BlockWorkAssignment<NUM_WARPS, true> TileWorkAssignment;
typedef ::TileBitMask<BinTileSpace> TileBitMask;
typedef BinTileRasterizationStageCommon<NUM_RASTERIZERS, NUM_WARPS, BinTileSpace> Common;
public:
struct SharedMemT
{
union
{
struct
{
unsigned int tri_ids[NUM_THREADS];
BinWorkAssignment::SharedMemT bin_work_assignment;
TileWorkAssignment::SharedMemT tile_work_assignment;
TileBitMask tile_bit_masks[NUM_THREADS];
BinTrianglePack bin_triangle_pack[NUM_THREADS];
union
{
BinWorkAssignment::SharedTempMemT bin_work_sharedtemp;
TileWorkAssignment::SharedTempMemT tile_work_sharedtemp;
BinRasterizer::SharedMemT binraster;
TileRasterizer::SharedMemT tileraster;
};
};
Common::SharedMemT commonSMem;
};
};
static constexpr size_t SHARED_MEMORY = sizeof(SharedMemT);
__device__
static bool run(char* shared_memory_in)
{
SharedMemT& shared_memory = *new(shared_memory_in)SharedMemT;
unsigned int triidin = 0xFFFFFFFFU;
int num_tris = rasterizer_queue.dequeueIndexBlock(BinTileSpace::MyQueue(), triidin, NUM_THREADS);
shared_memory.tri_ids[threadIdx.x] = triidin;
if (num_tris > 0)
{
Instrumentation::BlockObserver<4, 1> observer;
{
Instrumentation::BlockObserver<14, 2> observer;
int num_bins = 0;
if (threadIdx.x < num_tris)
{
// compute num elements
math::int4 bounds = triangle_buffer.loadBounds(triidin);
int2 start_bin = BinTileSpace::bin(bounds.x, bounds.y);
int2 end_bin = BinTileSpace::bin(bounds.z - 1, bounds.w - 1);
num_bins = BinTileSpace::numHitBinsForMyRasterizer(start_bin, end_bin);
}
{
Instrumentation::BlockObserver<7, 2> observer;
BinWorkAssignment::prepare(shared_memory.bin_work_assignment, shared_memory.bin_work_sharedtemp, num_bins);
}
}
do
{
__syncthreads();
// process bin of triangle
int triangle, bin;
TileBitMask bitmask = TileBitMask::Empty();
if ([&]() -> bool
{
Instrumentation::BlockObserver<7, 2> observer;
return BinWorkAssignment::pullWorkThreads(shared_memory.bin_work_assignment, shared_memory.bin_work_sharedtemp, triangle, bin);
}())
{
math::int4 bounds;
int2 start_bin, end_bin, binid;
int triangleId = shared_memory.tri_ids[triangle];
{
Instrumentation::BlockObserver<15, 2> observer;
// note that we could store the bin bounds in shared, but that actually does not make things faster...
bounds = triangle_buffer.loadBounds(triangleId);
start_bin = BinTileSpace::bin(bounds.x, bounds.y);
end_bin = BinTileSpace::bin(bounds.z - 1, bounds.w - 1);
binid = BinTileSpace::getHitBinForMyRasterizer(bin, start_bin, end_bin);
}
// store meta information
shared_memory.bin_triangle_pack[threadIdx.x] = BinTrianglePack(triangle, binid.x, binid.y);
BinRasterizer::run(shared_memory.binraster, bitmask, triangleId, binid, bounds);
}
shared_memory.tile_bit_masks[threadIdx.x] = bitmask;
__syncthreads();
// work on one triangle and tile after each other (one tile per warp)
{
Instrumentation::BlockObserver<8, 2> observer;
TileWorkAssignment::prepare(shared_memory.tile_work_assignment, shared_memory.tile_work_sharedtemp, bitmask.count());
__syncthreads();
}
while (TileWorkAssignment::availableWork(shared_memory.tile_work_assignment) > 0)
{
{
Instrumentation::BlockObserver<8, 2> observer;
TileWorkAssignment::prepareConsistentWorkThreads(shared_memory.tile_work_assignment, shared_memory.tile_work_sharedtemp);
}
int numTiles;
do
{
int tileid, tilebit;
if ([&]() -> bool
{
Instrumentation::BlockObserver<8, 2> observer;
return TileWorkAssignment::takeOutConsistentWorkThreads(warp_id(), NUM_WARPS, shared_memory.tile_work_assignment, tileid, tilebit, numTiles);
}())
{
unsigned int bit = shared_memory.tile_bit_masks[tileid].getSetBitWarp(tilebit);
BinTrianglePack & pack = shared_memory.bin_triangle_pack[tileid];
TileRasterizer::run(shared_memory.tileraster, bit, shared_memory.tri_ids[pack.a()], pack.b(), pack.c(), min(NUM_WARPS, numTiles));
}
} while (numTiles > NUM_WARPS);
{
Instrumentation::BlockObserver<8, 2> observer;
TileWorkAssignment::removeTakenWorkThreads(NUM_THREADS, shared_memory.tile_work_assignment);
}
}
} while (BinWorkAssignment::availableWork(shared_memory.bin_work_assignment) > 0);
//////////////////////////////////////////////////////////////////////////////
//// vis bounding box
//__syncthreads();
//int wip = threadIdx.x / WARP_SIZE;
//for (int i = wip; i < num_tris; i += NUM_WARPS)
//{
// math::int4 bounds = triangle_buffer.loadBounds(tri_ids[i]);
// int2 start_bin = BinTileSpace::bin(bounds.x, bounds.y);
// for (int x = bounds.x + laneid(); x < bounds.z; x += warpSize)
// {
// FrameBuffer::writeColor(x, bounds.y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(x, bounds.w, make_uchar4(255, 255, 255, 255));
// }
// for (int y = bounds.y + laneid(); y < bounds.w; y += warpSize)
// {
// FrameBuffer::writeColor(bounds.x, y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(bounds.z, y, make_uchar4(255, 255, 255, 255));
// }
//}
////////////////////////////////////////////////////////////////////////////////
__threadfence();
if (shared_memory.tri_ids[threadIdx.x] != 0xFFFFFFFFU)
{
triangle_buffer.release(shared_memory.tri_ids[threadIdx.x]);
}
return true;
}
return false;
}
};
template <unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS, bool PRIMITIVE_ORDER, bool QUAD_SHADING, class BinTileSpace, class CoverageShader, class FragmentShader, class FrameBuffer, class BlendOp>
class BinTileRasterizationStage<NUM_RASTERIZERS, NUM_WARPS, TILE_ACCESS_MODE::COVERAGE_MASK, PRIMITIVE_ORDER, QUAD_SHADING, BinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> : public BinTileRasterizationStageCommon<NUM_RASTERIZERS, NUM_WARPS, BinTileSpace>
{
typedef ::BinRasterizer<NUM_WARPS, BinTileSpace> BinRasterizer;
typedef ::TileRasterizerMask<NUM_WARPS, BinTileSpace, CoverageShader> TileRasterizer;
typedef ::StampShading<NUM_WARPS, TILE_RASTER_EXCLUSIVE_ACCESS_METHOD, QUAD_SHADING, BinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> StampShading;
typedef BlockWorkAssignment<NUM_WARPS, false> BinWorkAssignment;
typedef BlockWorkAssignment<NUM_WARPS, false> TileWorkAssignment;
typedef BlockWorkAssignment<NUM_WARPS, false> ShadeWorkAssignment;
typedef ::TileBitMask<BinTileSpace> TileBitMask;
typedef ::StampBitMask<BinTileSpace> StampBitMask;
typedef BinTileRasterizationStageCommon<NUM_RASTERIZERS, NUM_WARPS, BinTileSpace> Common;
public:
typedef TriplePack<6, 10, 16> MaskTileHashPack;
struct SharedMemT
{
union
{
struct
{
unsigned int tri_ids[NUM_THREADS];
BinWorkAssignment::SharedMemT bin_work_assignment;
TileWorkAssignment::SharedMemT tile_work_assignment;
ShadeWorkAssignment::SharedMemT shade_work_assignment;
TileBitMask tile_bit_masks[NUM_THREADS];
BinTrianglePack bin_triangle_pack[NUM_THREADS];
StampBitMask stamp_bit_masks[NUM_THREADS];
MaskTileHashPack tile_bin_mask_pack[NUM_THREADS];
union
{
BinWorkAssignment::SharedTempMemT bin_work_sharedtemp;
TileWorkAssignment::SharedTempMemT tile_work_sharedtemp;
ShadeWorkAssignment::SharedTempMemT shade_work_sharedtemp;
BinRasterizer::SharedMemT binraster;
TileRasterizer::SharedMemT tileraster;
StampShading::SharedMemT stampshading;
};
};
Common::SharedMemT commonSMem;
};
};
static constexpr size_t SHARED_MEMORY = sizeof(SharedMemT);
__device__
static bool run(char* shared_memory_in)
{
SharedMemT& shared_memory = *new(shared_memory_in)SharedMemT;
unsigned int triidin = 0xFFFFFFFFU;
int num_tris = rasterizer_queue.dequeueIndexBlock(BinTileSpace::MyQueue(), triidin, NUM_THREADS);
shared_memory.tri_ids[threadIdx.x] = triidin;
if (num_tris > 0)
{
Instrumentation::BlockObserver<4, 1> observer;
{
Instrumentation::BlockObserver<14, 2> observer;
int num_bins = 0;
if (threadIdx.x < num_tris)
{
// compute num elements
math::int4 bounds = triangle_buffer.loadBounds(triidin);
int2 start_bin = BinTileSpace::bin(bounds.x, bounds.y);
int2 end_bin = BinTileSpace::bin(bounds.z - 1, bounds.w - 1);
num_bins = BinTileSpace::numHitBinsForMyRasterizer(start_bin, end_bin);
}
{
Instrumentation::BlockObserver<7, 2> observer;
BinWorkAssignment::prepare(shared_memory.bin_work_assignment, shared_memory.bin_work_sharedtemp, num_bins);
}
}
do
{
__syncthreads();
// process bin of triangle
int triangle, bin;
TileBitMask bitmask = TileBitMask::Empty();
if ([&]() -> bool
{
Instrumentation::BlockObserver<7, 2> observer;
return BinWorkAssignment::pullWorkThreads(shared_memory.bin_work_assignment, shared_memory.bin_work_sharedtemp, triangle, bin);
}())
{
math::int4 bounds;
int2 start_bin, end_bin, binid;
int triangleId = shared_memory.tri_ids[triangle];
{
Instrumentation::BlockObserver<15, 2> observer;
// note that we could store the bin bounds in shared, but that actually does not make things faster...
bounds = triangle_buffer.loadBounds(triangleId);
start_bin = BinTileSpace::bin(bounds.x, bounds.y);
end_bin = BinTileSpace::bin(bounds.z - 1, bounds.w - 1);
binid = BinTileSpace::getHitBinForMyRasterizer(bin, start_bin, end_bin);
}
// store meta information
shared_memory.bin_triangle_pack[threadIdx.x] = BinTrianglePack(triangle, binid.x, binid.y);
BinRasterizer::run(shared_memory.binraster, bitmask, triangleId, binid, bounds);
/* for (int r = 0; r < 8; ++r)
for (int c = 0; c < 8; ++c)
{
if (bitmask.isset(c, r))
{
int2 localtile = make_int2(c, r);
int4 tbound = BinTileSpace::tileBounds(binid, localtile);
for (int y = tbound.y; y < tbound.w; ++y)
for (int x = tbound.x; x < tbound.z; ++x)
FrameBuffer::writeColor(x, y, make_uchar4(triangleId % 256, triangleId / 256 % 256, triangleId / 256 / 256 % 256, 255));
}
}*/
}
shared_memory.tile_bit_masks[threadIdx.x] = bitmask;
__syncthreads();
{
Instrumentation::BlockObserver<8, 2> observer;
TileWorkAssignment::prepare(shared_memory.tile_work_assignment, shared_memory.tile_work_sharedtemp, bitmask.count());
//__syncthreads();
}
do
{
__syncthreads();
int tile, tilebit;
StampBitMask stampbitmask = StampBitMask::Empty();
if ([&]() -> bool
{
Instrumentation::BlockObserver<8, 2> observer;
return TileWorkAssignment::pullWorkThreads(shared_memory.tile_work_assignment, shared_memory.tile_work_sharedtemp, tile, tilebit);
}())
{
BinTrianglePack & pack = shared_memory.bin_triangle_pack[tile];
unsigned int bit = shared_memory.tile_bit_masks[tile].getSetBit(tilebit);
int2 localTile = TileBitMask::bitToCoord(bit);
int2 bin = make_int2(pack.b(), pack.c());
int triangleId = shared_memory.tri_ids[pack.a()];
shared_memory.tile_bin_mask_pack[threadIdx.x] = MaskTileHashPack(bit, tile, ((bin.x << 6) ^ (bin.y << 3) ^ bit) & 0xFFFF);
TileRasterizer::run(shared_memory.tileraster, stampbitmask, triangleId, bin, localTile);
//int4 tbound = BinTileSpace::tileBounds(bin, localTile);
//for (int y = 0; y < 8; ++y)
// for (int x = 0; x < 8; ++x)
// if (stampbitmask.isset(x, y))
// {
// FrameBuffer::writeColor(tbound.x + x, tbound.y + y, make_uchar4(triangleId % 256, triangleId / 256 % 256, triangleId / 256 / 256 % 256, 255));
// }
}
shared_memory.stamp_bit_masks[threadIdx.x] = stampbitmask;
__syncthreads();
{
Instrumentation::BlockObserver<17, 2> observer;
ShadeWorkAssignment::prepare(shared_memory.shade_work_assignment, shared_memory.shade_work_sharedtemp, QUAD_SHADING ? (stampbitmask.quadMask().count()*4) : stampbitmask.count());
__syncthreads();
}
while (ShadeWorkAssignment::availableWork(shared_memory.shade_work_assignment) > 0)
{
__syncthreads();
int stampmaskoffset = 0, localoffset = 0, stampbit, sumwork, startTile;
int triangleId, bit;
int2 p;
if ([&]() -> bool
{
Instrumentation::BlockObserver<17, 2> observer;
return ShadeWorkAssignment::pullWorkThreads(shared_memory.shade_work_assignment, shared_memory.shade_work_sharedtemp, stampmaskoffset, stampbit, sumwork, startTile, localoffset);
}())
{
MaskTileHashPack & tilepack = shared_memory.tile_bin_mask_pack[stampmaskoffset];
BinTrianglePack & binpack = shared_memory.bin_triangle_pack[tilepack.b()];
StampBitMask& myBitMask = shared_memory.stamp_bit_masks[stampmaskoffset];
bit = QUAD_SHADING ? myBitMask.quadMask().getSetBit(stampbit/4) : myBitMask.getSetBit(stampbit);
bit = QUAD_SHADING ? (bit + threadIdx.x % 2 + (threadIdx.x % 4 / 2) * StampBitMask::Cols) : bit;
int2 localStamp = StampBitMask::bitToCoord(bit);
int2 localTile = TileBitMask::bitToCoord(tilepack.a());
int2 bin = make_int2(binpack.b(), binpack.c());
triangleId = shared_memory.tri_ids[binpack.a()];
//run shading and blending
int4 tbound = BinTileSpace::tileBounds(bin, localTile);
p = make_int2(tbound.x + localStamp.x, tbound.y + localStamp.y);
//FrameBuffer::writeColor(p.x, p.y, make_uchar4(triangleId % 256, triangleId / 256 % 256, triangleId / 256 / 256 % 256, 255));
}
//__syncthreads();
//TODO: we need to use the compacted bin and the tileid within the bin for the comparison
StampShading::run(shared_memory.stampshading, triangleId, p, shared_memory.stamp_bit_masks, stampmaskoffset, stampbit, bit, localoffset, startTile, sumwork, [&shared_memory](int i){return shared_memory.tile_bin_mask_pack[i].c(); });
//__syncthreads();
}
} while (TileWorkAssignment::availableWork(shared_memory.tile_work_assignment) > 0);
} while (BinWorkAssignment::availableWork(shared_memory.bin_work_assignment) > 0);
//////////////////////////////////////////////////////////////////////////////
//// vis bounding box
//__syncthreads();
//int wip = threadIdx.x / WARP_SIZE;
//for (int i = wip; i < num_tris; i += NUM_WARPS)
//{
// math::int4 bounds = triangle_buffer.loadBounds(tri_ids[i]);
// int2 start_bin = BinTileSpace::bin(bounds.x, bounds.y);
// for (int x = bounds.x + laneid(); x < bounds.z; x += warpSize)
// {
// FrameBuffer::writeColor(x, bounds.y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(x, bounds.w, make_uchar4(255, 255, 255, 255));
// }
// for (int y = bounds.y + laneid(); y < bounds.w; y += warpSize)
// {
// FrameBuffer::writeColor(bounds.x, y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(bounds.z, y, make_uchar4(255, 255, 255, 255));
// }
//}
////////////////////////////////////////////////////////////////////////////////
__threadfence();
if (shared_memory.tri_ids[threadIdx.x] != 0xFFFFFFFFU)
{
triangle_buffer.release(shared_memory.tri_ids[threadIdx.x]);
}
return true;
}
return false;
}
};
#endif // INCLUDED_CURE_BIN_TILE_RASTERIZATION_STAGE
|
the_stack
|
/**
**************************************************************************
* \file dct8x8.cu
* \brief Contains entry point, wrappers to host and device code and benchmark.
*
* This sample implements forward and inverse Discrete Cosine Transform to blocks
* of image pixels (of 8x8 size), as in JPEG standard. The typical work flow is as
* follows:
* 1. Run CPU version (Host code) and measure execution time;
* 2. Run CUDA version (Device code) and measure execution time;
* 3. Output execution timings and calculate CUDA speedup.
*/
#include "Common.h"
/**
* The number of DCT kernel calls
*/
#if 1
#define BENCHMARK_SIZE 1
#else
#define BENCHMARK_SIZE 10
#endif
/**
* PSNR values over this threshold indicate that the images are effectively equal
*/
#define PSNR_THRESHOLD_EQUAL 30
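//
// (Added reference, assuming CalculatePSNR uses the standard definition for
// 8-bit images: PSNR = 10 * log10(255^2 / MSE) dB.)
// Under that definition, PSNR > 30 dB means MSE < 255^2 / 10^3 ~= 65,
// i.e. an average per-pixel error of roughly 8 gray levels.
//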
/**
* Texture reference that is passed through this global variable into device code.
* This is done because passing it through the argument list in any conventional
* way results in a compiler internal error. 2008.03.11
*/
texture<float, 2, cudaReadModeElementType> TexSrc;
// includes kernels
#include "dct8x8_kernel1.cuh"
#include "dct8x8_kernel2.cuh"
#include "dct8x8_kernel_short.cuh"
#include "dct8x8_kernel_quantization.cuh"
/**
**************************************************************************
* Wrapper function for 1st gold version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperGold1(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate float buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
float *ImgF2 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//create and start CUDA timer
unsigned int timerGold = 0;
cutilCheckError(cutCreateTimer(&timerGold));
cutilCheckError(cutResetTimer(timerGold));
//perform block-wise DCT processing and benchmarking
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerGold));
computeDCT8x8Gold1(ImgF1, ImgF2, StrideF, Size);
cutilCheckError(cutStopTimer(timerGold));
}
//stop and destroy CUDA timer
float TimerGoldSpan = cutGetAverageTimerValue(timerGold);
cutilCheckError(cutDeleteTimer(timerGold));
//perform quantization
quantizeGoldFloat(ImgF2, StrideF, Size);
//perform block-wise IDCT processing
computeIDCT8x8Gold1(ImgF2, ImgF1, StrideF, Size);
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//free float buffers
FreePlane(ImgF1);
FreePlane(ImgF2);
//return time taken by the operation
return TimerGoldSpan;
}
/**
**************************************************************************
* Wrapper function for 2nd gold version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperGold2(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate float buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
float *ImgF2 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//create and start CUDA timer
unsigned int timerGold = 0;
cutilCheckError(cutCreateTimer(&timerGold));
cutilCheckError(cutResetTimer(timerGold));
//perform block-wise DCT processing and benchmarking
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerGold));
computeDCT8x8Gold2(ImgF1, ImgF2, StrideF, Size);
cutilCheckError(cutStopTimer(timerGold));
}
//stop and destroy CUDA timer
float TimerGoldSpan = cutGetAverageTimerValue(timerGold);
cutilCheckError(cutDeleteTimer(timerGold));
//perform quantization
quantizeGoldFloat(ImgF2, StrideF, Size);
//perform block-wise IDCT processing
computeIDCT8x8Gold2(ImgF2, ImgF1, StrideF, Size);
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//free float buffers
FreePlane(ImgF1);
FreePlane(ImgF2);
//return time taken by the operation
return TimerGoldSpan;
}
/**
**************************************************************************
* Wrapper function for 1st CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDA1(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//prepare channel format descriptor for passing texture into kernels
cudaChannelFormatDesc floattex = cudaCreateChannelDesc<float>();
//allocate device memory
cudaArray *Src;
float *Dst;
size_t DstStride;
cutilSafeCall(cudaMallocArray(&Src, &floattex, Size.width, Size.height));
cutilSafeCall(cudaMallocPitch((void **)(&Dst), &DstStride, Size.width * sizeof(float), Size.height));
DstStride /= sizeof(float);
//convert source image to float representation
int ImgSrcFStride;
float *ImgSrcF = MallocPlaneFloat(Size.width, Size.height, &ImgSrcFStride);
CopyByte2Float(ImgSrc, Stride, ImgSrcF, ImgSrcFStride, Size);
AddFloatPlane(-128.0f, ImgSrcF, ImgSrcFStride, Size);
//copy from host memory to device
cutilSafeCall(cudaMemcpy2DToArray(Src, 0, 0,
ImgSrcF, ImgSrcFStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyHostToDevice) );
//setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
//create and start CUDA timer
unsigned int timerCUDA = 0;
cutilCheckError(cutCreateTimer(&timerCUDA));
cutilCheckError(cutResetTimer(timerCUDA));
//execute DCT kernel and benchmark
cutilSafeCall(cudaBindTextureToArray(TexSrc, Src));
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerCUDA));
CUDAkernel1DCT<<< grid, threads >>>(Dst, (int) DstStride, 0, 0);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timerCUDA));
}
cutilSafeCall(cudaUnbindTexture(TexSrc));
cutilCheckMsg("Kernel execution failed");
// finalize CUDA timer
float TimerCUDASpan = cutGetAverageTimerValue(timerCUDA);
cutilCheckError(cutDeleteTimer(timerCUDA));
// execute Quantization kernel
CUDAkernelQuantizationFloat<<< grid, threads >>>(Dst, (int) DstStride);
cutilCheckMsg("Kernel execution failed");
//copy quantized coefficients from host memory to device array
cutilSafeCall(cudaMemcpy2DToArray(Src, 0, 0,
Dst, DstStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyDeviceToDevice) );
// execute IDCT kernel
cutilSafeCall(cudaBindTextureToArray(TexSrc, Src));
CUDAkernel1IDCT<<< grid, threads >>>(Dst, (int) DstStride, 0, 0);
cutilSafeCall(cudaUnbindTexture(TexSrc));
cutilCheckMsg("Kernel execution failed");
//copy quantized image block to host
cutilSafeCall(cudaMemcpy2D(ImgSrcF, ImgSrcFStride * sizeof(float),
Dst, DstStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyDeviceToHost) );
//convert image back to byte representation
AddFloatPlane(128.0f, ImgSrcF, ImgSrcFStride, Size);
CopyFloat2Byte(ImgSrcF, ImgSrcFStride, ImgDst, Stride, Size);
//clean up memory
cutilSafeCall(cudaFreeArray(Src));
cutilSafeCall(cudaFree(Dst));
FreePlane(ImgSrcF);
//return time taken by the operation
return TimerCUDASpan;
}
/**
**************************************************************************
* Wrapper function for 2nd CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDA2(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate host buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//allocate device memory
float *SrcDst;
size_t DeviceStride;
cutilSafeCall(cudaMallocPitch((void **)(&SrcDst), &DeviceStride, Size.width * sizeof(float), Size.height));
DeviceStride /= sizeof(float);
//copy from host memory to device
cutilSafeCall(cudaMemcpy2D(SrcDst, DeviceStride * sizeof(float),
ImgF1, StrideF * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyHostToDevice) );
//create and start CUDA timer
unsigned int timerCUDA = 0;
cutilCheckError(cutCreateTimer(&timerCUDA));
cutilCheckError(cutResetTimer(timerCUDA));
//setup execution parameters
dim3 GridFullWarps(Size.width / KER2_BLOCK_WIDTH, Size.height / KER2_BLOCK_HEIGHT, 1);
dim3 ThreadsFullWarps(8, KER2_BLOCK_WIDTH/8, KER2_BLOCK_HEIGHT/8);
//perform block-wise DCT processing and benchmarking
cutilCheckError(cutStartTimer(timerCUDA));
CUDAkernel2DCT<<< GridFullWarps, ThreadsFullWarps >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timerCUDA));
cutilCheckMsg("Kernel execution failed");
// finalize CUDA timer
float TimerCUDASpan = cutGetAverageTimerValue(timerCUDA);
cutilCheckError(cutDeleteTimer(timerCUDA));
//setup execution parameters for quantization
dim3 ThreadsSmallBlocks(BLOCK_SIZE, BLOCK_SIZE);
dim3 GridSmallBlocks(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
// execute Quantization kernel
CUDAkernelQuantizationFloat<<< GridSmallBlocks, ThreadsSmallBlocks >>>(SrcDst, (int) DeviceStride);
cutilCheckMsg("Kernel execution failed");
//perform block-wise IDCT processing
CUDAkernel2IDCT<<< GridFullWarps, ThreadsFullWarps >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("Kernel execution failed");
//copy quantized image block to host
cutilSafeCall(cudaMemcpy2D(ImgF1, StrideF * sizeof(float),
SrcDst, DeviceStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyDeviceToHost) );
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//clean up memory
cutilSafeCall(cudaFree(SrcDst));
FreePlane(ImgF1);
//return time taken by the operation
return TimerCUDASpan;
}
/**
**************************************************************************
* Wrapper function for short CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDAshort(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate host buffers for DCT and other data
int StrideS;
short *ImgS1 = MallocPlaneShort(Size.width, Size.height, &StrideS);
//convert source image to short representation centered at 128
for (int i=0; i<Size.height; i++)
{
for (int j=0; j<Size.width; j++)
{
ImgS1[i*StrideS+j] = (short)ImgSrc[i*Stride+j] - 128;
}
}
//allocate device memory
short *SrcDst;
size_t DeviceStride;
cutilSafeCall(cudaMallocPitch((void **)(&SrcDst), &DeviceStride, Size.width * sizeof(short), Size.height));
DeviceStride /= sizeof(short);
//copy from host memory to device
cutilSafeCall(cudaMemcpy2D(SrcDst, DeviceStride * sizeof(short),
ImgS1, StrideS * sizeof(short),
Size.width * sizeof(short), Size.height,
cudaMemcpyHostToDevice) );
//create and start CUDA timer
unsigned int timerLibJpeg = 0;
cutilCheckError(cutCreateTimer(&timerLibJpeg));
cutilCheckError(cutResetTimer(timerLibJpeg));
//setup execution parameters
dim3 GridShort(Size.width / KERS_BLOCK_WIDTH, Size.height / KERS_BLOCK_HEIGHT, 1);
dim3 ThreadsShort(8, KERS_BLOCK_WIDTH/8, KERS_BLOCK_HEIGHT/8);
//perform block-wise DCT processing and benchmarking
cutilCheckError(cutStartTimer(timerLibJpeg));
CUDAkernelShortDCT<<< GridShort, ThreadsShort >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timerLibJpeg));
cutilCheckMsg("Kernel execution failed");
//stop and destroy CUDA timer
float TimerLibJpegSpan16b = cutGetAverageTimerValue(timerLibJpeg);
cutilCheckError(cutDeleteTimer(timerLibJpeg));
//setup execution parameters for quantization
dim3 ThreadsSmallBlocks(BLOCK_SIZE, BLOCK_SIZE);
dim3 GridSmallBlocks(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
// execute Quantization kernel
CUDAkernelQuantizationShort<<< GridSmallBlocks, ThreadsSmallBlocks >>>(SrcDst, (int) DeviceStride);
cutilCheckMsg("Kernel execution failed");
//perform block-wise IDCT processing
CUDAkernelShortIDCT<<< GridShort, ThreadsShort >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("Kernel execution failed");
//copy quantized image block to host
cutilSafeCall(cudaMemcpy2D(ImgS1, StrideS * sizeof(short),
SrcDst, DeviceStride * sizeof(short),
Size.width * sizeof(short), Size.height,
cudaMemcpyDeviceToHost) );
//convert image back to byte representation
for (int i=0; i<Size.height; i++)
{
for (int j=0; j<Size.width; j++)
{
ImgDst[i*Stride+j] = clamp_0_255(ImgS1[i*StrideS+j] + 128);
}
}
//free float buffers
cutilSafeCall(cudaFree(SrcDst));
FreePlane(ImgS1);
//return time taken by the operation
return TimerLibJpegSpan16b;
}
/**
**************************************************************************
* Program entry point
*
* \param argc [IN] - Number of command-line arguments
* \param argv [IN] - Array of command-line arguments
*
* \return Status code
*/
int main(int argc, char** argv)
{
//
// Sample initialization
//
//initialize CUDA
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
//source and results image filenames
char SampleImageFname[] = "barbara.bmp";
char SampleImageFnameResGold1[] = "barbara_gold1.bmp";
char SampleImageFnameResGold2[] = "barbara_gold2.bmp";
char SampleImageFnameResCUDA1[] = "barbara_cuda1.bmp";
char SampleImageFnameResCUDA2[] = "barbara_cuda2.bmp";
char SampleImageFnameResCUDAshort[] = "barbara_cuda_short.bmp";
char *pSampleImageFpath = cutFindFilePath(SampleImageFname, argv[0]);
//preload image (acquire dimensions)
int ImgWidth, ImgHeight;
ROI ImgSize;
int res = PreLoadBmp(pSampleImageFpath, &ImgWidth, &ImgHeight);
ImgSize.width = ImgWidth;
ImgSize.height = ImgHeight;
//CONSOLE INFORMATION: saying hello to user
printf("CUDA sample DCT/IDCT implementation\n");
printf("===================================\n");
printf("Loading test image: %s... ", SampleImageFname);
if (res)
{
printf("\nError: Image file not found or invalid!\n");
printf("Press ENTER to exit...\n");
getchar();
//finalize
cutilExit(argc, argv);
return 1;
}
//check image dimensions are multiples of BLOCK_SIZE
if (ImgWidth % BLOCK_SIZE != 0 || ImgHeight % BLOCK_SIZE != 0)
{
printf("\nError: Input image dimensions must be multiples of 8!\n");
printf("Press ENTER to exit...\n");
getchar();
//finalize
cutilExit(argc, argv);
return 1;
}
printf("[%d x %d]... ", ImgWidth, ImgHeight);
//allocate image buffers
int ImgStride;
byte *ImgSrc = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstGold1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstGold2 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDA1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDA2 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDAshort = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
//load sample image
LoadBmpAsGray(pSampleImageFpath, ImgStride, ImgSize, ImgSrc);
//
// RUNNING WRAPPERS
//
//compute Gold 1 version of DCT/quantization/IDCT
printf("Success\nRunning Gold 1 (CPU) version... ");
float TimeGold1 = WrapperGold1(ImgSrc, ImgDstGold1, ImgStride, ImgSize);
//compute Gold 2 version of DCT/quantization/IDCT
printf("Success\nRunning Gold 2 (CPU) version... ");
float TimeGold2 = WrapperGold2(ImgSrc, ImgDstGold2, ImgStride, ImgSize);
//compute CUDA 1 version of DCT/quantization/IDCT
printf("Success\nRunning CUDA 1 (GPU) version... ");
float TimeCUDA1 = WrapperCUDA1(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize);
//compute CUDA 2 version of DCT/quantization/IDCT
printf("Success\nRunning CUDA 2 (GPU) version... ");
float TimeCUDA2 = WrapperCUDA2(ImgSrc, ImgDstCUDA2, ImgStride, ImgSize);
//compute CUDA short version of DCT/quantization/IDCT
printf("Success\nRunning CUDA short (GPU) version... ");
float TimeCUDAshort = WrapperCUDAshort(ImgSrc, ImgDstCUDAshort, ImgStride, ImgSize);
//
// Execution statistics, result saving and validation
//
//dump result of Gold 1 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResGold1);
DumpBmpAsGray(SampleImageFnameResGold1, ImgDstGold1, ImgStride, ImgSize);
//dump result of Gold 2 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResGold2);
DumpBmpAsGray(SampleImageFnameResGold2, ImgDstGold2, ImgStride, ImgSize);
//dump result of CUDA 1 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA1);
DumpBmpAsGray(SampleImageFnameResCUDA1, ImgDstCUDA1, ImgStride, ImgSize);
//dump result of CUDA 2 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA2);
DumpBmpAsGray(SampleImageFnameResCUDA2, ImgDstCUDA2, ImgStride, ImgSize);
//dump result of CUDA short processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDAshort);
DumpBmpAsGray(SampleImageFnameResCUDAshort, ImgDstCUDAshort, ImgStride, ImgSize);
//print speed info
printf("Success\n");
#if 0
printf("Processing time : not relevant in CUDA emulation mode\n");
#else
printf("Processing time (CUDA 1) : %f ms \n", TimeCUDA1);
printf("Processing time (CUDA 2) : %f ms \n", TimeCUDA2);
printf("Processing time (CUDA short): %f ms \n", TimeCUDAshort);
#endif
//calculate PSNR between each pair of images
float PSNR_Src_DstGold1 = CalculatePSNR(ImgSrc, ImgDstGold1, ImgStride, ImgSize);
float PSNR_Src_DstGold2 = CalculatePSNR(ImgSrc, ImgDstGold2, ImgStride, ImgSize);
float PSNR_Src_DstCUDA1 = CalculatePSNR(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize);
float PSNR_Src_DstCUDA2 = CalculatePSNR(ImgSrc, ImgDstCUDA2, ImgStride, ImgSize);
float PSNR_Src_DstCUDAshort = CalculatePSNR(ImgSrc, ImgDstCUDAshort, ImgStride, ImgSize);
float PSNR_DstGold1_DstCUDA1 = CalculatePSNR(ImgDstGold1, ImgDstCUDA1, ImgStride, ImgSize);
float PSNR_DstGold2_DstCUDA2 = CalculatePSNR(ImgDstGold2, ImgDstCUDA2, ImgStride, ImgSize);
float PSNR_DstGold2_DstCUDA16b = CalculatePSNR(ImgDstGold2, ImgDstCUDAshort, ImgStride, ImgSize);
printf("PSNR Original <---> CPU(Gold 1) : %f\n", PSNR_Src_DstGold1);
printf("PSNR Original <---> CPU(Gold 2) : %f\n", PSNR_Src_DstGold2);
printf("PSNR Original <---> GPU(CUDA 1) : %f\n", PSNR_Src_DstCUDA1);
printf("PSNR Original <---> GPU(CUDA 2) : %f\n", PSNR_Src_DstCUDA2);
printf("PSNR Original <---> GPU(CUDA short): %f\n", PSNR_Src_DstCUDAshort);
printf("PSNR CPU(Gold 1) <---> GPU(CUDA 1) : %f\n", PSNR_DstGold1_DstCUDA1);
printf("PSNR CPU(Gold 2) <---> GPU(CUDA 2) : %f\n", PSNR_DstGold2_DstCUDA2);
printf("PSNR CPU(Gold 2) <---> GPU(CUDA short): %f\n", PSNR_DstGold2_DstCUDA16b);
if (PSNR_DstGold1_DstCUDA1 > PSNR_THRESHOLD_EQUAL && PSNR_DstGold2_DstCUDA2 > PSNR_THRESHOLD_EQUAL && PSNR_DstGold2_DstCUDA16b > PSNR_THRESHOLD_EQUAL)
{
printf("\nTEST PASSED!\n");
}
else
{
printf("\nTEST FAILED! (CPU and GPU results differ too much)\n");
}
//
// Finalization
//
//release byte planes
FreePlane(ImgSrc);
FreePlane(ImgDstGold1);
FreePlane(ImgDstGold2);
FreePlane(ImgDstCUDA1);
FreePlane(ImgDstCUDA2);
FreePlane(ImgDstCUDAshort);
//finalize
cudaThreadExit();
cutilExit(argc, argv);
return 0;
}
|
the_stack
|
// TODO describe me
#define GB_CUDA_KERNEL
//#include <cstdint>
#include "GB_cuda_buckets.h"
#include "matrix.h"
#include <cooperative_groups.h>
#include "local_cub/block/block_scan.cuh"
using namespace cooperative_groups;
// A stateful callback functor that maintains a running prefix to be applied
// during consecutive scan operations.
struct BlockPrefixCallbackOp
{
// Running prefix
int64_t running_total;
// Constructor
__device__ BlockPrefixCallbackOp(int64_t running_total) : running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ int64_t operator()(int64_t block_aggregate)
{
int64_t old_prefix = running_total;
running_total += block_aggregate;
return old_prefix;
}
};
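// (Added note) Interaction with BlockScan below: on the first blocksize-wide
// segment the functor returns its initial running_total (0) as the exclusive
// prefix and then accumulates that segment's aggregate; on each following
// segment it returns the total of all previous segments. This is what lets
// blockBucketExclusiveSum() scan an array longer than one block width in
// chunks while keeping the exclusive sums globally consistent.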
__inline__
__device__ void blockBucketExclusiveSum(int bucketId, int64_t *d_data, int nblocks)
{
#define blocksize 32
// Specialize BlockScan for a 1D block of 32 threads
typedef cub::BlockScan<int64_t, 32, cub::BLOCK_SCAN_WARP_SCANS> BlockScan;
// Allocate shared memory for BlockScan
__shared__ typename BlockScan::TempStorage temp_storage;
// Initialize running total
BlockPrefixCallbackOp prefix_op(0);
// Have the block iterate over segments of items
int64_t data=0;
int64_t *blockbucket= d_data;
for (int block_id = 0; block_id < nblocks; block_id += blocksize)
{
// Load a segment of consecutive items that are blocked across threads
//printf("block %d entering sum\n",blockIdx.x);
int loc = block_id + threadIdx.x;
if ( loc < nblocks)
{
//printf("block %di loading tid=%d\n",block_id,tid);
data = blockbucket[bucketId*nblocks +loc ] ;
}
__syncthreads();
//printf("bb%d_%d s0 before prefix= %ld \n", block_id,bucketId,
// blockbucket[bucketId*nblocks + block_id+threadIdx.x] ) ;
// Collectively compute the block-wide exclusive prefix sum
BlockScan(temp_storage).ExclusiveSum( data, data, prefix_op);
__syncthreads();
if ( loc < nblocks)
{
blockbucket[bucketId*nblocks +loc ] = data ;
}
__syncthreads();
//printf("bb%d_%d = %ld \n", block_id, bucketId, blockbucket[bucketId*nblocks+block_id+threadIdx.x] ) ;
data = 0;
}
}
template< typename T, int tile_sz>
__inline__ __device__
T warp_ReduceSumPlus( thread_block_tile<tile_sz> tile, T val)
{
// Each iteration halves the number of active threads
// Each thread adds the partial sum from lane (lane + i) to its own partial sum
for (int i = tile.size() / 2; i > 0; i /= 2) {
val += tile.shfl_down( val, i);
}
return val; // note: only thread 0 will return full sum
}
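// (Added note) For tile_sz == 32 the loop above performs shfl_down steps with
// offsets 16, 8, 4, 2, 1; e.g. if every lane starts with val == 1, lane 0
// ends up with 32 while the other lanes hold partial sums, which is why
// callers only rely on the value held by lane 0.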
template<typename T, int warpSize>
__inline__ __device__
T block_ReduceSum(thread_block g, T val)
{
static __shared__ T shared[warpSize]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
thread_block_tile<warpSize> tile = tiled_partition<warpSize>( g );
// Each warp performs partial reduction
val = warp_ReduceSumPlus<T, warpSize>( tile, val);
// Wait for all partial reductions
if (lane==0) {
//printf("thd%d warp%d sum is %d\n", threadIdx.x, wid, val);
shared[wid]=val; // Write reduced value to shared memory
//printf("thd%d stored warp %d sum %d\n", threadIdx.x, wid, val);
}
__syncthreads(); // Wait for all partial reductions
// Reload the per-warp sums into the first warp's lanes; lanes beyond the
// number of warps in this block contribute zero.
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : (T)0;
if (wid > 0 ) return val ;
//Final reduce within first warp
if (wid==0) val = warp_ReduceSumPlus<T, warpSize>( tile, val) ;
return val;
}
// GB_AxB_cuda_dot3_phase2 is a CUDA kernel that takes as input the
// nanobuckets and blockbucket arrays computed by the first phase kernel,
// GB_AxB_cuda_dot3_phase1. The launch geometry of this kernel must match the
// GB_AxB_cuda_dot3_phase1 kernel, with the same # of threads and threadblocks.
__global__
void simple_nongrb_test(int i) {
}
__global__
void simple_grb_test(GrB_Matrix C) {
}
__global__
void AxB_phase2
(
// input, not modified:
int64_t *__restrict__ nanobuckets, // array of size 12-blockDim.x-by-nblocks
int64_t *__restrict__ blockbucket, // global bucket count, of size 12*nblocks
// output:
int64_t *__restrict__ bucketp, // global bucket cumsum, of size 13
int64_t *__restrict__ bucket, // global buckets, of size cnz (== mnz)
int64_t *__restrict__ offset, // global offsets, for each bucket
// inputs, not modified:
const int nblocks // input number of blocks to reduce
)
{
//printf("In AxB_phase2 kernel\n");
//printf("nanobuckets: %ld\n", nanobuckets[0]);
//--------------------------------------------------------------------------
// sum up the bucket counts of prior threadblocks
//--------------------------------------------------------------------------
// blockbucket is an array of size 12-by-nblocks, held by row. The
// entry blockbucket [bucket * nblocks + t] holds the # of entries
// in the bucket (in range 0 to 11) found by threadblock t.
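// (Added illustration) With, say, nblocks == 4, the counts for bucket 2 sit
// at blockbucket[2*4 + 0 .. 2*4 + 3]. Below, the atomic adds accumulate each
// bucket's grand total into offset[], and blockBucketExclusiveSum() then
// replaces each row of counts with its exclusive prefix sum, which is what
// AxB_phase2end later reads to find each threadblock's write offset.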
//__shared__ uint64_t offset [12] ;
uint64_t s_0=0;
uint64_t s_1=0;
uint64_t s_2=0;
uint64_t s_3=0;
uint64_t s_4=0;
uint64_t s_5=0;
uint64_t s_6=0;
uint64_t s_7=0;
uint64_t s_8=0;
uint64_t s_9=0;
uint64_t s_10=0;
uint64_t s_11=0;
thread_block_tile<32> tile = tiled_partition<32>(this_thread_block() );
//printf("block %d entering sum\n",blockIdx.x);
int tid = threadIdx.x + blockIdx.x*blockDim.x;
#define reduceBucket( B ) \
for( tid = threadIdx.x + blockIdx.x*blockDim.x; \
tid < nblocks; \
tid += blockDim.x*gridDim.x) \
{ \
s_ ## B += blockbucket[ B *nblocks +tid] ; \
} \
__syncthreads(); \
s_ ## B = warp_ReduceSumPlus<uint64_t , 32>( tile, s_ ## B);
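// (Added note) Each reduceBucket(B) below walks the nblocks per-threadblock
// counters for bucket B with a grid-stride loop, accumulating a per-thread
// partial sum, and then reduces the partials within the warp; lane 0 of the
// warp ends up with the warp's total, which thread 0 publishes to the global
// offset[] array via atomicAdd further down.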
reduceBucket( 0 )
reduceBucket( 1 )
reduceBucket( 2 )
reduceBucket( 3 )
reduceBucket( 4 )
reduceBucket( 5 )
reduceBucket( 6 )
reduceBucket( 7 )
reduceBucket( 8 )
reduceBucket( 9 )
reduceBucket( 10 )
reduceBucket( 11 )
//printf("summing blk,tid=%d,%d\n",blockIdx.x,threadIdx.x);
if (threadIdx.x ==0 )
{
atomicAdd( (unsigned long long int*)&(offset[0]), s_0);
atomicAdd( (unsigned long long int*)&(offset[1]), s_1);
atomicAdd( (unsigned long long int*)&(offset[2]), s_2);
atomicAdd( (unsigned long long int*)&(offset[3]), s_3);
atomicAdd( (unsigned long long int*)&(offset[4]), s_4);
atomicAdd( (unsigned long long int*)&(offset[5]), s_5);
atomicAdd( (unsigned long long int*)&(offset[6]), s_6);
atomicAdd( (unsigned long long int*)&(offset[7]), s_7);
atomicAdd( (unsigned long long int*)&(offset[8]), s_8);
atomicAdd( (unsigned long long int*)&(offset[9]), s_9);
atomicAdd( (unsigned long long int*)&(offset[10]),s_10);
atomicAdd( (unsigned long long int*)&(offset[11]),s_11);
}
__syncthreads();
if( gridDim.x >= 12)
{
// Cumulative sum across blocks for each bucket
if (blockIdx.x <12)
blockBucketExclusiveSum( blockIdx.x, blockbucket, nblocks ) ;
}
else
{
if (blockIdx.x == 0)
{
blockBucketExclusiveSum( 0, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 1, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 2, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 3, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 4, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 5, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 6, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 7, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 8, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 9, blockbucket, nblocks ) ;
blockBucketExclusiveSum( 10, blockbucket, nblocks) ;
blockBucketExclusiveSum( 11, blockbucket, nblocks) ;
}
}
//--------------------------------------------------------------------------
// last threadblock saves the cumsum of the 12 global buckets
//--------------------------------------------------------------------------
/* do on cpu
if (blockIdx.x == 0) // gridDim.x - 1)
{
// the last threadblock: compute all 12 global bucket sizes, and its
// cumulative sum
if (threadIdx.x == 0)
{
// the work in this last threadblock is single-threaded
uint64_t s = 0;
for (int bucket = 0 ; bucket < 12 ; bucket++)
{
// write the global cumsum of all buckets to the final global
// bucketp. bucketp [bucket] is the starting position in
// the bucket.
bucketp [bucket] = s ;
// bucket_size is the total # of entries in this bucket, for
// all threadblocks. It has nearly been computed already,
// since offset [bucket] = sum (blockbucket (bucket,0:blockDim.x-1)).
// All that is left is to add the counts for the last threadblock.
//int64_t global_bucket_size = offset [bucket];
// + blockbucket [bucket * gridDim.x + blockIdx.x] ;
//printf("bucketp[%d]= %ld\n",bucket, s);
// s is a cumulative sum of the global bucket sizes
s += offset[bucket]; // global_bucket_size ;
}
// The kth global bucket (for k = 0 to 11) appears in:
// bucket [bucketp [k]... bucketp [k+1]-1],
// so the end of the last bucket needs bucketp [12].
bucketp [12] = (int64_t)s;
//printf("bucketp[12]= %ld\n", s);
// all entries in C now appear in the buckets.
// ASSERT (s == cnz) ;
}
__syncthreads ( ) ;
}
*/
} // phase2
__global__
void AxB_phase2end
(
// input, not modified:
int64_t *__restrict__ nanobuckets, // array of size 12-blockDim.x-by-nblocks
const int64_t *__restrict__ blockbucket, // global bucket count, of size 12*nblocks
// output:
const int64_t *__restrict__ bucketp, // global bucket cumsum, of size 13
int64_t *__restrict__ bucket, // global buckets, of size cnz (== mnz)
const int64_t *__restrict__ offset, // global offsets, for each bucket
// inputs, not modified:
const GrB_Matrix C, // output matrix
const int64_t cnz // number of entries in C and M
)
{
//--------------------------------------------------------------------------
// get C and M
//--------------------------------------------------------------------------
// Ci [p] for an entry C(i,j) contains either GB_FLIP(i) if C(i,j) is a
// zombie, or (k << 4) + bucket otherwise, where C(:,j) is the kth vector
// of C (j = Ch [k] if hypersparse or j = k if standard sparse), and
// where bucket is the bucket assignment for C(i,j). This phase does not
// need k, just the bucket for each entry C(i,j).
int64_t *__restrict__ Ci = C->i ; // for zombies, or bucket assignment
int64_t *__restrict__ Mp = C->p ; // for offset calculations
int64_t mnvec = C->nvec;
//--------------------------------------------------------------------------
// load and shift the nanobuckets for this thread block
//--------------------------------------------------------------------------
// The taskbucket for this threadblock is an array of size
// 12-by-blockDim.x, held by row. It forms a 2D array within the 3D
// nanobuckets array.
int64_t *__restrict__ taskbucket = nanobuckets + blockIdx.x * (12 * blockDim.x) ;
//printf("block%d thd%d blockbucket= %ld\n", blockIdx.x, threadIdx.x,
// blockbucket[blockIdx.x*gridDim.x+blockIdx.x]);
// Each thread in this threadblock owns one column of this taskbucket, for
// its set of 12 nanobuckets. The nanobuckets are a column of length 12,
// with stride equal to blockDim.x.
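// (Added illustration) Concretely, thread t of threadblock b finds its count
// for bucket k at nanobuckets[b * (12 * blockDim.x) + k * blockDim.x + t],
// which is what nanobucket[k * blockDim.x] resolves to after the two pointer
// offsets applied above and just below.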
int64_t *__restrict__ nanobucket = taskbucket + threadIdx.x;
// Each thread loads its 12 nanobucket values into registers.
#define LOAD_NANOBUCKET(bucket) \
int64_t my_bucket_ ## bucket = \
nanobucket [bucket * blockDim.x] \
+ blockbucket [bucket * gridDim.x + blockIdx.x]\
+ bucketp [bucket] ;
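// (Added note) The resulting my_bucket_<k> is this thread's starting write
// position in the global bucket array: its own exclusive offset within the
// threadblock (nanobucket, assuming phase1 stored exclusive per-thread
// counts), plus this threadblock's exclusive offset among all blocks for
// bucket k (blockbucket, made exclusive in AxB_phase2), plus the global start
// of bucket k (bucketp).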
LOAD_NANOBUCKET (0) ;
LOAD_NANOBUCKET (1) ;
LOAD_NANOBUCKET (2) ;
LOAD_NANOBUCKET (3) ;
LOAD_NANOBUCKET (4) ;
LOAD_NANOBUCKET (5) ;
LOAD_NANOBUCKET (6) ;
LOAD_NANOBUCKET (7) ;
LOAD_NANOBUCKET (8) ;
LOAD_NANOBUCKET (9) ;
LOAD_NANOBUCKET (10) ;
LOAD_NANOBUCKET (11) ;
// Now each thread has an index into the global set of 12 buckets,
// held in bucket, of where to place its own entries.
//--------------------------------------------------------------------------
// construct the global buckets
//--------------------------------------------------------------------------
// The slice for task blockIdx.x contains entries pfirst:plast-1 of M and
// C, which is the part of C operated on by this threadblock.
int64_t pfirst, plast ;
/*
for ( int tid_global = threadIdx.x + blockIdx.x * blockDim.x ;
tid_global < (mnvec+7)/8 ;
tid_global += blockDim.x * gridDim.x)
*/
int chunk_max= (cnz + chunksize -1)/chunksize;
for ( int chunk = blockIdx.x;
chunk < chunk_max;
chunk += gridDim.x )
{
//GB_PARTITION (pfirst, plast, cnz, tid_global, (mnvec+7)/8 ) ;
pfirst = chunksize * chunk ;
plast = GB_IMIN( chunksize * (chunk+1), cnz ) ;
int chunk_end;
if ( cnz > chunksize) chunk_end = GB_IMIN( chunksize,
cnz - chunksize*(chunk) );
else chunk_end = cnz;
// find the first vector of the slice for task blockIdx.x: the
// vector that owns the entry Ai [pfirst] and Ax [pfirst].
//kfirst = GB_search_for_vector_device (pfirst, Mp, 0, mnvec) ;
// find the last vector of the slice for task blockIdx.x: the
// vector that owns the entry Ai [plast-1] and Ax [plast-1].
//klast = GB_search_for_vector_device (plast-1, Mp, kfirst, mnvec) ;
for ( int p = pfirst + threadIdx.x;
p < pfirst + chunk_end;
p += blockDim.x )
{
// get the entry C(i,j), and extract its bucket. Then
// place the entry C(i,j) in the global bucket it belongs to.
// TODO: these writes to global are not coalesced. Instead: each
// threadblock could buffer its writes to 12 buffers and when the
// buffers are full they can be written to global.
int ibucket = Ci[p] & 0xF;
//printf(" thd: %d p,Ci[p] = %ld,%ld,%d\n", threadIdx.x, p, Ci[p], irow );
switch (ibucket)
{
case 0: bucket [my_bucket_0++ ] = p ; Ci[p] = Ci[p] >>4; break ; //unshift zombies
case 1: bucket [my_bucket_1++ ] = p ; break ;
case 2: bucket [my_bucket_2++ ] = p ; break ;
case 3: bucket [my_bucket_3++ ] = p ; break ;
case 4: bucket [my_bucket_4++ ] = p ; break ;
case 5: bucket [my_bucket_5++ ] = p ; break ;
case 6: bucket [my_bucket_6++ ] = p ; break ;
case 7: bucket [my_bucket_7++ ] = p ; break ;
case 8: bucket [my_bucket_8++ ] = p ; break ;
case 9: bucket [my_bucket_9++ ] = p ; break ;
case 10: bucket [my_bucket_10++] = p ; break ;
case 11: bucket [my_bucket_11++] = p ; break ;
default: break;
}
}
//__syncthreads();
}
}
|
the_stack
|