#include "modules/perception/inference/tensorrt/plugins/kernels.h" #include "modules/perception/inference/tensorrt/plugins/rcnn_proposal_plugin.h" namespace apollo { namespace perception { namespace inference { // nthreads = num_rois __global__ void get_rois_nums_kernel(const int nthreads, const float *rois, int *batch_rois_nums) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int batch_id = (int)rois[index * 5]; if (batch_id >= 0) { atomicAdd(&batch_rois_nums[batch_id], 1); } } } // bbox_pred dims: [num_rois, box_len, num_class] // out_bbox_pred dims: [num_rois, num_class, box_len] __global__ void transpose_bbox_pred_kernel(const int nthreads, const float *bbox_pred, const int box_len, const int num_class, float *out_bbox_pred) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { int roi_id = index / num_class / box_len; int class_id = (index / box_len) % num_class; int feature_id = index % box_len; int in_index = roi_id * box_len * num_class + feature_id * num_class + class_id; out_bbox_pred[index] = bbox_pred[in_index]; } } // bbox_pred dims: [num_box, num_class+1, 4], // scores dims: [num_box, num_class+1], // out_bbox_pred dims: [num_box, 4] // out_scores dims: [num_box] __global__ void get_max_score_kernel(const int nthreads, const float *bbox_pred, const float *scores, const int num_class, const float threshold_objectness, const float *class_thresholds, float *out_bbox_pred, float *out_scores, float *out_all_probs, int *filter_count) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= nthreads) { return; } int box_id = index; if ((1.0f - scores[box_id * (num_class + 1)]) < threshold_objectness) { return; } float score_max = -FLT_MAX; int cls_max = -1; for (int c = 0; c < num_class; ++c) { float score = scores[box_id * (num_class + 1) + c + 1] - class_thresholds[c]; if (score > score_max) { score_max = score; cls_max = c; } } if (score_max < 0) { return; } else { int counter = atomicAdd(filter_count, 1); int box_cls_id = box_id * (num_class + 1) + cls_max + 1; for (int i = 0; i < 4; ++i) { out_bbox_pred[counter * 4 + i] = bbox_pred[box_cls_id * 4 + i]; } out_scores[counter] = scores[box_cls_id]; for (int i = 0; i < num_class + 1; ++i) { out_all_probs[counter * (num_class + 1) + i] = scores[box_id * (num_class + 1) + i]; } } } int RCNNProposalPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { // cls_score_softmax dims: [num_rois, 4, 1, 1] const float *cls_score_softmax = reinterpret_cast<const float *>(inputs[0]); // bbox_pred dims: [num_rois, 4 * 4 (num_class * box_dim), 1, 1] const float *bbox_pred = reinterpret_cast<const float *>(inputs[1]); // rois dims: [num_rois, 5, 1, 1] const float *rois = reinterpret_cast<const float *>(inputs[2]); // im_info dims: [N, 6, 1, 1] const float *im_info = reinterpret_cast<const float *>(inputs[3]); // output dims: [num_result_box, 9] (axis-1: batch_id, x1, y1, x2, y2, // unknown_score, class1_score, class2_score, class3_score) float *result_boxes = reinterpret_cast<float *>(outputs[0]); int cls_score_softmax_size = num_rois_ * 4; int bbox_pred_size = num_rois_ * 4 * 4; int output_size = batchSize * top_n_ * out_channel_; // Using thrust::fill might cause crash float *init_result_boxes = new float[output_size](); std::fill_n(init_result_boxes, output_size, -1.0f); BASE_CUDA_CHECK(cudaMemcpyAsync(result_boxes, init_result_boxes, output_size * sizeof(float), cudaMemcpyHostToDevice, stream)); float *host_im_info = new 
float[batchSize * 6](); BASE_CUDA_CHECK(cudaMemcpyAsync(host_im_info, im_info, batchSize * 6 * sizeof(float), cudaMemcpyDeviceToHost, stream)); float origin_height = host_im_info[0]; float origin_width = host_im_info[1]; float scale = host_im_info[2]; int nthreads, block_size; // TODO(chenjiahao): filter roi that has img_id == -1 at first float *host_thresholds = new float[num_class_]; for (int i = 0; i < num_class_; ++i) { host_thresholds[i] = thresholds_[i]; } float *dev_thresholds; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_thresholds), num_class_ * sizeof(float))); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_thresholds, host_thresholds, num_class_ * sizeof(float), cudaMemcpyHostToDevice, stream)); // Normalize bbox_pred float *dev_bbox_mean, *dev_bbox_std; float *norm_bbox_pred; BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&dev_bbox_mean), 4 * sizeof(float))); BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&dev_bbox_std), 4 * sizeof(float))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&norm_bbox_pred), bbox_pred_size * sizeof(float))); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_bbox_mean, bbox_mean_, 4 * sizeof(float), cudaMemcpyHostToDevice, stream)); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_bbox_std, bbox_std_, 4 * sizeof(float), cudaMemcpyHostToDevice, stream)); BASE_CUDA_CHECK(cudaMemcpyAsync(norm_bbox_pred, bbox_pred, bbox_pred_size * sizeof(float), cudaMemcpyDeviceToDevice, stream)); nthreads = bbox_pred_size; block_size = DIVUP(nthreads, thread_size_); repeatedly_mul_cuda(block_size, thread_size_, 0, stream, nthreads, norm_bbox_pred, norm_bbox_pred, dev_bbox_std, 4); repeatedly_add_cuda(block_size, thread_size_, 0, stream, nthreads, norm_bbox_pred, norm_bbox_pred, dev_bbox_mean, 4); // Slice rois int slice_axis[4] = {1, 2, 3, 4}; int *dev_slice_axis; float *sliced_rois; BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&dev_slice_axis), 4 * sizeof(int))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&sliced_rois), num_rois_ * 4 * sizeof(float))); BASE_CUDA_CHECK(cudaMemcpyAsync(dev_slice_axis, slice_axis, 4 * sizeof(int), cudaMemcpyHostToDevice, stream)); nthreads = num_rois_ * 4; block_size = DIVUP(nthreads, thread_size_); slice2d_cuda(block_size, thread_size_, 0, stream, nthreads, rois, sliced_rois, dev_slice_axis, 4, 5); // Decode bbox float *decoded_bbox_pred; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&decoded_bbox_pred), bbox_pred_size * sizeof(float))); BASE_CUDA_CHECK(cudaMemsetAsync(decoded_bbox_pred, 0, bbox_pred_size * sizeof(float), stream)); nthreads = num_rois_ * 4; block_size = DIVUP(nthreads, thread_size_); bbox_transform_inv_cuda(block_size, thread_size_, 0, stream, nthreads, sliced_rois, norm_bbox_pred, num_rois_, 4, decoded_bbox_pred); // Refine boxes that are out of map if (refine_out_of_map_bbox_) { nthreads = bbox_pred_size; block_size = DIVUP(nthreads, thread_size_); clip_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, decoded_bbox_pred, origin_height, origin_width); } // Separate data by batch_id int *batch_rois_nums = new int[batchSize](); int *dev_batch_rois_nums; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&dev_batch_rois_nums), batchSize * sizeof(int))); BASE_CUDA_CHECK( cudaMemsetAsync(dev_batch_rois_nums, 0, batchSize * sizeof(int), stream)); nthreads = num_rois_; block_size = DIVUP(nthreads, thread_size_); get_rois_nums_kernel<<<block_size, thread_size_, 0, stream>>>( nthreads, rois, dev_batch_rois_nums); BASE_CUDA_CHECK(cudaMemcpyAsync(batch_rois_nums, dev_batch_rois_nums, batchSize * 
sizeof(int), cudaMemcpyDeviceToHost, stream)); float *max_bbox, *max_score, *max_all_probs; int *max_filtered_count; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&max_bbox), max_candidate_n_ * 4 * sizeof(float))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&max_score), max_candidate_n_ * sizeof(float))); BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&max_all_probs), max_candidate_n_ * (num_class_ + 1) * sizeof(float))); BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&max_filtered_count), sizeof(int))); float *filtered_bbox, *filtered_score, *filtered_all_probs; int *filtered_count; int host_filtered_count; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_bbox), max_candidate_n_ * 4 * sizeof(float))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&filtered_score), max_candidate_n_ * sizeof(float))); BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&filtered_all_probs), max_candidate_n_ * (num_class_ + 1) * sizeof(float))); BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&filtered_count), sizeof(int))); int *sorted_indexes; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&sorted_indexes), max_candidate_n_ * sizeof(int))); float *pre_nms_bbox, *pre_nms_score, *pre_nms_all_probs; BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&pre_nms_bbox), max_candidate_n_ * 4 * sizeof(float))); BASE_CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&pre_nms_score), max_candidate_n_ * sizeof(float))); BASE_CUDA_CHECK( cudaMalloc(reinterpret_cast<void **>(&pre_nms_all_probs), max_candidate_n_ * (num_class_ + 1) * sizeof(float))); int cur_ptr = 0; acc_box_num_ = 0; for (int batch_id = 0; batch_id < batchSize; ++batch_id) { // TODO(chenjiahao): replace 300 with input dims cur_ptr = batch_id * 300; BASE_CUDA_CHECK(cudaMemsetAsync( max_bbox, 0, max_candidate_n_ * 4 * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync(max_score, 0, max_candidate_n_ * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync( max_all_probs, 0, max_candidate_n_ * (num_class_ + 1) * sizeof(float), stream)); BASE_CUDA_CHECK( cudaMemsetAsync(max_filtered_count, 0, sizeof(int), stream)); // Get max score among classes and filter with threshold nthreads = batch_rois_nums[batch_id]; block_size = DIVUP(nthreads, thread_size_); get_max_score_kernel<<<block_size, thread_size_, 0, stream>>>( nthreads, decoded_bbox_pred + size_t(cur_ptr * (num_class_ + 1) * 4), cls_score_softmax + size_t(cur_ptr * (num_class_ + 1)), num_class_, threshold_objectness_, dev_thresholds, max_bbox, max_score, max_all_probs, max_filtered_count); int host_max_filtered_count = 0; BASE_CUDA_CHECK(cudaMemcpyAsync(&host_max_filtered_count, max_filtered_count, sizeof(int), cudaMemcpyDeviceToHost, stream)); if (host_max_filtered_count == 0) { continue; } BASE_CUDA_CHECK(cudaMemsetAsync( filtered_bbox, 0, max_candidate_n_ * 4 * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync(filtered_score, 0, max_candidate_n_ * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync( filtered_all_probs, 0, max_candidate_n_ * (num_class_ + 1) * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync(filtered_count, 0, sizeof(int), stream)); // Filter boxes according to min_size_mode nthreads = host_max_filtered_count; block_size = DIVUP(nthreads, thread_size_); filter_boxes_cuda(block_size, thread_size_, 0, stream, nthreads, max_bbox, max_score, max_all_probs, host_max_filtered_count, 1, 1, num_class_ + 1, 0, 0, min_size_mode_, min_size_h_, min_size_w_, 0.0f, filtered_bbox, filtered_score, 
filtered_all_probs, filtered_count); BASE_CUDA_CHECK(cudaMemcpyAsync(&host_filtered_count, filtered_count, sizeof(int), cudaMemcpyDeviceToHost, stream)); if (host_filtered_count == 0) { continue; } // Descending sort proposals by score thrust::sequence(thrust::device, sorted_indexes, sorted_indexes + host_filtered_count); thrust::sort_by_key(thrust::device, filtered_score, filtered_score + size_t(host_filtered_count), sorted_indexes, thrust::greater<float>()); BASE_CUDA_CHECK(cudaMemsetAsync( pre_nms_bbox, 0, max_candidate_n_ * 4 * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync(pre_nms_score, 0, max_candidate_n_ * sizeof(float), stream)); BASE_CUDA_CHECK(cudaMemsetAsync( pre_nms_all_probs, 0, max_candidate_n_ * (num_class_ + 1) * sizeof(float), stream)); // Keep max N candidates nthreads = std::min(max_candidate_n_, host_filtered_count); block_size = DIVUP(nthreads, thread_size_); keep_topN_boxes_cuda( block_size, thread_size_, 0, stream, nthreads, filtered_bbox, filtered_score, filtered_all_probs, sorted_indexes, filtered_count, rpn_proposal_output_score_, max_candidate_n_, num_class_ + 1, max_candidate_n_, pre_nms_bbox, pre_nms_score, pre_nms_all_probs); // NMS int cur_filter_count = std::min(host_filtered_count, max_candidate_n_); NmsForward(rpn_proposal_output_score_, cur_filter_count, 4, overlap_ratio_, max_candidate_n_, top_n_, batch_id, num_class_ + 1, pre_nms_bbox, pre_nms_score, pre_nms_all_probs, result_boxes + size_t(acc_box_num_ * out_channel_), &acc_box_num_, stream); } // TODO(chenjiahao): rescale bbox // Free device memory BASE_CUDA_CHECK(cudaFree(dev_thresholds)); BASE_CUDA_CHECK(cudaFree(dev_bbox_mean)); BASE_CUDA_CHECK(cudaFree(dev_bbox_std)); BASE_CUDA_CHECK(cudaFree(norm_bbox_pred)); BASE_CUDA_CHECK(cudaFree(dev_slice_axis)); BASE_CUDA_CHECK(cudaFree(sliced_rois)); BASE_CUDA_CHECK(cudaFree(decoded_bbox_pred)); BASE_CUDA_CHECK(cudaFree(dev_batch_rois_nums)); BASE_CUDA_CHECK(cudaFree(max_bbox)); BASE_CUDA_CHECK(cudaFree(max_score)); BASE_CUDA_CHECK(cudaFree(max_filtered_count)); BASE_CUDA_CHECK(cudaFree(filtered_bbox)); BASE_CUDA_CHECK(cudaFree(filtered_score)); BASE_CUDA_CHECK(cudaFree(filtered_count)); BASE_CUDA_CHECK(cudaFree(sorted_indexes)); BASE_CUDA_CHECK(cudaFree(pre_nms_bbox)); BASE_CUDA_CHECK(cudaFree(pre_nms_score)); // Free host memory delete[] init_result_boxes; delete[] host_im_info; delete[] host_thresholds; delete[] batch_rois_nums; } } // namespace inference } // namespace perception } // namespace apollo
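The filtering stage above compacts surviving boxes by having each passing thread reserve an output slot with atomicAdd on a device-side counter, which the host then reads back. Below is a minimal, self-contained sketch of that pattern, not the plugin itself; the names (filter_scores_kernel, the 0.5f threshold, the buffer sizes) are illustrative. The sketch uses blocking cudaMemcpy, so the counter is valid as soon as the copy returns; with cudaMemcpyAsync on a stream, as in enqueue(), the stream must be synchronized before the host value is read.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void filter_scores_kernel(const float *scores, int n,
                                     float threshold, float *out_scores,
                                     int *out_count) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float s = scores[i];
  if (s < threshold) return;
  // Reserve a unique slot; output order is non-deterministic, which is fine
  // when the survivors are sorted by score in a later pass anyway.
  int slot = atomicAdd(out_count, 1);
  out_scores[slot] = s;
}

int main() {
  const int n = 1024;
  float h_scores[n];
  for (int i = 0; i < n; ++i) h_scores[i] = (i % 10) * 0.1f;

  float *d_scores, *d_out;
  int *d_count;
  cudaMalloc(&d_scores, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMalloc(&d_count, sizeof(int));
  cudaMemcpy(d_scores, h_scores, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset(d_count, 0, sizeof(int));

  filter_scores_kernel<<<(n + 255) / 256, 256>>>(d_scores, n, 0.5f, d_out,
                                                 d_count);

  // Blocking cudaMemcpy implicitly synchronizes, so the count is valid here.
  int h_count = 0;
  cudaMemcpy(&h_count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
  printf("kept %d of %d scores\n", h_count, n);

  cudaFree(d_scores);
  cudaFree(d_out);
  cudaFree(d_count);
  return 0;
}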
* cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a cudaThreadSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. Sample code * is the easiest way to demonstrate: * #include "cuPrintf.cu" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "cuPrintf.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_11_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. 
// NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if(!printfBufferPtr) return NULL; // Thread/block restriction check if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) return NULL; if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) return NULL; // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if(thread_buf_len < (CUPRINTF_MAX_LEN * 2)) return NULL; // Now address our section of the buffer. The first item is a header. char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if(hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! 
We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if(offset >= hdr.thread_buf_len) offset = CUPRINTF_MAX_LEN; // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. // __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if(ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if(!dest || !src || (dest >= end)) return NULL; // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while(n--) { if(dest >= end) // Overflow check break; len++; *dest++ = *src; if(*src++ == '\0') break; } // Now write out the padding bytes, and we have our length. while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. 
// __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if(!ptr || !arg) return NULL; // strncpy does all our work. We just terminate. if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) *ptr = 0; return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) return NULL; // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? 
(int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. 
// We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) restrictRules.threadid = threadid; int block_count = gridDim.x * gridDim.y; if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) restrictRules.blockid = blockid; } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while(p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if(*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. int arglen = *(int *)data; if(arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch(specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if(arglen == 4) // Float vs. Double thing fprintf(printf_fp, format, *((float *)data)); else fprintf(printf_fp, format, *((double *)data)); break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, "%s", format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. 
// static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while(bufptr != endptr) { // Wrap ourselves at the end-of-buffer if(bufptr == bufend) bufptr = bufstart; // Adjust our start pointer to within the circular buffer and copy a block. cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if(headings) fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); if(hdr->fmtoffset == 0) fprintf(printf_fp, "printf buffer overflow\n"); else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) break; printf_count++; // Clear if asked if(clear) cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN); // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" cudaError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen; if((bufferLen % CUPRINTF_MAX_LEN) > 0) bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess) return cudaErrorInitializationError; cudaMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return cudaSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if(!printfbuf_start || !printfbuf_device) return; cudaFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. 
// // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if(!printfbuf_start || !printfbuf_device || !printf_fp) return cudaErrorMissingConfiguration; // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if(magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while(blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost); // We get our block-size-step from the very first header if(hdr.thread_buf_len != 0) blocklen = hdr.thread_buf_len; // No magic number means no printfs from this thread if(hdr.magic != CUPRINTF_SM10_MAGIC) { if(blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! } blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if(hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if(magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". 
if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if(sync_printfs) cudaMemset(printfbuf_device, 0, printfbuf_len); return cudaSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
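On sm_11 and newer parts, getNextPrintfBufPtr reserves buffer space by atomically advancing a non-wrapped offset and reducing it modulo the buffer length; because every reservation is exactly CUPRINTF_MAX_LEN bytes and the buffer length is a multiple of that, a record never straddles the wrap point. The following is a minimal sketch of just that reservation scheme, not cuPrintf itself; the names (g_buffer, g_cursor, claim_record, write_records) are illustrative.

#include <cstdio>
#include <cuda_runtime.h>

#define RECORD_LEN 256u  // plays the role of CUPRINTF_MAX_LEN: fixed-size claims

__device__ char *g_buffer;             // start of the circular buffer
__device__ unsigned int g_buffer_len;  // must be a multiple of RECORD_LEN
__device__ unsigned int g_cursor;      // non-wrapped, atomically bumped offset

// Every claim is exactly RECORD_LEN bytes and the buffer length is a multiple
// of RECORD_LEN, so a claimed record never straddles the wrap point.
__device__ char *claim_record() {
  unsigned int offset = atomicAdd(&g_cursor, RECORD_LEN);
  return g_buffer + (offset % g_buffer_len);
}

__global__ void write_records() {
  char *slot = claim_record();
  slot[0] = (char)(threadIdx.x + 1);  // a real caller writes a header + payload
}

int main() {
  const unsigned int len = 16 * RECORD_LEN;
  char *dev_buf = nullptr;
  cudaMalloc(&dev_buf, len);
  cudaMemset(dev_buf, 0, len);

  unsigned int zero = 0;
  cudaMemcpyToSymbol(g_buffer, &dev_buf, sizeof(dev_buf));
  cudaMemcpyToSymbol(g_buffer_len, &len, sizeof(len));
  cudaMemcpyToSymbol(g_cursor, &zero, sizeof(zero));

  write_records<<<1, 16>>>();  // 16 claims exactly fill the buffer once
  cudaDeviceSynchronize();

  char first = 0;
  cudaMemcpy(&first, dev_buf, 1, cudaMemcpyDeviceToHost);
  printf("first byte of record 0: %d\n", (int)first);

  cudaFree(dev_buf);
  return 0;
}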
#include <stdio.h> #include <ATen/ATen.h> #include <cuda.h> #include <iostream> #include <cuda_runtime.h> __device__ __forceinline__ float atomicMax(float *address, float val) { int ret = __float_as_int(*address); while(val > __int_as_float(ret)) { int old = ret; if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) break; } return __int_as_float(ret); } __global__ void clear(int b, int * cnt_tmp, int * unass_cnt) { for (int i = threadIdx.x; i < b; i += blockDim.x) { cnt_tmp[i] = 0; unass_cnt[i] = 0; } } __global__ void calc_unass_cnt(int b, int n, int * assignment, int * unass_cnt) { // count the number of unassigned points in each batch const int BLOCK_SIZE = 1024; __shared__ int scan_array[BLOCK_SIZE]; for (int i = blockIdx.x; i < b; i += gridDim.x) { scan_array[threadIdx.x] = assignment[i * n + blockIdx.y * BLOCK_SIZE + threadIdx.x] == -1 ? 1 : 0; __syncthreads(); int stride = 1; while(stride <= BLOCK_SIZE / 2) { int index = (threadIdx.x + 1) * stride * 2 - 1; if(index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride * 2; __syncthreads(); } __syncthreads(); if (threadIdx.x == BLOCK_SIZE - 1) { atomicAdd(&unass_cnt[i], scan_array[threadIdx.x]); } __syncthreads(); } } __global__ void calc_unass_cnt_sum(int b, int * unass_cnt, int * unass_cnt_sum) { // count the cumulative sum over over unass_cnt const int BLOCK_SIZE = 512; // batch_size <= 512 __shared__ int scan_array[BLOCK_SIZE]; scan_array[threadIdx.x] = unass_cnt[threadIdx.x]; __syncthreads(); int stride = 1; while(stride <= BLOCK_SIZE / 2) { int index = (threadIdx.x + 1) * stride * 2 - 1; if(index < BLOCK_SIZE) scan_array[index] += scan_array[index - stride]; stride = stride * 2; __syncthreads(); } __syncthreads(); stride = BLOCK_SIZE / 4; while(stride > 0) { int index = (threadIdx.x + 1) * stride * 2 - 1; if((index + stride) < BLOCK_SIZE) scan_array[index + stride] += scan_array[index]; stride = stride / 2; __syncthreads(); } __syncthreads(); //printf("%d\n", unass_cnt_sum[b - 1]); unass_cnt_sum[threadIdx.x] = scan_array[threadIdx.x]; } __global__ void calc_unass_idx(int b, int n, int * assignment, int * unass_idx, int * unass_cnt, int * unass_cnt_sum, int * cnt_tmp) { // list all the unassigned points for (int i = blockIdx.x; i < b; i += gridDim.x) { if (assignment[i * n + blockIdx.y * 1024 + threadIdx.x] == -1) { int idx = atomicAdd(&cnt_tmp[i], 1); unass_idx[unass_cnt_sum[i] - unass_cnt[i] + idx] = blockIdx.y * 1024 + threadIdx.x; } } } __global__ void Bid(int b, int n, const float * xyz1, const float * xyz2, float eps, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * unass_cnt, int * unass_cnt_sum, int * unass_idx) { const int batch = 2048, block_size = 1024, block_cnt = n / 1024; __shared__ float xyz2_buf[batch * 3]; __shared__ float price_buf[batch]; __shared__ float best_buf[block_size]; __shared__ float better_buf[block_size]; __shared__ int best_i_buf[block_size]; for (int i = blockIdx.x; i < b; i += gridDim.x) { int _unass_cnt = unass_cnt[i]; if (_unass_cnt == 0) continue; int _unass_cnt_sum = unass_cnt_sum[i]; int unass_per_block = (_unass_cnt + block_cnt - 1) / block_cnt; int thread_per_unass = block_size / unass_per_block; int unass_this_block = max(min(_unass_cnt - (int) blockIdx.y * unass_per_block, unass_per_block), 0); float x1, y1, z1, best = -1e9, better = -1e9; int best_i = -1, _unass_id = -1, thread_in_unass; if (threadIdx.x < thread_per_unass * unass_this_block) { _unass_id = unass_per_block * 
blockIdx.y + threadIdx.x / thread_per_unass + _unass_cnt_sum - _unass_cnt; _unass_id = unass_idx[_unass_id]; thread_in_unass = threadIdx.x % thread_per_unass; x1 = xyz1[(i * n + _unass_id) * 3 + 0]; y1 = xyz1[(i * n + _unass_id) * 3 + 1]; z1 = xyz1[(i * n + _unass_id) * 3 + 2]; } for (int k2 = 0; k2 < n; k2 += batch) { int end_k = min(n, k2 + batch) - k2; for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x) { xyz2_buf[j] = xyz2[(i * n + k2) * 3 + j]; } for (int j = threadIdx.x; j < end_k; j += blockDim.x) { price_buf[j] = price[i * n + k2 + j]; } __syncthreads(); if (_unass_id != -1) { int delta = (end_k + thread_per_unass - 1) / thread_per_unass; int l = thread_in_unass * delta; int r = min((thread_in_unass + 1) * delta, end_k); for (int k = l; k < r; k++) //if (!last || assignment_inv[i * n + k + k2] == -1) { float x2 = xyz2_buf[k * 3 + 0] - x1; float y2 = xyz2_buf[k * 3 + 1] - y1; float z2 = xyz2_buf[k * 3 + 2] - z1; // the coordinates of points should be normalized to [0, 1] float d = 3.0 - sqrtf(x2 * x2 + y2 * y2 + z2 * z2) - price_buf[k]; if (d > best) { better = best; best = d; best_i = k + k2; } else if (d > better) { better = d; } } } __syncthreads(); } best_buf[threadIdx.x] = best; better_buf[threadIdx.x] = better; best_i_buf[threadIdx.x] = best_i; __syncthreads(); if (_unass_id != -1 && thread_in_unass == 0) { for (int j = threadIdx.x + 1; j < threadIdx.x + thread_per_unass; j++) { if (best_buf[j] > best) { better = max(best, better_buf[j]); best = best_buf[j]; best_i = best_i_buf[j]; } else better = max(better, best_buf[j]); } bid[i * n + _unass_id] = best_i; bid_increments[i * n + _unass_id] = best - better + eps; atomicMax(&max_increments[i * n + best_i], best - better + eps); } } } __global__ void GetMax(int b, int n, int * assignment, int * bid, float * bid_increments, float * max_increments, int * max_idx) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; if (assignment[i * n + j] == -1) { int bid_id = bid[i * n + j]; float bid_inc = bid_increments[i * n + j]; float max_inc = max_increments[i * n + bid_id]; if (bid_inc - 1e-6 <= max_inc && max_inc <= bid_inc + 1e-6) { max_idx[i * n + bid_id] = j; } } } } __global__ void Assign(int b, int n, int * assignment, int * assignment_inv, float * price, int * bid, float * bid_increments, float * max_increments, int * max_idx, bool last) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; if (assignment[i * n + j] == -1) { int bid_id = bid[i * n + j]; if (last || max_idx[i * n + bid_id] == j) { float bid_inc = bid_increments[i * n + j]; int ass_inv = assignment_inv[i * n + bid_id]; if (!last && ass_inv != -1) { assignment[i * n + ass_inv] = -1; } assignment_inv[i * n + bid_id] = j; assignment[i * n + j] = bid_id; price[i * n + bid_id] += bid_inc; max_increments[i * n + bid_id] = -1e9; } } } } __global__ void CalcDist(int b, int n, float * xyz1, float * xyz2, float * dist, int * assignment) { for (int i = blockIdx.x; i < b; i += gridDim.x) { int j = threadIdx.x + blockIdx.y * blockDim.x; int k = assignment[i * n + j]; float deltax = xyz1[(i * n + j) * 3 + 0] - xyz2[(i * n + k) * 3 + 0]; float deltay = xyz1[(i * n + j) * 3 + 1] - xyz2[(i * n + k) * 3 + 1]; float deltaz = xyz1[(i * n + j) * 3 + 2] - xyz2[(i * n + k) * 3 + 2]; dist[i * n + j] = deltax * deltax + deltay * deltay + deltaz * deltaz; } } int emd_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist, at::Tensor assignment, at::Tensor price, at::Tensor assignment_inv, 
at::Tensor bid, at::Tensor bid_increments, at::Tensor max_increments, at::Tensor unass_idx, at::Tensor unass_cnt, at::Tensor unass_cnt_sum, at::Tensor cnt_tmp, at::Tensor max_idx, float eps, int iters) { const auto batch_size = xyz1.size(0); const auto n = xyz1.size(1); //num_points point cloud A const auto m = xyz2.size(1); //num_points point cloud B if (n != m) { printf("Input Error! The two point clouds should have the same size.\n"); return -1; } if (batch_size > 512) { printf("Input Error! The batch size should be less than 512.\n"); return -1; } if (n % 1024 != 0) { printf("Input Error! The size of the point clouds should be a multiple of 1024.\n"); return -1; } //cudaEvent_t start,stop; //cudaEventCreate(&start); //cudaEventCreate(&stop); //cudaEventRecord(start); //int iters = 50; for (int i = 0; i < iters; i++) { clear<<<1, batch_size>>>(batch_size, cnt_tmp.data<int>(), unass_cnt.data<int>()); calc_unass_cnt<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), unass_cnt.data<int>()); calc_unass_cnt_sum<<<1, batch_size>>>(batch_size, unass_cnt.data<int>(), unass_cnt_sum.data<int>()); calc_unass_idx<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), unass_idx.data<int>(), unass_cnt.data<int>(), unass_cnt_sum.data<int>(), cnt_tmp.data<int>()); Bid<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), eps, assignment.data<int>(), assignment_inv.data<int>(), price.data<float>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), unass_cnt.data<int>(), unass_cnt_sum.data<int>(), unass_idx.data<int>()); GetMax<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>()); Assign<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, assignment.data<int>(), assignment_inv.data<int>(), price.data<float>(), bid.data<int>(), bid_increments.data<float>(), max_increments.data<float>(), max_idx.data<int>(), i == iters - 1); } CalcDist<<<dim3(batch_size, n / 1024, 1), 1024>>>(batch_size, n, xyz1.data<float>(), xyz2.data<float>(), dist.data<float>(), assignment.data<int>()); //cudaEventRecord(stop); //cudaEventSynchronize(stop); //float elapsedTime; //cudaEventElapsedTime(&elapsedTime,start,stop); //printf("%lf\n", elapsedTime); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in nnd Output: %s\n", cudaGetErrorString(err)); return 0; } return 1; } __global__ void NmDistanceGradKernel(int b, int n, const float * xyz1, const float * xyz2, const float * grad_dist, const int * idx, float * grad_xyz){ for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y) { float x1 = xyz1[(i * n + j) * 3 + 0]; float y1 = xyz1[(i * n + j) * 3 + 1]; float z1 = xyz1[(i * n + j) * 3 + 2]; int j2 = idx[i * n + j]; float x2 = xyz2[(i * n + j2) * 3 + 0]; float y2 = xyz2[(i * n + j2) * 3 + 1]; float z2 = xyz2[(i * n + j2) * 3 + 2]; float g = grad_dist[i * n + j] * 2; atomicAdd(&(grad_xyz[(i * n + j) * 3 + 0]), g * (x1 - x2)); atomicAdd(&(grad_xyz[(i * n + j) * 3 + 1]), g * (y1 - y2)); atomicAdd(&(grad_xyz[(i * n + j) * 3 + 2]), g * (z1 - z2)); } } } int emd_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz, at::Tensor graddist, at::Tensor idx){ const auto batch_size = xyz1.size(0); const auto n = xyz1.size(1); const auto m = xyz2.size(1); 
  NmDistanceGradKernel<<<dim3(batch_size, n / 1024, 1), 1024>>>(
      batch_size, n, xyz1.data<float>(), xyz2.data<float>(),
      graddist.data<float>(), idx.data<int>(), gradxyz.data<float>());
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
    return 0;
  }
  return 1;
}
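The Bid kernel above needs an atomic maximum on float prices, which CUDA does not provide natively, so the file defines one as an atomicCAS retry loop over the value's bit pattern. The sketch below isolates that technique with a small test harness; atomicMaxFloat mirrors the file's atomicMax overload, while max_kernel and the 1024-element input are hypothetical.

#include <cstdio>
#include <cuda_runtime.h>

__device__ __forceinline__ float atomicMaxFloat(float *address, float val) {
  int ret = __float_as_int(*address);
  while (val > __int_as_float(ret)) {
    int old = ret;
    // If nobody changed *address since we read it, install our value;
    // otherwise retry against the value that beat us there.
    if ((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
      break;
  }
  return __int_as_float(ret);
}

__global__ void max_kernel(const float *in, int n, float *out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) atomicMaxFloat(out, in[i]);
}

int main() {
  const int n = 1024;
  float h_in[n];
  for (int i = 0; i < n; ++i) h_in[i] = 0.001f * i;

  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  float init = -1e9f;  // same "minus infinity" sentinel the Bid kernel uses
  cudaMemcpy(d_out, &init, sizeof(float), cudaMemcpyHostToDevice);

  max_kernel<<<(n + 255) / 256, 256>>>(d_in, n, d_out);
  float h_out = 0.f;
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("max = %f (expected %f)\n", h_out, 0.001f * (n - 1));

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}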
#include "CollisionGrid.cuh" #include "ConstraintsInfo.h" #include "CudaConstraints.cuh" #include "CudaData.cuh" #include "CudaSolver.h" #include "CudaUtils.cuh" #include <fstream> #include <thrust/binary_search.h> #include <thrust/gather.h> #define MIN_RADIUS 0.001f #define COLLISION_MARGIN 0.002f namespace viper { struct CudaConstraints { thrust::device_vector<C_skinning> skinning; thrust::device_vector<C_distance> dist; thrust::device_vector<C_distancemax> distmax; thrust::device_vector<C_volume> vol; thrust::device_vector<C_volume2> vol2; thrust::device_vector<C_bend> bend; thrust::device_vector<C_stretch> stretch; thrust::device_vector<C_radius> radius; thrust::device_vector<C_shape> shape; thrust::device_vector<C_shape2> shape2; thrust::device_vector<C_touch> touch; thrust::device_vector<C_bilap> bilap; thrust::device_vector<C_collpp> collpp; thrust::device_vector<C_collision> collision; }; struct CudaSolverData { CudaSimData S; // state CudaProjections Pc; // projections per constraint CudaProjections Pp; // projections per particle CudaProjections Pt; // projections per particle temp CudaConstraints C; // constraints CollisionGrid cgrid; thrust::device_vector<int> c_perm; }; struct floor_friction { CudaStatePtr state; floor_friction(CudaStatePtr S) : state(S) {} __device__ void operator()(int i) const { if (state.xa[i] == 0) return; bool isTouching = state.x[i][1] - state.r[i] < 1e-6f; if (state.w[i] > 1e-6f && isTouching) { float pen = min(0.f, state.x[i][1] - state.r[i]); Vec3 dx = state.x[i] - state.xp[i]; Vec3 tandx = Vec3(dx[0], 0.f, dx[2]); float tan_norm = tandx.norm(); float mu_s = 0.01; float mu_k = 3.0; float factor = 0.99f; float d = abs(pen); if (tan_norm > mu_s * d) factor = min(0.99, mu_k * d / tan_norm); state.x[i][1] -= pen; // normal y state.x[i][0] -= tandx[0] * factor; // tangential x state.x[i][2] -= tandx[2] * factor; // tangential z } float wall = 20.0f; float newx = max(-wall, min(wall, state.x[i][0])); float newz = max(-wall, min(wall, state.x[i][2])); } }; struct V_integration { CudaStatePtr state; float dt; Vec3 gravity; float damping; V_integration(CudaStatePtr S, float dt, Vec3 g, float damping) : state(S), dt(dt), gravity(g), damping(damping) {} __device__ void operator()(int i) const { if (state.xa[i] == 0) return; Vec3 v = Vec3::Zero(); if (state.w[i] > 1e-6f) v = ((state.x[i] - state.xp[i]) / dt + dt * gravity) * damping; state.xp[i] = state.x[i]; state.x[i] += dt * v; float dr = 0.f; if (state.wr[i] > 1e-6f) dr = (state.r[i] - state.rp[i]) * damping; state.rp[i] = state.r[i]; state.r[i] += dr; } }; struct Vq_integration { CudaStatePtr state; float dt; float damping; Vq_integration(CudaStatePtr S, float dt, float damping) : state(S), dt(dt), damping(damping) {} __device__ void operator()(int i) const { if (state.qa[i] == 0) return; Vec3 vq = 2.f / dt * (state.qp[i].conjugate() * state.q[i]).vec(); vq *= damping; Quaternion vqq; vqq.w() = 0.f; vqq.vec() = vq; state.qp[i] = state.q[i]; state.q[i] = state.qp[i].coeffs() + 0.5f * dt * (state.qp[i] * vqq).coeffs(); state.q[i].normalize(); } }; struct bend_damping { CudaStatePtr state; C_bend *C; float dt; float damping; __device__ void operator()(int i) const { Quaternion &qa = state.q[C[i].a]; Quaternion &qb = state.q[C[i].b]; Quaternion &qap = state.qp[C[i].a]; Quaternion &qbp = state.qp[C[i].b]; Vec3 vqa = 2.f / dt * (qap.conjugate() * qa).vec(); Vec3 vqb = 2.f / dt * (qbp.conjugate() * qb).vec(); Vec3 dv = (vqb - vqa) * (1.0f - damping); vqa += dv; vqb -= dv; Quaternion vqaq, vqbq; vqaq.w() = 0.f; 
vqbq.w() = 0.f; vqaq.vec() = vqa; vqbq.vec() = vqb; qa = qap.coeffs() + 0.5f * dt * (qap * vqaq).coeffs(); qb = qbp.coeffs() + 0.5f * dt * (qbp * vqbq).coeffs(); qa.normalize(); qb.normalize(); } }; struct apply_projection_particles { Vec3 *x; float *r; Vec6 *dx; int *id; uint8_t *a; apply_projection_particles(Vec3 *x, float *r, Vec6 *dx, int *id, uint8_t *a) : x(x), r(r), dx(dx), id(id), a(a) {} __device__ void operator()(int i) const { int k = id[i]; if (a[k] == 0) return; if (dx[i][4] > 1e-6f) x[k] += dx[i].head<3>() / dx[i][4]; if (dx[i][5] > 1e-6f) r[k] = fmaxf(MIN_RADIUS, r[k] + dx[i][3] / dx[i][5]); } }; struct apply_projection_frames { Quaternion *x; Vec6 *dx; int *id; int N; uint8_t *a; apply_projection_frames(Quaternion *x, Vec6 *dx, int *id, uint8_t *a, int N) : x(x), dx(dx), id(id), N(N), a(a) {} __device__ void operator()(int i) const { int k = id[i] - N; if (a[k] == 0) return; if (dx[i][4] > 1e-6f) x[k].coeffs() += dx[i].head<4>() / dx[i][4]; x[k].normalize(); } }; struct generate_pills_proxys { Vec3 *x; float *r; Vec2i *pills; Vec3 *sx; float *sr; generate_pills_proxys(Vec3 *x, float *r, Vec2i *pills, Vec3 *sx, float *sr) : x(x), r(r), pills(pills), sx(sx), sr(sr) {} __device__ void operator()(int i) const { int a = pills[i][0]; int b = pills[i][1]; Vec3 s0 = x[a]; Vec3 s1 = x[b]; float r0 = r[a]; float r1 = r[b]; Vec3 d = s1 - s0; float l = d.norm(); Vec3 dl = d / (l + FLT_EPSILON); sx[i] = (s1 + s0 + dl * (r1 - r0)) / 2; sr[i] = (l + r0 + r1) / 2; } }; struct generate_collisions { Vec2i *pills; const Vec2i *coll_pairs; C_collision *C; generate_collisions(Vec2i *pills, const Vec2i *coll_pairs, C_collision *C) : pills(pills), coll_pairs(coll_pairs), C(C) {} __device__ void operator()(int i) const { int a = coll_pairs[i][0]; int b = coll_pairs[i][1]; C[i].a = pills[a]; C[i].b = pills[b]; C[i].enabled = true; } }; struct collision_filter { Vec2i *pills; int *group; CudaStatePtr S; collision_filter(Vec2i *pills, int *group, CudaStatePtr S) : pills(pills), group(group), S(S) {} __device__ bool operator()(const Vec2i &c) { int a0 = pills[c[0]][0]; int a1 = pills[c[0]][1]; int b0 = pills[c[1]][0]; int b1 = pills[c[1]][1]; int zeroa = S.w[a0] < 1e-6f || S.w[a1] < 1e-6f; int zerob = S.w[b0] < 1e-6f || S.w[b1] < 1e-6f; return group[c[0]] != group[c[1]] && (zeroa + zerob < 2); } }; CudaSolver::CudaSolver() { gpu = new CudaSolverData(); } CudaSolver::~CudaSolver() { // delete gpu; } template <typename T> struct DisabledPredicate { bool operator()(const T &constraint) { return !constraint.enabled; } }; template <typename CPUVec, typename GPUVec> void upload_and_filter(GPUVec &gpu_vec, const CPUVec &cpu_vec) { using T = typename GPUVec::value_type; gpu_vec = cpu_vec; if (gpu_vec.size() > 0) { gpu_vec.erase(thrust::remove_if(thrust::device, gpu_vec.begin(), gpu_vec.end(), DisabledPredicate<T>()), gpu_vec.end()); } } double CudaSolver::solve(ConstraintsCPU &constraints, SimulationState &state, const std::vector<Vec2i> &pills, const std::vector<int> &group, float dt, const Vec3 &g, int iterations, bool floor, float damping) { CudaConstraints &C = gpu->C; CudaSimData &S = gpu->S; CudaProjections &Pc = gpu->Pc; CudaProjections &Pt = gpu->Pt; CudaProjections &Pp = gpu->Pp; // CPU -> GPU S.X.x = state.x; S.X.q = state.q; S.X.r = state.r; S.Xp.x = state.xp; S.Xp.q = state.qp; S.Xp.r = state.rp; S.Xi.x = state.xi; S.Xi.q = state.qi; S.Xi.r = state.ri; S.b = state.b; S.bp = state.bp; S.bi = state.bi; S.w = state.w; S.wq = state.wq; S.wr = state.wr; S.xa = state.xa; S.qa = state.qa; 
thrust::device_vector<Vec2i> gpu_pills = pills; thrust::device_vector<int> pill_groups = group; int N = state.x.size(); // particles count int M = state.q.size(); // pills count upload_and_filter(C.dist, constraints.distance); upload_and_filter(C.distmax, constraints.distancemax); upload_and_filter(C.skinning, constraints.skinning); upload_and_filter(C.vol, constraints.volume); upload_and_filter(C.vol2, constraints.volume2); upload_and_filter(C.bend, constraints.bend); upload_and_filter(C.stretch, constraints.stretch); upload_and_filter(C.bilap, constraints.bilap); upload_and_filter(C.shape, constraints.shape); upload_and_filter(C.shape2, constraints.shape2); upload_and_filter(C.radius, constraints.radius); upload_and_filter(C.touch, constraints.touch); tic(); // time integration thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(N), V_integration(CudaStatePtr(S), dt, g, damping)); thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(M), Vq_integration(CudaStatePtr(S), dt, damping)); thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.skinning.size()), C_skinning_solve(ptr(C.skinning), CudaStatePtr(S))); float t_velocity = toc(); thrust::device_vector<Vec3> sp(M); thrust::device_vector<float> sr(M); thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(M), generate_pills_proxys(ptr(S.X.x), ptr(S.X.r), ptr(gpu_pills), ptr(sp), ptr(sr))); if (M < 2) { C.collision.resize(0); } else { const thrust::device_vector<Vec2i> &coll_pairs = gpu->cgrid.test_particles(sp, sr, COLLISION_MARGIN); thrust::device_vector<Vec2i> coll_pairs_filtered(coll_pairs.size()); auto valid_coll_end = thrust::copy_if(thrust::device, coll_pairs.begin(), coll_pairs.end(), coll_pairs_filtered.begin(), collision_filter(ptr(gpu_pills), ptr(pill_groups), CudaStatePtr(S))); coll_pairs_filtered.erase(valid_coll_end, coll_pairs_filtered.end()); int K = coll_pairs_filtered.size(); C.collision.resize(K); thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(K), generate_collisions(ptr(gpu_pills), ptr(coll_pairs_filtered), ptr(C.collision))); } float t_collision = toc(); ConstraintsInfo cInfo; cInfo.add("distance", C.dist.size(), 2, 1); cInfo.add("volume", C.vol.size(), 2, 1); cInfo.add("volume2", C.vol2.size(), 3, 1); cInfo.add("bend", C.bend.size(), 2, 3); cInfo.add("stretch", C.stretch.size(), 3, 3); cInfo.add("bilap", C.bilap.size(), 1, 1); cInfo.add("shape", C.shape.size(), SHAPE_MATCHING_MAX, 1); cInfo.add("shape2", C.shape2.size(), 3 * SHAPE_MATCHING_MAX, 1); cInfo.add("radius", C.radius.size(), 1, 1); cInfo.add("collision", C.collision.size(), 4, 0); cInfo.add("touch", C.touch.size(), 2, 0); int np = cInfo.get_np(); int nl = cInfo.get_nl(); std::map<std::string, int> o = cInfo.get_o(); std::map<std::string, int> ol = cInfo.get_ol(); int n_cst = C.dist.size() + C.vol.size() + C.bend.size() + C.stretch.size() + C.shape.size() + C.radius.size() + C.collision.size() + C.vol2.size() + C.shape2.size(); bool permutation_built = false; Pc.resize(np); Pt.resize(np); Pp.resize(N + M); thrust::device_vector<float> L(nl); // XPBD thrust::fill(L.begin(), L.end(), 0.f); thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.bend.size()), bend_damping{CudaStatePtr(S), ptr(C.bend), dt, 0.98f}); for (int i = 0; i < iterations; i++) { bool collisions_only = 
(i % 2 == 1) || i == -1; Pc.setZero(); Pp.setZero(); if (!collisions_only) { thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.dist.size()), C_distance_solve(ptr(C.dist), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["distance"]), ptr(L, ol["distance"]), dt)); thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.vol.size()), C_volume_solve(ptr(C.vol), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["volume"]), ptr(L, ol["volume"]), dt)); thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.vol2.size()), C_volume2_solve(ptr(C.vol2), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["volume2"]), ptr(L, ol["volume2"]), dt)); thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.bend.size()), C_bend_solve(ptr(C.bend), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["bend"]), N, ptr(L, ol["bend"]), dt)); thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.stretch.size()), C_stretch_solve(ptr(C.stretch), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["stretch"]), N, ptr(L, ol["stretch"]), dt)); thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.bilap.size()), C_bilap_solve(ptr(C.bilap), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["bilap"]), ptr(L, ol["bilap"]), dt)); thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.shape.size()), C_shape_solve(ptr(C.shape), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["shape"]), ptr(L, ol["shape"]), dt)); thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.shape2.size()), C_shape2_solve(ptr(C.shape2), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["shape2"]), N, ptr(L, ol["shape2"]), dt)); thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.radius.size()), C_radius_solve(ptr(C.radius), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["radius"]), ptr(L, ol["radius"]), dt)); thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.touch.size()), C_touch_solve(ptr(C.touch), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["touch"]), ptr(L, ol["touch"]), dt)); } thrust::for_each( thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator((int)C.collision.size()), C_collision_solve(ptr(C.collision), CudaStatePtr(S), CudaProjectionsPtr(Pc, o["collision"]))); if (!permutation_built) { gpu->c_perm.resize(np); thrust::sequence(thrust::device, gpu->c_perm.begin(), gpu->c_perm.end()); auto vals_begin = thrust::make_zip_iterator( thrust::make_tuple(Pc.dx.begin(), gpu->c_perm.begin())); thrust::sort_by_key(Pc.id.begin(), Pc.id.end(), vals_begin); Pt = Pc; permutation_built = true; } else { auto src_begin = thrust::make_zip_iterator( thrust::make_tuple(Pc.dx.begin(), Pc.id.begin())); auto dst_begin = thrust::make_zip_iterator( thrust::make_tuple(Pt.dx.begin(), Pt.id.begin())); thrust::gather(thrust::device, gpu->c_perm.begin(), gpu->c_perm.end(), src_begin, dst_begin); } auto new_end = thrust::reduce_by_key(thrust::device, Pt.id.begin(), Pt.id.end(), Pt.dx.begin(), Pp.id.begin(), Pp.dx.begin()) .first; auto f_start = thrust::lower_bound(Pp.id.begin(), new_end, N); int proj_count = new_end - Pp.id.begin(); int p_count = f_start - Pp.id.begin(); 
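        // Pt.id stays sorted (by the one-time sort_by_key above and the
        // gather that reuses its permutation on later iterations), so
        // reduce_by_key has left one accumulated Vec6 per touched
        // particle/frame in Pp. Ids in [0, N) are particles and ids in
        // [N, N + M) are frames, hence [0, p_count) are the particle
        // projections and [p_count, proj_count) the frame projections
        // applied next.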
thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(p_count), apply_projection_particles(ptr(S.X.x), ptr(S.X.r), ptr(Pp.dx), ptr(Pp.id), ptr(S.xa))); thrust::for_each(thrust::device, thrust::make_counting_iterator(p_count), thrust::make_counting_iterator(proj_count), apply_projection_frames(ptr(S.X.q), ptr(Pp.dx), ptr(Pp.id), ptr(S.qa), N)); if (floor) thrust::for_each(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(N), floor_friction(CudaStatePtr(S))); } float t_solve = toc(); // GPU -> CPU thrust::copy(S.X.x.begin(), S.X.x.end(), state.x.begin()); thrust::copy(S.Xp.x.begin(), S.Xp.x.end(), state.xp.begin()); thrust::copy(S.X.q.begin(), S.X.q.end(), state.q.begin()); thrust::copy(S.Xp.q.begin(), S.Xp.q.end(), state.qp.begin()); thrust::copy(S.X.r.begin(), S.X.r.end(), state.r.begin()); thrust::copy(S.Xp.r.begin(), S.Xp.r.end(), state.rp.begin()); thrust::copy(C.shape.begin(), C.shape.end(), constraints.shape.begin()); thrust::copy(C.shape2.begin(), C.shape2.end(), constraints.shape2.begin()); return t_solve; } } // namespace viper
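// The iteration loop above accumulates one weighted correction per constraint
// into Pc and then combines them per particle/frame with a sort_by_key /
// reduce_by_key pass before applying them. Below is a minimal, self-contained
// sketch of that Jacobi-style accumulation, assuming Thrust and simplifying
// the Vec6 payload to a 3-component correction plus a contribution count; the
// names viper_sketch::Proj and accumulate_projections are illustrative and
// not part of the solver above.
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>

namespace viper_sketch {

struct Proj {
    float dx, dy, dz; // summed correction
    float w;          // number of constraints that contributed
    __host__ __device__ Proj operator+(const Proj &o) const {
        return Proj{dx + o.dx, dy + o.dy, dz + o.dz, w + o.w};
    }
};

// ids[i] is the particle a constraint wants to move, projs[i] its correction.
// Produces one (particle id, summed Proj) pair per touched particle, the
// analogue of Pp above; the caller divides by w before applying, exactly as
// apply_projection_particles does.
inline void accumulate_projections(thrust::device_vector<int> &ids,
                                   thrust::device_vector<Proj> &projs,
                                   thrust::device_vector<int> &out_ids,
                                   thrust::device_vector<Proj> &out_projs) {
    thrust::sort_by_key(ids.begin(), ids.end(), projs.begin());
    out_ids.resize(ids.size());
    out_projs.resize(projs.size());
    auto ends = thrust::reduce_by_key(
        ids.begin(), ids.end(), projs.begin(), out_ids.begin(),
        out_projs.begin(), thrust::equal_to<int>(), thrust::plus<Proj>());
    out_ids.resize(ends.first - out_ids.begin());
    out_projs.resize(ends.second - out_projs.begin());
}

} // namespace viper_sketch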
using namespace megdnn; using namespace cuda; namespace { struct BufferFetcherTexture { cudaTextureObject_t tex; __device__ __forceinline__ float get(uint32_t offset) { return tex1Dfetch<float>(tex, offset); } }; struct BufferFetcherRaw { const float* ptr; __device__ __forceinline__ float get(uint32_t offset) { return ptr[offset]; } }; struct BufferFetcherTextureHost { bool init_succ; BufferFetcherTexture val; BufferFetcherTextureHost(float* p, const size_t n); ~BufferFetcherTextureHost() { reset(); } void reset() { if (init_succ) { cuda_check(cudaDestroyTextureObject(val.tex)); init_succ = false; } } }; BufferFetcherTextureHost::BufferFetcherTextureHost(float* p, const size_t n) { init_succ = false; cudaTextureObject_t tex_obj; cudaResourceDesc res_desc; memset(&res_desc, 0, sizeof(cudaResourceDesc)); res_desc.resType = cudaResourceTypeLinear; res_desc.res.linear.devPtr = static_cast<void*>(p); res_desc.res.linear.sizeInBytes = n * sizeof(float); res_desc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaTextureDesc tex_desc; memset(&tex_desc, 0, sizeof(cudaTextureDesc)); if (cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL) == cudaSuccess) { val.tex = tex_obj; init_succ = true; } else { cudaGetLastError(); // reset error } } template <class BufferFetcher> struct KernelPtr { typedef void (*type)( BufferFetcher, BufferFetcher, float*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); }; //! 1 -> 0xffffffff, 0 -> 0x00000000 __device__ __forceinline__ uint32_t bool_as_mask(uint32_t cond) { return (!cond) - 1u; } union FloatAndU32 { float f; uint32_t u; }; //! \p mask must be either all 1 or 0 bits template <class BufferFetcher> __device__ __forceinline__ float visit_with_mask( BufferFetcher buf, uint32_t offset, uint32_t mask) { FloatAndU32 f; f.f = buf.get(offset & mask); f.u &= mask; return f.f; } __device__ __forceinline__ uint32_t with_dilation(const uint32_t origin, const uint32_t D) { return origin * D; } template <uint32_t BY, uint32_t BX, bool is_xcorr, class BufferFetcher> __global__ void conv_kernel( BufferFetcher diff, BufferFetcher src, float* grad, const uint32_t N, const uint32_t INP_BS, const uint32_t OUT_BS, const uint32_t IC, const uint32_t ID, const uint32_t IH, const uint32_t IW, const uint32_t OC, const uint32_t OD, const uint32_t OH, const uint32_t OW, const uint32_t FD, const uint32_t FH, const uint32_t FW, const uint32_t SD, const uint32_t SH, const uint32_t SW, const uint32_t PD, const uint32_t PH, const uint32_t PW, const uint32_t DD, const uint32_t DH, const uint32_t DW) { const uint32_t BM = BY < BX ? 
BY : BX; uint32_t n = blockIdx.z; const uint32_t tidx = threadIdx.x; const uint32_t tidy = threadIdx.y; const uint32_t posx = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t posy = blockIdx.y * blockDim.y + threadIdx.y; const uint32_t posx2 = posx << 2; const uint32_t posy2 = posy << 2; const uint32_t heightA = OC; const uint32_t widthA = OD * OH * OW; const uint32_t heightB = widthA; const uint32_t widthB = IC * FD * FH * FW; uint32_t ic0 = (posx2 + 0) / FW / FH / FD; uint32_t fd0 = (posx2 + 0) / FW / FH % FD; uint32_t fh0 = (posx2 + 0) / FW % FH; uint32_t fw0 = (posx2 + 0) % FW; uint32_t ic1 = (posx2 + 1) / FW / FH / FD; uint32_t fd1 = (posx2 + 1) / FW / FH % FD; uint32_t fh1 = (posx2 + 1) / FW % FH; uint32_t fw1 = (posx2 + 1) % FW; uint32_t ic2 = (posx2 + 2) / FW / FH / FD; uint32_t fd2 = (posx2 + 2) / FW / FH % FD; uint32_t fh2 = (posx2 + 2) / FW % FH; uint32_t fw2 = (posx2 + 2) % FW; uint32_t ic3 = (posx2 + 3) / FW / FH / FD; uint32_t fd3 = (posx2 + 3) / FW / FH % FD; uint32_t fh3 = (posx2 + 3) / FW % FH; uint32_t fw3 = (posx2 + 3) % FW; if (!is_xcorr) { fd0 = FD - fd0 - 1; fd1 = FD - fd1 - 1; fd2 = FD - fd2 - 1; fd3 = FD - fd3 - 1; fh0 = FH - fh0 - 1; fh1 = FH - fh1 - 1; fh2 = FH - fh2 - 1; fh3 = FH - fh3 - 1; fw0 = FW - fw0 - 1; fw1 = FW - fw1 - 1; fw2 = FW - fw2 - 1; fw3 = FW - fw3 - 1; } const uint32_t fd0d = with_dilation(fd0, DD); const uint32_t fd1d = with_dilation(fd1, DD); const uint32_t fd2d = with_dilation(fd2, DD); const uint32_t fd3d = with_dilation(fd3, DD); const uint32_t fh0d = with_dilation(fh0, DH); const uint32_t fh1d = with_dilation(fh1, DH); const uint32_t fh2d = with_dilation(fh2, DH); const uint32_t fh3d = with_dilation(fh3, DH); const uint32_t fw0d = with_dilation(fw0, DW); const uint32_t fw1d = with_dilation(fw1, DW); const uint32_t fw2d = with_dilation(fw2, DW); const uint32_t fw3d = with_dilation(fw3, DW); const uint32_t fp0 = ic0 * ID * IH * IW + fd0d * IH * IW + fh0d * IW + fw0d; const uint32_t fp1 = ic1 * ID * IH * IW + fd1d * IH * IW + fh1d * IW + fw1d; const uint32_t fp2 = ic2 * ID * IH * IW + fd2d * IH * IW + fh2d * IW + fw2d; const uint32_t fp3 = ic3 * ID * IH * IW + fd3d * IH * IW + fh3d * IW + fw3d; const uint32_t OP = OH * OW; __shared__ float4 localA[BY][BM]; __shared__ float4 localB[BM][BX]; uint32_t i = 0u; uint32_t offsetA = n * OUT_BS + posy2 * widthA + tidx; uint32_t offsetB = n * INP_BS - PD * IH * IW - PH * IW - PW; float4 sum0 = {0.0f, 0.0f, 0.0f, 0.0f}, sum1 = {0.0f, 0.0f, 0.0f, 0.0f}, sum2 = {0.0f, 0.0f, 0.0f, 0.0f}, sum3 = {0.0f, 0.0f, 0.0f, 0.0f}; uint32_t od = tidy / (OW * OH); uint32_t oh = tidy / (OW) % OH; uint32_t ow = tidy % OW; uint32_t odm = tidy % (OW * OH); const uint32_t ods = BM / (OW * OH); const uint32_t ohs = BM / (OW) % OH; const uint32_t ows = BM % OW; const uint32_t odms = BM % (OW * OH); for (; i < widthA; i += BM, offsetA += BM) { // load localA if (tidx < BM) { localA[tidy][tidx].x = diff.get(offsetA + 0 * widthA); localA[tidy][tidx].y = diff.get(offsetA + 1 * widthA); localA[tidy][tidx].z = diff.get(offsetA + 2 * widthA); localA[tidy][tidx].w = diff.get(offsetA + 3 * widthA); } if (tidy < BM) { uint32_t tmp = offsetB + od * SD * IH * IW + oh * SH * IW + ow * SW, ok = bool_as_mask(tidy + i < heightB), p0 = bool_as_mask( fd0d + od * SD >= PD && fd0d + od * SD < ID + PD && fh0d + oh * SH >= PH && fh0d + oh * SH < IH + PH && fw0d + ow * SW >= PW && fw0d + ow * SW < IW + PW), p1 = bool_as_mask( fd1d + od * SD >= PD && fd1d + od * SD < ID + PD && fh1d + oh * SH >= PH && fh1d + oh * SH < IH + PH && fw1d + ow * SW 
>= PW && fw1d + ow * SW < IW + PW), p2 = bool_as_mask( fd2d + od * SD >= PD && fd2d + od * SD < ID + PD && fh2d + oh * SH >= PH && fh2d + oh * SH < IH + PH && fw2d + ow * SW >= PW && fw2d + ow * SW < IW + PW), p3 = bool_as_mask( fd3d + od * SD >= PD && fd3d + od * SD < ID + PD && fh3d + oh * SH >= PH && fh3d + oh * SH < IH + PH && fw3d + ow * SW >= PW && fw3d + ow * SW < IW + PW); localB[tidy][tidx].x = visit_with_mask(src, tmp + fp0, ok & p0); localB[tidy][tidx].y = visit_with_mask(src, tmp + fp1, ok & p1); localB[tidy][tidx].z = visit_with_mask(src, tmp + fp2, ok & p2); localB[tidy][tidx].w = visit_with_mask(src, tmp + fp3, ok & p3); } __syncthreads(); for (uint32_t j = 0u; j < BM; ++j) { float4 tmpA = localA[tidy][j]; float4 tmpB = localB[j][tidx]; sum0.x += tmpA.x * tmpB.x; sum0.y += tmpA.x * tmpB.y; sum0.z += tmpA.x * tmpB.z; sum0.w += tmpA.x * tmpB.w; sum1.x += tmpA.y * tmpB.x; sum1.y += tmpA.y * tmpB.y; sum1.z += tmpA.y * tmpB.z; sum1.w += tmpA.y * tmpB.w; sum2.x += tmpA.z * tmpB.x; sum2.y += tmpA.z * tmpB.y; sum2.z += tmpA.z * tmpB.z; sum2.w += tmpA.z * tmpB.w; sum3.x += tmpA.w * tmpB.x; sum3.y += tmpA.w * tmpB.y; sum3.z += tmpA.w * tmpB.z; sum3.w += tmpA.w * tmpB.w; } oh += ohs; ow += ows; oh += (ow >= OW); ow -= (ow >= OW) * OW; oh -= (oh >= OH) * OH; od += ods; odm += odms; od += (odm >= OP); odm -= (odm >= OP) * OP; __syncthreads(); } // widthB == IC*FD*FH*FW, heightA == OC const uint32_t grad_idx = posy2 * widthB + posx2; bool y0 = (posy2 + 0 < heightA); bool y1 = (posy2 + 1 < heightA); bool y2 = (posy2 + 2 < heightA); bool y3 = (posy2 + 3 < heightA); bool x0 = (posx2 + 0 < widthB); bool x1 = (posx2 + 1 < widthB); bool x2 = (posx2 + 2 < widthB); bool x3 = (posx2 + 3 < widthB); if (y0) { if (x0) atomicAdd(&grad[grad_idx + 0 * widthB + 0], sum0.x); if (x1) atomicAdd(&grad[grad_idx + 0 * widthB + 1], sum0.y); if (x2) atomicAdd(&grad[grad_idx + 0 * widthB + 2], sum0.z); if (x3) atomicAdd(&grad[grad_idx + 0 * widthB + 3], sum0.w); } if (y1) { if (x0) atomicAdd(&grad[grad_idx + 1 * widthB + 0], sum1.x); if (x1) atomicAdd(&grad[grad_idx + 1 * widthB + 1], sum1.y); if (x2) atomicAdd(&grad[grad_idx + 1 * widthB + 2], sum1.z); if (x3) atomicAdd(&grad[grad_idx + 1 * widthB + 3], sum1.w); } if (y2) { if (x0) atomicAdd(&grad[grad_idx + 2 * widthB + 0], sum2.x); if (x1) atomicAdd(&grad[grad_idx + 2 * widthB + 1], sum2.y); if (x2) atomicAdd(&grad[grad_idx + 2 * widthB + 2], sum2.z); if (x3) atomicAdd(&grad[grad_idx + 2 * widthB + 3], sum2.w); } if (y3) { if (x0) atomicAdd(&grad[grad_idx + 3 * widthB + 0], sum3.x); if (x1) atomicAdd(&grad[grad_idx + 3 * widthB + 1], sum3.y); if (x2) atomicAdd(&grad[grad_idx + 3 * widthB + 2], sum3.z); if (x3) atomicAdd(&grad[grad_idx + 3 * widthB + 3], sum3.w); } } } // anonymous namespace void convolution3d::exec_inplace_matmul_bwd_filter( const float* diff, const float* src, float* grad, size_t N, size_t INP_BS, size_t OUT_BS, size_t IC, size_t ID, size_t IH, size_t IW, size_t OC, size_t OD, size_t OH, size_t OW, size_t FD, size_t FH, size_t FW, size_t PD, size_t PH, size_t PW, size_t SD, size_t SH, size_t SW, size_t DD, size_t DH, size_t DW, bool is_xcorr, cudaStream_t stream) { BufferFetcherTextureHost diff_tex(const_cast<float*>(diff), OC * OD * OH * OW * N), src_tex(const_cast<float*>(src), N * INP_BS); BufferFetcherRaw diff_buf, src_buf; src_buf.ptr = src; diff_buf.ptr = diff; if (!src_tex.init_succ || !diff_tex.init_succ) { src_tex.reset(); diff_tex.reset(); } int m = OC; int n = IC * FD * FH * FW; int BY = 1; int BX = 1; if (m <= 64) { while (BY < 16 
&& (BY << 2) < m) BY <<= 1; BX = 256 / BY; } else if (n <= 64) { while (BX < 16 && (BX << 2) < n) BX <<= 1; BY = 256 / BX; } else { BX = BY = 16; } cudaMemset(grad, 0, OC * IC * FD * FH * FW * sizeof(float)); dim3 blocks(DIVUP(n, 4 * BX), DIVUP(m, 4 * BY), N); dim3 threads(BX, BY); #define DISPATCH_BX_BY(BX, BY) \ do { \ if (diff_tex.init_succ) { \ KernelPtr<BufferFetcherTexture>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherTexture>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherTexture>; \ } \ kptr<<<blocks, threads, 0, stream>>>( \ diff_tex.val, src_tex.val, grad, N, INP_BS, OUT_BS, IC, ID, IH, \ IW, OC, OD, OH, OW, FD, FH, FW, SD, SH, SW, PD, PH, PW, DD, DH, \ DW); \ } else { \ KernelPtr<BufferFetcherRaw>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherRaw>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherRaw>; \ } \ kptr<<<blocks, threads, 0, stream>>>( \ diff_buf, src_buf, grad, N, INP_BS, OUT_BS, IC, ID, IH, IW, OC, \ OD, OH, OW, FD, FH, FW, SD, SH, SW, PD, PH, PW, DD, DH, DW); \ } \ } while (0) #define DISPATCH_BX(BX) \ do { \ DISPATCH_BX_BY(BX, 256 / BX); \ } while (0) #define DISPATCH() \ do { \ switch (BX) { \ case 1: \ DISPATCH_BX(1); \ break; \ case 2: \ DISPATCH_BX(2); \ break; \ case 4: \ DISPATCH_BX(4); \ break; \ case 8: \ DISPATCH_BX(8); \ break; \ case 16: \ DISPATCH_BX(16); \ break; \ case 32: \ DISPATCH_BX(32); \ break; \ case 64: \ DISPATCH_BX(64); \ break; \ case 128: \ DISPATCH_BX(128); \ break; \ case 256: \ DISPATCH_BX(256); \ break; \ default: \ report_error("no usable kernel"); \ } \ } while (0) DISPATCH(); #undef DISPATCH #undef DISPATCH_BX #undef DISPATCH_BX_BY after_kernel_launch(); } // vim: syntax=cpp.doxygen
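// For reference, the kernel above computes the gradient of a 3D
// cross-correlation with respect to the filter. A naive CPU sketch of the
// same accumulation follows, assuming densely packed tensors
// (INP_BS == IC*ID*IH*IW, OUT_BS == OC*OD*OH*OW) and covering only the
// is_xcorr == true case; conv3d_ref is an illustrative namespace, not part
// of megdnn.
#include <algorithm>
#include <cstddef>

namespace conv3d_ref {

inline void bwd_filter_xcorr(const float* diff, const float* src, float* grad,
                             size_t N, size_t IC, size_t ID, size_t IH,
                             size_t IW, size_t OC, size_t OD, size_t OH,
                             size_t OW, size_t FD, size_t FH, size_t FW,
                             size_t PD, size_t PH, size_t PW, size_t SD,
                             size_t SH, size_t SW, size_t DD, size_t DH,
                             size_t DW) {
    std::fill(grad, grad + OC * IC * FD * FH * FW, 0.f);
    for (size_t n = 0; n < N; ++n)
    for (size_t oc = 0; oc < OC; ++oc)
    for (size_t ic = 0; ic < IC; ++ic)
    for (size_t fd = 0; fd < FD; ++fd)
    for (size_t fh = 0; fh < FH; ++fh)
    for (size_t fw = 0; fw < FW; ++fw)
    for (size_t od = 0; od < OD; ++od)
    for (size_t oh = 0; oh < OH; ++oh)
    for (size_t ow = 0; ow < OW; ++ow) {
        // input coordinate touched by this (output, filter tap) pair
        ptrdiff_t id = ptrdiff_t(od * SD + fd * DD) - ptrdiff_t(PD);
        ptrdiff_t ih = ptrdiff_t(oh * SH + fh * DH) - ptrdiff_t(PH);
        ptrdiff_t iw = ptrdiff_t(ow * SW + fw * DW) - ptrdiff_t(PW);
        if (id < 0 || id >= ptrdiff_t(ID) || ih < 0 || ih >= ptrdiff_t(IH) ||
            iw < 0 || iw >= ptrdiff_t(IW))
            continue; // padded region contributes nothing
        grad[(((oc * IC + ic) * FD + fd) * FH + fh) * FW + fw] +=
            diff[(((n * OC + oc) * OD + od) * OH + oh) * OW + ow] *
            src[(((n * IC + ic) * ID + id) * IH + ih) * IW + iw];
    }
}

} // namespace conv3d_ref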
#include "src/FeatureLPPooling.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/CudaStaticAssert.cuh" #include "cuda/CudaUtils.cuh" #include "cuda/RegisterUtils.cuh" #include "cuda/util/CachedDeviceProperties.h" #include "THC.h" #include <boost/preprocessor/repetition/repeat.hpp> using namespace facebook::cuda; #define OUTPUT_FEATURES_PER_THREAD 32 #define MAX_WARPS_PER_RUN 4 namespace facebook { namespace deeplearning { namespace torch { namespace detail { __device__ __forceinline__ int getDim1Point(const DeviceTensor<float, 4>& input) { const int threadPoint = blockIdx.x * blockDim.x + threadIdx.x; return threadPoint / input.getSize(3); } __device__ __forceinline__ int getDim2Point(const DeviceTensor<float, 4>& input) { const int threadPoint = blockIdx.x * blockDim.x + threadIdx.x; return threadPoint % input.getSize(3); } __device__ __forceinline__ int getStartOutputFeature() { return blockIdx.y * OUTPUT_FEATURES_PER_THREAD; } __device__ __forceinline__ int getEndOutputFeature(const DeviceTensor<float, 4>& output) { return min((blockIdx.y + 1) * OUTPUT_FEATURES_PER_THREAD, output.getSize(1)); } __device__ __forceinline__ int getBatch() { return blockIdx.z; } // All of these functions that follow are MathOps; they are template // parameters so L2 can be more efficiently implemented typedef float (*MathOp)(const float in, const float arg); __device__ __forceinline__ float power2(const float in, const float power) { return in * in; } __device__ __forceinline__ float root2(const float in, const float power) { return sqrtf(in); } __device__ __forceinline__ float powerGrad2(const float in, const float power) { return in; } __device__ __forceinline__ float powerN(const float in, const float power) { return powf(in, power); } __device__ __forceinline__ float rootN(const float in, const float power) { const float invPower = 1.0f / power; return powf(in, invPower); } __device__ __forceinline__ float powerGradN(const float in, const float power) { return powf(in, power - 1.0f); } // Input is of the form: // [batch][feature dim][optional dim 1][optional dim 2] template <int Width, int Stride, MathOp PowerFunc, MathOp RootFunc> __global__ void featureLPPoolingUpdateOutput(const DeviceTensor<float, 4> input, DeviceTensor<float, 4> output, float power) { // What non-feature points is this thread handling? const int dim1Point = getDim1Point(input); const int dim2Point = getDim2Point(input); if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) { // This thread in the warp is out of bounds return; } // What feature points is this thread handling? const int startOutputFeature = getStartOutputFeature(); const int endOutputFeature = getEndOutputFeature(output); const int startInputFeature = startOutputFeature * Stride; // What batch points is this thread handling? const int batch = getBatch(); // If stride >= width, then there is no loaded data reuse. // If stride > 1 and stride < width, then shift by stride, since we // can reuse Width - Stride elements from the previous round. 
// e.g., width = 5, stride = 2, // output 0 uses input 0 1 2 3 4 // output 1 uses input 2 3 4 5 6 (inputs 2 - 4 are reused, i.e., 5 - // 2 elements are reused, and we have to shift the array by 2) // // e.g., width = 5, stride = 3, // output 0 uses input 0 1 2 3 4 // output 1 uses input 3 4 5 6 7 (inputs 3 - 4 are reused, i.e., 5 - 3 // elements are reused, and we have to shift the array by 3) // Valid only pooling: load Width elements from input (Width - // Stride is handled here, at the top of the loop we handle the // remaining Stride elements). We already verified that the input is // larger than the width. // `in` will contain the input values ^ power. float in[Width]; #pragma unroll for (int i = 0; i < Width - Stride; ++i) { const float data = input[batch][startInputFeature + i][dim1Point][dim2Point]; in[i] = PowerFunc(data, power); } for (int outputFeature = startOutputFeature; outputFeature < endOutputFeature; ++outputFeature) { // If Stride < Width, we're loading Stride new values starting at // Width - Stride // If Stride >= Width, we're loading Width new values starting at 0 if (Stride < Width) { const int nextInputFeature = outputFeature * Stride + Width - Stride; #pragma unroll for (int i = 0; i < Stride; ++i) { const float data = input[batch][nextInputFeature + i][dim1Point][dim2Point]; in[Width - Stride + i] = PowerFunc(data, power); } } else { const int nextInputFeature = outputFeature * Stride; #pragma unroll for (int i = 0; i < Width; ++i) { float data = input[batch][nextInputFeature + i][dim1Point][dim2Point]; in[i] = PowerFunc(data, power); } } // Calculate the new output feature float val = 0.0f; for (int i = 0; i < Width; ++i) { val += in[i]; } val = RootFunc(val, power); output[batch][outputFeature][dim1Point][dim2Point] = val; if (Stride < Width) { // Shift registers for calculating the next point RegisterUtils<float, Width>::shiftLeft<Stride>(in); } } } // forward pass: f(a, ..., z) = (a^p + ... + z^p)^(1 / p) // for bprop: // partial df(a, ... z)/da = a^(p - 1) * (a^p + ... + z^p)^((1 / p) - 1) = // a^(p - 1) * 1/(f(a, ..., z)^(p - 1)) = (a / f(a, ..., z))^(p - 1) // // example: for p = 2, df(a, ..., z)/da = a / f(a, ..., z) // example: for p = 3, df(a, ..., z)/da = (a / f(a, ..., z))^2 // // PowerGradFunc implements x^(p - 1) template <int Width, int Stride, MathOp PowerGradFunc> __launch_bounds__(32 * 8, 8) // max 32 registers per thread __global__ void featureLPPoolingUpdateGradInput(const DeviceTensor<float, 4> gradOutput, const DeviceTensor<float, 4> input, const DeviceTensor<float, 4> output, DeviceTensor<float, 4> gradInput, float power) { // What non-feature points is this thread handling? const int dim1Point = getDim1Point(input); const int dim2Point = getDim2Point(input); if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) { // This thread in the warp is out of bounds return; } // What feature points is this thread handling? [start, end) const int startOutputFeature = getStartOutputFeature(); const int endOutputFeature = getEndOutputFeature(output); // What is the first input point that the output features depend // upon? [start, end) const int startInputFeature = startOutputFeature * Stride; const int endInputFeature = endOutputFeature * Stride; // What batch points is this thread handling? const int batch = getBatch(); // atomicAdd into gradInput is slow, avoid it where possible. // We can do this because there is a range of gradInput elements // that we are updating exclusively. 
This is how we find it // // width = 3 stride = 1 example: // ------------------------------ // startOutputFeature for this thread // | // | // previous thread's output feature // | | // | | gradOutput // __v____v___________________ // | | | | | | // --------------------------- // |\ \_____ // | \__ \ gradInput // __v____v____v_____________ // | | | | | | // --------------------------- // A A // | | // startInputFeature // | // exclusiveStartInputFeature // // exclusiveStartInputFeature is the first input feature that we can // write into exclusively; the one right before it overlaps with // updates from a previous thread and thus has to use atomicAdd. const int exclusiveStartInputFeature = startInputFeature == 0 ? // no thread is before ourselves 0 : // there is a thread before ourselves startInputFeature + (Width - 1) * Stride; // Similarly, exclusiveEndInputFeature is the last input feature // that we can write into exclusively, since we might be overlapping // with the following thread const int exclusiveEndInputFeature = endOutputFeature == output.getSize(1) ? // no thread is after ourselves endInputFeature + (Width - 1) * Stride : // there is a thread after ourselves endInputFeature; // As with updateOutput preload input elements, except no need to // transform them float in[Width]; #pragma unroll for (int i = 0; i < Width - Stride; ++i) { in[i] = input[batch][startInputFeature + i][dim1Point][dim2Point]; } for (int outputFeature = startOutputFeature; outputFeature < endOutputFeature; ++outputFeature) { // As with updateOutput load the subsequent input elements that we // need, except no need to transform them // // If Stride < Width, we're loading Stride new values starting at // Width - Stride // If Stride >= Width, we're loading Width new values starting at 0 if (Stride < Width) { const int nextInputFeature = outputFeature * Stride + Width - Stride; #pragma unroll for (int i = 0; i < Stride; ++i) { in[Width - Stride + i] = input[batch][nextInputFeature + i][dim1Point][dim2Point]; } } else { const int nextInputFeature = outputFeature * Stride; #pragma unroll for (int i = 0; i < Width; ++i) { in[i] = input[batch][nextInputFeature + i][dim1Point][dim2Point]; } } // A given output feature gradient contributes to `Width` input // gradients const float gradOut = gradOutput[batch][outputFeature][dim1Point][dim2Point]; // Load output (f(x_is)). It is possible that this is zero, in // which case we'll ignore this point. 
float out = output[batch][outputFeature][dim1Point][dim2Point]; if (out == 0.0f) { continue; } const int curStartInputFeature = outputFeature * Stride; const int curEndInputFeature = outputFeature * Stride + Width - 1; if (curStartInputFeature >= exclusiveStartInputFeature && curEndInputFeature < exclusiveEndInputFeature) { // This thread is exclusively responsible for updating these // input points, so we need not make the addition atomic for (int i = 0; i < Width; ++i) { const int inputFeature = outputFeature * Stride + i; // Calculate grad * (x_i / f(x_is))^(p - 1) const float val = gradOut * PowerGradFunc(in[i] / out, power); gradInput[batch][inputFeature][dim1Point][dim2Point] += val; } } else { // Handle start and end boundary cases: potential overlap with // other threads for (int i = 0; i < Width; ++i) { const int inputFeature = outputFeature * Stride + i; // Calculate grad * (x_i / f(x_is))^(p - 1) const float val = gradOut * PowerGradFunc(in[i] / out, power); // We don't overlap other threads for this range if (inputFeature >= exclusiveStartInputFeature && inputFeature < exclusiveEndInputFeature) { gradInput[batch][inputFeature][dim1Point][dim2Point] += val; } else { // We are potentially overlapping with threads handling // features before ourselves, so these need to be added atomically atomicAdd(&gradInput[batch][inputFeature][dim1Point][dim2Point], val); } } } if (Stride < Width) { // Shift registers for calculating the next point RegisterUtils<float, Width>::shiftLeft<Stride>(in); } } } } // namespace detail bool runFeatureLPPoolingUpdateOutput(cudaStream_t stream, const DeviceTensor<float, 4>& input, DeviceTensor<float, 4>& output, float power, int width, int stride) { const cudaDeviceProp& deviceProperties = facebook::cuda::getCurrentDeviceProperties(); const int outputFeatures = ((input.getSize(1) - width) / stride) + 1; assert(input.getSize(0) == output.getSize(0)); assert(outputFeatures == output.getSize(1)); assert(input.getSize(1) >= width); assert(input.getSize(2) == output.getSize(2)); assert(input.getSize(3) == output.getSize(3)); assert(power > 0.0f); assert(width >= 1); assert(stride >= 1); // Split non-features among threads and grid x int totalNonFeatureSize = input.getSize(2) * input.getSize(3); int numWarps = min(ceil(totalNonFeatureSize, deviceProperties.warpSize), MAX_WARPS_PER_RUN); int blockSize = deviceProperties.warpSize * numWarps; // Split non-features among grid x int nonFeatureSizeBlocks = ceil(totalNonFeatureSize, blockSize); // Split features among grid y, up to a maximum number of features per thread int featureBlocks = ceil(outputFeatures, OUTPUT_FEATURES_PER_THREAD); // Split batch among grid z. 
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0)); dim3 block(blockSize); #define L2_STRIDE_CASE(UNUSED, STRIDE_MIN_1, WIDTH) \ case STRIDE_MIN_1 + 1: \ detail:: \ featureLPPoolingUpdateOutput<WIDTH, \ STRIDE_MIN_1 + 1, \ detail::power2, \ detail::root2><<<grid, block, 0, stream>>>( \ input, output, power); \ return true; // WIDTH_MIN_2 is from 0 -> 14, but we want 2 -> 16 #define L2_WIDTH_CASE(UNUSED1, WIDTH_MIN_2, UNUSED2) \ case WIDTH_MIN_2 + 2: \ switch (stride) { \ BOOST_PP_REPEAT(4, L2_STRIDE_CASE, WIDTH_MIN_2 + 2); \ } #define LP_STRIDE_CASE(UNUSED, STRIDE_MIN_1, WIDTH) \ case STRIDE_MIN_1 + 1: \ detail:: \ featureLPPoolingUpdateOutput<WIDTH, \ STRIDE_MIN_1 + 1, \ detail::powerN, \ detail::rootN><<<grid, block, 0, stream>>>( \ input, output, power); \ return true; // WIDTH_MIN_2 is from 0 -> 14, but we want 2 -> 16 #define LP_WIDTH_CASE(UNUSED1, WIDTH_MIN_2, UNUSED2) \ case WIDTH_MIN_2 + 2: \ switch (stride) { \ BOOST_PP_REPEAT(4, LP_STRIDE_CASE, WIDTH_MIN_2 + 2); \ } if (power == 2.0f) { switch (width) { // widths 2 -> 16 (PP iterate from 0 -> 14) BOOST_PP_REPEAT(15, L2_WIDTH_CASE, 0); } } else { switch (width) { // widths 2 -> 16 (PP iterate from 0 -> 14) BOOST_PP_REPEAT(15, LP_WIDTH_CASE, 0); } } // Otherwise, we have an unhandled width and/or stride. return false; #undef L2_STRIDE_CASE #undef L2_WIDTH_CASE #undef LP_STRIDE_CASE #undef LP_WIDTH_CASE } bool runFeatureLPPoolingUpdateGradInput(cudaStream_t stream, const DeviceTensor<float, 4>& gradOutput, const DeviceTensor<float, 4>& input, const DeviceTensor<float, 4>& output, DeviceTensor<float, 4>& gradInput, float power, int width, int stride) { const cudaDeviceProp& deviceProperties = facebook::cuda::getCurrentDeviceProperties(); for (int i = 0; i < 4; ++i) { assert(gradOutput.getSize(i) == output.getSize(i)); assert(gradInput.getSize(i) == input.getSize(i)); } int outputFeatures = ((input.getSize(1) - width) / stride) + 1; assert(gradInput.getSize(0) == gradOutput.getSize(0)); assert(outputFeatures == gradOutput.getSize(1)); assert(gradInput.getSize(1) >= width); assert(gradInput.getSize(2) == gradOutput.getSize(2)); assert(gradInput.getSize(3) == gradOutput.getSize(3)); assert(power > 0.0f); assert(width >= 1); assert(stride >= 1); // Different threads are potentially adding into overlapping input // points, so we must clear out gradInput before continuing. gradInput.zero(); // Split non-features among threads and grid x int totalNonFeatureSize = input.getSize(2) * input.getSize(3); int numWarps = min(ceil(totalNonFeatureSize, deviceProperties.warpSize), MAX_WARPS_PER_RUN); int blockSize = deviceProperties.warpSize * numWarps; // Split non-features among grid x int nonFeatureSizeBlocks = ceil(totalNonFeatureSize, blockSize); // Split features among grid y, up to a maximum number of features per thread int featureBlocks = ceil(outputFeatures, OUTPUT_FEATURES_PER_THREAD); // Split batch among grid z. 
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0)); dim3 block(blockSize); #define L2_STRIDE_CASE(UNUSED, STRIDE_MIN_1, WIDTH) \ case STRIDE_MIN_1 + 1: \ detail:: \ featureLPPoolingUpdateGradInput<WIDTH, \ STRIDE_MIN_1 + 1, \ detail::powerGrad2><<<grid, block, \ 0, stream>>>( \ gradOutput, input, output, \ gradInput, power); \ return true; // WIDTH_MIN_2 is from 0 -> 14, but we want 2 -> 16 #define L2_WIDTH_CASE(UNUSED1, WIDTH_MIN_2, UNUSED2) \ case WIDTH_MIN_2 + 2: \ switch (stride) { \ BOOST_PP_REPEAT(4, L2_STRIDE_CASE, WIDTH_MIN_2 + 2); \ } #define LP_STRIDE_CASE(UNUSED, STRIDE_MIN_1, WIDTH) \ case STRIDE_MIN_1 + 1: \ detail:: \ featureLPPoolingUpdateGradInput<WIDTH, \ STRIDE_MIN_1 + 1, \ detail::powerGradN><<<grid, block, \ 0, stream>>>( \ gradOutput, input, output, \ gradInput, power); \ return true; // WIDTH_MIN_2 is from 0 -> 14, but we want 2 -> 16 #define LP_WIDTH_CASE(UNUSED1, WIDTH_MIN_2, UNUSED2) \ case WIDTH_MIN_2 + 2: \ switch (stride) { \ BOOST_PP_REPEAT(4, LP_STRIDE_CASE, WIDTH_MIN_2 + 2); \ } if (power == 2.0f) { switch (width) { // widths 2 -> 16 (PP iterate from 0 -> 14) BOOST_PP_REPEAT(15, L2_WIDTH_CASE, 0); } } else { switch (width) { // widths 2 -> 16 (PP iterate from 0 -> 14) BOOST_PP_REPEAT(15, LP_WIDTH_CASE, 0); } } // Otherwise, we have an unhandled width and/or stride. return false; #undef L2_STRIDE_CASE #undef L2_WIDTH_CASE #undef LP_STRIDE_CASE #undef LP_WIDTH_CASE } } } }
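// A small host-side reference of the pooling computed above: for each output
// feature o, the window of `width` consecutive input features starting at
// o * stride is reduced as (sum_i x_i^p)^(1/p) ("valid" pooling only, as the
// asserts in runFeatureLPPoolingUpdateOutput require at least `width` input
// features). The function name and the flat std::vector layout are
// illustrative, not part of this module's interface; it pools a single
// feature column of one (batch, dim1, dim2) point.
#include <cmath>
#include <vector>

namespace lp_pool_ref {

inline std::vector<float> feature_lp_pool(const std::vector<float>& input,
                                          int width, int stride, float power) {
  const int outputFeatures =
      (static_cast<int>(input.size()) - width) / stride + 1;
  std::vector<float> output(outputFeatures);
  for (int o = 0; o < outputFeatures; ++o) {
    float acc = 0.0f;
    for (int i = 0; i < width; ++i) {
      acc += std::pow(input[o * stride + i], power); // x_i^p
    }
    output[o] = std::pow(acc, 1.0f / power); // (sum_i x_i^p)^(1/p)
  }
  return output;
}

} // namespace lp_pool_ref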
#include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/deformable_convolution.hpp> #include <nbla/cuda/math.hpp> #include <nbla/singleton_manager.hpp> #include <nbla/variable.hpp> #include <nbla/cuda/utils/deformable_im2col.hpp> #include <algorithm> namespace nbla { template <typename T> void DeformableConvolutionCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { DeformableConvolution<T>::setup_impl(inputs, outputs); } template <typename T> void DeformableConvolutionCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { NBLA_CHECK(!this->channel_last_, error_code::value, "The passed argument channel_last_=true is not supported in CUDA " "Convolution."); cuda_set_device(std::stoi(this->ctx_.device_id)); // Getting variable pointers const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *w = inputs[1]->get_data_pointer<Tc>(this->ctx_); const Tc *offset = inputs[2]->get_data_pointer<Tc>(this->ctx_); const Tc *mask = nullptr; const Tc *b = nullptr; if (inputs.size() == 4) { if (inputs[3]->ndim() == 1) { b = inputs[3]->get_data_pointer<Tc>(this->ctx_); } else { mask = inputs[3]->get_data_pointer<Tc>(this->ctx_); } } if (inputs.size() == 5) { mask = inputs[3]->get_data_pointer<Tc>(this->ctx_); b = inputs[4]->get_data_pointer<Tc>(this->ctx_); } Variable *vcol = &this->col_; Tc *col = vcol->cast_data_and_get_pointer<Tc>(this->ctx_, true); Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); // Sample loop for (int n = 0; n < this->outer_size_; ++n) { // Im2col if (this->spatial_dims_ == 2) { if (inputs.size() == 5 || (inputs.size() == 4 && inputs[3]->ndim() != 1)) { modulated_deformable_im2col_cuda<Tc, true>( x + n * this->inner_size_i_, offset + n * this->offset_size_i_, mask + n * this->mask_size_i_, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, col); } else { modulated_deformable_im2col_cuda<Tc, false>( x + n * this->inner_size_i_, offset + n * this->offset_size_i_, nullptr, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, col); } } else { NBLA_ERROR(error_code::not_implemented, "Only 2D-deformable convolution supported"); // TODO implement ND variation } // Convolution by matrix multiplication Tc *y_n = y + n * this->inner_size_o_; for (int g = 0; g < this->group_; ++g) { // y = x * w cuda_gemm<Tc>(device_, y_n + g * this->row_y_ * this->col_y_, false, col + g * this->row_col_ * this->col_col_, this->col_col_, this->row_col_, false, w + g * this->row_w_ * this->col_w_, this->col_w_, this->row_w_, false, 1, 0); } // Adding bias if (inputs.size() == 5 || (inputs.size() == 4 && inputs[3]->ndim() == 1)) { const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( this->col_y_, get_dtype<Tc>(), this->ctx_)); // y = 1s * b^T + y cuda_gemm<Tc>(device_, y_n, false, ones, 1, this->col_y_, true, b, this->channels_o_, 1, true, 1, 1); } } } template <typename T> void DeformableConvolutionCuda<T>::backward_impl( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1] || propagate_down[2] || (inputs.size() >= 4 && propagate_down[3]) || (inputs.size() == 5 && propagate_down[4]))) { return; } NBLA_CHECK(!this->channel_last_, error_code::value, "The passed 
argument channel_last_=true is not supported in CUDA " "Convolution."); cuda_set_device(std::stoi(this->ctx_.device_id)); const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); const Tc *x = nullptr; const Tc *w = nullptr; const Tc *offset = nullptr; const Tc *mask = nullptr; Tc *dx = nullptr; Tc *dw = nullptr; Tc *db = nullptr; Tc *doff = nullptr; Tc *dmask = nullptr; Tc *col = nullptr; Variable *temp_col = &this->col_; if (propagate_down[0] || propagate_down[1] || propagate_down[2]) { col = temp_col->cast_data_and_get_pointer<Tc>(this->ctx_, true); offset = inputs[2]->get_data_pointer<Tc>(this->ctx_); if (inputs.size() == 5 || (inputs.size() == 4 && inputs[3]->ndim() != 1) || propagate_down[3]) { mask = inputs[3]->get_data_pointer<Tc>(this->ctx_); } } if (propagate_down[0]) { if (!accum[0]) inputs[0]->grad()->zero(); w = inputs[1]->get_data_pointer<Tc>(this->ctx_); dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, false); } if (propagate_down[1]) { if (!accum[1]) inputs[1]->grad()->zero(); x = inputs[0]->get_data_pointer<Tc>(this->ctx_); dw = inputs[1]->cast_grad_and_get_pointer<Tc>(this->ctx_, false); } if (propagate_down[2]) { if (!accum[2]) inputs[2]->grad()->zero(); doff = inputs[2]->cast_grad_and_get_pointer<Tc>(this->ctx_, false); } if ((inputs.size() == 5 || (inputs.size() == 4 && inputs[3]->ndim() != 1)) && propagate_down[3]) { if (!accum[3]) inputs[3]->grad()->zero(); dmask = inputs[3]->cast_grad_and_get_pointer<Tc>(this->ctx_, false); } if (inputs.size() == 5 && propagate_down[4]) { if (!accum[4]) inputs[4]->grad()->zero(); db = inputs[4]->cast_grad_and_get_pointer<Tc>(this->ctx_, false); } else if ((inputs.size() == 4 && inputs[3]->ndim() == 1) && propagate_down[3]) { if (!accum[3]) inputs[3]->grad()->zero(); db = inputs[3]->cast_grad_and_get_pointer<Tc>(this->ctx_, false); } // Sample loop for (int n = 0; n < this->outer_size_; ++n) { const Tc *dy_n = dy + n * this->inner_size_o_; if (propagate_down[0]) { // Backprop to image Tc *dx_n = dx + n * this->inner_size_i_; for (int g = 0; g < this->group_; ++g) { // dx = w^T * dy cuda_gemm<Tc>(device_, col + this->row_col_ * this->col_col_ * g, true, w + this->row_w_ * this->col_w_ * g, this->col_w_, this->row_w_, false, dy_n + this->row_y_ * this->col_y_ * g, this->col_y_, this->row_y_, true, 1, 0); } // col2im if (this->spatial_dims_ == 2) { Tc *doff_n = doff + n * this->offset_size_i_; if (inputs.size() == 5 || (inputs.size() == 4 && inputs[3]->ndim() != 1)) { modulated_deformable_col2im_cuda<Tc, true>( col, offset + n * this->offset_size_i_, mask + n * this->mask_size_i_, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, dx_n); if (propagate_down[2] && propagate_down[3]) { Tc *dmask_n = dmask + n * this->mask_size_i_; modulated_deformable_col2im_coord_cuda<Tc, true>( col, x + n * this->inner_size_i_, offset + n * this->offset_size_i_, mask + n * this->mask_size_i_, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, doff_n, dmask_n); } } else { modulated_deformable_col2im_cuda<Tc, false>( col, offset + n * this->offset_size_i_, nullptr, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, dx_n); if (propagate_down[2]) { modulated_deformable_col2im_coord_cuda<Tc, false>( col, x + n * 
this->inner_size_i_, offset + n * this->offset_size_i_, nullptr, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, doff_n, nullptr); } } } else { NBLA_ERROR(error_code::not_implemented, "deformable_im2colND not Implemented") } } if (propagate_down[1]) { // Backprop to weights // im2col if (this->spatial_dims_ == 2) { if (inputs.size() == 5 || (inputs.size() == 4 && inputs[3]->ndim() != 1)) { modulated_deformable_im2col_cuda<Tc, true>( x + n * this->inner_size_i_, offset + n * this->offset_size_i_, mask + n * this->mask_size_i_, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, col); } else { modulated_deformable_im2col_cuda<Tc, false>( x + n * this->inner_size_i_, offset + n * this->offset_size_i_, nullptr, this->channels_i_, this->spatial_shape_i_.data(), this->kernel_.data(), this->pad_.data(), this->stride_.data(), this->dilation_.data(), this->deformable_group_, col); } } else { NBLA_ERROR(error_code::not_implemented, "deformable_im2colND not Implemented") } // Weight convolution by matrix multiplication for (int g = 0; g < this->group_; ++g) { // dw += dy * col^T cuda_gemm<Tc>(device_, dw + g * this->row_w_ * this->col_w_, true, dy_n + g * this->row_y_ * this->col_y_, this->col_y_, this->row_y_, true, col + g * this->row_col_ * this->col_col_, this->col_col_, this->row_col_, false, 1, 1); } } if ((inputs.size() == 5 && propagate_down[4]) || (inputs.size() == 4 && inputs[3]->ndim() == 1 && propagate_down[3])) { // Backprop to bias const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( this->col_y_, get_dtype<Tc>(), this->ctx_)); cuda_gemv<Tc>(device_, db, dy_n, this->col_y_, this->channels_o_, true, ones, this->col_y_, 1, 1); } } } }
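// Note on the variadic inputs handled in forward_impl and backward_impl
// above: inputs are ordered (x, weight, offset[, mask][, bias]). With four
// inputs the last one is disambiguated by rank: a 1-D variable is treated as
// the bias, anything else as the modulation mask. With five inputs the
// fourth is the mask and the fifth the bias. Consequently the <Tc, true>
// instantiations of the im2col/col2im helpers are the modulated (mask-aware)
// paths, while the <Tc, false> instantiations are the plain deformable paths.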
template <class T> void free_data(T **ptr) { if (ptr) CUDACHECK(cudaFree(*ptr)); } void free_data_float(float **ptr) { free_data<float>(ptr); } void free_data_double(double **ptr) { free_data<double>(ptr); } void free_data_int(int **ptr) { free_data<int>(ptr); } template <class T> void copy_fecatorization_result(T *dst, const T **src, const int size) { CUDACHECK(cudaMemcpy(dst, *src, sizeof(T) * size, cudaMemcpyDeviceToHost)); } void copy_fecatorization_result_float(float *dst, const float **src, const int size) { copy_fecatorization_result(dst, src, size); } void copy_fecatorization_result_double(double *dst, const double **src, const int size) { copy_fecatorization_result(dst, src, size); } template <class T> int make_factorization_data( const int m, const int n, const int f, const long nnz, const long nnz_test, const int *csrRowIndexHostPtr, const int *csrColIndexHostPtr, const T *csrValHostPtr, const int *cscRowIndexHostPtr, const int *cscColIndexHostPtr, const T *cscValHostPtr, const int *cooRowIndexHostPtr, const int *cooColIndexHostPtr, const T *cooValHostPtr, T *thetaTHost, T *XTHost, const int *cooRowIndexTestHostPtr, const int *cooColIndexTestHostPtr, const T *cooValTestHostPtr, int **csrRowIndexDevicePtr, int **csrColIndexDevicePtr, T **csrValDevicePtr, int **cscRowIndexDevicePtr, int **cscColIndexDevicePtr, T **cscValDevicePtr, int **cooRowIndexDevicePtr, int **cooColIndexDevicePtr, T **cooValDevicePtr, T **thetaTDevice, T **XTDevice, int **cooRowIndexTestDevicePtr, int **cooColIndexTestDevicePtr, T **cooValDeviceTestPtr) { CUDACHECK(cudaMalloc((void **)cooRowIndexDevicePtr, nnz * sizeof(**cooRowIndexDevicePtr))); CUDACHECK(cudaMemcpy(*cooRowIndexDevicePtr, cooRowIndexHostPtr, (size_t)(nnz * sizeof(**cooRowIndexDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK(cudaMalloc((void **)cooColIndexDevicePtr, nnz * sizeof(**cooColIndexDevicePtr))); CUDACHECK(cudaMemcpy(*cooColIndexDevicePtr, cooColIndexHostPtr, (size_t)(nnz * sizeof(**cooColIndexDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK( cudaMalloc((void **)cooValDevicePtr, nnz * sizeof(**cooValDevicePtr))); CUDACHECK(cudaMemcpy(*cooValDevicePtr, cooValHostPtr, (size_t)(nnz * sizeof(**cooValDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK(cudaMalloc((void **)cscRowIndexDevicePtr, nnz * sizeof(**cscRowIndexDevicePtr))); CUDACHECK(cudaMalloc((void **)cscColIndexDevicePtr, (n + 1) * sizeof(**cscColIndexDevicePtr))); CUDACHECK( cudaMalloc((void **)cscValDevicePtr, nnz * sizeof(**cscValDevicePtr))); // dimension: F*N CUDACHECK(cudaMalloc((void **)thetaTDevice, f * n * sizeof(**thetaTDevice))); // dimension: M*F CUDACHECK(cudaMalloc((void **)XTDevice, f * m * sizeof(**XTDevice))); CUDACHECK(cudaMemcpy(*cscRowIndexDevicePtr, cscRowIndexHostPtr, (size_t)nnz * sizeof(**cscRowIndexDevicePtr), cudaMemcpyHostToDevice)); CUDACHECK(cudaMemcpy(*cscColIndexDevicePtr, cscColIndexHostPtr, (size_t)(n + 1) * sizeof(**cscColIndexDevicePtr), cudaMemcpyHostToDevice)); CUDACHECK(cudaMemcpy(*cscValDevicePtr, cscValHostPtr, (size_t)(nnz * sizeof(**cscValDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK(cudaMemcpy(*thetaTDevice, thetaTHost, (size_t)(n * f * sizeof(**thetaTDevice)), cudaMemcpyHostToDevice)); // CG needs XT CUDACHECK(cudaMemcpy(*XTDevice, XTHost, (size_t)(m * f * sizeof(**XTDevice)), cudaMemcpyHostToDevice)); CUDACHECK(cudaMalloc((void **)csrRowIndexDevicePtr, (m + 1) * sizeof(**csrRowIndexDevicePtr))); CUDACHECK(cudaMalloc((void **)csrColIndexDevicePtr, nnz * sizeof(**csrColIndexDevicePtr))); CUDACHECK( cudaMalloc((void **)csrValDevicePtr, 
nnz * sizeof(**csrValDevicePtr))); CUDACHECK(cudaMemcpy(*csrRowIndexDevicePtr, csrRowIndexHostPtr, (size_t)((m + 1) * sizeof(**csrRowIndexDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK(cudaMemcpy(*csrColIndexDevicePtr, csrColIndexHostPtr, (size_t)(nnz * sizeof(**csrColIndexDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK(cudaMemcpy(*csrValDevicePtr, csrValHostPtr, (size_t)(nnz * sizeof(**csrValDevicePtr)), cudaMemcpyHostToDevice)); if (cooColIndexTestHostPtr && cooRowIndexTestHostPtr && cooValTestHostPtr && nnz_test > 0) { CUDACHECK(cudaMalloc((void **)cooRowIndexTestDevicePtr, nnz_test * sizeof(**cooRowIndexTestDevicePtr))); CUDACHECK(cudaMalloc((void **)cooColIndexTestDevicePtr, nnz_test * sizeof(**cooColIndexTestDevicePtr))); CUDACHECK(cudaMalloc((void **)cooValDeviceTestPtr, nnz_test * sizeof(**cooValDeviceTestPtr))); CUDACHECK( cudaMemcpy(*cooRowIndexTestDevicePtr, cooRowIndexTestHostPtr, (size_t)(nnz_test * sizeof(**cooRowIndexTestDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK( cudaMemcpy(*cooColIndexTestDevicePtr, cooColIndexTestHostPtr, (size_t)(nnz_test * sizeof(**cooColIndexTestDevicePtr)), cudaMemcpyHostToDevice)); CUDACHECK(cudaMemcpy(*cooValDeviceTestPtr, cooValTestHostPtr, (size_t)(nnz_test * sizeof(**cooValDeviceTestPtr)), cudaMemcpyHostToDevice)); } return 0; } template <typename T> T factorization_score(const int m, const int n, const int f, const long nnz, const T lambda, T **thetaTDevice, T **XTDevice, int **cooRowIndexDevicePtr, int **cooColIndexDevicePtr, T **cooValDevicePtr) { ALSFactorization<T> factorization(m, n, f, lambda, *thetaTDevice, *XTDevice); return factorization.Score(*cooRowIndexDevicePtr, *cooColIndexDevicePtr, *cooValDevicePtr, nnz); } template <typename T> int run_factorization_step( const int m, const int n, const int f, const long nnz, const T lambda, int **csrRowIndexDevicePtr, int **csrColIndexDevicePtr, T **csrValDevicePtr, int **cscRowIndexDevicePtr, int **cscColIndexDevicePtr, T **cscValDevicePtr, T **thetaTDevice, T **XTDevice, const int X_BATCH, const int THETA_BATCH) { ALSFactorization<T> factorization(m, n, f, lambda, *thetaTDevice, *XTDevice); factorization.Iter(*csrRowIndexDevicePtr, *csrColIndexDevicePtr, *csrValDevicePtr, *cscRowIndexDevicePtr, *cscColIndexDevicePtr, *cscValDevicePtr, nnz, X_BATCH, THETA_BATCH); return 0; } float factorization_score_float(const int m, const int n, const int f, const long nnz, const float lambda, float **thetaTDevice, float **XTDevice, int **cooRowIndexDevicePtr, int **cooColIndexDevicePtr, float **cooValDevicePtr) { return factorization_score<float>(m, n, f, nnz, lambda, thetaTDevice, XTDevice, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr); } double factorization_score_double(const int m, const int n, const int f, const long nnz, const float lambda, double **thetaTDevice, double **XTDevice, int **cooRowIndexDevicePtr, int **cooColIndexDevicePtr, double **cooValDevicePtr) { return 0.0; } int run_factorization_step_double( const int m, const int n, const int f, const long nnz, const double lambda, int **csrRowIndexDevicePtr, int **csrColIndexDevicePtr, double **csrValDevicePtr, int **cscRowIndexDevicePtr, int **cscColIndexDevicePtr, double **cscValDevicePtr, double **thetaTDevice, double **XTDevice, const int X_BATCH, const int THETA_BATCH) { return 1; // TODO: implement // return run_factorization_step<double>(m, n, f, nnz, nnz_test, // csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, // cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, // cooRowIndexDevicePtr, 
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, // cooColIndexTestDevicePtr, cooValTestDevicePtr); } int run_factorization_step_float( const int m, const int n, const int f, const long nnz, const float lambda, int **csrRowIndexDevicePtr, int **csrColIndexDevicePtr, float **csrValDevicePtr, int **cscRowIndexDevicePtr, int **cscColIndexDevicePtr, float **cscValDevicePtr, float **thetaTDevice, float **XTDevice, const int X_BATCH, const int THETA_BATCH) { return run_factorization_step<float>( m, n, f, nnz, lambda, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, thetaTDevice, XTDevice, X_BATCH, THETA_BATCH); } int make_factorization_data_double( const int m, const int n, const int f, const long nnz, const long nnz_test, const int *csrRowIndexHostPtr, const int *csrColIndexHostPtr, const double *csrValHostPtr, const int *cscRowIndexHostPtr, const int *cscColIndexHostPtr, const double *cscValHostPtr, const int *cooRowIndexHostPtr, const int *cooColIndexHostPtr, const double *cooValHostPtr, double *thetaTHost, double *XTHost, const int *cooRowIndexTestHostPtr, const int *cooColIndexTestHostPtr, const double *cooValTestHostPtr, int **csrRowIndexDevicePtr, int **csrColIndexDevicePtr, double **csrValDevicePtr, int **cscRowIndexDevicePtr, int **cscColIndexDevicePtr, double **cscValDevicePtr, int **cooRowIndexDevicePtr, int **cooColIndexDevicePtr, double **cooValDevicePtr, double **thetaTDevice, double **XTDevice, int **cooRowIndexTestDevicePtr, int **cooColIndexTestDevicePtr, double **cooValTestDevicePtr) { return make_factorization_data<double>( m, n, f, nnz, nnz_test, csrRowIndexHostPtr, csrColIndexHostPtr, csrValHostPtr, cscRowIndexHostPtr, cscColIndexHostPtr, cscValHostPtr, cooRowIndexHostPtr, cooColIndexHostPtr, cooValHostPtr, thetaTHost, XTHost, cooRowIndexTestHostPtr, cooColIndexTestHostPtr, cooValTestHostPtr, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr); } int make_factorization_data_float( const int m, const int n, const int f, const long nnz, const long nnz_test, const int *csrRowIndexHostPtr, const int *csrColIndexHostPtr, const float *csrValHostPtr, const int *cscRowIndexHostPtr, const int *cscColIndexHostPtr, const float *cscValHostPtr, const int *cooRowIndexHostPtr, const int *cooColIndexHostPtr, const float *cooValHostPtr, float *thetaTHost, float *XTHost, const int *cooRowIndexTestHostPtr, const int *cooColIndexTestHostPtr, const float *cooValTestHostPtr, int **csrRowIndexDevicePtr, int **csrColIndexDevicePtr, float **csrValDevicePtr, int **cscRowIndexDevicePtr, int **cscColIndexDevicePtr, float **cscValDevicePtr, int **cooRowIndexDevicePtr, int **cooColIndexDevicePtr, float **cooValDevicePtr, float **thetaTDevice, float **XTDevice, int **cooRowIndexTestDevicePtr, int **cooColIndexTestDevicePtr, float **cooValTestDevicePtr) { return make_factorization_data<float>( m, n, f, nnz, nnz_test, csrRowIndexHostPtr, csrColIndexHostPtr, csrValHostPtr, cscRowIndexHostPtr, cscColIndexHostPtr, cscValHostPtr, cooRowIndexHostPtr, cooColIndexHostPtr, cooValHostPtr, thetaTHost, XTHost, cooRowIndexTestHostPtr, cooColIndexTestHostPtr, cooValTestHostPtr, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, cooRowIndexDevicePtr, 
cooColIndexDevicePtr, cooValDevicePtr, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr); }
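// A rough usage sketch for these C-style wrappers on the float path; the host
// arrays, lambda, batch sizes and iteration count below are placeholders the
// caller supplies, and scoring on the held-out (test) COO entries is only one
// typical use of factorization_score_float. Note that the double-precision
// step and score wrappers above are still stubs (they return 1 and 0.0).
//
//   make_factorization_data_float(m, n, f, nnz, nnz_test,
//       /* host CSR, CSC, COO, thetaT, XT and test COO arrays ... */,
//       &csrRowDev, &csrColDev, &csrValDev, &cscRowDev, &cscColDev,
//       &cscValDev, &cooRowDev, &cooColDev, &cooValDev, &thetaTDev, &XTDev,
//       &cooRowTestDev, &cooColTestDev, &cooValTestDev);
//   for (int it = 0; it < iters; ++it) {
//     run_factorization_step_float(m, n, f, nnz, lambda,
//         &csrRowDev, &csrColDev, &csrValDev,
//         &cscRowDev, &cscColDev, &cscValDev,
//         &thetaTDev, &XTDev, X_BATCH, THETA_BATCH);
//     float score = factorization_score_float(m, n, f, nnz_test, lambda,
//         &thetaTDev, &XTDev, &cooRowTestDev, &cooColTestDev, &cooValTestDev);
//   }
//   copy_fecatorization_result_float(xt_host, (const float **)&XTDev, m * f);
//   free_data_float(&thetaTDev);   // and likewise for the remaining buffers
//   free_data_int(&csrRowDev);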
// Another possibility: // #include <torch/all.h> #include <assert.h> #include "type_shim.h" #include "multi_tensor_apply.cuh" #define BLOCK_SIZE 512 #define ILP 4 template<typename T> __device__ __forceinline__ bool is_aligned(T* p){ return ((uint64_t)p) % (ILP*sizeof(T)) == 0; } template<typename T> __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } typedef enum{ MOMENT_MODE_0 =0, // L2 regularization mode MOMENT_MODE_1 =1 // Decoupled weight decay mode } adamMode_t; std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_mp_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::optional<bool> per_tensor_python); using MATH_T = float; template<typename T, typename param_t> struct LAMBStage1Functor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<4>& tl, const float beta1, const float beta2, const float beta3, const int* step_ptr, const int bias_correction, const float epsilon, adamMode_t mode, const float decay, const float* global_grad_norm, const float* max_global_grad_norm, const float* found_inf, const float* inv_scale) { if (*noop_gmem) { return; } float beta1_correction = 1.0f; float beta2_correction = 1.0f; if (bias_correction == 1) { int step = *step_ptr; beta1_correction = 1 - std::pow(beta1, step); beta2_correction = 1 - std::pow(beta2, step); } int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; float clipped_global_grad_norm = (*global_grad_norm) > (*max_global_grad_norm) ? (*global_grad_norm) / (*max_global_grad_norm) : 1.0f; T* g = (T*)tl.addresses[0][tensor_loc]; g += chunk_idx*chunk_size; param_t* p = (param_t*)tl.addresses[1][tensor_loc]; p += chunk_idx*chunk_size; param_t* m = (param_t*)tl.addresses[2][tensor_loc]; m += chunk_idx*chunk_size; param_t* v = (param_t*)tl.addresses[3][tensor_loc]; v += chunk_idx*chunk_size; n -= chunk_idx*chunk_size; MATH_T r_g[ILP]; MATH_T r_p[ILP]; MATH_T r_m[ILP]; MATH_T r_v[ILP]; // to make things simple, we put aligned case in a different code path if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(g) && is_aligned(p) && is_aligned(m) && is_aligned(v)) { T l_g[ILP]; param_t l_p[ILP]; param_t l_m[ILP]; param_t l_v[ILP]; for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x) { // load load_store(l_g, g, 0, i_start); if (decay != 0) load_store(l_p, p, 0, i_start); load_store(l_m, m, 0, i_start); load_store(l_v, v, 0, i_start); // unpack #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_g[ii] = l_g[ii] * (*inv_scale); if (decay == 0) { r_p[ii] = MATH_T(0); } else { r_p[ii] = l_p[ii]; } r_m[ii] = l_m[ii]; r_v[ii] = l_v[ii]; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { if (mode == MOMENT_MODE_0) { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; // L2 on scaled grad scaled_grad = scaled_grad + decay*r_p[ii]; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = next_m_unbiased / denom; } else { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * 
beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]); } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { l_p[ii] = r_p[ii]; // Difference from APEX's LAMB kernel. `g` and `p` can be different dtypes. l_g[ii] = r_p[ii]; l_m[ii] = r_m[ii]; l_v[ii] = r_v[ii]; } // store load_store(g, l_g, i_start, 0); load_store(m, l_m, i_start, 0); load_store(v, l_v, i_start, 0); } } else { // see note in multi_tensor_scale_kernel.cu for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { MATH_T r_g[ILP]; MATH_T r_p[ILP]; MATH_T r_m[ILP]; MATH_T r_v[ILP]; #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { r_g[ii] = g[i] * (*inv_scale); // special ?optimization? for lamb stage 1 if (decay == 0) { r_p[ii] = MATH_T(0); } else { r_p[ii] = p[i]; } r_m[ii] = m[i]; r_v[ii] = v[i]; } else { r_g[ii] = MATH_T(0); r_p[ii] = MATH_T(0); r_m[ii] = MATH_T(0); r_v[ii] = MATH_T(0); } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { if (mode == MOMENT_MODE_0) { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; // L2 on scaled grad scaled_grad = scaled_grad + decay*r_p[ii]; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = next_m_unbiased / denom; } else { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]); } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { g[i] = r_p[ii]; m[i] = r_m[ii]; v[i] = r_v[ii]; } } } } } }; // Step 2 reads in 'update' value and per-tensor param_norm and update_norm. // It computes new parameter value. // N == 2: FP32 params, no master params // N == 3: FP16 params, FP32 master params. template<typename T, int N, typename param_t> struct LAMBStage2Functor { static_assert((N == 2 && std::is_same<T, param_t>::value) || (N == 3 && std::is_same<param_t, float>::value), ""); __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<N>& tl, const float* per_tensor_param_norm, const float* per_tensor_update_norm, const float* learning_rate, const float decay, bool use_nvlamb) { if (*noop_gmem) { return; } int tensor_loc = tl.block_to_tensor[blockIdx.x]; int tensor_num = tl.start_tensor_this_launch + tensor_loc; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; MATH_T ratio = *learning_rate; // nvlamb: apply adaptive learning rate to all parameters // otherwise, only apply to those with non-zero weight decay if (use_nvlamb || (decay != 0.0)) { float param_norm = per_tensor_param_norm[tensor_num]; float update_norm = per_tensor_update_norm[tensor_num]; ratio = (update_norm != 0.0f && param_norm != 0.0f) ? 
*learning_rate * (param_norm / update_norm) : *learning_rate; } T* update = (T*)tl.addresses[0][tensor_loc]; update += chunk_idx*chunk_size; param_t* p = (param_t*)tl.addresses[1][tensor_loc]; p += chunk_idx*chunk_size; T* out_p; if (N == 3) { out_p = (T*)tl.addresses[2][tensor_loc]; out_p += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; // to make things simple, we put aligned case in a different code path bool can_use_aligned_path = n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(p) && is_aligned(update); if (N == 3) { can_use_aligned_path = can_use_aligned_path && is_aligned(out_p); } if(can_use_aligned_path) { param_t r_p[ILP]; T r_update[ILP]; T r_out_p[ILP]; for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x) { // load load_store(r_p, p, 0, i_start); load_store(r_update, update, 0, i_start); if (N == 3) { load_store(r_out_p, out_p, 0, i_start); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * static_cast<MATH_T>(r_update[ii])); if (N == 3) { r_out_p[ii] = r_p[ii]; } } load_store(p, r_p, i_start, 0); if (N == 3) { load_store(out_p, r_out_p, i_start, 0); } } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { MATH_T r_p[ILP]; MATH_T r_update[ILP]; #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { r_p[ii] = p[i]; r_update[ii] = update[i]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_p[ii] = r_p[ii] - (ratio * r_update[ii]); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { p[i] = r_p[ii]; if (N == 3) { out_p[i] = r_p[ii]; } } } } } } }; void multi_tensor_lamb_mp_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::Tensor lr, const float beta1, const float beta2, const float epsilon, at::Tensor step, const int bias_correction, const float weight_decay, const int grad_averaging, const int mode, at::Tensor global_grad_norm, at::Tensor max_grad_norm, at::optional<bool> use_nvlamb_python, at::Tensor found_inf, at::Tensor inv_scale) { // n_tensors == 5: FP16 model params & FP32 master params // n_tensors == 4: FP32 model params & NO FP32 master params const auto n_tensors = tensor_lists.size(); assert(n_tensors == 4 || n_tensors == 5); using namespace at; bool use_nvlamb = use_nvlamb_python.has_value() ? 
use_nvlamb_python.value() : false; // note(mkozuki): move bias handling below to functor // Handle bias correction mode // float bias_correction1 = 1.0f, bias_correction2 = 1.0f; // if (bias_correction == 1) { // bias_correction1 = 1 - std::pow(beta1, step); // bias_correction2 = 1 - std::pow(beta2, step); // } // Handle grad averaging mode float beta3 = 1.0f; if (grad_averaging == 1) beta3 = 1 - beta1; std::vector<std::vector<at::Tensor>> stage1_tensor_lists(tensor_lists.begin(), tensor_lists.begin() + 4); std::vector<std::vector<at::Tensor>> grad_list(tensor_lists.begin(), tensor_lists.begin()+1); std::vector<std::vector<at::Tensor>> param_list(tensor_lists.begin()+1, tensor_lists.begin()+2); // Compute per tensor param norm auto param_norm_tuple = multi_tensor_l2norm_mp_cuda(chunk_size, noop_flag, param_list, true); // We now in-place modify grad to store update before compute its norm // Generally this is not a issue since people modify grad in step() method all the time // We can also grab list of empty tensor to avoid this, but I'd like to save space/cpu code if (n_tensors == 4) { DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1", multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, stage1_tensor_lists, LAMBStage1Functor<scalar_t_0, scalar_t_0>(), beta1, beta2, beta3, // 1-beta1 or 1 depends on averaging mode // bias_correction1, // bias_correction2, step.data_ptr<int>(), bias_correction, epsilon, (adamMode_t) mode, weight_decay, global_grad_norm.data_ptr<float>(), max_grad_norm.data_ptr<float>(), found_inf.data_ptr<float>(), inv_scale.data_ptr<float>()); ) } else { DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1", multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, stage1_tensor_lists, LAMBStage1Functor<scalar_t_0, float>(), beta1, beta2, beta3, // 1-beta1 or 1 depends on averaging mode // bias_correction1, // bias_correction2, step.data_ptr<int>(), bias_correction, epsilon, (adamMode_t) mode, weight_decay, global_grad_norm.data_ptr<float>(), max_grad_norm.data_ptr<float>(), found_inf.data_ptr<float>(), inv_scale.data_ptr<float>()); ) } // Compute update norms auto update_norm_tuple = multi_tensor_l2norm_mp_cuda(chunk_size, noop_flag, grad_list, true); std::vector<std::vector<at::Tensor>> grad_param_list(tensor_lists.begin(), tensor_lists.begin()+2); if (n_tensors == 4) { DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2", multi_tensor_apply<2>( BLOCK_SIZE, chunk_size, noop_flag, grad_param_list, LAMBStage2Functor<scalar_t_0, 2, scalar_t_0>(), std::get<1>(param_norm_tuple).data_ptr<float>(), std::get<1>(update_norm_tuple).data_ptr<float>(), lr.data_ptr<float>(), weight_decay, use_nvlamb); ) } else { grad_param_list.push_back(tensor_lists[4]); DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2", multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, grad_param_list, LAMBStage2Functor<scalar_t_0, 3, float>(), std::get<1>(param_norm_tuple).data_ptr<float>(), std::get<1>(update_norm_tuple).data_ptr<float>(), lr.data_ptr<float>(), weight_decay, use_nvlamb); ) } AT_CUDA_CHECK(cudaGetLastError()); }
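// A minimal host-side reference sketch of the two-stage LAMB math implemented by
// LAMBStage1Functor / LAMBStage2Functor above, for a single FP32 tensor. It mirrors
// MOMENT_MODE_0 (L2 regularization) and MOMENT_MODE_1 (decoupled weight decay),
// applies bias correction unconditionally for brevity, and then applies the stage-2
// trust ratio lr * (||p|| / ||update||). This is an explanatory sketch only, not the
// CUDA path; the names RefAdamMode and lamb_reference_update are ours.
#include <cmath>
#include <cstddef>
#include <vector>

enum class RefAdamMode { kL2 = 0, kDecoupled = 1 };

inline void lamb_reference_update(float* p, float* m, float* v, const float* g,
                                  std::size_t n, float lr, float beta1, float beta2,
                                  float beta3, float eps, float decay, int step,
                                  RefAdamMode mode, float clipped_global_grad_norm,
                                  bool use_nvlamb) {
  const float bc1 = 1.0f - std::pow(beta1, step);  // bias corrections
  const float bc2 = 1.0f - std::pow(beta2, step);
  std::vector<float> update(n);
  // Stage 1: Adam-style update per element.
  for (std::size_t i = 0; i < n; ++i) {
    float grad = g[i] / clipped_global_grad_norm;
    if (mode == RefAdamMode::kL2) grad += decay * p[i];  // L2 on the scaled grad
    m[i] = beta1 * m[i] + beta3 * grad;
    v[i] = beta2 * v[i] + (1.0f - beta2) * grad * grad;
    const float m_hat = m[i] / bc1;
    const float v_hat = v[i] / bc2;
    update[i] = m_hat / (std::sqrt(v_hat) + eps);
    if (mode == RefAdamMode::kDecoupled) update[i] += decay * p[i];
  }
  // Stage 2: trust-ratio scaling, then the parameter step.
  float pn = 0.0f, un = 0.0f;
  for (std::size_t i = 0; i < n; ++i) { pn += p[i] * p[i]; un += update[i] * update[i]; }
  pn = std::sqrt(pn);
  un = std::sqrt(un);
  float ratio = lr;
  if ((use_nvlamb || decay != 0.0f) && pn != 0.0f && un != 0.0f)
    ratio = lr * (pn / un);
  for (std::size_t i = 0; i < n; ++i) p[i] -= ratio * update[i];
}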
// local dependency #include "electric_potential/src/density_function.h" DREAMPLACE_BEGIN_NAMESPACE /// define triangle_density_function template <typename T> inline __device__ DEFINE_TRIANGLE_DENSITY_FUNCTION(T); template <typename T> __global__ void __launch_bounds__(1024, 8) computeElectricForce( int num_bins_x, int num_bins_y, const T *field_map_x_tensor, const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl, T xh, T yh, const T half_bin_size_x, const T half_bin_size_y, const T bin_size_x, const T bin_size_y, const T inv_bin_size_x, const T inv_bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor, const int *sorted_node_map ///< can be NULL if not sorted ) { int index = blockIdx.x * blockDim.z + threadIdx.z; if (index < num_nodes) { int i = (sorted_node_map) ? sorted_node_map[index] : index; // use stretched node size T node_size_x = node_size_x_clamped_tensor[i]; T node_size_y = node_size_y_clamped_tensor[i]; T node_x = x_tensor[i] + offset_x_tensor[i]; T node_y = y_tensor[i] + offset_y_tensor[i]; T ratio = ratio_tensor[i]; // Yibo: looks very weird implementation, but this is how RePlAce implements // it Zixuan and Jiaqi: use the common practice of floor int bin_index_xl = int((node_x - xl) * inv_bin_size_x); int bin_index_xh = int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x); int bin_index_yl = int((node_y - yl) * inv_bin_size_y); int bin_index_yh = int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y); // blockDim.x * blockDim.y threads will be used to update one node // shared memory is used to privatize the atomic memory access to thread // block extern __shared__ unsigned char s_xy[]; T *s_x = (T *)s_xy; T *s_y = s_x + blockDim.z; if (threadIdx.x == 0 && threadIdx.y == 0) { s_x[threadIdx.z] = s_y[threadIdx.z] = 0; } __syncthreads(); T tmp_x, tmp_y; tmp_x = 0; tmp_y = 0; // update density potential map for (int k = bin_index_xl + threadIdx.y; k < bin_index_xh; k += blockDim.y) { T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x); for (int h = bin_index_yl + threadIdx.x; h < bin_index_yh; h += blockDim.x) { T py = triangle_density_function(node_y, node_size_y, yl, h, bin_size_y); T area = px * py; int idx = k * num_bins_y + h; tmp_x += area * field_map_x_tensor[idx]; tmp_y += area * field_map_y_tensor[idx]; } } atomicAdd(&s_x[threadIdx.z], tmp_x * ratio); atomicAdd(&s_y[threadIdx.z], tmp_y * ratio); __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { grad_x_tensor[i] = s_x[threadIdx.z]; grad_y_tensor[i] = s_y[threadIdx.z]; } } } /// @brief An unrolled way to compute the force. /// Currently it is not as efficient as computeElectricForce, /// it has the potential to be better. /// It is not used for now. 
template <typename T> __global__ void computeElectricForceUnroll( int num_bins_x, int num_bins_y, const T *field_map_x_tensor, const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl, T xh, T yh, const T half_bin_size_x, const T half_bin_size_y, const T bin_size_x, const T bin_size_y, const T inv_bin_size_x, const T inv_bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor, const int *sorted_node_map ///< can be NULL if not sorted ) { int index = blockIdx.x * blockDim.y + threadIdx.y; if (index < num_nodes) { int i = (sorted_node_map) ? sorted_node_map[index] : index; // stretch node size to bin size T node_size_x = node_size_x_clamped_tensor[i]; T node_size_y = node_size_y_clamped_tensor[i]; T node_x = x_tensor[i] + offset_x_tensor[i]; T node_y = y_tensor[i] + offset_y_tensor[i]; T ratio = ratio_tensor[i]; // Yibo: looks very weird implementation, but this is how RePlAce implements // it Zixuan and Jiaqi: use the common practice of floor int bin_index_xl = int((node_x - xl) * inv_bin_size_x); int bin_index_xh = int(((node_x + node_size_x - xl) * inv_bin_size_x)); // inclusive bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x - 1); int bin_index_yl = int((node_y - yl) * inv_bin_size_y); int bin_index_yh = int(((node_y + node_size_y - yl) * inv_bin_size_y)); // inclusive bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y - 1); int k, h; int cond = ((bin_index_xl == bin_index_xh) << 1) | (bin_index_yl == bin_index_yh); switch (cond) { case 0: { // blockDim.x threads will be used to update one node // shared memory is used to privatize the atomic memory access to thread // block extern __shared__ unsigned char shared_memory[]; T *s_x = (T *)shared_memory; T *s_y = s_x + blockDim.y; if (threadIdx.x == 0) { s_x[threadIdx.y] = s_y[threadIdx.y] = 0; } __syncthreads(); T tmp_x = 0; T tmp_y = 0; T px_c = bin_size_x; T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; T py_c = bin_size_y; T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl); T area_xc_yl = px_c * py_l; T area_xc_yc = px_c * py_c; T area_xc_yh = px_c * py_h; k = bin_index_xl; if (threadIdx.x == 0) { T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; T area_xl_yl = px_l * py_l; T area_xl_yc = px_l * py_c; T area_xl_yh = px_l * py_h; h = bin_index_yl; tmp_x = area_xl_yl * field_map_x_tensor[k * num_bins_y + h]; tmp_y = area_xl_yl * field_map_y_tensor[k * num_bins_y + h]; for (++h; h < bin_index_yh; ++h) { tmp_x += area_xl_yc * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xl_yc * field_map_y_tensor[k * num_bins_y + h]; } tmp_x += area_xl_yh * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xl_yh * field_map_y_tensor[k * num_bins_y + h]; k += blockDim.x; } for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) { h = bin_index_yl; tmp_x += area_xc_yl * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xc_yl * field_map_y_tensor[k * num_bins_y + h]; for (++h; h < bin_index_yh; ++h) { tmp_x += area_xc_yc * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xc_yc * field_map_y_tensor[k * num_bins_y + h]; } tmp_x += area_xc_yh * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xc_yh 
* field_map_y_tensor[k * num_bins_y + h]; } if (k == bin_index_xh) { T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl); T area_xh_yl = px_h * py_l; T area_xh_yc = px_h * py_c; T area_xh_yh = px_h * py_h; h = bin_index_yl; tmp_x += area_xh_yl * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xh_yl * field_map_y_tensor[k * num_bins_y + h]; for (++h; h < bin_index_yh; ++h) { tmp_x += area_xh_yc * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xh_yc * field_map_y_tensor[k * num_bins_y + h]; } tmp_x += area_xh_yh * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xh_yh * field_map_y_tensor[k * num_bins_y + h]; } atomicAdd(&s_x[threadIdx.y], tmp_x * ratio); atomicAdd(&s_y[threadIdx.y], tmp_y * ratio); __syncthreads(); if (threadIdx.x == 0) { grad_x_tensor[i] = s_x[threadIdx.y]; grad_y_tensor[i] = s_y[threadIdx.y]; } return; } case 1: { extern __shared__ unsigned char shared_memory[]; T *s_x = (T *)shared_memory; T *s_y = s_x + blockDim.y; if (threadIdx.x == 0) { s_x[threadIdx.y] = s_y[threadIdx.y] = 0; } __syncthreads(); T tmp_x = 0; T tmp_y = 0; T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; h = bin_index_yl; k = bin_index_xl; if (threadIdx.x == 0) { T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; T area_xl = px_l * py; tmp_x = area_xl * field_map_x_tensor[k * num_bins_y + h]; tmp_y = area_xl * field_map_y_tensor[k * num_bins_y + h]; k += blockDim.x; } T px_c = bin_size_x; T area_xc = px_c * py; for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) { tmp_x += area_xc * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xc * field_map_y_tensor[k * num_bins_y + h]; } if (k == bin_index_xh) { T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl); T area_xh = px_h * py; tmp_x += area_xh * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_xh * field_map_y_tensor[k * num_bins_y + h]; } atomicAdd(&s_x[threadIdx.y], tmp_x * ratio); atomicAdd(&s_y[threadIdx.y], tmp_y * ratio); __syncthreads(); if (threadIdx.x == 0) { grad_x_tensor[i] = s_x[threadIdx.y]; grad_y_tensor[i] = s_y[threadIdx.y]; } return; } case 2: { extern __shared__ unsigned char shared_memory[]; T *s_x = (T *)shared_memory; T *s_y = s_x + blockDim.y; if (threadIdx.x == 0) { s_x[threadIdx.y] = s_y[threadIdx.y] = 0; } __syncthreads(); T tmp_x = 0; T tmp_y = 0; T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; k = bin_index_xl; h = bin_index_yl; if (threadIdx.x == 0) { T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; T area_yl = px * py_l; tmp_x = area_yl * field_map_x_tensor[k * num_bins_y + h]; tmp_y = area_yl * field_map_y_tensor[k * num_bins_y + h]; h += blockDim.x; } T py_c = bin_size_y; T area_yc = px * py_c; for (h += threadIdx.x; h < bin_index_yh; h += blockDim.x) { tmp_x += area_yc * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_yc * field_map_y_tensor[k * num_bins_y + h]; } if (h == bin_index_yh) { T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl); T area_yh = px * py_h; tmp_x += area_yh * field_map_x_tensor[k * num_bins_y + h]; tmp_y += area_yh * field_map_y_tensor[k * num_bins_y + h]; } atomicAdd(&s_x[threadIdx.y], tmp_x * ratio); atomicAdd(&s_y[threadIdx.y], tmp_y * ratio); __syncthreads(); if (threadIdx.x == 0) { grad_x_tensor[i] = s_x[threadIdx.y]; grad_y_tensor[i] = s_y[threadIdx.y]; } return; } case 3: { if (threadIdx.x == 0) { T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x; T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y; T area_by_ratio = px * py * ratio; k = 
bin_index_xl; h = bin_index_yl; grad_x_tensor[i] = area_by_ratio * field_map_x_tensor[k * num_bins_y + h]; grad_y_tensor[i] = area_by_ratio * field_map_y_tensor[k * num_bins_y + h]; } return; } default: assert(0); } } } template <typename T> __global__ void computeElectricForceSimpleLikeCPU( int num_bins_x, int num_bins_y, int num_impacted_bins_x, int num_impacted_bins_y, const T *field_map_x_tensor, const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl, T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor) { // density_map_tensor should be initialized outside T inv_bin_size_x = 1.0 / bin_size_x; T inv_bin_size_y = 1.0 / bin_size_y; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < num_nodes) { // use stretched node size T node_size_x = node_size_x_clamped_tensor[i]; T node_size_y = node_size_y_clamped_tensor[i]; T node_x = x_tensor[i] + offset_x_tensor[i]; T node_y = y_tensor[i] + offset_y_tensor[i]; T ratio = ratio_tensor[i]; // Yibo: looks very weird implementation, but this is how RePlAce implements // it the common practice should be floor Zixuan and Jiaqi: use the common // practice of floor int bin_index_xl = int((node_x - xl) * inv_bin_size_x); int bin_index_xh = int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0); bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x); // int bin_index_xh = bin_index_xl+num_impacted_bins_x; // Yibo: looks very weird implementation, but this is how RePlAce implements // it the common practice should be floor Zixuan and Jiaqi: use the common // practice of floor int bin_index_yl = int((node_y - yl) * inv_bin_size_y); int bin_index_yh = int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0); bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y); // int bin_index_yh = bin_index_yl+num_impacted_bins_y; T &gx = grad_x_tensor[i]; T &gy = grad_y_tensor[i]; gx = 0; gy = 0; // update density potential map for (int k = bin_index_xl; k < bin_index_xh; ++k) { T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x); for (int h = bin_index_yl; h < bin_index_yh; ++h) { T py = triangle_density_function(node_y, node_size_y, yl, h, bin_size_y); T area = px * py; int idx = k * num_bins_y + h; gx += area * field_map_x_tensor[idx]; gy += area * field_map_y_tensor[idx]; } } gx *= ratio; gy *= ratio; } } template <typename T> int computeElectricForceCudaLauncher( int num_bins_x, int num_bins_y, int num_impacted_bins_x, int num_impacted_bins_y, const T *field_map_x_tensor, const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor, const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor, const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor, const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl, T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor, const int *sorted_node_map) { int thread_count = 64; dim3 blockSize(2, 2, thread_count); size_t shared_mem_size = sizeof(T) * thread_count * 2; int block_count_nodes = (num_nodes + thread_count - 1) / thread_count; computeElectricForce<<<block_count_nodes, 
blockSize, shared_mem_size>>>( num_bins_x, num_bins_y, field_map_x_tensor, field_map_y_tensor, x_tensor, y_tensor, node_size_x_clamped_tensor, node_size_y_clamped_tensor, offset_x_tensor, offset_y_tensor, ratio_tensor, bin_center_x_tensor, bin_center_y_tensor, xl, yl, xh, yh, bin_size_x / 2, bin_size_y / 2, bin_size_x, bin_size_y, 1 / bin_size_x, 1 / bin_size_y, num_nodes, grad_x_tensor, grad_y_tensor, sorted_node_map); // computeElectricForceSimpleLikeCPU<<<block_count_nodes, thread_count>>>( // num_bins_x, num_bins_y, // num_impacted_bins_x, num_impacted_bins_y, // field_map_x_tensor, field_map_y_tensor, // x_tensor, y_tensor, // node_size_x_clamped_tensor, node_size_y_clamped_tensor, // offset_x_tensor, offset_y_tensor, // ratio_tensor, // bin_center_x_tensor, bin_center_y_tensor, // xl, yl, xh, yh, // bin_size_x, bin_size_y, // num_nodes, // grad_x_tensor, grad_y_tensor // ); return 0; } #define REGISTER_KERNEL_LAUNCHER(T) \ template int computeElectricForceCudaLauncher<T>( \ int num_bins_x, int num_bins_y, int num_impacted_bins_x, \ int num_impacted_bins_y, const T *field_map_x_tensor, \ const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor, \ const T *node_size_x_clamped_tensor, \ const T *node_size_y_clamped_tensor, const T *offset_x_tensor, \ const T *offset_y_tensor, const T *ratio_tensor, \ const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl, \ T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor, \ T *grad_y_tensor, const int *sorted_node_map); REGISTER_KERNEL_LAUNCHER(float); REGISTER_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
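// A compact host-side sketch of what computeElectricForceSimpleLikeCPU above does
// for one node: clamp the node's bin range with the exclusive-upper-bound floor
// convention used in the kernels, accumulate overlap-area-weighted field values,
// and scale by the node's ratio. bin_overlap_1d and electric_force_one_node are
// our stand-in names; bin_overlap_1d assumes triangle_density_function reduces to
// a plain 1D interval overlap for the clamped node sizes used here (see
// density_function.h for the exact definition).
#include <algorithm>

template <typename T>
static T bin_overlap_1d(T x, T size, T xl, int k, T bin_size) {
  T bin_lo = xl + k * bin_size;
  T bin_hi = bin_lo + bin_size;
  return std::max(T(0), std::min(x + size, bin_hi) - std::max(x, bin_lo));
}

template <typename T>
void electric_force_one_node(T node_x, T node_y, T node_size_x, T node_size_y, T ratio,
                             T xl, T yl, T bin_size_x, T bin_size_y,
                             int num_bins_x, int num_bins_y,
                             const T* field_map_x, const T* field_map_y,
                             T& grad_x, T& grad_y) {
  int kxl = std::max(int((node_x - xl) / bin_size_x), 0);
  int kxh = std::min(int((node_x + node_size_x - xl) / bin_size_x) + 1, num_bins_x);  // exclusive
  int kyl = std::max(int((node_y - yl) / bin_size_y), 0);
  int kyh = std::min(int((node_y + node_size_y - yl) / bin_size_y) + 1, num_bins_y);  // exclusive
  grad_x = 0;
  grad_y = 0;
  for (int k = kxl; k < kxh; ++k) {
    T px = bin_overlap_1d(node_x, node_size_x, xl, k, bin_size_x);
    for (int h = kyl; h < kyh; ++h) {
      T py = bin_overlap_1d(node_y, node_size_y, yl, h, bin_size_y);
      T area = px * py;
      grad_x += area * field_map_x[k * num_bins_y + h];
      grad_y += area * field_map_y[k * num_bins_y + h];
    }
  }
  grad_x *= ratio;
  grad_y *= ratio;
}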
#include <array> #include <functional> #include <numeric> #include <vector> #include "caffe2/core/context_gpu.h" #include "caffe2/core/cudnn_wrappers.h" #include "caffe2/operators/spatial_batch_norm_op_impl.cuh" #include "caffe2/utils/math.h" #if CUDNN_VERSION_MIN(5, 0, 0) namespace caffe2 { namespace { void SetTensorDescriptor( const cudnnDataType_t data_type, const cudnnBatchNormMode_t mode, const StorageOrder order, const std::vector<int>& input_dims, cudnnTensorDescriptor_t data_desc, cudnnTensorDescriptor_t param_desc) { const int ndim = input_dims.size(); const int N = input_dims[0]; const int C = order == StorageOrder::NCHW ? input_dims[1] : input_dims.back(); if (ndim == 3) { const int H = 1; const int W = order == StorageOrder::NCHW ? input_dims[2] : input_dims[1]; CUDNN_ENFORCE(cudnnSetTensor4dDescriptor( data_desc, GetCudnnTensorFormat(order), data_type, N, C, H, W)); } else if (ndim == 4) { const int H = order == StorageOrder::NCHW ? input_dims[2] : input_dims[1]; const int W = order == StorageOrder::NCHW ? input_dims[3] : input_dims[2]; CUDNN_ENFORCE(cudnnSetTensor4dDescriptor( data_desc, GetCudnnTensorFormat(order), data_type, N, C, H, W)); } else { const int H = order == StorageOrder::NCHW ? input_dims[2] : input_dims[1]; const int W = order == StorageOrder::NCHW ? input_dims[3] : input_dims[2]; const auto l_iter = order == StorageOrder::NCHW ? input_dims.cbegin() + 4 : input_dims.cbegin() + 3; const auto r_iter = order == StorageOrder::NCHW ? input_dims.cend() : input_dims.cend() - 1; const int D = std::accumulate(l_iter, r_iter, 1, std::multiplies<int>()); const std::array<int, 5> dims = {N, C, H, W, D}; const std::array<int, 5> strides = order == StorageOrder::NCHW ? std::array<int, 5>{C * H * W * D, H * W * D, W * D, D, 1} : std::array<int, 5>{C * H * W * D, 1, W * D * C, D * C, C}; CUDNN_ENFORCE(cudnnSetTensorNdDescriptor( data_desc, data_type, 5, dims.data(), strides.data())); } CUDNN_ENFORCE(cudnnDeriveBNTensorDescriptor(param_desc, data_desc, mode)); } } // namespace class CuDNNSpatialBNOp final : public SpatialBNOp<CUDAContext> { public: USE_OPERATOR_FUNCTIONS(CUDAContext); CuDNNSpatialBNOp(const OperatorDef& operator_def, Workspace* ws) : SpatialBNOp<CUDAContext>(operator_def, ws), cudnn_wrapper_(&context_), #if CUDNN_VERSION_MIN(7, 0, 0) // TODO(T31829456): The new CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode was // introduced in CuDNN 7 for performance optimization, but it results in // accuracy losses in convolution models such as ResNeXt-101 and // video R(2+1)D. We will fall back to the normal // CUDNN_BATCHNORM_SPATIAL for now mode_(CUDNN_BATCHNORM_SPATIAL) { #else mode_(CUDNN_BATCHNORM_SPATIAL) { #endif CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&data_desc_)); CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&param_desc_)); if (epsilon_ < CUDNN_BN_MIN_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than CUDNN_BN_MIN_EPSILON. " "Setting it to CUDNN_BN_MIN_EPSILON instead."; epsilon_ = CUDNN_BN_MIN_EPSILON; } } ~CuDNNSpatialBNOp() { CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_)); CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(param_desc_)); } bool RunOnDevice() override { // CuDNN doesn't support multi-batch SpatialBN and it's NHWC order SpatialBN // is much slower, so in such cases fallback to SpatialBNOp<CUDAContext>. 
if (num_batches_ > 1 || order_ == StorageOrder::NHWC) { return SpatialBNOp<CUDAContext>::RunOnDevice(); } return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0)); } template <typename T> bool DoRunWithType() { typedef typename cudnnTypeWrapper<T>::BNParamType BNParamType; const auto& X = Input(INPUT); const auto& scale = Input(SCALE); const auto& bias = Input(BIAS); auto* Y = Output(OUTPUT); const int ndim = X.ndim(); CAFFE_ENFORCE_GE(ndim, 3); const int N = X.dim32(0); const int C = (order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1)); CAFFE_ENFORCE_EQ(scale.size(), C); CAFFE_ENFORCE_EQ(bias.size(), C); Y->ResizeLike(X); const T* X_data = X.data<T>(); const BNParamType* scale_data = scale.data<BNParamType>(); const BNParamType* bias_data = bias.data<BNParamType>(); T* Y_data = Y->mutable_data<T>(); if (N > 0) { const std::vector<int> input_dims(X.sizes().cbegin(), X.sizes().cend()); if (input_dims != data_dims_) { data_dims_ = input_dims; SetTensorDescriptor( cudnnTypeWrapper<T>::type, mode_, order_, input_dims, data_desc_, param_desc_); } } if (is_test_) { const auto& mean = Input(EST_MEAN); const auto& var = Input(EST_VAR); CAFFE_ENFORCE_EQ(mean.size(), C); CAFFE_ENFORCE_EQ(var.size(), C); if (N == 0) { return true; } CUDNN_ENFORCE(cudnnBatchNormalizationForwardInference( cudnn_wrapper_.inline_cudnn_handle(), // Note: PERSISTENT not implemented for inference CUDNN_BATCHNORM_SPATIAL, cudnnTypeWrapper<T>::kOne(), cudnnTypeWrapper<T>::kZero(), data_desc_, X_data, data_desc_, Y_data, param_desc_, scale_data, bias_data, mean.data<BNParamType>(), var.data<BNParamType>(), epsilon_)); } else { auto* saved_mean = Output(SAVED_MEAN); auto* saved_inv_std = Output(SAVED_INV_STD); saved_mean->Resize(C); saved_inv_std->Resize(C); BNParamType* saved_mean_data = saved_mean->mutable_data<BNParamType>(); BNParamType* saved_inv_std_data = saved_inv_std->mutable_data<BNParamType>(); auto* running_mean = Output(RUNNING_MEAN); auto* running_var = Output(RUNNING_VAR); if (running_mean->size() != C) { running_mean->Resize(C); math::Set<BNParamType, CUDAContext>( C, BNParamType(0), running_mean->mutable_data<BNParamType>(), &context_); } if (running_var->size() != C) { running_var->Resize(C); math::Set<BNParamType, CUDAContext>( C, BNParamType(0), running_var->mutable_data<BNParamType>(), &context_); } BNParamType* running_mean_data = running_mean->mutable_data<BNParamType>(); BNParamType* running_var_data = running_var->mutable_data<BNParamType>(); if (N == 0) { math::Set<BNParamType, CUDAContext>( C, BNParamType(0), saved_mean_data, &context_); math::Set<BNParamType, CUDAContext>( C, BNParamType(0), saved_inv_std_data, &context_); return true; } const double alpha = static_cast<double>(1.0f - momentum_); CUDNN_ENFORCE(cudnnBatchNormalizationForwardTraining( cudnn_wrapper_.inline_cudnn_handle(), mode_, cudnnTypeWrapper<T>::kOne(), cudnnTypeWrapper<T>::kZero(), data_desc_, X_data, data_desc_, Y_data, param_desc_, scale_data, bias_data, alpha, running_mean_data, running_var_data, epsilon_, saved_mean_data, saved_inv_std_data)); } return true; } private: CuDNNWrapper cudnn_wrapper_; cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t param_desc_; cudnnBatchNormMode_t mode_; std::vector<int> data_dims_; }; class CuDNNSpatialBNGradientOp final : public SpatialBNGradientOp<CUDAContext> { public: USE_OPERATOR_FUNCTIONS(CUDAContext); CuDNNSpatialBNGradientOp(const OperatorDef& operator_def, Workspace* ws) : SpatialBNGradientOp<CUDAContext>(operator_def, ws), 
cudnn_wrapper_(&context_), #if CUDNN_VERSION_MIN(7, 0, 0) // TODO(T31829456): The new CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode was // introduced in CuDNN 7 for performance optimization, but it results in // accuracy losses in convolution models such as ResNeXt-101 and // video R(2+1)D. We will fall back to the normal // CUDNN_BATCHNORM_SPATIAL for now mode_(CUDNN_BATCHNORM_SPATIAL) { #else mode_(CUDNN_BATCHNORM_SPATIAL) { #endif CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&data_desc_)); CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&param_desc_)); if (epsilon_ < CUDNN_BN_MIN_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than CUDNN_BN_MIN_EPSILON. " "Setting it to CUDNN_BN_MIN_EPSILON instead."; epsilon_ = CUDNN_BN_MIN_EPSILON; } } ~CuDNNSpatialBNGradientOp() { CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_)); CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(param_desc_)); } bool RunOnDevice() override { // CuDNN doesn't support multi-batch SpatialBN and it's NHWC order SpatialBN // is much slower, so in such cases fallback to SpatialBNOp<CUDAContext>. if (num_batches_ > 1 || order_ == StorageOrder::NHWC) { return SpatialBNGradientOp<CUDAContext>::RunOnDevice(); } return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0)); } template <typename T> bool DoRunWithType() { typedef typename cudnnTypeWrapper<T>::BNParamType BNParamType; const auto& X = Input(INPUT); const auto& scale = Input(SCALE); const auto& dY = Input(OUTPUT_GRAD); const auto& saved_mean = Input(SAVED_MEAN); const auto& saved_rstd = Input(SAVED_INV_STD); auto* dX = Output(INPUT_GRAD); auto* dscale = Output(SCALE_GRAD); auto* dbias = Output(BIAS_GRAD); const int ndim = X.ndim(); CAFFE_ENFORCE_GE(ndim, 3); const int N = X.dim32(0); const int C = (order_ == StorageOrder::NCHW ? 
X.dim32(1) : X.dim32(ndim - 1)); CAFFE_ENFORCE_EQ(scale.size(), C); CAFFE_ENFORCE_EQ(saved_mean.size(), C); CAFFE_ENFORCE_EQ(saved_rstd.size(), C); dX->ResizeLike(X); dscale->ResizeLike(scale); dbias->ResizeLike(scale); const T* X_data = X.template data<T>(); const T* scale_data = scale.template data<T>(); const T* dY_data = dY.template data<T>(); const BNParamType* saved_mean_data = saved_mean.template data<BNParamType>(); const BNParamType* saved_rstd_data = saved_rstd.template data<BNParamType>(); T* dX_data = dX->template mutable_data<T>(); BNParamType* dscale_data = dscale->template mutable_data<BNParamType>(); BNParamType* dbias_data = dbias->template mutable_data<BNParamType>(); if (N == 0) { math::Set<BNParamType, CUDAContext>( C, BNParamType(0), dscale_data, &context_); math::Set<BNParamType, CUDAContext>( C, BNParamType(0), dbias_data, &context_); return true; } const std::vector<int> input_dims(X.sizes().cbegin(), X.sizes().cend()); if (input_dims != data_dims_) { data_dims_ = input_dims; SetTensorDescriptor( cudnnTypeWrapper<T>::type, mode_, order_, input_dims, data_desc_, param_desc_); } CUDNN_ENFORCE(cudnnBatchNormalizationBackward( cudnn_wrapper_.inline_cudnn_handle(), mode_, cudnnTypeWrapper<T>::kOne(), cudnnTypeWrapper<T>::kZero(), cudnnTypeWrapper<T>::kOne(), cudnnTypeWrapper<T>::kZero(), data_desc_, X_data, data_desc_, dY_data, data_desc_, dX_data, param_desc_, scale_data, dscale_data, dbias_data, epsilon_, saved_mean_data, saved_rstd_data)); return true; } private: CuDNNWrapper cudnn_wrapper_; cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t param_desc_; cudnnBatchNormMode_t mode_; // TODO: int -> int64_t std::vector<int> data_dims_; }; REGISTER_CUDNN_OPERATOR(SpatialBN, CuDNNSpatialBNOp); REGISTER_CUDNN_OPERATOR(SpatialBNGradient, CuDNNSpatialBNGradientOp); } // namespace caffe2 #endif // CUDNN_VERSION_MIN(5, 0, 0)
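// A small host-side illustration of how the training path above maps Caffe2's
// momentum_ onto cuDNN's exponentialAverageFactor. Under the usual reading of the
// cuDNN docs, the library updates
//   running = (1 - factor) * running + factor * batch_stat,
// so passing alpha = 1 - momentum_ reproduces
//   running = momentum_ * running + (1 - momentum_) * batch_stat.
// This is an explanatory sketch under that assumption, not code from the operator;
// the helper name update_running_stats is ours.
#include <cstddef>
#include <vector>

static void update_running_stats(std::vector<float>& running_mean,
                                 std::vector<float>& running_var,
                                 const std::vector<float>& batch_mean,
                                 const std::vector<float>& batch_var,
                                 float momentum) {
  const float factor = 1.0f - momentum;  // what the op passes as `alpha`
  for (std::size_t c = 0; c < running_mean.size(); ++c) {
    running_mean[c] = (1.0f - factor) * running_mean[c] + factor * batch_mean[c];
    running_var[c]  = (1.0f - factor) * running_var[c]  + factor * batch_var[c];
  }
}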
#include <cuda_runtime_api.h> #include <cuda.h> #include <cuComplex.h> #include "num/gpu_reduce.h" #include "num/multind.h" #define CFL_SIZE 8 #define FL_SIZE 4 #define BLOCKSIZE 1024 static long gridsizeX(long N, unsigned int blocksize) { return (N + blocksize - 1) / blocksize; } static unsigned int gridsizeY(long N, unsigned int blocksize) { return (N + blocksize - 1) / blocksize; } #define MIN(a, b) ((a < b) ? a : b) #define MAX(a, b) ((a > b) ? a : b) __device__ static __inline__ cuFloatComplex dev_zadd(cuFloatComplex arg1, cuFloatComplex arg2) { return cuCaddf(arg1, arg2); } __device__ static __inline__ void dev_atomic_zadd(cuFloatComplex* arg, cuFloatComplex val) { atomicAdd(&(arg->x), val.x); atomicAdd(&(arg->y), val.y); } __global__ static void kern_reduce_zadd_outer(long dim_reduce, long dim_batch, cuFloatComplex* dst, const cuFloatComplex* src) { extern __shared__ cuFloatComplex sdata_c[]; int tidx = threadIdx.x; int tidy = threadIdx.y; int idxx = blockIdx.x * blockDim.x + threadIdx.x; int idxy = blockIdx.y * blockDim.y + threadIdx.y; for (long ix = idxx; ix < dim_batch; ix += gridDim.x * blockDim.x){ sdata_c[tidy * blockDim.x + tidx] = src[ idxy * dim_batch + ix]; for (long j = blockDim.y * gridDim.y + idxy; j < dim_reduce; j += blockDim.y * gridDim.y) sdata_c[tidy * blockDim.x + tidx] = dev_zadd(sdata_c[tidy * blockDim.x + tidx], src[j * dim_batch + ix]); __syncthreads(); for (unsigned int s = blockDim.y / 2; s > 0; s >>= 1){ if (tidy < s) sdata_c[tidy * blockDim.x + tidx] = dev_zadd(sdata_c[tidy * blockDim.x + tidx], sdata_c[(tidy + s) * blockDim.x + tidx]); __syncthreads(); } if (0 == tidy) dev_atomic_zadd(dst + ix, sdata_c[tidx]); } } extern "C" void cuda_reduce_zadd_outer(long dim_reduce, long dim_batch, _Complex float* dst, const _Complex float* src) { long maxBlockSizeX_dim = 1; while (maxBlockSizeX_dim < dim_batch) maxBlockSizeX_dim *= 2; long maxBlockSizeY_dim = 1; while (8 * maxBlockSizeY_dim < dim_reduce) maxBlockSizeY_dim *= 2; long maxBlockSizeX_gpu = 32; unsigned int blockSizeX = MIN(maxBlockSizeX_gpu, maxBlockSizeX_dim); unsigned int blockSizeY = MIN(maxBlockSizeY_dim, BLOCKSIZE / blockSizeX); dim3 blockDim(blockSizeX, blockSizeY); dim3 gridDim(gridsizeX(dim_batch, blockSizeX), gridsizeY(maxBlockSizeY_dim, blockSizeY)); kern_reduce_zadd_outer<<<gridDim, blockDim, blockSizeX * blockSizeY * CFL_SIZE>>>(dim_reduce, dim_batch, (cuFloatComplex*)dst, (const cuFloatComplex*)src); } __global__ static void kern_reduce_zadd_inner(long dim_reduce, long dim_batch, cuFloatComplex* dst, const cuFloatComplex* src) { extern __shared__ cuFloatComplex sdata_c[]; int tidx = threadIdx.x; int tidy = threadIdx.y; int idxx = blockIdx.x * blockDim.x + threadIdx.x; int idxy = blockIdx.y * blockDim.y + threadIdx.y; for (long iy = idxy; iy < dim_batch; iy += gridDim.y * blockDim.y){ sdata_c[tidy * blockDim.x + tidx] = src[ idxx + dim_reduce * iy]; //printf("%d %ld\n", idxx, iy); for (long j = blockDim.x * gridDim.x + idxx; j < dim_reduce; j += blockDim.x * gridDim.x) sdata_c[tidy * blockDim.x + tidx] = dev_zadd(sdata_c[tidy * blockDim.x + tidx], src[j + dim_reduce * iy]); __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1){ if (tidx < s) sdata_c[tidy * blockDim.x + tidx] = dev_zadd(sdata_c[tidy * blockDim.x + tidx], sdata_c[tidy * blockDim.x + tidx + s]); __syncthreads(); } if (0 == tidx) dev_atomic_zadd(dst + iy, sdata_c[tidy * blockDim.x]); } } extern "C" void cuda_reduce_zadd_inner(long dim_reduce, long dim_batch, _Complex float* dst, const _Complex float* src) { long 
maxBlockSizeX_dim = 1; while (8 * maxBlockSizeX_dim < dim_reduce) maxBlockSizeX_dim *= 2; long maxBlockSizeY_dim = 1; while (maxBlockSizeY_dim < dim_batch) maxBlockSizeY_dim *= 2; long maxBlockSizeX_gpu = 32; unsigned int blockSizeX = MIN(maxBlockSizeX_gpu, maxBlockSizeX_dim); unsigned int blockSizeY = MIN(maxBlockSizeY_dim, BLOCKSIZE / blockSizeX); dim3 blockDim(blockSizeX, blockSizeY); dim3 gridDim(gridsizeX(maxBlockSizeX_dim, blockSizeX), gridsizeY(dim_batch, blockSizeY)); kern_reduce_zadd_inner<<<gridDim, blockDim, blockSizeX * blockSizeY * CFL_SIZE>>>(dim_reduce, dim_batch, (cuFloatComplex*)dst, (const cuFloatComplex*)src); } __device__ static __inline__ float dev_add(float arg1, float arg2) { return arg1 + arg2; } __device__ static __inline__ void dev_atomic_add(float* arg, float val) { atomicAdd(arg, val); } __global__ static void kern_reduce_add_outer(long dim_reduce, long dim_batch, float* dst, const float* src) { extern __shared__ float sdata_s[]; int tidx = threadIdx.x; int tidy = threadIdx.y; int idxx = blockIdx.x * blockDim.x + threadIdx.x; int idxy = blockIdx.y * blockDim.y + threadIdx.y; for (long ix = idxx; ix < dim_batch; ix += gridDim.x * blockDim.x){ sdata_s[tidy * blockDim.x + tidx] = src[ idxy * dim_batch + ix]; for (long j = blockDim.y * gridDim.y + idxy; j < dim_reduce; j += blockDim.y * gridDim.y) sdata_s[tidy * blockDim.x + tidx] = dev_add(sdata_s[tidy * blockDim.x + tidx], src[j * dim_batch + ix]); __syncthreads(); for (unsigned int s = blockDim.y / 2; s > 0; s >>= 1){ if (tidy < s) sdata_s[tidy * blockDim.x + tidx] = dev_add(sdata_s[tidy * blockDim.x + tidx], sdata_s[(tidy + s) * blockDim.x + tidx]); __syncthreads(); } if (0 == tidy) dev_atomic_add(dst + ix, sdata_s[tidx]); } } extern "C" void cuda_reduce_add_outer(long dim_reduce, long dim_batch, float* dst, const float* src) { long maxBlockSizeX_dim = 1; while (maxBlockSizeX_dim < dim_batch) maxBlockSizeX_dim *= 2; long maxBlockSizeY_dim = 1; while (8 * maxBlockSizeY_dim < dim_reduce) maxBlockSizeY_dim *= 2; long maxBlockSizeX_gpu = 32; unsigned int blockSizeX = MIN(maxBlockSizeX_gpu, maxBlockSizeX_dim); unsigned int blockSizeY = MIN(maxBlockSizeY_dim, BLOCKSIZE / blockSizeX); dim3 blockDim(blockSizeX, blockSizeY); dim3 gridDim(gridsizeX(dim_batch, blockSizeX), gridsizeY(maxBlockSizeY_dim, blockSizeY)); kern_reduce_add_outer<<<gridDim, blockDim, blockSizeX * blockSizeY * FL_SIZE>>>(dim_reduce, dim_batch, dst, src); } __global__ static void kern_reduce_add_inner(long dim_reduce, long dim_batch, float* dst, const float* src) { extern __shared__ float sdata_s[]; int tidx = threadIdx.x; int tidy = threadIdx.y; int idxx = blockIdx.x * blockDim.x + threadIdx.x; int idxy = blockIdx.y * blockDim.y + threadIdx.y; for (long iy = idxy; iy < dim_batch; iy += gridDim.y * blockDim.y){ sdata_s[tidy * blockDim.x + tidx] = src[ idxx + dim_reduce * iy]; //printf("%d %ld\n", idxx, iy); for (long j = blockDim.x * gridDim.x + idxx; j < dim_reduce; j += blockDim.x * gridDim.x) sdata_s[tidy * blockDim.x + tidx] = dev_add(sdata_s[tidy * blockDim.x + tidx], src[j + dim_reduce * iy]); __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1){ if (tidx < s) sdata_s[tidy * blockDim.x + tidx] = dev_add(sdata_s[tidy * blockDim.x + tidx], sdata_s[tidy * blockDim.x + tidx + s]); __syncthreads(); } if (0 == tidx) dev_atomic_add(dst + iy, sdata_s[tidy * blockDim.x]); } } extern "C" void cuda_reduce_add_inner(long dim_reduce, long dim_batch, float* dst, const float* src) { long maxBlockSizeX_dim = 1; while (8 * maxBlockSizeX_dim < 
dim_reduce) maxBlockSizeX_dim *= 2; long maxBlockSizeY_dim = 1; while (maxBlockSizeY_dim < dim_batch) maxBlockSizeY_dim *= 2; long maxBlockSizeX_gpu = 32; unsigned int blockSizeX = MIN(maxBlockSizeX_gpu, maxBlockSizeX_dim); unsigned int blockSizeY = MIN(maxBlockSizeY_dim, BLOCKSIZE / blockSizeX); dim3 blockDim(blockSizeX, blockSizeY); dim3 gridDim(gridsizeX(maxBlockSizeX_dim, blockSizeX), gridsizeY(dim_batch, blockSizeY)); kern_reduce_add_inner<<<gridDim, blockDim, blockSizeX * blockSizeY * FL_SIZE>>>(dim_reduce, dim_batch, dst, src); } __device__ static __inline__ cuFloatComplex dev_zmax(cuFloatComplex arg1, cuFloatComplex arg2) { return make_cuFloatComplex(MAX(cuCrealf(arg1), cuCrealf(arg2)), 0.); } __device__ static __inline__ void dev_atomic_zmax(cuFloatComplex* arg, cuFloatComplex val) { unsigned long long int* address_as_ull = (unsigned long long int*)arg; unsigned long long int old_ull = *address_as_ull; unsigned long long int assumed; unsigned long long int new_ull; cuFloatComplex new_cf; do { assumed = old_ull; new_cf = dev_zmax(*((cuFloatComplex*)(&old_ull)), val); new_ull = *((unsigned long long int*)(&new_cf)); old_ull = atomicCAS(address_as_ull, assumed, new_ull); } while (assumed != old_ull); } __global__ static void kern_reduce_zmax_outer(long dim_reduce, long dim_batch, cuFloatComplex* dst, const cuFloatComplex* src) { extern __shared__ cuFloatComplex sdata_c[]; int tidx = threadIdx.x; int tidy = threadIdx.y; int idxx = blockIdx.x * blockDim.x + threadIdx.x; int idxy = blockIdx.y * blockDim.y + threadIdx.y; for (long ix = idxx; ix < dim_batch; ix += gridDim.x * blockDim.x){ sdata_c[tidy * blockDim.x + tidx] = src[ idxy * dim_batch + ix]; for (long j = blockDim.y * gridDim.y + idxy; j < dim_reduce; j += blockDim.y * gridDim.y) sdata_c[tidy * blockDim.x + tidx] = dev_zmax(sdata_c[tidy * blockDim.x + tidx], src[j * dim_batch + ix]); __syncthreads(); for (unsigned int s = blockDim.y / 2; s > 0; s >>= 1){ if (tidy < s) sdata_c[tidy * blockDim.x + tidx] = dev_zmax(sdata_c[tidy * blockDim.x + tidx], sdata_c[(tidy + s) * blockDim.x + tidx]); __syncthreads(); } if (0 == tidy) dev_atomic_zmax(dst + ix, sdata_c[tidx]); } } extern "C" void cuda_reduce_zmax_outer(long dim_reduce, long dim_batch, _Complex float* dst, const _Complex float* src) { long maxBlockSizeX_dim = 1; while (maxBlockSizeX_dim < dim_batch) maxBlockSizeX_dim *= 2; long maxBlockSizeY_dim = 1; while (8 * maxBlockSizeY_dim < dim_reduce) maxBlockSizeY_dim *= 2; long maxBlockSizeX_gpu = 32; unsigned int blockSizeX = MIN(maxBlockSizeX_gpu, maxBlockSizeX_dim); unsigned int blockSizeY = MIN(maxBlockSizeY_dim, BLOCKSIZE / blockSizeX); dim3 blockDim(blockSizeX, blockSizeY); dim3 gridDim(gridsizeX(dim_batch, blockSizeX), gridsizeY(maxBlockSizeY_dim, blockSizeY)); kern_reduce_zmax_outer<<<gridDim, blockDim, blockSizeX * blockSizeY * CFL_SIZE>>>(dim_reduce, dim_batch, (cuFloatComplex*)dst, (const cuFloatComplex*)src); } __global__ static void kern_reduce_zmax_inner(long dim_reduce, long dim_batch, cuFloatComplex* dst, const cuFloatComplex* src) { extern __shared__ cuFloatComplex sdata_c[]; int tidx = threadIdx.x; int tidy = threadIdx.y; int idxx = blockIdx.x * blockDim.x + threadIdx.x; int idxy = blockIdx.y * blockDim.y + threadIdx.y; for (long iy = idxy; iy < dim_batch; iy += gridDim.y * blockDim.y){ sdata_c[tidy * blockDim.x + tidx] = src[ idxx + dim_reduce * iy]; //printf("%d %ld\n", idxx, iy); for (long j = blockDim.x * gridDim.x + idxx; j < dim_reduce; j += blockDim.x * gridDim.x) sdata_c[tidy * blockDim.x + tidx] = 
dev_zmax(sdata_c[tidy * blockDim.x + tidx], src[j + dim_reduce * iy]); __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1){ if (tidx < s) sdata_c[tidy * blockDim.x + tidx] = dev_zmax(sdata_c[tidy * blockDim.x + tidx], sdata_c[tidy * blockDim.x + tidx + s]); __syncthreads(); } if (0 == tidx) dev_atomic_zmax(dst + iy, sdata_c[tidy * blockDim.x]); } } extern "C" void cuda_reduce_zmax_inner(long dim_reduce, long dim_batch, _Complex float* dst, const _Complex float* src) { long maxBlockSizeX_dim = 1; while (8 * maxBlockSizeX_dim < dim_reduce) maxBlockSizeX_dim *= 2; long maxBlockSizeY_dim = 1; while (maxBlockSizeY_dim < dim_batch) maxBlockSizeY_dim *= 2; long maxBlockSizeX_gpu = 32; unsigned int blockSizeX = MIN(maxBlockSizeX_gpu, maxBlockSizeX_dim); unsigned int blockSizeY = MIN(maxBlockSizeY_dim, BLOCKSIZE / blockSizeX); dim3 blockDim(blockSizeX, blockSizeY); dim3 gridDim(gridsizeX(maxBlockSizeX_dim, blockSizeX), gridsizeY(dim_batch, blockSizeY)); kern_reduce_zmax_inner<<<gridDim, blockDim, blockSizeX * blockSizeY * CFL_SIZE>>>(dim_reduce, dim_batch, (cuFloatComplex*)dst, (const cuFloatComplex*)src); }
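// Host-side reference for the reduction layouts above, to make the indexing
// explicit: the *_outer kernels reduce over the slow (outer) axis of a
// dim_reduce x dim_batch array and accumulate into dst[batch], while the *_inner
// kernels reduce over the fast (contiguous) axis of a dim_batch x dim_reduce
// array. Like the kernels, these loops accumulate into dst rather than overwrite
// it. Plain-float sketch with names of our choosing, not part of the library.
static void reduce_add_outer_ref(long dim_reduce, long dim_batch, float* dst, const float* src)
{
	for (long ix = 0; ix < dim_batch; ix++)
		for (long j = 0; j < dim_reduce; j++)
			dst[ix] += src[j * dim_batch + ix];
}

static void reduce_add_inner_ref(long dim_reduce, long dim_batch, float* dst, const float* src)
{
	for (long iy = 0; iy < dim_batch; iy++)
		for (long j = 0; j < dim_reduce; j++)
			dst[iy] += src[j + dim_reduce * iy];
}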
#include <cstdio> #include "cuda_kernel_utils.h" #define POINT_BLOCK_SIZE 128 #define OFFSET_BLOCK_SIZE 512 ////////////////////////////////////////////////////////////////////////////////// GPU /** * Method to compute the key of each point. * @param pNumPoints Number of points. * @param pBatchSize Size of the batch. * @param pNumCells Number of cells of the grid. * @param pAABBMinPoint Minimum point of the grid (3 componenets). * @param pAABBMaxPoint Maximum point of the grid (3 componenets). * @param pPoints List of points. * @param pBatchIds List of batch ids. * @param pOutKeys Output parameter with the keys of each point. */ __global__ void calc_key( const int pNumPoints, const int pBatchSize, const int pNumCells, const float* __restrict__ pAABBMinPoint, const float* __restrict__ pAABBMaxPoint, const float* __restrict__ pPoints, const int* __restrict__ pBatchIds, int* __restrict__ pOutKeys) { int currentIndex = threadIdx.x + blockIdx.x * blockDim.x; if(currentIndex < pNumPoints){ int currBatchId = pBatchIds[currentIndex]; int pointIndex = currentIndex * 3; float maxAabbSize = max(max(pAABBMaxPoint[currBatchId*3] - pAABBMinPoint[currBatchId*3], pAABBMaxPoint[currBatchId*3+1] - pAABBMinPoint[currBatchId*3+1]), pAABBMaxPoint[currBatchId*3+2] - pAABBMinPoint[currBatchId*3+2]); float cellSize = maxAabbSize/(float)pNumCells; int xCell = max(min((int)floor((pPoints[pointIndex] - pAABBMinPoint[currBatchId*3])/cellSize), pNumCells -1), 0); int yCell = max(min((int)floor((pPoints[pointIndex+1] - pAABBMinPoint[currBatchId*3+1])/cellSize), pNumCells -1), 0); int zCell = max(min((int)floor((pPoints[pointIndex+2] - pAABBMinPoint[currBatchId*3+2])/cellSize), pNumCells -1), 0); pOutKeys[currentIndex] = currBatchId*pNumCells*pNumCells*pNumCells + xCell*pNumCells*pNumCells + yCell*pNumCells + zCell; } } /** * Method to update the counters of each cell. * @param pNumKeys Number of keys. * @param pKeys List of keys. * @param pOutCounters Output parameter with the counters. */ __global__ void update_counters( const int pNumKeys, const int* __restrict__ pKeys, int* __restrict__ pOutCounters) { int currentIndex = threadIdx.x + blockIdx.x * blockDim.x; if(currentIndex < pNumKeys) atomicAdd(&pOutCounters[pKeys[currentIndex]], 1); } /** * Second method to finish to propagate the offsets. * @param pStep1 Boolean that indicates if this is the first step. * @param pNumOffsets Number of offsets. * @param pNumOffsets2 Number of second level offsets. * @param pOffsets Input/Output parameter with the list of offsets. * @param pNumOffsets2 Output parameter with the list of second level offsets. */ __global__ void propagate_offsets( const bool pStep1, const int pNumOffsets, const int pNumOffsets2, int* __restrict__ pOffsets, int* __restrict__ pOffsets2) { __shared__ int groupCounter[OFFSET_BLOCK_SIZE]; //Get the local and global counter. int currCounter = threadIdx.x; int currGlobalCounter = threadIdx.x + blockIdx.x * blockDim.x; //Update the shared memory. if(currGlobalCounter < pNumOffsets) groupCounter[currCounter] = pOffsets[currGlobalCounter]; else groupCounter[currCounter] = 0; //SIMD scan. for(int i = 1; i <= OFFSET_BLOCK_SIZE/2; i*=2) { __syncthreads(); //Get the values of the pass. int currIndex = currCounter + i; int value1 = 0; int value2 = 0; if(currIndex < OFFSET_BLOCK_SIZE){ value1 = groupCounter[currCounter]; value2 = groupCounter[currIndex]; } __syncthreads(); //Update with the new value. 
if(currIndex < OFFSET_BLOCK_SIZE) groupCounter[currIndex] = value1 + value2; } __syncthreads(); //Save the counter into global memory. if(currGlobalCounter < pNumOffsets){ if(currCounter > 0) pOffsets[currGlobalCounter] = groupCounter[currCounter-1]; else pOffsets[currGlobalCounter] = 0; } if(pStep1){ //Update the offset buffer. if(currCounter == (OFFSET_BLOCK_SIZE-1) && blockIdx.x < pNumOffsets2) pOffsets2[blockIdx.x] = groupCounter[OFFSET_BLOCK_SIZE-1]; }else{ //Update the second level offset buffer. if(currCounter > blockIdx.x && currCounter < pNumOffsets2) atomicAdd(&pOffsets2[currCounter], groupCounter[OFFSET_BLOCK_SIZE-1]); } } /** * Method to determine the new indexs of the points. * @param pNumPoints Number of points. * @param pKeys Input parameter with the list of keys. * @param pCounters Input/Output parameter with the list of counters. * @param pOffset Input parameter with the list of first level offsets. * @param pOffset2 Input parameter with the list of second level offsets. * @param pOutNewIndexs Output parameter with the list of new indexes. */ __global__ void determine_new_index( const int pNumPoints, const int* __restrict__ pKeys, int* __restrict__ pCounters, const int* __restrict__ pOffset, const int* __restrict__ pOffset2, int* __restrict__ pOutNewIndexs) { int currPointId = threadIdx.x + blockIdx.x * blockDim.x; if(currPointId < pNumPoints){ int counterIndex = pKeys[currPointId]; int offsetIndex = counterIndex/OFFSET_BLOCK_SIZE; int globalOffsetIndex = offsetIndex/OFFSET_BLOCK_SIZE; int localIndex = atomicAdd(&pCounters[counterIndex], 1); int index = localIndex + pOffset[offsetIndex] + pOffset2[globalOffsetIndex]; pOutNewIndexs[currPointId] = index; } } /** * Method to move the points into their respective cells. * @param pNumPoints Number of points. * @param pBatchSize Size of the batch. * @param pNumFeatures Number of features. * @param pPoints Input parameter with the list of points. * @param pBatchIds Input parameter with the list of batch ids. * @param pFeatures Input parameter with the list of features. * @param pKeys Input parameter with the list of keys. * @param pNewIndexs Input parameter with the list of new indexs. * @param pOutPoints Output parameter with the list of points. * @param pOutBatchIds Output parameter with the list of batch ids. * @param pOutFeatures Output parameter with the list of features. * @param pOutKeys Output parameter with the list of keys. */ __global__ void move_points( const int pNumPoints, const int pBatchSize, const int pNumFeatures, const float* __restrict__ pPoints, const int* __restrict__ pBatchIds, const float* __restrict__ pFeatures, const int* __restrict__ pKeys, const int* __restrict__ pNewIndexs, float* __restrict__ pOutPoints, int* __restrict__ pOutBatchIds, float* __restrict__ pOutFeatures, int* __restrict__ pOutKeys) { int currPointId = threadIdx.x + blockIdx.x * blockDim.x; if(currPointId < pNumPoints){ int index = pNewIndexs[currPointId]; pOutPoints[index*3] = pPoints[currPointId*3]; pOutPoints[index*3 +1] = pPoints[currPointId*3 +1]; pOutPoints[index*3 +2] = pPoints[currPointId*3 +2]; for(int i = 0; i < pNumFeatures; ++i) pOutFeatures[index*pNumFeatures + i] = pFeatures[currPointId*pNumFeatures + i]; pOutKeys[index] = pKeys[currPointId]; pOutBatchIds[index] = pBatchIds[currPointId]; } } /** * Method to update the indexs of the cells. * @param pNumPoints Number of points. * @param pKeys Input parameter with the points keys. * @param pIndexs Output parameter with the pair of indexs of each cell in the grid. 
*/ __global__ void save_indexs( const int pNumPoints, const int* __restrict__ pKeys, int* __restrict__ pIndexs) { int currIndex = threadIdx.x + blockIdx.x * blockDim.x; if(currIndex < pNumPoints){ int currKey = pKeys[currIndex]; int prevIndex = currIndex-1; int currKeyIndex = currKey*2; if(prevIndex < 0){ pIndexs[currKeyIndex] = 0; }else if(currKey != pKeys[prevIndex]){ pIndexs[currKeyIndex] = currIndex; } int nextIndex = currIndex+1; if(nextIndex >= pNumPoints){ pIndexs[currKeyIndex+1] = pNumPoints; }else if(currKey != pKeys[nextIndex]){ pIndexs[currKeyIndex+1] = nextIndex; } } } /** * Method to update the indexs of the cells. * @param pNumPoints Number of points. * @param pNumFeatures Number of features. * @param pOutputGrads Gradients of the output of the operation. * @param pOututFeatureGrads Gradients of the feature outputs of the operation. * @param pNewIndexs New indexs of the points. * @param pOutInputGrads Output parameter with the input gradients. * @param pOutInputFeatureGrads Output parameter with the input gradients of the features. */ __global__ void compute_gradients( const int pNumPoints, const int pNumFeatures, const float* __restrict__ pOutputGrads, const float* __restrict__ pOututFeatureGrads, const int* __restrict__ pNewIndexs, float* __restrict__ pOutInputGrads, float* __restrict__ pOutInputFeatureGrads) { int currPointId = threadIdx.x + blockIdx.x * blockDim.x; if(currPointId < pNumPoints){ int index = pNewIndexs[currPointId]; pOutInputGrads[currPointId*3] = pOutputGrads[index*3]; pOutInputGrads[currPointId*3 +1] = pOutputGrads[index*3 +1]; pOutInputGrads[currPointId*3 +2] = pOutputGrads[index*3 +2]; for(int i = 0 ; i < pNumFeatures; ++i) pOutInputFeatureGrads[currPointId*pNumFeatures + i] = pOututFeatureGrads[index*pNumFeatures + i]; } } /** * Method to sort the features back. * @param pNumPoints Number of points. * @param pNumFeatures Number of features. * @param pInFeatures List of input features. * @param pNewIndexs New indexs of the points. * @param pOutFeatures Output parameter with the list of features. */ __global__ void sort_features_back( const int pNumPoints, const int pNumFeatures, const float* __restrict__ pInFeatures, const int* __restrict__ pNewIndexs, float* __restrict__ pOutFeatures) { int currPointId = threadIdx.x + blockIdx.x * blockDim.x; if(currPointId < pNumPoints){ int index = pNewIndexs[currPointId]; for(int i = 0 ; i < pNumFeatures; ++i) pOutFeatures[currPointId*pNumFeatures + i] = pInFeatures[index*pNumFeatures + i]; } } /** * Method to sort the gradients of the features back. * @param pNumPoints Number of points. * @param pNumFeatures Number of features. * @param pOutFeatureGrads List of output gradients of the features. * @param pNewIndexs New indexs of the points. * @param pInFeatureGrad Output parameter with the list of input gradients of the features. */ __global__ void sort_features_back_grad( const int pNumPoints, const int pNumFeatures, const float* __restrict__ pOutFeatureGrads, const int* __restrict__ pNewIndexs, float* __restrict__ pInFeatureGrad) { int currPointId = threadIdx.x + blockIdx.x * blockDim.x; if(currPointId < pNumPoints){ int index = pNewIndexs[currPointId]; for(int i = 0 ; i < pNumFeatures; ++i) pInFeatureGrad[index*pNumFeatures + i] = pOutFeatureGrads[currPointId*pNumFeatures + i]; } } /** * Method to compute the inverse of the new position index list. * @param pNumPoints Number of points. * @param pIndexs` List of the new position of each point. 
* @param pOutIndexs Output parameter with the list of old positions for each sorted point. */ __global__ void compute_inverse_indexs( const int pNumPoints, const int* __restrict__ pIndexs, int* __restrict__ pOutIndexs) { int currPointId = threadIdx.x + blockIdx.x * blockDim.x; if(currPointId < pNumPoints){ int newIndex = pIndexs[currPointId]; pOutIndexs[newIndex] = currPointId; } } /** * Method to transofrm a list of indexs. * @param pNumIndexs Number of indexs to transform. * @param pStartIndexs List of indexs to transform. * @param pNewIndexs List of the new position of each point. * @param pOutIndexs Output parameter with the list of transformed indexs. */ __global__ void transform_indexs( const int pNumIndexs, const int* __restrict__ pStartIndexs, const int* __restrict__ pNewIndexs, int* __restrict__ pOutIndexs) { int currIndexId = threadIdx.x + blockIdx.x * blockDim.x; if(currIndexId < pNumIndexs){ int index = pStartIndexs[currIndexId]; int newIndex = pNewIndexs[index]; pOutIndexs[currIndexId] = newIndex; } } /** * Method to determine the cell size. * @param pBatchSize Number of elements per batch. * @param pCellSize Desired cell size. * @param pAABBMin Minimum points of the bounding boxes. * @param pAABBMax Maximum points of the bounding boxes. * @param pNumCells Output parameter with the number of cells. */ __global__ void determine_cell_size( const int pBatchSize, const float pCellSize, const float* __restrict__ pAABBMin, const float* __restrict__ pAABBMax, int* __restrict__ pNumCells) { int currBatchId = threadIdx.x; if(currBatchId == 0){ float maxAabbSize = max(max(pAABBMax[currBatchId*3] - pAABBMin[currBatchId*3], pAABBMax[currBatchId*3+1] - pAABBMin[currBatchId*3+1]), pAABBMax[currBatchId*3+2] - pAABBMin[currBatchId*3+2]); int numCells = (int)(maxAabbSize/pCellSize); numCells = (numCells == 0)?1:numCells; //printf("Num cells: %d %f\n", numCells, maxAabbSize); *pNumCells = numCells; } } ////////////////////////////////////////////////////////////////////////////////// CPU int determineNumCells( const bool pScaleInv, const int pBatchSize, const float pCellSize, const float* pAABBMin, const float* pAABBMax) { if(pScaleInv){ int numCellsCPU = (int)(1.0f/pCellSize); numCellsCPU = (numCellsCPU == 0)?1:numCellsCPU; return numCellsCPU; } int* numCells; cudaMalloc(&numCells, sizeof(int)); cudaMemset(numCells, 0x3F, sizeof(int)); determine_cell_size<<<1, pBatchSize>>>(pBatchSize, pCellSize, pAABBMin, pAABBMax, numCells); int numCellsCPU = 0; cudaMemcpy(&numCellsCPU, numCells, sizeof(int), cudaMemcpyDeviceToHost); gpuErrchk(cudaFree(numCells)); return numCellsCPU; } void computeAuxiliarBuffersSize( const int pBatchSize, const int pNumCells, int* PBufferSize1, int* PBufferSize2, int* PBufferSize3) { (*PBufferSize1) = pBatchSize*pNumCells*pNumCells*pNumCells; (*PBufferSize2) = (*PBufferSize1)/OFFSET_BLOCK_SIZE; (*PBufferSize2) += (((*PBufferSize1)%OFFSET_BLOCK_SIZE) != 0)?1:0; (*PBufferSize3) = (*PBufferSize2)/OFFSET_BLOCK_SIZE; (*PBufferSize3) += (((*PBufferSize2)%OFFSET_BLOCK_SIZE) != 0)?1:0; } void sortPointsStep1GPUKernel( const int pNumPoints, const int pBatchSize, const int pNumCells, const float* pAABBMin, const float* pAABBMax, const float* pPoints, const int* pBatchIds, int* pAuxBuffCounters, int* pAuxBuffOffsets, int* pAuxBuffOffsets2, int* pKeys, int* pNewIndexs) { int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE; numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0; int totalNumCells = pBatchSize*pNumCells*pNumCells*pNumCells; cudaMemset(pAuxBuffCounters, 0, 
totalNumCells*sizeof(int)); int numOffsets = totalNumCells/OFFSET_BLOCK_SIZE; numOffsets += ((totalNumCells%OFFSET_BLOCK_SIZE) != 0)?1:0; int numOffsets2 = numOffsets/OFFSET_BLOCK_SIZE; numOffsets2 += ((numOffsets%OFFSET_BLOCK_SIZE) != 0)?1:0; cudaMemset(pAuxBuffOffsets, 0, numOffsets*sizeof(int)); cudaMemset(pAuxBuffOffsets2, 0, numOffsets2*sizeof(int)); calc_key<<<numBlocksPoints,POINT_BLOCK_SIZE>>>( pNumPoints, pBatchSize, pNumCells, pAABBMin, pAABBMax, pPoints, pBatchIds, pKeys); update_counters<<<numBlocksPoints,POINT_BLOCK_SIZE>>>(pNumPoints, pKeys, pAuxBuffCounters); propagate_offsets<<<numOffsets, OFFSET_BLOCK_SIZE>>>(true, totalNumCells, numOffsets, pAuxBuffCounters, pAuxBuffOffsets); propagate_offsets<<<numOffsets2, OFFSET_BLOCK_SIZE>>>(false, numOffsets, numOffsets2, pAuxBuffOffsets, pAuxBuffOffsets2); determine_new_index<<<numBlocksPoints,POINT_BLOCK_SIZE>>>(pNumPoints, pKeys, pAuxBuffCounters, pAuxBuffOffsets, pAuxBuffOffsets2, pNewIndexs); } void sortPointsStep2GPUKernel( const int pNumPoints, const int pBatchSize, const int pNumFeatures, const int pNumCells, const float* pPoints, const int* pBatchIds, const float* pFeatures, const int* pKeys, const int* pNewIndexs, int* pAuxBuffer, float* pOutPoints, int* pOutBatchIds, float* pOutFeatures, int* pOutCellIndexs) { int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE; numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0; cudaMemset(pOutCellIndexs, 0, pBatchSize*pNumCells*pNumCells*pNumCells*sizeof(int)*2); move_points<<<numBlocksPoints,POINT_BLOCK_SIZE>>> (pNumPoints, pBatchSize, pNumFeatures, pPoints, pBatchIds, pFeatures, pKeys, pNewIndexs, pOutPoints, pOutBatchIds, pOutFeatures, pAuxBuffer); save_indexs<<<numBlocksPoints,POINT_BLOCK_SIZE>>>(pNumPoints, pAuxBuffer, pOutCellIndexs); } void sortPointsStep2GradGPUKernel( const int pNumPoints, const int pNumFeatures, const float* pOutGradients, const float* pOutFeatureGradients, const int* pNewIndexs, float* pInGradients, float* pInFeatureGradients) { int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE; numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0; compute_gradients<<<numBlocksPoints,POINT_BLOCK_SIZE>>> (pNumPoints, pNumFeatures, pOutGradients, pOutFeatureGradients, pNewIndexs, pInGradients, pInFeatureGradients); } void sortFeaturesBack( const int pNumPoints, const int pNumFeatures, const float* pInFeatures, const int* pIndexs, float* pOutFeatures) { int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE; numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0; sort_features_back<<<numBlocksPoints,POINT_BLOCK_SIZE>>> (pNumPoints, pNumFeatures, pInFeatures, pIndexs, pOutFeatures); } void sortFeaturesBackGrad( const int pNumPoints, const int pNumFeatures, const float* pOutFeatureGrads, const int* pIndexs, float* pInFeatureGrads) { int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE; numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0; sort_features_back_grad<<<numBlocksPoints,POINT_BLOCK_SIZE>>> (pNumPoints, pNumFeatures, pOutFeatureGrads, pIndexs, pInFeatureGrads); } void computeInverseIndexs( const int pNumPoints, const int* pIndexs, int* pOutIndexs) { int numBlocksPoints = pNumPoints/POINT_BLOCK_SIZE; numBlocksPoints += (pNumPoints%POINT_BLOCK_SIZE != 0)?1:0; compute_inverse_indexs<<<numBlocksPoints,POINT_BLOCK_SIZE>>> (pNumPoints, pIndexs, pOutIndexs); } void transformIndexs( const int pNumIndexs, const int pNumPoints, const int* pInStartIndexs, const int* pInNewIndexs, int* pOutIndexs) { int numBlocksPoints = pNumIndexs/POINT_BLOCK_SIZE; numBlocksPoints += 
(pNumIndexs%POINT_BLOCK_SIZE != 0)?1:0; transform_indexs<<<numBlocksPoints, POINT_BLOCK_SIZE>>>(pNumIndexs, pInStartIndexs, pInNewIndexs, pOutIndexs); }
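/**
 * Illustrative host-side usage sketch (not part of the original source): it wires
 * together determineNumCells, computeAuxiliarBuffersSize and the two sorting steps
 * defined above. The function name, the temporary buffer names and the assumption
 * that all input pointers already live on the GPU are hypothetical; error checking
 * is omitted for brevity.
 */
void sortPointsUsageSketch(
    const int pNumPoints, const int pBatchSize, const int pNumFeatures,
    const float pCellSize,
    const float* pAABBMin, const float* pAABBMax,   // [pBatchSize*3] device buffers
    const float* pPoints, const int* pBatchIds,     // [pNumPoints*3], [pNumPoints]
    const float* pFeatures,                         // [pNumPoints*pNumFeatures]
    float* pOutPoints, int* pOutBatchIds,
    float* pOutFeatures, int* pOutCellIndexs)       // pOutCellIndexs: [numCells^3*pBatchSize*2]
{
    // Resolve the grid resolution from the cell size and the bounding boxes.
    int numCells = determineNumCells(false, pBatchSize, pCellSize, pAABBMin, pAABBMax);

    // Size the per-cell counter buffer and the two levels of offset buffers.
    int countersSize, offsetsSize, offsets2Size;
    computeAuxiliarBuffersSize(pBatchSize, numCells,
        &countersSize, &offsetsSize, &offsets2Size);

    int *counters, *offsets, *offsets2, *keys, *newIndexs, *auxKeys;
    cudaMalloc(&counters,  countersSize * sizeof(int));
    cudaMalloc(&offsets,   offsetsSize  * sizeof(int));
    cudaMalloc(&offsets2,  offsets2Size * sizeof(int));
    cudaMalloc(&keys,      pNumPoints   * sizeof(int));
    cudaMalloc(&newIndexs, pNumPoints   * sizeof(int));
    cudaMalloc(&auxKeys,   pNumPoints   * sizeof(int));

    // Step 1: compute a cell key per point, count points per cell and derive the
    // new (sorted) index of every point from the two-level offsets.
    sortPointsStep1GPUKernel(pNumPoints, pBatchSize, numCells, pAABBMin, pAABBMax,
        pPoints, pBatchIds, counters, offsets, offsets2, keys, newIndexs);

    // Step 2: scatter points, batch ids and features to their sorted positions and
    // store the [start, end) point range of every cell in pOutCellIndexs.
    sortPointsStep2GPUKernel(pNumPoints, pBatchSize, pNumFeatures, numCells,
        pPoints, pBatchIds, pFeatures, keys, newIndexs, auxKeys,
        pOutPoints, pOutBatchIds, pOutFeatures, pOutCellIndexs);

    cudaFree(counters); cudaFree(offsets); cudaFree(offsets2);
    cudaFree(keys); cudaFree(newIndexs); cudaFree(auxKeys);
}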
#if GOOGLE_CUDA  // opens the guard closed by the #endif // GOOGLE_CUDA below
#include "ew_op_gpu.h" __device__ __forceinline__ int div16(int numerator, int magic, int shift) { int res; asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, 0;" : "=r"(res) : "r"(numerator), "r"(magic)); return res >> shift; } __device__ __forceinline__ int mod16(int numerator, int div, int maxdiv) { int res; asm("vmad.s32.u32.u32 %0, -%1.h0, %2.h0, %3;" : "=r"(res) : "r"(div), "r"(maxdiv), "r"(numerator)); return res; } __device__ __forceinline__ int mad16(int a, int b, int c) { int res; asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(c)); return res; } // y = gain * x / sqrt(max(sum(x**2), epsilon)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_KCTRS( TY* Y, float* S, const TX* __restrict__ X, const float* __restrict__ G, const int2* __restrict__ Lut, float epsilon, int apply_gain) { int tid = threadIdx.x; int k = blockIdx.x; int2 block_data = Lut[k]; float gain = 1.0f; if (apply_gain) gain = G[k]; int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid int CTRS = block_data.y; // block_C * TRS const TX* X1 = X + offset; const TX* X2 = X + offset; Y += offset; // sum_sqr_x = sum(x**2) float sum_sqr_x = 0.0f; for (int i = tid; i < CTRS; i += 32) { float x = load(X1); X1 += 32; sum_sqr_x += x * x; } #pragma unroll for (int i = 16; i > 0; i >>= 1) sum_sqr_x += shfl_xor(sum_sqr_x, i); // store reduction for gradient pass if (tid == 0) store(S, sum_sqr_x, k); // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain; // y = x * rnorm for (int i = tid; i < CTRS; i += 32) { float x = load(X2); store(Y, x * rnorm); X2 += 32; Y += 32; } } // y = gain * x / sqrt(max(sum(x**2), epsilon)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_CKTRS( TY* Y, float* S, const TX* __restrict__ X, const float* __restrict__ G, const int4* __restrict__ Lut, float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS) { int tid = threadIdx.x; int k = blockIdx.x; int4 block_data = Lut[k]; float gain = 1.0f; if (apply_gain) gain = G[k]; int idx_k = block_data.x; int CTRS = block_data.y; int KTRS = block_data.z; int block_F = block_data.w; int offset_F = block_F + idx_k * TRS; const TX* X1 = X + offset_F; const TX* X2 = X + offset_F; Y += offset_F; // y_val = sum(x**2) float sum_sqr_x = 0.0f; for (int ctrs = tid; ctrs < CTRS; ctrs += 32) { // c = i / TRS; // trs = i % TRS; // offset = c * KTRS + trs int c = div16(ctrs, magic_TRS, shift_TRS); int trs = mod16(ctrs, c, TRS); int offset = mad16(c, KTRS, trs); float x = load(X1, offset); sum_sqr_x += x * x; } #pragma unroll for (int i = 16; i > 0; i >>= 1) sum_sqr_x += shfl_xor(sum_sqr_x, i); // store reduction for gradient pass if (tid == 0) store(S, sum_sqr_x, k); // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain; // y = x * rnorm for (int ctrs = tid; ctrs < CTRS; ctrs += 32) { int c = div16(ctrs, magic_TRS, shift_TRS); int trs = mod16(ctrs, c, TRS); int offset = mad16(c, KTRS, trs); float x = load(X2, offset); store(Y, x * rnorm, offset); } } // y = gain * x / sqrt(max(sum(x**2), epsilon)) template <typename TY, typename TX> __global__ void __launch_bounds__(128) l2_normalize_CK_32( TY* Y, float* S, const TX* __restrict__ X, const float* __restrict__ G, const int* __restrict__ Lut, float epsilon, int apply_gain) { extern __shared__ int iShare[]; // 96 + max(lut_size) extern __shared__ float fShare[]; // 96 + max(lut_size) int tid = 
threadIdx.x; int idx_L = blockIdx.x; int4 lut_head = ((const int4*)Lut)[idx_L]; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int k = idx_K*32 + (tid & 31); float gain = 1.0f; if (apply_gain) gain = G[k]; Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 128) iShare[i + 96] = Lut[i] * 32 * 32; __syncthreads(); // sum_sqr_x = sum(x**2) float sum_sqr_x = 0.0f; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { const TX* X1 = X + iShare[i + 96] + tid; #pragma unroll for (int j = 0; j < 8; j++) { float x = load(X1, j*128); sum_sqr_x += x * x; } } // reduce sum_sqr_x across the 4 warps if (tid >= 32) fShare[tid-32] = sum_sqr_x; __syncthreads(); if (tid < 32) { sum_sqr_x += fShare[tid] + fShare[tid + 32] + fShare[tid + 64]; fShare[tid] = sum_sqr_x; // store reduction for gradient pass store(S, sum_sqr_x, k); } __syncthreads(); // get the final reduced value for all warps: sum_sqr_x = fShare[tid & 31]; // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain; // y = x * rnorm #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int block_offset = iShare[i + 96]; const TX* X2 = X + block_offset + tid; TY* Y2 = Y + block_offset + tid; #pragma unroll for (int j = 0; j < 8; j++) { float x = load(X2, j*128); store(Y2, x * rnorm, j*128); } } } // y = gain * x / sqrt(max(sum(x**2), epsilon)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_CK_16( TY* Y, float* S, const TX* __restrict__ X, const float* __restrict__ G, const int* __restrict__ Lut, float epsilon, int apply_gain) { extern __shared__ int lut[]; // max(lut_size) int tid = threadIdx.x; int idx_L = blockIdx.x; int4 lut_head = ((const int4*)Lut)[idx_L]; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int k = idx_K*16 + (tid & 15); float gain = 1.0f; if (apply_gain) gain = G[k]; Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) lut[i] = Lut[i] * 16 * 16; // sum_sqr_x = sum(x**2) float sum_sqr_x = 0.0f; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { const TX* X0 = X + lut[i] + tid; #pragma unroll for (int j = 0; j < 8; j++) { float x = load(X0, j*32); sum_sqr_x += x * x; } } // reduce sum_sqr_x across the 4 rows of the warp sum_sqr_x += shfl_xor(sum_sqr_x, 16); store(S, sum_sqr_x, k, tid < 16); // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain; // y = x * rnorm #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int block_offset = lut[i]; const TX* X0 = X + block_offset + tid; TY* Y0 = Y + block_offset + tid; #pragma unroll for (int j = 0; j < 8; j++) { float x = load(X0, j*32); store(Y0, x * rnorm, j*32); } } } // y = gain * x / sqrt(max(sum(x**2), epsilon)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_CK_8( TY* Y, float* S, const TX* __restrict__ X, const float* __restrict__ G, const int* __restrict__ Lut, float epsilon, int apply_gain) { extern __shared__ int lut[]; // max(lut_size) int tid = threadIdx.x; int idx_L = blockIdx.x; int4 lut_head = ((const int4*)Lut)[idx_L]; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int k = idx_K*8 + (tid & 7); float gain = 1.0f; if (apply_gain) gain = G[k]; Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) lut[i] = Lut[i] * 8 * 8; // sum_sqr_x = sum(x**2) float sum_sqr_x = 
0.0f; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { const TX* X0 = X + lut[i] + tid; float x0 = load(X0, 0*32); float x1 = load(X0, 1*32); sum_sqr_x += x0 * x0 + x1 * x1; } // reduce sum_sqr_x across the 4 rows of the warp sum_sqr_x += shfl_xor(sum_sqr_x, 16); sum_sqr_x += shfl_xor(sum_sqr_x, 8); store(S, sum_sqr_x, k, tid < 8); // rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain; // y = x * rnorm #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int block_offset = lut[i]; const TX* X0 = X + block_offset + tid; TY* Y0 = Y + block_offset + tid; float x0 = load(X0, 0*32); float x1 = load(X0, 1*32); store(Y0, x0 * rnorm, 0*32); store(Y0, x1 * rnorm, 1*32); } } template <typename TY, typename TX> bool L2NormalizeKCTRS(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K) { dim3 grid(K, 1, 1); dim3 block(32, 1, 1); l2_normalize_KCTRS<TY,TX><<<grid, block, 0, stream>>>(y, sum_sqr_x, x, g, (const int2*)lut, epsilon, g != 0); return true; // TODO } template <typename TY, typename TX> bool L2NormalizeCKTRS(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS) { dim3 grid(K, 1, 1); dim3 block(32, 1, 1); l2_normalize_CKTRS<TY,TX><<<grid, block, 0, stream>>>(y, sum_sqr_x, x, g, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS); return true; // TODO } template <typename TY, typename TX> bool L2NormalizeCK(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize) { if (bsize == 32) { dim3 grid(K>>5, 1, 1); dim3 block(128, 1, 1); l2_normalize_CK_32<TY,TX><<<grid, block, shared+96*4, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0); } else if (bsize == 16) { dim3 grid(K>>4, 1, 1); dim3 block(32, 1, 1); l2_normalize_CK_16<TY,TX><<<grid, block, shared, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0); } else // if (bsize == 8) { dim3 grid(K>>3, 1, 1); dim3 block(32, 1, 1); l2_normalize_CK_8<TY,TX><<<grid, block, shared, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0); } return true; // TODO } /////////////////////////////////////// Gradients /////////////////////////////////////////// // sum_sqr_x = sum(x**2) // norm_x = sqrt(maximum(sum_sqr_x, epsilon)) // grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x // grad_g = sum(grad_y * l2_norm(x)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_grad_KCTRS( TX* DX, float* DG, const TY* __restrict__ DY, const TX* __restrict__ X, const float* __restrict__ G, const float* __restrict__ S, const int2* __restrict__ Lut, float epsilon, int apply_gain) { int tid = threadIdx.x; int k = blockIdx.x; int2 block_data = Lut[k]; float gain = 1.0f; if (apply_gain) gain = G[k]; int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid int CTRS = block_data.y; // block_C * TRS const TX* X1 = X + offset; const TX* X2 = X + offset; const TY* DY1 = DY + offset; const TY* DY2 = DY + offset; DX += offset; float sum_sqr_x = S[k]; float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon); float norm_xi = rsqrtf(max_sum_sqr_x); float norm_x2i = 1.0f / max_sum_sqr_x; // sum(-d * x / norm_x**2) float red_val = 0.0f; float dg = 0.0f; for (int i = tid; i < CTRS; i += 32) { float dy = load(DY1); float x = load(X1); DY1 += 32; X1 += 32; dg += dy * x * norm_xi; red_val += (-dy * x * gain) * norm_x2i; } 
#pragma unroll for (int i = 16; i > 0; i >>= 1) { red_val += shfl_xor(red_val, i); dg += shfl_xor(dg, i); } if (apply_gain && tid == 0) DG[k] = dg; red_val *= sum_sqr_x >= epsilon; for (int i = tid; i < CTRS; i += 32) { float dy = load(DY2); float x = load(X2); float dx = dy * gain + x * red_val; store(DX, dx * norm_xi, 0); DY2 += 32; X2 += 32; DX += 32; } } // sum_sqr_x = sum(x**2) // norm_x = sqrt(maximum(sum_sqr_x, epsilon)) // grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x // grad_g = sum(grad_y * l2_norm(x)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_grad_CKTRS( TX* DX, float* DG, const TY* __restrict__ DY, const TX* __restrict__ X, const float* __restrict__ G, const float* __restrict__ S, const int4* __restrict__ Lut, float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS) { int tid = threadIdx.x; int k = blockIdx.x; int4 block_data = Lut[k]; float gain = 1.0f; if (apply_gain) gain = G[k]; int idx_k = block_data.x; int CTRS = block_data.y; int KTRS = block_data.z; int block_F = block_data.w; int offset_F = block_F + idx_k * TRS; const TX* X1 = X + offset_F; const TX* X2 = X + offset_F; const TY* DY1 = DY + offset_F; const TY* DY2 = DY + offset_F; DX += offset_F; float sum_sqr_x = S[k]; float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon); float norm_xi = rsqrtf(max_sum_sqr_x); float norm_x2i = 1.0f / max_sum_sqr_x; // sum(-d * x / norm_x**2) float red_val = 0.0f; float dg = 0.0f; for (int ctrs = tid; ctrs < CTRS; ctrs += 32) { // c = i / TRS; // trs = i % TRS; // offset = c * KTRS + trs int c = div16(ctrs, magic_TRS, shift_TRS); int trs = mod16(ctrs, c, TRS); int offset = mad16(c, KTRS, trs); float x = load( X1, offset); float dy = load(DY1, offset); dg += dy * x * norm_xi; red_val += (-dy * x * gain) * norm_x2i; } #pragma unroll for (int i = 16; i > 0; i >>= 1) { red_val += shfl_xor(red_val, i); dg += shfl_xor(dg, i); } if (apply_gain && tid == 0) DG[k] = dg; red_val *= sum_sqr_x >= epsilon; for (int ctrs = tid; ctrs < CTRS; ctrs += 32) { int c = div16(ctrs, magic_TRS, shift_TRS); int trs = mod16(ctrs, c, TRS); int offset = mad16(c, KTRS, trs); float x = load( X2, offset); float dy = load(DY2, offset); float dx = dy * gain + x * red_val; store(DX, dx * norm_xi, offset); } } // sum_sqr_x = sum(x**2) // norm_x = sqrt(maximum(sum_sqr_x, epsilon)) // grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x // grad_g = sum(grad_y * l2_norm(x)) template <typename TY, typename TX> __global__ void __launch_bounds__(128) l2_normalize_grad_CK_32( TX* DX, float* DG, const TY* __restrict__ DY, const TX* __restrict__ X, const float* __restrict__ G, const float* __restrict__ S, const int* __restrict__ Lut, float epsilon, int apply_gain) { extern __shared__ float fShare[]; // 96*2 + max(lut_size) extern __shared__ int iShare[]; // 96*2 + max(lut_size) float* redShare1 = &fShare[96*0]; float* redShare2 = &fShare[96*1]; int* lutShare = &iShare[96*2]; int tid = threadIdx.x; int idx_L = blockIdx.x; int4 lut_head = ((const int4*)Lut)[idx_L]; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int k = idx_K*32 + (tid & 31); float gain = 1.0f; if (apply_gain) gain = G[k]; float sum_sqr_x = S[k]; Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 128) lutShare[i] = Lut[i] * 32 * 32; __syncthreads(); float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon); float norm_xi = rsqrtf(max_sum_sqr_x); float norm_x2i = 
1.0f / max_sum_sqr_x; float red_val = 0.0f; float dg = 0.0f; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int offset = lutShare[i] + tid; const TY* DY1 = DY + offset; const TX* X1 = X + offset; #pragma unroll for (int j = 0; j < 8; j++) { float x = load( X1, j*128); float dy = load(DY1, j*128); red_val += (-dy * gain * x) * norm_x2i; dg += dy * x * norm_xi; } } // reduce red_val across the 4 warps if (tid >= 32) { redShare1[tid-32] = red_val; redShare2[tid-32] = dg; } __syncthreads(); if (tid < 32) { red_val += redShare1[tid] + redShare1[tid + 32] + redShare1[tid + 64]; dg += redShare2[tid] + redShare2[tid + 32] + redShare2[tid + 64]; redShare1[tid] = red_val; if (apply_gain) DG[k] = dg; } __syncthreads(); // get the final reduced value for all warps: red_val = redShare1[tid & 31]; red_val *= sum_sqr_x >= epsilon; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int offset = lutShare[i] + tid; TX* DX2 = DX + offset; const TY* DY2 = DY + offset; const TX* X2 = X + offset; #pragma unroll for (int j = 0; j < 8; j++) { float x = load( X2, j*128); float dy = load(DY2, j*128); float dx = dy * gain + x * red_val; store(DX2, dx * norm_xi, j*128); } } } // sum_sqr_x = sum(x**2) // norm_x = sqrt(maximum(sum_sqr_x, epsilon)) // grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x // grad_g = sum(grad_y * l2_norm(x)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_grad_CK_16( TX* DX, float* DG, const TY* __restrict__ DY, const TX* __restrict__ X, const float* __restrict__ G, const float* __restrict__ S, const int* __restrict__ Lut, float epsilon, int apply_gain) { extern __shared__ int lut[]; // max(lut_size) int tid = threadIdx.x; int idx_L = blockIdx.x; int4 lut_head = ((const int4*)Lut)[idx_L]; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int k = idx_K*16 + (tid & 15); float gain = 1.0f; if (apply_gain) gain = G[k]; float sum_sqr_x = S[k]; Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) lut[i] = Lut[i] * 16 * 16; float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon); float norm_xi = rsqrtf(max_sum_sqr_x); float norm_x2i = 1.0f / max_sum_sqr_x; float red_val = 0.0f; float dg = 0.0f; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int offset = lut[i] + tid; const TY* DY1 = DY + offset; const TX* X1 = X + offset; #pragma unroll for (int j = 0; j < 8; j++) { float x = load( X1, j*32); float dy = load(DY1, j*32); red_val += (-dy * gain * x) * norm_x2i; dg += dy * x * norm_xi; } } // reduce red_val,dg across the 4 rows of the warp red_val += shfl_xor(red_val, 16); dg += shfl_xor(dg, 16); store(DG, dg, k, apply_gain && tid < 16); red_val *= sum_sqr_x >= epsilon; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int offset = lut[i] + tid; TX* DX2 = DX + offset; const TY* DY2 = DY + offset; const TX* X2 = X + offset; #pragma unroll for (int j = 0; j < 8; j++) { float x = load( X2, j*32); float dy = load(DY2, j*32); float dx = dy * gain + x * red_val; store(DX2, dx * norm_xi, j*32); } } } // sum_sqr_x = sum(x**2) // norm_x = sqrt(maximum(sum_sqr_x, epsilon)) // grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x // grad_g = sum(grad_y * l2_norm(x)) template <typename TY, typename TX> __global__ void __launch_bounds__(32) l2_normalize_grad_CK_8( TX* DX, float* DG, const TY* __restrict__ DY, const TX* __restrict__ X, const float* __restrict__ G, const float* __restrict__ S, const int* 
__restrict__ Lut, float epsilon, int apply_gain) { extern __shared__ int lut[]; // max(lut_size) int tid = threadIdx.x; int idx_L = blockIdx.x; int4 lut_head = ((const int4*)Lut)[idx_L]; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int k = idx_K*8 + (tid & 7); float gain = 1.0f; if (apply_gain) gain = G[k]; float sum_sqr_x = S[k]; Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) lut[i] = Lut[i] * 8 * 8; float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon); float norm_xi = rsqrtf(max_sum_sqr_x); float norm_x2i = 1.0f / max_sum_sqr_x; float red_val = 0.0f; float dg = 0.0f; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int offset = lut[i] + tid; const TY* DY1 = DY + offset; const TX* X1 = X + offset; #pragma unroll for (int j = 0; j < 2; j++) { float x = load( X1, j*32); float dy = load(DY1, j*32); red_val += (-dy * gain * x) * norm_x2i; dg += dy * x * norm_xi; } } // reduce red_val,dg across the 4 rows of the warp red_val += shfl_xor(red_val, 16); dg += shfl_xor(dg, 16); red_val += shfl_xor(red_val, 8); dg += shfl_xor(dg, 8); store(DG, dg, k, apply_gain && tid < 8); red_val *= sum_sqr_x >= epsilon; #pragma unroll 1 for (int i = 0; i < lut_size; i++) { int offset = lut[i] + tid; TX* DX2 = DX + offset; const TY* DY2 = DY + offset; const TX* X2 = X + offset; #pragma unroll for (int j = 0; j < 2; j++) { float x = load( X2, j*32); float dy = load(DY2, j*32); float dx = dy * gain + x * red_val; store(DX2, dx * norm_xi, j*32); } } } template <typename TY, typename TX> bool L2NormalizeGradKCTRS(CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K) { dim3 grid(K, 1, 1); dim3 block(32, 1, 1); l2_normalize_grad_KCTRS<TY,TX><<<grid, block, 0, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int2*)lut, epsilon, g != 0); return true; // TODO } template <typename TY, typename TX> bool L2NormalizeGradCKTRS(CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS) { dim3 grid(K, 1, 1); dim3 block(32, 1, 1); l2_normalize_grad_CKTRS<TY,TX><<<grid, block, 0, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS); return true; // TODO } template <typename TY, typename TX> bool L2NormalizeGradCK (CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize) { if (bsize == 32) { dim3 grid(K>>5, 1, 1); dim3 block(128, 1, 1); l2_normalize_grad_CK_32<TY,TX><<<grid, block, shared+96*2*4, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0); } else if (bsize == 16) { dim3 grid(K>>4, 1, 1); dim3 block(32, 1, 1); l2_normalize_grad_CK_16<TY,TX><<<grid, block, shared, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0); } else // if (bsize == 8) { dim3 grid(K>>3, 1, 1); dim3 block(32, 1, 1); l2_normalize_grad_CK_8<TY,TX><<<grid, block, shared, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0); } return true; // TODO } template bool L2NormalizeKCTRS<float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K); template bool L2NormalizeCKTRS<float, float>(CUstream stream, float* y, float* 
sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeCK <float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeGradKCTRS<float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K); template bool L2NormalizeGradCKTRS<float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeGradCK <float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeKCTRS<ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K); template bool L2NormalizeCKTRS<ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeCK <ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeGradKCTRS<ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K); template bool L2NormalizeGradCKTRS<ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeGradCK <ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeKCTRS<ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K); template bool L2NormalizeCKTRS<ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeCK <ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeGradKCTRS<ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K); template bool L2NormalizeGradCKTRS<ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeGradCK <ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeKCTRS<bhalf, bhalf>(CUstream 
stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K); template bool L2NormalizeCKTRS<bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeCK <bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeGradKCTRS<bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K); template bool L2NormalizeGradCKTRS<bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeGradCK <bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeKCTRS<bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K); template bool L2NormalizeCKTRS<bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeCK <bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize); template bool L2NormalizeGradKCTRS<bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K); template bool L2NormalizeGradCKTRS<bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS); template bool L2NormalizeGradCK <bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize); #endif // GOOGLE_CUDA
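/**
 * Illustrative CPU reference (not part of the original source) for one filter of
 * length n, following the formulas stated in the kernel comments above:
 *   y      = g * x / sqrt(max(sum(x^2), epsilon))
 *   grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g*x / norm_x^2) ) / norm_x
 *   grad_g = sum(grad_y * x / norm_x)
 * The function name is hypothetical; it is only meant as a host-side check of the
 * kernels and assumes the math functions pulled in by the headers above.
 */
void l2_normalize_reference(
    const float* x, const float* grad_y, float gain, float epsilon, int n,
    float* y, float* grad_x, float* grad_g)
{
    float sum_sqr_x = 0.0f;
    for (int i = 0; i < n; ++i)
        sum_sqr_x += x[i] * x[i];

    float norm_x = sqrtf(fmaxf(sum_sqr_x, epsilon));
    float rnorm  = gain / norm_x;
    for (int i = 0; i < n; ++i)
        y[i] = x[i] * rnorm;

    // red_val = sum(-grad_y * gain * x / norm_x^2), zeroed when the max() clamped.
    float red_val = 0.0f, dg = 0.0f;
    for (int i = 0; i < n; ++i) {
        red_val += -grad_y[i] * gain * x[i] / (norm_x * norm_x);
        dg      +=  grad_y[i] * x[i] / norm_x;
    }
    if (sum_sqr_x < epsilon) red_val = 0.0f;

    for (int i = 0; i < n; ++i)
        grad_x[i] = (grad_y[i] * gain + x[i] * red_val) / norm_x;
    *grad_g = dg;
}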
template<typename Ktraits> __global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_bwd_kernel(void * __restrict__ dx_, void * __restrict__ dg_, void * __restrict__ db_, const void * __restrict__ dw_, const void * __restrict__ x_, const void * __restrict__ mu_, const void * __restrict__ rs_, const void * __restrict__ g_, const int rows ){ using Vec = typename Ktraits::Vec; enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG }; enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA }; enum { WARPS_M = Ktraits::WARPS_M }; enum { WARPS_N = Ktraits::WARPS_N }; enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW }; enum { COLS = Ktraits::COLS }; enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW }; enum { LDGS = BYTES_PER_ROW / Ktraits::BYTES_PER_ROW_PER_CTA }; static_assert(LDGS * Ktraits::BYTES_PER_ROW_PER_CTA == BYTES_PER_ROW, ""); enum { NUM_ELTS = Vec::NUM_ELTS }; using vec_t = typename Ktraits::vec_t; using base_t = typename Ktraits::base_t; using compute_t = typename Ktraits::compute_t; const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int lane = tidx % THREADS_PER_WARP; const int warp = tidx / THREADS_PER_WARP; const int warp_m = warp / Ktraits::WARPS_N; const int warp_n = warp % Ktraits::WARPS_N; const int tid_r = warp_n * THREADS_PER_WARP + lane; const int r = bidx * Ktraits::ROWS_PER_CTA + warp_m; const int c = warp_n * THREADS_PER_WARP + lane; const char *dw_ptr = static_cast<const char *>(dw_); const char *x_ptr = static_cast<const char *>(x_); const char *g_ptr = static_cast<const char *>(g_); char *dx_ptr = static_cast<char *>(dx_); const compute_t *mu_ptr = static_cast<const compute_t *>(mu_); const compute_t *rs_ptr = static_cast<const compute_t *>(rs_); static_assert(COLS == THREADS_PER_ROW * LDGS * NUM_ELTS, ""); // smem for final reduction //__shared__ compute_t smem_[ROWS_PER_CTA * COLS]; extern __shared__ compute_t smem_[]; // static_assert(sizeof(smem_dw_sum) == 32*1024,""); // Using the grid stride loop we can assign multiple rows to each thread // by using a number of CTAs smaller than rows / ROWS_PER_CTA // We accumulate them here, one in smem, one in registers, because the smem // capacity is limited compute_t * dw_sum = &smem_dw_sum[warp_m * COLS + tid_r // * LDGS * NUM_ELTS]; compute_t dwy_sum[LDGS * NUM_ELTS]; compute_t dw_sum[LDGS * NUM_ELTS]; memset(dwy_sum, 0, sizeof(compute_t) * LDGS * NUM_ELTS); memset(dw_sum, 0, sizeof(compute_t) * LDGS * NUM_ELTS); // Debug 8 rows, 4B, 1024 cols __shared__ compute_t smem_mdy[ROWS_PER_CTA * WARPS_N]; __shared__ compute_t smem_mdyy[ROWS_PER_CTA * WARPS_N]; compute_t *mdy_shared = &smem_mdy[warp_m * WARPS_N]; compute_t *mdyy_shared = &smem_mdyy[warp_m * WARPS_N]; constexpr float rn = 1.f / float(COLS); Vec gamma[LDGS]; int col = c; #pragma unroll for (int it = 0; it < LDGS; it++) { gamma[it].load_from(g_ptr + col * BYTES_PER_LDG); col += Ktraits::THREADS_PER_ROW; } // TODO if ROWS_PER_CTA does not divice rows, we might get divergence in the // last blocks with syncthreads! 
// grid stride over rows #pragma unroll 1 for (int row = r; row < rows; row += gridDim.x * ROWS_PER_CTA) { const compute_t mu_r = mu_ptr[row]; const compute_t rs_r = rs_ptr[row]; Vec dw[LDGS], x[LDGS], dx[LDGS]; int col = c; #pragma unroll for (int it = 0; it < LDGS; it++) { dw[it].load_from(dw_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG); x[it].load_from(x_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG); col += THREADS_PER_ROW; } // local reductions compute_t dy[LDGS * NUM_ELTS]; compute_t y[LDGS * NUM_ELTS]; compute_t mdy_local = 0.f; compute_t mdyy_local = 0.f; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < Vec::NUM_ELTS; jt++) { compute_t x_tmp = x[it].data.elt[jt]; compute_t y_tmp = rs_r * (x_tmp - mu_r); compute_t dy_tmp = gamma[it].data.elt[jt] * dw[it].data.elt[jt]; compute_t dw_tmp = dw[it].data.elt[jt]; mdy_local += dy_tmp; mdyy_local += dy_tmp * y_tmp; dy[it * NUM_ELTS + jt] = dy_tmp; y[it * NUM_ELTS + jt] = y_tmp; dwy_sum[it * NUM_ELTS + jt] += dw_tmp * y_tmp; dw_sum[it * NUM_ELTS + jt] += dw_tmp; } } // reduction across row for mdy, mdyy if (WARPS_N == 1) { // no need to go through smem! #pragma unroll for (int it = 1; it < THREADS_PER_WARP; it *= 2) { mdy_local += __shfl_xor_sync(uint32_t(-1), mdy_local, it); mdyy_local += __shfl_xor_sync(uint32_t(-1), mdyy_local, it); } mdy_local *= rn; mdyy_local *= rn; } else { #pragma unroll for (int it = 16; it > 0; it /= 2) { mdy_local += __shfl_down_sync(uint32_t(-1), mdy_local, it); mdyy_local += __shfl_down_sync(uint32_t(-1), mdyy_local, it); } // lane 0 holds the result! if (lane == 0) { mdy_shared[warp_n] = mdy_local; mdyy_shared[warp_n] = mdyy_local; } __syncthreads(); if (warp_n == 0 && lane == 0) { mdy_local = 0.f; mdyy_local = 0.f; for (int it = 0; it < WARPS_N; it++) { mdy_local += mdy_shared[it]; mdyy_local += mdyy_shared[it]; } mdy_shared[0] = mdy_local; mdyy_shared[0] = mdyy_local; } __syncthreads(); mdy_local = mdy_shared[0] * rn; mdyy_local = mdyy_shared[0] * rn; } #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { compute_t dy_tmp = dy[it * NUM_ELTS + jt]; compute_t y_tmp = y[it * NUM_ELTS + jt]; compute_t dx_tmp = compute_t(rs_r) * (dy_tmp - mdyy_local * y_tmp - mdy_local); dx[it].data.elt[jt] = dx_tmp; } } col = c; #pragma unroll for (int it = 0; it < LDGS; it++) { dx[it].store_to(dx_ptr + row * BYTES_PER_ROW + col * BYTES_PER_LDG); col += Ktraits::THREADS_PER_ROW; } } // end: grid stride loop // Finalize reduction of part dgamma and dbeta for this CTA // by reducing over the rows held across the WARPS_M warps enum { NUM_RES = COLS / Ktraits::THREADS_PER_CTA }; static_assert(NUM_RES * Ktraits::THREADS_PER_CTA == COLS, ""); compute_t *smem_write; smem_write = &smem_[warp_m * COLS + tid_r * NUM_ELTS]; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { smem_write[jt] = dw_sum[it * NUM_ELTS + jt]; } smem_write += THREADS_PER_ROW * NUM_ELTS; } __syncthreads(); compute_t cta_dw_sum[NUM_RES]; memset(cta_dw_sum, 0, sizeof(compute_t) * NUM_RES); for (int it = 0; it < ROWS_PER_CTA; it++) { for (int jt = 0; jt < NUM_RES; jt++) { cta_dw_sum[jt] += smem_[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA]; } } __syncthreads(); smem_write = &smem_[warp_m * COLS + tid_r * NUM_ELTS]; #pragma unroll for (int it = 0; it < LDGS; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { smem_write[jt] = dwy_sum[it * NUM_ELTS + jt]; } smem_write += THREADS_PER_ROW * NUM_ELTS; } 
__syncthreads(); compute_t cta_dwy_sum[NUM_RES]; memset(cta_dwy_sum, 0, sizeof(compute_t) * NUM_RES); for (int it = 0; it < ROWS_PER_CTA; it++) { for (int jt = 0; jt < NUM_RES; jt++) { cta_dwy_sum[jt] += smem_[it * COLS + tidx + jt * Ktraits::THREADS_PER_CTA]; } } compute_t *dgamma_part = static_cast<compute_t *>(dg_) + bidx * COLS + tidx; for (int jt = 0; jt < NUM_RES; jt++) { *dgamma_part = cta_dwy_sum[jt]; dgamma_part += Ktraits::THREADS_PER_CTA; } compute_t *dbeta_part = static_cast<compute_t *>(db_) + bidx * COLS + tidx; for (int jt = 0; jt < NUM_RES; jt++) { *dbeta_part = cta_dw_sum[jt]; dbeta_part += Ktraits::THREADS_PER_CTA; } } template<typename Ktraits, typename out_t> __global__ __launch_bounds__(Ktraits::THREADS_PER_CTA) void ln_bwd_finalize_kernel(void * __restrict__ dg_, void * __restrict__ db_, const void * __restrict__ dg_part_, const void * __restrict__ db_part_, const int rows ){ using Vec = typename Ktraits::Vec; enum { NUM_ELTS = Vec::NUM_ELTS }; using vec_t = typename Ktraits::vec_t; using base_t = typename Ktraits::base_t; using compute_t = typename Ktraits::compute_t; enum { BYTES_PER_LDG = Ktraits::BYTES_PER_LDG }; enum { ROWS_PER_CTA = Ktraits::ROWS_PER_CTA }; enum { WARPS_M = Ktraits::WARPS_M }; enum { WARPS_N = Ktraits::WARPS_N }; enum { THREADS_PER_ROW = Ktraits::THREADS_PER_ROW }; enum { COLS = Ktraits::COLS }; enum { BYTES_PER_ROW = Ktraits::BYTES_PER_ROW }; enum {VEC_COLS = BYTES_PER_ROW / BYTES_PER_LDG}; //dbg static_assert(VEC_COLS == COLS / NUM_ELTS, ""); //static_assert(VEC_COLS == 1024,""); const int tidx = threadIdx.x; const int bidx = blockIdx.x; const int lane = tidx % THREADS_PER_WARP; const int warp = tidx / THREADS_PER_WARP; const int warp_m = warp / Ktraits::WARPS_N; const int warp_n = warp % Ktraits::WARPS_N; const int tid_c = warp_n * THREADS_PER_WARP + lane; const int c =bidx * THREADS_PER_ROW + tid_c; const int r = warp_m; __shared__ compute_t smem_[(WARPS_M - 1) * THREADS_PER_ROW * NUM_ELTS]; //Will probably run this with WARPS_N = 1 and grid = 1024 / (32*4) = 8, or NUM_ELTS=1 and grid = 32 // and WARPS_M = 4 (or 1??) 
for(int col = c; col < VEC_COLS; col += gridDim.x * THREADS_PER_ROW){ const char* dg_part_ptr = static_cast<const char*>(dg_part_) + r * BYTES_PER_ROW + col * BYTES_PER_LDG; const char* db_part_ptr = static_cast<const char*>(db_part_) + r * BYTES_PER_ROW + col * BYTES_PER_LDG; compute_t dg_sum[NUM_ELTS]; compute_t db_sum[NUM_ELTS]; memset(dg_sum, 0, sizeof(compute_t) * NUM_ELTS); memset(db_sum, 0, sizeof(compute_t) * NUM_ELTS); #pragma unroll for(int row = r; row < rows;row += ROWS_PER_CTA){ Vec dg; Vec db; dg.load_from(dg_part_ptr); db.load_from(db_part_ptr); dg_part_ptr += ROWS_PER_CTA * BYTES_PER_ROW; db_part_ptr += ROWS_PER_CTA * BYTES_PER_ROW; #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { dg_sum[jt] += dg.data.elt[jt]; db_sum[jt] += db.data.elt[jt]; } } // Finalize the reduction across rows of the CTA compute_t * smem_write; smem_write = smem_ + (warp_m -1) *THREADS_PER_ROW * NUM_ELTS + tid_c; if (warp_m > 0) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { *smem_write = dg_sum[jt]; smem_write+=THREADS_PER_ROW; } } __syncthreads(); compute_t *smem_read ; smem_read = smem_ + tid_c ; if (warp_m == 0) { #pragma unroll for (int it = 0; it < WARPS_M - 1; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { dg_sum[jt] += *smem_read; smem_read += THREADS_PER_ROW; } } } __syncthreads(); smem_write = smem_ + (warp_m -1) *THREADS_PER_ROW * NUM_ELTS + tid_c; if (warp_m > 0) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { *smem_write = db_sum[jt]; smem_write+=THREADS_PER_ROW; } } __syncthreads(); smem_read = smem_ + tid_c; if (warp_m == 0) { #pragma unroll for (int it = 0; it < WARPS_M - 1; it++) { #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { db_sum[jt] += *smem_read; smem_read += THREADS_PER_ROW; } } using vout_t = typename Vec_type<sizeof(out_t) * NUM_ELTS>::Type; union { vout_t raw; out_t elt[NUM_ELTS]; } dg_out, db_out; // out_t dg_out[NUM_ELTS], db_out[NUM_ELTS]; #pragma unroll for (int jt = 0; jt < NUM_ELTS; jt++) { dg_out.elt[jt] = dg_sum[jt]; db_out.elt[jt] = db_sum[jt]; } vout_t *dg_ptr = reinterpret_cast<vout_t *>(dg_) + col ; vout_t *db_ptr = reinterpret_cast<vout_t *>(db_) + col ; *dg_ptr = dg_out.raw; *db_ptr = db_out.raw; } } } template<typename scalar_t> void launch(at::Tensor &dx, at::Tensor &dgamma, at::Tensor &dbeta, at::Tensor &dgamma_part, at::Tensor &dbeta_part, const at::Tensor &dw, const at::Tensor &x, const at::Tensor &mu, const at::Tensor &rsigma, const at::Tensor &gamma, const int rows, const int cols, const int gridx, cudaStream_t stream){ if (cols == 1024) { using Ktraits = Kernel_traits<scalar_t, 1024, 4, 1>; if (Ktraits::SMEM_BYTES >= 48 * 1024) { AT_CUDA_CHECK(cudaFuncSetAttribute( ln_bwd_kernel<Ktraits>, cudaFuncAttributeMaxDynamicSharedMemorySize, Ktraits::SMEM_BYTES)); } ln_bwd_kernel<Ktraits> <<<gridx, Ktraits::THREADS_PER_CTA, Ktraits::SMEM_BYTES, stream>>>( dx.data_ptr(), dgamma_part.data_ptr(), dbeta_part.data_ptr(), dw.data_ptr(), x.data_ptr(), mu.data_ptr(), rsigma.data_ptr(), gamma.data_ptr(), rows); using Ktraits2 = Kernel_traits<float, 1024, 16, 1, 4>; constexpr int grid2 = DIVUP(1024, Ktraits2::THREADS_PER_ROW * Ktraits2::Vec::NUM_ELTS); ln_bwd_finalize_kernel<Ktraits2, scalar_t> <<<grid2, Ktraits2::THREADS_PER_CTA, 0, stream>>>( dgamma.data_ptr(), dbeta.data_ptr(), dgamma_part.data_ptr(), dbeta_part.data_ptr(), gridx); } else { assert(false && "Not implemented"); } AT_CUDA_CHECK(cudaPeekAtLastError()); } void ln_bwd_cuda(at::Tensor &dx, at::Tensor &dgamma, at::Tensor &dbeta, const at::Tensor &dw, const 
at::Tensor &x, const at::Tensor &mu, const at::Tensor &rsigma, const at::Tensor &gamma, const int rows, const int cols, cudaStream_t stream) { const auto dtype = x.scalar_type(); const auto props = at::cuda::getCurrentDeviceProperties(); const int smCount = props->multiProcessorCount; // Launch 2 CTAs per SM const int grid = 2 * smCount; //request workspace for two-step reduction. We always reduce in FP32. auto opts = x.options(); auto dbeta_part = torch::empty({grid, cols}, opts.dtype(torch::kFloat32)); auto dgamma_part = torch::empty({grid, cols}, opts.dtype(torch::kFloat32)); if (dtype == torch::kFloat16) { launch<half>(dx, dgamma, dbeta, dgamma_part, dbeta_part, dw, x, mu, rsigma, gamma, rows, cols, grid, stream); } else if (dtype == torch::kFloat32) { launch<float>(dx, dgamma, dbeta, dgamma_part, dbeta_part, dw, x, mu, rsigma, gamma, rows, cols, grid, stream); } else { assert(false && "Not implemented"); } }
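/**
 * Illustrative per-row CPU reference (not part of the original source) for the math
 * implemented by ln_bwd_kernel above. `mu` and `rs` are the mean and reciprocal
 * standard deviation saved by the forward pass for this row; dgamma_acc/dbeta_acc
 * are accumulated across rows by the caller. Names are assumptions for the sketch.
 */
static void ln_bwd_row_reference(
    const float* dw,     // upstream gradient for this row      [cols]
    const float* x,      // forward input for this row          [cols]
    const float* gamma,  // scale parameter                     [cols]
    float mu, float rs,
    float* dx,           // gradient w.r.t. x for this row      [cols]
    float* dgamma_acc,   // += dw * y                           [cols]
    float* dbeta_acc,    // += dw                               [cols]
    int cols)
{
    const float rn = 1.f / static_cast<float>(cols);
    float mdy = 0.f, mdyy = 0.f;
    for (int c = 0; c < cols; ++c) {
        const float y  = rs * (x[c] - mu);   // normalized activation
        const float dy = gamma[c] * dw[c];   // gradient after the scale
        mdy  += dy;
        mdyy += dy * y;
        dgamma_acc[c] += dw[c] * y;
        dbeta_acc[c]  += dw[c];
    }
    mdy *= rn;
    mdyy *= rn;
    for (int c = 0; c < cols; ++c) {
        const float y  = rs * (x[c] - mu);
        const float dy = gamma[c] * dw[c];
        dx[c] = rs * (dy - mdyy * y - mdy);  // same formula as dx_tmp in ln_bwd_kernel
    }
}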
#include <cublas_v2.h> #include <cuda_runtime.h> // includes cublaslt #include <cublasLt.h> // constants for fused bias+relu kernel #define BIAS_RELU_FW_NTHREADS 128 // forward number of thread per block #define BIAS_RELU_BW_NTHREADS_X 32 // backward number of thread in feature dim #define BIAS_RELU_BW_NTHREADS_Y 16 // backward number of thread in batch dim #define BIAS_RELU_RED_PER_THREAD 16 // backward minimal reduction length per thread // move to a header later on #define ILP 4 template<typename T> __host__ __device__ __forceinline__ bool is_aligned(T* p){ return ((uint64_t)p) % (ILP*sizeof(T)) == 0; } template<typename T> __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } template<typename T> __device__ __forceinline__ void load_store(T* dst, volatile T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } template<typename T> __device__ __forceinline__ void load_store(volatile T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } // Keep ReLU in float only. When using half, cast to float before calling. __device__ __inline__ float relu(float a) { float retf = max(a, 0.f); return (retf); } // Keep Sigmoid in float only. When using half, cast to float before calling. __device__ __inline__ float sigmoid(float a) { float retf = 1.f / (1.f + expf(-a)); return (retf); } // FP64 Wrapper around cublas GEMMEx cublasStatus_t mlp_gemm( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float* alpha, const double* A, int lda, const double* B, int ldb, const float* beta, double* C, int ldc) { return cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, CUDA_R_64F, lda, B, CUDA_R_64F, ldb, beta, C, CUDA_R_64F, ldc, CUDA_R_64F, CUBLAS_GEMM_DEFAULT); } // FP32 Wrapper around cublas GEMMEx cublasStatus_t mlp_gemm( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float* alpha, const float* A, int lda, const float* B, int ldb, const float* beta, float* C, int ldc) { return cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, CUDA_R_32F, lda, B, CUDA_R_32F, ldb, beta, C, CUDA_R_32F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); } // FP16 Tensor core wrapper around cublas GEMMEx cublasStatus_t mlp_gemm( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float* alpha, const at::Half* A, int lda, const at::Half* B, int ldb, float* beta, at::Half* C, int ldc) { return cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, CUDA_R_16F, lda, B, CUDA_R_16F, ldb, beta, C, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); } int mlp_gemm_lt( cublasLtHandle_t ltHandle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float *alpha, /* host pointer */ const at::Half* A, int lda, const at::Half* B, int ldb, float *beta, /* host pointer */ at::Half* C, int ldc, void *workspace, size_t workspaceSize, cudaStream_t stream, bool use_bias, bool use_relu, const void* bias) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; cublasLtMatmulDescOpaque_t operationDesc = {}; cublasLtMatrixLayoutOpaque_t Adesc = {}, Bdesc = {}, Cdesc = {}; 
cublasLtMatmulPreferenceOpaque_t preference = {}; int returnedResults = 0; cublasLtMatmulHeuristicResult_t heuristicResult = {}; cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT; // Create operation descriptor; see cublasLtMatmulDescAttributes_t // for details about defaults; here we just set the transforms for // A and B. status = cublasLtMatmulDescInit(&operationDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa)); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transa)); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; if (use_bias) { status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias)); if (status != CUBLAS_STATUS_SUCCESS) { goto CLEANUP; } if (use_relu) { epilogue = CUBLASLT_EPILOGUE_RELU_BIAS; } else { epilogue = CUBLASLT_EPILOGUE_BIAS; } } else { if (use_relu) { epilogue = CUBLASLT_EPILOGUE_RELU; } } status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogue, sizeof(epilogue)); if (status != CUBLAS_STATUS_SUCCESS) { goto CLEANUP; } // Create matrix descriptors. Not setting any extra attributes. status = cublasLtMatrixLayoutInit( &Adesc, CUDA_R_16F, transa == CUBLAS_OP_N ? m : k, transa == CUBLAS_OP_N ? k : m, lda); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatrixLayoutInit( &Bdesc, CUDA_R_16F, transb == CUBLAS_OP_N ? k : n, transb == CUBLAS_OP_N ? n : k, ldb); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatrixLayoutInit(&Cdesc, CUDA_R_16F, m, n, ldc); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; // Create preference handle; In general, extra attributes can be // used here to disable tensor ops or to make sure algo selected // will work with badly aligned A, B, C. However, for simplicity // here we assume A,B,C are always well aligned (e.g., directly // come from cudaMalloc) status = cublasLtMatmulPreferenceInit(&preference); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatmulPreferenceSetAttribute( &preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize)); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; // We just need the best available heuristic to try and run matmul. // There is no guarantee that this will work. For example, if A is // badly aligned, you can request more (e.g. 32) algos and try to // run them one by one until something works. status = cublasLtMatmulAlgoGetHeuristic( ltHandle, &operationDesc, &Adesc, &Bdesc, &Cdesc, &Cdesc, &preference, 1, &heuristicResult, &returnedResults); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; if (returnedResults == 0) { status = CUBLAS_STATUS_NOT_SUPPORTED; goto CLEANUP; } status = cublasLtMatmul(ltHandle, &operationDesc, alpha, A, &Adesc, B, &Bdesc, beta, C, &Cdesc, C, &Cdesc, &heuristicResult.algo, workspace, workspaceSize, stream); CLEANUP: // Descriptors are no longer needed as all GPU work was already // enqueued. return status == CUBLAS_STATUS_SUCCESS ? 
0 : 1; } int mlp_gemm_lt( cublasLtHandle_t ltHandle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float *alpha, /* host pointer */ const double* A, int lda, const double* B, int ldb, float *beta, /* host pointer */ double* C, int ldc, void *workspace, size_t workspaceSize, cudaStream_t stream, bool use_bias, bool use_relu, const void* bias) { return 1; } int mlp_gemm_lt( cublasLtHandle_t ltHandle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float *alpha, /* host pointer */ const float *A, int lda, const float *B, int ldb, float *beta, /* host pointer */ float *C, int ldc, void *workspace, size_t workspaceSize, cudaStream_t stream, bool use_bias, bool use_relu, const void* bias) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; cublasLtMatmulDescOpaque_t operationDesc = {}; cublasLtMatrixLayoutOpaque_t Adesc = {}, Bdesc = {}, Cdesc = {}; cublasLtMatmulPreferenceOpaque_t preference = {}; int returnedResults = 0; cublasLtMatmulHeuristicResult_t heuristicResult = {}; cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT; // Create operation descriptor; see cublasLtMatmulDescAttributes_t // for details about defaults; here we just set the transforms for // A and B. status = cublasLtMatmulDescInit(&operationDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transa, sizeof(transa)); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &transb, sizeof(transa)); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; if (use_bias) { status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias)); if (status != CUBLAS_STATUS_SUCCESS) { goto CLEANUP; } if (use_relu) { epilogue = CUBLASLT_EPILOGUE_RELU_BIAS; } else { epilogue = CUBLASLT_EPILOGUE_BIAS; } } else { if (use_relu) { epilogue = CUBLASLT_EPILOGUE_RELU; } } status = cublasLtMatmulDescSetAttribute(&operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogue, sizeof(epilogue)); if (status != CUBLAS_STATUS_SUCCESS) { goto CLEANUP; } // Create matrix descriptors. Not setting any extra attributes. status = cublasLtMatrixLayoutInit( &Adesc, CUDA_R_32F, transa == CUBLAS_OP_N ? m : k, transa == CUBLAS_OP_N ? k : m, lda); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatrixLayoutInit( &Bdesc, CUDA_R_32F, transb == CUBLAS_OP_N ? k : n, transb == CUBLAS_OP_N ? n : k, ldb); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatrixLayoutInit(&Cdesc, CUDA_R_32F, m, n, ldc); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; // Create preference handle; In general, extra attributes can be // used here to disable tensor ops or to make sure algo selected // will work with badly aligned A, B, C. However, for simplicity // here we assume A,B,C are always well aligned (e.g., directly // come from cudaMalloc) status = cublasLtMatmulPreferenceInit(&preference); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; status = cublasLtMatmulPreferenceSetAttribute( &preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &workspaceSize, sizeof(workspaceSize)); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; // We just need the best available heuristic to try and run matmul. // There is no guarantee that this will work. For example, if A is // badly aligned, you can request more (e.g. 
32) algos and try to // run them one by one until something works. status = cublasLtMatmulAlgoGetHeuristic( ltHandle, &operationDesc, &Adesc, &Bdesc, &Cdesc, &Cdesc, &preference, 1, &heuristicResult, &returnedResults); if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; if (returnedResults == 0) { status = CUBLAS_STATUS_NOT_SUPPORTED; goto CLEANUP; } status = cublasLtMatmul(ltHandle, &operationDesc, alpha, A, &Adesc, B, &Bdesc, beta, C, &Cdesc, C, &Cdesc, &heuristicResult.algo, workspace, workspaceSize, stream); CLEANUP: // Descriptors are no longer needed as all GPU work was already // enqueued. return status == CUBLAS_STATUS_SUCCESS ? 0 : 1; } // Bias ADD. Assume input X is [features x batch size], column major. // Bias is one 'features' long vector, with implicit broadcast. template <typename T> __global__ void biasAdd_fprop(T *X, T *b, uint batch_size, uint features) { T r_x[ILP]; T r_b[ILP]; if(is_aligned(X) && is_aligned(b) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { int row = tid % (features / ILP); load_store(r_x, X, 0 , tid); load_store(r_b, b, 0 , row); #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = bias_sum; } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { int row = idx % features; r_x[ii] = X[idx]; r_b[ii] = b[row]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = bias_sum; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // Bias ADD + ReLU. Assume input X is [features x batch size], column major. // Supports fused ReLU activation. Safe to call in-place. template <typename T> __global__ void biasAddRelu_fprop(T *X, T *b, uint batch_size, uint features) { T r_x[ILP]; T r_b[ILP]; if(is_aligned(X) && is_aligned(b) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { int row = tid % (features / ILP); load_store(r_x, X, 0 , tid); load_store(r_b, b, 0 , row); #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = relu(bias_sum); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { int row = idx % features; r_x[ii] = X[idx]; r_b[ii] = b[row]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = relu(bias_sum); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // ReLU. Assume input X is [features x batch size], column major. // Safe to call in-place.
template <typename T> __global__ void Relu_fprop(T *X, uint batch_size, uint features) { T r_x[ILP]; if(is_aligned(X) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_x, X, 0 , tid); #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = relu(static_cast<float>(r_x[ii])); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_x[ii] = X[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = relu(static_cast<float>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // Sigmoid. Assume input X is [features x batch size], column major. // Safe to call in-place. template <typename T> __global__ void Sigmoid_fprop(T *X, uint batch_size, uint features) { T r_x[ILP]; if(is_aligned(X) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_x, X, 0 , tid); #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = sigmoid(static_cast<float>(r_x[ii])); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_x[ii] = X[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = sigmoid(static_cast<float>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // ReLU. Assume input X is [features x batch size], column major. // Safe to call in-place. template <typename T> __global__ void Relu_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) { T r_dy[ILP]; T r_y[ILP]; if(is_aligned(dY) && is_aligned(Y) && is_aligned(dX) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_dy, dY, 0 , tid); load_store(r_y, Y, 0 , tid); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; } load_store(dX, r_dy, tid, 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_dy[ii] = dY[idx]; r_y[ii] = Y[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { dX[idx] = r_dy[ii]; } } } } } // Sigmoid. Assume input X is [features x batch size], column major. // Safe to call in-place. 
template <typename T> __global__ void Sigmoid_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) { T r_dy[ILP]; T r_y[ILP]; if(is_aligned(dY) && is_aligned(Y) && is_aligned(dX) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_dy, dY, 0 , tid); load_store(r_y, Y, 0 , tid); #pragma unroll for(int ii=0;ii<ILP;ii++){ float grad_out = r_dy[ii]; float out = r_y[ii]; float grad_i = out * ( 1.f - out) * grad_out; r_dy[ii] = grad_i; } load_store(dX, r_dy, tid, 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_dy[ii] = dY[idx]; r_y[ii] = Y[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float grad_out = r_dy[ii]; float out = r_y[ii]; float grad_i = out * ( 1.f - out) * grad_out; r_dy[ii] = grad_i; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { dX[idx] = r_dy[ii]; } } } } } // Compute grid size for the pointwise backward kernel. // block_x/y is the total number of elements handled per block, not the number of threads void get_biasAddRelu_bprop_grid_size( int yfeat, int batch_size, int block_x, int block_y, int* grid_x, int* grid_y) { *grid_x = (yfeat + block_x - 1) / block_x; // Get number of SMs for efficient reduction. int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; // could switch to an occupancy calculation; use 4 for now for sm_70 int max_blocks_y = (num_SMs * 4+(*grid_x)-1) / (*grid_x); // block_y should come from the minimal work per thread int nRedSplits = (batch_size + block_y - 1) / block_y; // the kernel adjusts its per-thread reduction work, so instead of launching more blocks than needed // we simply cap the launch at max_blocks_y *grid_y = std::min(nRedSplits, max_blocks_y); return; } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAdd_bprop( T* dY, int features, int batch_size, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction.
__shared__ bool isLastBlock; // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y]; // Accumulate db in FP32 always float db_local = 0; if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int64_t row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; db_local += (float)dY[flat_idx]; } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int64_t row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; #pragma unroll 4 for (int u = 0; u < UNROLL_FACTOR; u++) { db_local += (float)dY[flat_idx]; flat_idx += features; } } // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; smem[linear_idx] = db_local; } __syncthreads(); if (f < features) { if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ db_local += smem[yidx * blockDim.x + threadIdx.x]; } // block result is in db_local now for all threadIdx.y == 0 // Write out partial result out[f] = db_local; } } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); db_local = 0; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock && f < features) { if(threadIdx.y == 0) { for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; db_local += (float)(intermediate[col * features + row]); } db[f] = (T)db_local; } } } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAddRelu_bprop( T* Y, T* dY, int features, int batch_size, T* dX, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
__shared__ bool isLastBlock; // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y]; // Accumulate db in FP32 always float db_local = 0; if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; T y_val = Y[flat_idx]; T dy_val = dY[flat_idx]; T dx_val; if ((float)y_val > 0.f) dx_val = dy_val; else dx_val = 0; dX[flat_idx] = dx_val; db_local += (float)dx_val; } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; #pragma unroll 4 for (int u = 0; u < UNROLL_FACTOR; u++) { T y_val = Y[flat_idx]; T dy_val = dY[flat_idx]; T dx_val; if ((float)y_val > 0.f) dx_val = dy_val; else dx_val = 0; dX[flat_idx] = dx_val; db_local += (float)dx_val; flat_idx += features; } } // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; smem[linear_idx] = db_local; } __syncthreads(); if (f < features) { if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ db_local += smem[yidx * blockDim.x + threadIdx.x]; } // block result is in db_local now for all threadIdx.y == 0 // Write out partial result out[f] = db_local; } } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); db_local = 0; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock && f < features) { if(threadIdx.y == 0) { for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; db_local += (float)(intermediate[col * features + row]); } db[f] = (T)db_local; } } } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAddRelu_bprop_aligned( T* Y, T* dY, int features, int batch_size, T* dX, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
__shared__ bool isLastBlock; // Accumulate db in FP32 always float db_local[ILP]; T r_y[ILP]; T r_dy[ILP]; #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] = 0.f; } // f always <= features in this case //if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features / ILP + row; load_store(r_y, Y, 0, flat_idx); load_store(r_dy, dY, 0, flat_idx); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; db_local[ii] += (float)r_dy[ii]; } load_store(dX, r_dy, flat_idx, 0); } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features / ILP + row; // total threads in x == features/ILP #pragma unroll for (int u = 0; u < UNROLL_FACTOR; u++) { load_store(r_y, Y, 0, flat_idx); load_store(r_dy, dY, 0, flat_idx); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; db_local[ii] += (float)r_dy[ii]; } load_store(dX, r_dy, flat_idx, 0); flat_idx += features/ILP; } } // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y*ILP]; // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; float* smem_out = smem + ILP * linear_idx; #pragma unroll for(int ii=0;ii<ILP;ii++){ smem_out[ii] = db_local[ii]; // reuse local dy buffer } __syncthreads(); if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ float* smem_in = smem + ILP * (yidx * blockDim.x + threadIdx.x); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] += smem_in[ii]; // reuse local dy buffer } } // block result is in db_local now for all threadIdx.y == 0 if(gridDim.y == 1) { #pragma unroll for(int ii=0;ii<ILP;ii++){ r_dy[ii] = db_local[ii]; // reuse local dy buffer } load_store(db, r_dy, f, 0); return; } // Write out partial result load_store(out, db_local, f, 0); } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] = 0.f; } float r_db[ILP]; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock) { if(threadIdx.y == 0){ for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; load_store(r_db, intermediate, 0, col * features / ILP + row); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] += r_db[ii]; } } #pragma unroll for(int ii=0;ii<ILP;ii++){ r_dy[ii] = db_local[ii]; // reuse local dy buffer } load_store(db, r_dy, f, 0); } } } // Lists where the num_layers-1 intermediate Y buffers start in reserved space on fprop, starting // offset 0. The last Y value is, of course, stored in the user provided output buffer. 
void get_y_offsets( int batch_size, int num_layers, const int* output_features, int* y_start_offsets) { y_start_offsets[0] = 0; for (int i = 1; i < num_layers; i++) { y_start_offsets[i] = y_start_offsets[i - 1] + batch_size * output_features[i - 1]; } } // Returns the reserved space (in elements) needed for the MLP size_t get_mlp_reserved_space(int64_t batch_size, int num_layers, const int* output_features) { size_t res_space = 0; // Need to store output of every intermediate MLP - size equal to output_features[i] * batch_size // for all 'i' in [0, num_layers-1) for (int l = 0; l < num_layers; l++) { res_space += output_features[l] * batch_size; } return res_space; } // Returns the size of all fprop activations combined size_t get_all_activations_size(int64_t batch_size, int num_layers, const int* output_features) { size_t acts_size = 0; for (int l = 0; l < num_layers; l++) { acts_size += output_features[l] * batch_size; } return acts_size; } #if 0 // Returns the work space (in elements) needed for the MLP bprop. size_t get_mlp_bp_workspace (int batch_size, int num_layers, const int* output_features) { /* Workspace is partitioned as DY_GEMMs : DX_GEMMs */ size_t work_space = 0; // Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p // of biasReLU_bp and one for o/p of dgrad GEMM). work_space += 2*get_all_activations_size(batch_size, num_layers, output_features); return work_space; } #endif // Scratch space needed for reductions in number of elements size_t get_reduction_scratch_space(int batch_size, int num_layers, const int* output_features) { size_t max_scratch_space = 0; // Loop over all layers to see which one needs the max scratch space for (int l = 0; l < num_layers; l++) { // need to find max(aligned, not_aligned) int tmp, res0, res1; int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size( output_features[l], batch_size, block_x, block_y, &tmp, &res0); block_x = ILP * BIAS_RELU_BW_NTHREADS_X; get_biasAddRelu_bprop_grid_size( output_features[l], batch_size, block_x, block_y, &tmp, &res1); max_scratch_space = std::max(max_scratch_space, (size_t)(output_features[l] * res0)); max_scratch_space = std::max(max_scratch_space, (size_t)(output_features[l] * res1)); } return max_scratch_space; } // Buffer for semaphores size_t get_semaphores_size(int num_layers, const int* output_features) { // Upper bound on semaphores is one per feature for the layer // with the most features. int max_features = 0; for (int l = 0; l < num_layers; l++) { max_features = std::max(max_features, output_features[l]); } return (size_t)max_features; } // Returns the work space (in elements) needed for the MLP bprop. template <typename T> size_t get_mlp_bp_workspace_in_bytes(int batch_size, int num_layers, const int* output_features) { size_t work_space = 0; // Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p // of biasReLU_bp and one for o/p of dgrad GEMM). 
work_space += 2 * get_all_activations_size(batch_size, num_layers, output_features) * sizeof(T); work_space += get_reduction_scratch_space(batch_size, num_layers, output_features) * sizeof(float); work_space += get_semaphores_size(num_layers, output_features) * sizeof(int); return work_space; } // Returns pointers to each segment of the workspace template <typename T> void partition_mlp_bp_workspace( int batch_size, int num_layers, const int* output_features, void* work_space, T** dy_gemms, T** dx_gemms, float** db_scratch, int** semaphores) { /* Workspace is partitioned as DY_GEMMs : DX_GEMMs : DB_SCRATCH : SEMAPHORES */ // Start address where dy_gemm tensors are stored *dy_gemms = reinterpret_cast<T*>(work_space); // Start address where dx_gemm tensors are stored *dx_gemms = *dy_gemms + get_all_activations_size(batch_size, num_layers, output_features); // Start address where db intermediate tensors are stored *db_scratch = reinterpret_cast<float*>( *dx_gemms + get_all_activations_size(batch_size, num_layers, output_features)); // Start address of semaphores *semaphores = reinterpret_cast<int*>( *db_scratch + get_reduction_scratch_space(batch_size, num_layers, output_features)); return; } // Does a simple MLP fprop (GEMM+bias+ReLU). // Can handle num_layers number of layers, each with its own shape. Output of layer i is assumed // to be input of layer i+1. output_features, WPtr and BPtr are arrays of length num_layers, and // must be in the same order i.e. WPtr[i] and BPtr[i] are respectively the weight and bias of layer // 'i'. template <typename T> int mlp_fp( T* X, int input_features, int batch_size, T** WPtr, int num_layers, int* output_features, T** BPtr, T* Y, T* reserved_space, int use_bias, int activation, void* lt_workspace) { T *weight, *input, *output, *bias; T *reserved_space_x, *reserved_space_y; reserved_space_x = NULL; reserved_space_y = reserved_space; // Get cublas handle from Pytorch cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); // Get the stream from cublas handle to reuse for biasReLU kernel. cudaStream_t stream; cublasGetStream(handle, &stream); for (int layer = 0; layer < num_layers; layer++) { weight = WPtr[layer]; input = (layer == 0) ? X : reserved_space_x; output = (layer == num_layers - 1) ? Y : reserved_space_y; if (use_bias) { bias = BPtr[layer]; } int ifeat = (layer == 0) ? 
input_features : output_features[layer - 1]; int ofeat = output_features[layer]; float one = 1.f; float zero = 0.f; // try with cublaslt first for supported case with valid handle int cublaslt_status = 1; if(activation < 1){ cublaslt_status = mlp_gemm_lt( //ltHandle, (cublasLtHandle_t)handle, CUBLAS_OP_T, CUBLAS_OP_N, ofeat, batch_size, ifeat, &one, weight, ifeat, input, ifeat, &zero, output, ofeat, lt_workspace, 1 << 22, stream, use_bias == 1, activation == 1, bias); } // if cublaslt failed or not executed, fallback to cublas if (cublaslt_status != 0) { cublasStatus_t cublas_status; // Call GEMM: fprop is Y = W'X cublas_status = mlp_gemm( handle, CUBLAS_OP_T, CUBLAS_OP_N, ofeat, batch_size, ifeat, &one, weight, ifeat, input, ifeat, &zero, output, ofeat); if (cublas_status != CUBLAS_STATUS_SUCCESS) { printf("GEMM fprop failed with %d\n", cublas_status); return 1; } const uint &input_size = ofeat; int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; // Call biasReLU if(use_bias == 1) { if (activation == 0) { // no activation cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); biasAdd_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size); } else if (activation == 1) { // relu cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAddRelu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); biasAddRelu_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size); } else if (activation == 2) { // sigmoid cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); biasAdd_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size); cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size); } } else { // don't need to do anything in case of no activation and no bias if (activation == 1) { // relu cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); Relu_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size); } else if (activation == 2) { // sigmoid cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size); } } } // Set current output as next layer input reserved_space_x = reserved_space_y; // Set next layer output reserved_space_y += ofeat * batch_size; } return 0; } // Does a simple MLP bprop (GEMM+bias+ReLU). // Needs reserved space to come back exactly as it was populated in fprop. // Does dgrad and wgrad sequentially. template <typename T> int mlp_bp( T* X, T* Y, int input_features, int batch_size, T** WPtr, int num_layers, int* output_features, T* dY, T* reserved_space, T* work_space, T* dX, T** dwPtr, T** dbPtr, bool requires_grad, int use_bias, int activation) { T* weight; T *dweight, *dx, *dy, *dbias; T *x, *y; // Where the dx of the biasReLU (== dy of gemm) is stored. Can be thrown away // after bp call. T* dy_gemm_base; // Where the dx after GEMM is stored. T* dx_gemm_base; // Where partial reduction results are stored. float* db_scratch; // Semaphores for reduction. 
int* semaphores; partition_mlp_bp_workspace<T>( batch_size, num_layers, output_features, work_space, &dy_gemm_base, &dx_gemm_base, &db_scratch, &semaphores); size_t semaphore_size = get_semaphores_size(num_layers, output_features) * sizeof(int); // Get cublas handle from PyTorch cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); // Get the stream from cublas handle to reuse for biasReLU kernel. cudaStream_t stream; cublasGetStream(handle, &stream); int* y_offsets = (int*)malloc(num_layers * sizeof(int)); get_y_offsets(batch_size, num_layers, output_features, y_offsets); for (int layer = num_layers - 1; layer >= 0; layer--) { weight = WPtr[layer]; dweight = dwPtr[layer]; // x is read from reserved space x = (layer == 0) ? X : reserved_space + y_offsets[layer - 1]; // dx is written in workspace for all but layer==0 dx = (layer == 0) ? dX : dx_gemm_base + y_offsets[layer - 1]; // y is read from reserved space y = (layer == num_layers - 1) ? Y : reserved_space + y_offsets[layer]; // dy is the dx from layer+1 dy = (layer == num_layers - 1) ? dY : dx_gemm_base + y_offsets[layer]; // dy_gemm is written to and read immediately T* dy_gemm = dy_gemm_base + y_offsets[layer]; dbias = dbPtr[layer]; int xfeat = (layer == 0) ? input_features : output_features[layer - 1]; int yfeat = output_features[layer]; float one = 1.f; float zero = 0.f; if (use_bias == 1) { if (activation == 0) { // no activation // bgrad dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; cudaMemsetAsync(semaphores, 0, semaphore_size, stream); int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAdd_bprop<T, 4><<<grid, block, 0, stream>>>( dy, yfeat, batch_size, db_scratch, semaphores, dbias); // bypass dgrad through reset pointer dy_gemm = dy; } else if (activation == 1) { // relu dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; cudaMemsetAsync(semaphores, 0, semaphore_size, stream); if(yfeat % (ILP * BIAS_RELU_BW_NTHREADS_X) == 0 && is_aligned(y) && is_aligned(dy) && is_aligned(dy_gemm) && is_aligned(dbias)){ int block_x = ILP * BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAddRelu_bprop_aligned<T, 4><<<grid, block, 0, stream>>>( y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias); } else { int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAddRelu_bprop<T, 4><<<grid, block, 0, stream>>>( y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias); } } else if (activation == 2) { // sigmoid // activation backward int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm); // bgrad, from dy_gemm dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; cudaMemsetAsync(semaphores, 0, semaphore_size, stream); int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD *
BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAdd_bprop<T, 4><<<grid, block, 0, stream>>>( dy_gemm, yfeat, batch_size, db_scratch, semaphores, dbias); } } else { // no bias below if (activation == 0) { // bypass dgrad through reset pointer dy_gemm = dy; } else if (activation == 1) { // relu int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); Relu_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm); } else if (activation == 2) { // sigmoid int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm); } } cublasStatus_t cublas_status; // Call GEMM dgrad if (layer > 0 || requires_grad == 1) { cublas_status = mlp_gemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, xfeat, batch_size, yfeat, &one, weight, xfeat, dy_gemm, yfeat, &zero, dx, xfeat); if (cublas_status != CUBLAS_STATUS_SUCCESS) { printf("GEMM dgrad failed with %d\n", cublas_status); return 1; } } // Call GEMM wgrad cublas_status = mlp_gemm( handle, CUBLAS_OP_N, CUBLAS_OP_T, xfeat, yfeat, batch_size, &one, x, xfeat, dy_gemm, yfeat, &zero, dweight, xfeat); if (cublas_status != CUBLAS_STATUS_SUCCESS) { printf("GEMM wgrad failed with %d\n", cublas_status); return 1; } } return 0; } // Instantiate for floating point types template int mlp_fp<float>( float* X, int input_features, int batch_size, float** WPtr, int num_layers, int* output_features, float** BPtr, float* Y, float* reserved_space, int use_bias, int activation, void* lt_workspace); template int mlp_bp<float>( float* X, float* Y, int input_features, int batch_size, float** WPtr, int num_layers, int* output_features, float* dY, float* reserved_space, float* work_space, float* dX, float** dwPtr, float** dbPtr, bool requires_grad, int use_bias, int activation); template int mlp_fp<at::Half>( at::Half* X, int input_features, int batch_size, at::Half** WPtr, int num_layers, int* output_features, at::Half** BPtr, at::Half* Y, at::Half* reserved_space, int use_bias, int activation, void* lt_workspace); template int mlp_bp<at::Half>( at::Half* X, at::Half* Y, int input_features, int batch_size, at::Half** WPtr, int num_layers, int* output_features, at::Half* dY, at::Half* reserved_space, at::Half* work_space, at::Half* dX, at::Half** dwPtr, at::Half** dbPtr, bool requires_grad, int use_bias, int activation); template int mlp_fp<double>( double* X, int input_features, int batch_size, double** WPtr, int num_layers, int* output_features, double** BPtr, double* Y, double* reserved_space, int use_bias, int activation, void* lt_workspace); template int mlp_bp<double>( double* X, double* Y, int input_features, int batch_size, double** WPtr, int num_layers, int* output_features, double* dY, double* reserved_space, double* work_space, double* dX, double** dwPtr, double** dbPtr, bool requires_grad, int use_bias, int activation); template size_t get_mlp_bp_workspace_in_bytes<float>( int batch_size, int num_layers, const int* output_features); template size_t get_mlp_bp_workspace_in_bytes<at::Half>( int batch_size, int num_layers, const int* output_features); template size_t 
get_mlp_bp_workspace_in_bytes<double>( int batch_size, int num_layers, const int* output_features);
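// ---------------------------------------------------------------------------
// Minimal host-side usage sketch for mlp_fp (added for illustration; not part
// of the original source). The layer sizes (784 -> 256 -> 10), the 4 MB
// lt_workspace allocation, and the helper name run_mlp_fprop_example are
// assumptions; only mlp_fp<float> and get_mlp_reserved_space above are real.
// It also assumes a valid PyTorch CUDA context, since mlp_fp pulls the
// current cuBLAS handle from ATen.
#include <cuda_runtime.h>

int run_mlp_fprop_example(float* d_X,     // [784 x batch], device pointer
                          float* d_W[2],  // host array of device weight pointers
                          float* d_B[2],  // host array of device bias pointers
                          float* d_Y,     // [10 x batch], device pointer
                          int batch_size) {
  int input_features = 784;
  int output_features[2] = {256, 10};
  const int num_layers = 2;

  // Reserved space holds every layer's activations (in elements), which
  // mlp_bp later reads back; size it with the helper defined above.
  size_t reserved_elems =
      get_mlp_reserved_space(batch_size, num_layers, output_features);
  float* d_reserved = nullptr;
  cudaMalloc(&d_reserved, reserved_elems * sizeof(float));

  // When mlp_fp takes the cublasLt path it passes 1 << 22 bytes as the
  // workspace size, so the caller should provide at least that much.
  void* d_lt_workspace = nullptr;
  cudaMalloc(&d_lt_workspace, 1 << 22);

  int rc = mlp_fp<float>(d_X, input_features, batch_size, d_W, num_layers,
                         output_features, d_B, d_Y, d_reserved,
                         /*use_bias=*/1, /*activation=*/1 /* ReLU */,
                         d_lt_workspace);

  cudaFree(d_lt_workspace);
  cudaFree(d_reserved);
  return rc;  // 0 on success, 1 if a GEMM failed
}
// ---------------------------------------------------------------------------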
#include "CUFLU.h" #if ( MODEL == HYDRO ) // external functions #ifdef __CUDACC__ #include "CUFLU_Shared_FluUtility.cu" #if ( CHECK_INTERMEDIATE == EXACT ) # include "CUFLU_Shared_RiemannSolver_Exact.cu" #elif ( CHECK_INTERMEDIATE == HLLE ) # include "CUFLU_Shared_RiemannSolver_HLLE.cu" #elif ( CHECK_INTERMEDIATE == HLLC ) # include "CUFLU_Shared_RiemannSolver_HLLC.cu" #elif ( CHECK_INTERMEDIATE == HLLD ) # include "CUFLU_Shared_RiemannSolver_HLLD.cu" #endif #else // #ifdef __CUDACC__ void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset ); void Hydro_Con2Flux( const int XYZ, real Flux[], const real In[], const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], const real* const PresIn ); #if ( CHECK_INTERMEDIATE == EXACT ) void Hydro_RiemannSolver_Exact( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real Dens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #elif ( CHECK_INTERMEDIATE == HLLE ) void Hydro_RiemannSolver_HLLE( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real Dens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #elif ( CHECK_INTERMEDIATE == HLLC ) void Hydro_RiemannSolver_HLLC( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real Dens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #elif ( CHECK_INTERMEDIATE == HLLD ) void Hydro_RiemannSolver_HLLD( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real Dens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #endif #endif // #ifdef __CUDACC__ ... else ... //------------------------------------------------------------------------------------------------------- // Function : Hydro_RiemannSolver_Roe // Description : Approximate Riemann solver of Roe // // Note : 1. Input data should be conserved variables // 2. Ref : (a) "Riemann Solvers and Numerical Methods for Fluid Dynamics - A Practical Introduction // ~ by Eleuterio F. Toro" // (b) Stone et al., ApJS, 178, 137 (2008) // 3. 
Shared by MHM, MHM_RP, and CTU schemes // // Parameter : XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // Flux_Out : Array to store the output flux // L_In : Input left state (conserved variables) // R_In : Input right state (conserved variables) // MinDens/Pres : Density and pressure floors // EoS_DensEint2Pres : EoS routine to compute the gas pressure // EoS_DensPres2CSqr : EoS routine to compute the sound speed squared // EoS_AuxArray_* : Auxiliary arrays for the EoS routines // EoS_Table : EoS tables //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_RiemannSolver_Roe( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ) { // check # if ( EOS == EOS_GAMMA ) const real *Passive = NULL; // EOS_GAMMA does not involve passive scalars # elif ( defined GAMER_DEBUG ) printf( "ERROR : EOS != EOS_GAMMA is NOT supported at file <%s>, line <%d>, function <%s> !!\n", __FILE__, __LINE__, __FUNCTION__ ); # endif // 1. reorder the input variables for different spatial directions real L[NCOMP_TOTAL_PLUS_MAG], R[NCOMP_TOTAL_PLUS_MAG]; for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) { L[v] = L_In[v]; R[v] = R_In[v]; } Hydro_Rotate3D( L, XYZ, true, MAG_OFFSET ); Hydro_Rotate3D( R, XYZ, true, MAG_OFFSET ); // longitudinal B field in the left and right states should be the same # if ( defined GAMER_DEBUG && defined MHD ) if ( L[MAG_OFFSET] != R[MAG_OFFSET] ) printf( "ERROR : BxL (%24.17e) != BxR (%24.17e) for XYZ %d at file <%s>, line <%d>, function <%s>!!\n", L[MAG_OFFSET], R[MAG_OFFSET], XYZ, __FILE__, __LINE__, __FUNCTION__ ); # endif // 2. evaluate the average values const real ZERO = (real)0.0; const real ONE = (real)1.0; const real _TWO = (real)0.5; const real Gamma = EoS_AuxArray_Flt[0]; // only support constant-gamma EoS (i.e., EOS_GAMMA) const real Gamma_m1 = EoS_AuxArray_Flt[1]; const bool CheckMinPres_Yes = true; # ifdef MHD const real TWO = (real)2.0; const real Gamma_m2 = Gamma - TWO; # endif real Rho, _Rho, _RhoL, _RhoR, RhoL_sqrt, RhoR_sqrt, _RhoL_sqrt, _RhoR_sqrt, _RhoLR_sqrt_sum; real PL, PR, HL, HR, u, v, w, V2, H, a, a2, GammaP_Rho, EmagL=NULL_REAL, EmagR=NULL_REAL; # ifdef MHD real Rho_sqrt, _Rho_sqrt; // Roe-average density real ByL, BzL, ByR, BzR; // magnetic field from left and right states real Bx, By, Bz, B2, Bn2, Bn, B2_Rho; // Roe-average magnetic field real Cax, Cax2, Cat2, Cs, Cs2, Cf, Cf2; // Alfven, slow, and fast waves real alpha_f, alpha_s, beta_y, beta_z; // Eqs. (A16) and (A17) in ref-b real Ca2_plus_a2, Ca2_min_a2, Cf2_min_Cs2; // Ca^2+a^2, Ca^2-a^2, Cf^2-Cs^2 real X, Y; // Eqs. (B15) and (B16) in ref-b real S; // sign(Bx) real Bn_star; // Eq. (B20) in ref-b real beta_n_star2, beta_y_star, beta_z_star; // Eq. 
(B28) in ref-b # endif _RhoL = ONE / L[0]; _RhoR = ONE / R[0]; # ifdef MHD Bx = L[ MAG_OFFSET + 0 ]; // assuming Bx=BxL=BxR ByL = L[ MAG_OFFSET + 1 ]; BzL = L[ MAG_OFFSET + 2 ]; ByR = R[ MAG_OFFSET + 1 ]; BzR = R[ MAG_OFFSET + 2 ]; EmagL = _TWO*( SQR(Bx) + SQR(ByL) + SQR(BzL) ); EmagR = _TWO*( SQR(Bx) + SQR(ByR) + SQR(BzR) ); # endif PL = Hydro_Con2Pres( L[0], L[1], L[2], L[3], L[4], L+NCOMP_FLUID, CheckMinPres_Yes, MinPres, EmagL, EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL ); PR = Hydro_Con2Pres( R[0], R[1], R[2], R[3], R[4], R+NCOMP_FLUID, CheckMinPres_Yes, MinPres, EmagR, EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL ); # ifdef MHD HL = _RhoL*( L[4] + PL + EmagL ); HR = _RhoR*( R[4] + PR + EmagR ); # else HL = _RhoL*( L[4] + PL ); HR = _RhoR*( R[4] + PR ); # endif # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(L[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", L[0], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(R[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", R[0], __FILE__, __LINE__, __FUNCTION__ ); # endif RhoL_sqrt = SQRT( L[0] ); RhoR_sqrt = SQRT( R[0] ); Rho = RhoL_sqrt*RhoR_sqrt; _Rho = ONE/Rho; _RhoL_sqrt = ONE/RhoL_sqrt; _RhoR_sqrt = ONE/RhoR_sqrt; _RhoLR_sqrt_sum = ONE/(RhoL_sqrt + RhoR_sqrt); u = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[1] + _RhoR_sqrt*R[1] ); v = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[2] + _RhoR_sqrt*R[2] ); w = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[3] + _RhoR_sqrt*R[3] ); V2 = u*u + v*v + w*w; H = _RhoLR_sqrt_sum*( RhoL_sqrt*HL + RhoR_sqrt*HR ); # ifdef MHD Rho_sqrt = SQRT( Rho ); _Rho_sqrt = ONE/Rho_sqrt; By = _RhoLR_sqrt_sum*( RhoL_sqrt*ByR + RhoR_sqrt*ByL ); Bz = _RhoLR_sqrt_sum*( RhoL_sqrt*BzR + RhoR_sqrt*BzL ); Bn2 = SQR( By ) + SQR( Bz ); Bn = SQRT( Bn2 ); B2 = SQR( Bx ) + Bn2; B2_Rho = B2*_Rho; S = SIGN( Bx ); X = _TWO*( SQR(ByR-ByL) + SQR(BzR-BzL) )*SQR( _RhoLR_sqrt_sum ); X *= Gamma_m2; # ifdef EULERY Y = _TWO*( L[0] + R[0] )*_Rho ; # else Y = ONE; # endif Y *= Gamma_m2; Bn_star = SQRT( Gamma_m1 - Y )*Bn; # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(Gamma_m1-Y) ) printf( "ERROR : invalid Gamma_m1-Y (%14.7e, Gamma_m1 %14.7e, Y %14.7e) at file <%s>, line <%d>, function <%s>\n", Gamma_m1-Y, Gamma_m1, Y, __FILE__, __LINE__, __FUNCTION__ ); # endif if ( Bn == ZERO ) { beta_y = ONE; beta_z = ZERO; } else { const real _Bn = ONE/Bn; beta_y = By*_Bn; beta_z = Bz*_Bn; } if ( Bn_star == ZERO ) { beta_y_star = ONE; beta_z_star = ZERO; } else { const real _Bn_star = ONE/Bn_star; beta_y_star = By*_Bn_star; beta_z_star = Bz*_Bn_star; } beta_n_star2 = SQR( beta_y_star ) + SQR( beta_z_star ); # endif // #ifdef MHD GammaP_Rho = Gamma_m1*( H - _TWO*V2 ); # ifdef MHD GammaP_Rho -= Gamma_m1*B2_Rho; // H = 0.5*v^2 + B^2/rho + gamma/(gamma-1)*P/rho # endif GammaP_Rho = Gamma*_Rho*Hydro_CheckMinPres( GammaP_Rho*Rho/Gamma, MinPres ); // apply pressure floor a2 = GammaP_Rho; # ifdef MHD a2 -= X; # endif # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(a2) ) printf( "ERROR : invalid a2 (%14.7e) at file <%s>, line <%d>, function <%s>\n", a2, __FILE__, __LINE__, __FUNCTION__ ); # endif a = SQRT( a2 ); # ifdef MHD Cax2 = SQR(Bx)*_Rho; Cax = SQRT( Cax2 ); Cat2 = ( Gamma_m1 - Y )*Bn2*_Rho; Ca2_plus_a2 = Cat2 + Cax2 + a2; Ca2_min_a2 = Cat2 + Cax2 - a2; Cf2_min_Cs2 = SQRT( SQR(Ca2_min_a2) + (real)4.0*a2*Cat2 ); // evaluate the fast/slow wave speed (Cf/Cs) if ( Cat2 == ZERO ) { if ( Cax2 == a2 ) { Cf2 = a2; Cs2 = a2; } else if 
( Cax2 > a2 ) { Cf2 = Cax2; Cs2 = a2; } else { Cf2 = a2; Cs2 = Cax2; } } else { if ( Cax2 == ZERO ) { Cf2 = a2 + Cat2; Cs2 = ZERO; } else { Cf2 = _TWO*( Ca2_plus_a2 + Cf2_min_Cs2 ); Cs2 = a2*Cax2/Cf2; // do not use "Cf2 - Cf2_min_Cs2" to avoid negative values caused by round-off errors // Cs2 = Cf2 - Cf2_min_Cs2; } } // if ( Cat2 == ZERO ) ... else ... Cf = SQRT( Cf2 ); Cs = SQRT( Cs2 ); const real a2_min_Cs2 = a2 - Cs2; const real Cf2_min_a2 = Cf2 - a2; if ( Cf2_min_Cs2 == ZERO ) { alpha_f = ONE; alpha_s = ZERO; } else if ( a2_min_Cs2 <= ZERO ) { alpha_f = ZERO; alpha_s = ONE; } else if ( Cf2_min_a2 <= ZERO ) { alpha_f = ONE; alpha_s = ZERO; } else { # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(a2_min_Cs2) ) printf( "ERROR : invalid a2_min_Cs2 (%14.7e) at file <%s>, line <%d>, function <%s>\n", a2_min_Cs2, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Cf2_min_a2) ) printf( "ERROR : invalid Cf2_min_a2 (%14.7e) at file <%s>, line <%d>, function <%s>\n", Cf2_min_a2, __FILE__, __LINE__, __FUNCTION__ ); # endif const real _Cf2_min_Cs2 = ONE/Cf2_min_Cs2; alpha_f = SQRT( a2_min_Cs2*_Cf2_min_Cs2 ); alpha_s = SQRT( Cf2_min_a2*_Cf2_min_Cs2 ); } # endif // #ifdef MHD // 3. evaluate the eigenvalues # ifdef MHD const real EigenVal[NWAVE] = { u-Cf, u-Cax, u-Cs, u, u+Cs, u+Cax, u+Cf }; # else const real EigenVal[NWAVE] = { u-a, u, u, u, u+a }; # endif // 4. evaluate the left and right fluxes real Flux_L[NCOMP_TOTAL_PLUS_MAG], Flux_R[NCOMP_TOTAL_PLUS_MAG]; Hydro_Con2Flux( 0, Flux_L, L, MinPres, NULL, NULL, NULL, NULL, &PL ); Hydro_Con2Flux( 0, Flux_R, R, MinPres, NULL, NULL, NULL, NULL, &PR ); // 5. return the upwind fluxes if flow is supersonic if ( EigenVal[0] >= ZERO ) { for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) Flux_Out[v] = Flux_L[v]; Hydro_Rotate3D( Flux_Out, XYZ, false, MAG_OFFSET ); return; } if ( EigenVal[NWAVE-1] <= ZERO ) { for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) Flux_Out[v] = Flux_R[v]; Hydro_Rotate3D( Flux_Out, XYZ, false, MAG_OFFSET ); return; } // 6. evaluate the eigenvectors // --> right eigenvectors: columns of REigenVec[][] // left eigenvectors: rows of LEigenVec[][] # ifdef MHD real REigenVec[NWAVE][NWAVE], LEigenVec[NWAVE][NWAVE]; // Eqs. 
[A13]-[A17] in ref-b const real Af = a*alpha_f*Rho_sqrt; const real As = a*alpha_s*Rho_sqrt; const real Cff = Cf*alpha_f; const real Css = Cs*alpha_s; const real Qf = Cff*S; const real Qs = Css*S; // right eigenvectors REigenVec[0][0] = alpha_f; REigenVec[0][1] = ZERO; REigenVec[0][2] = alpha_s; REigenVec[0][3] = ONE; REigenVec[0][4] = alpha_s; REigenVec[0][5] = ZERO; REigenVec[0][6] = alpha_f; const real u_alpha_f = u*alpha_f; const real u_alpha_s = u*alpha_s; REigenVec[1][0] = u_alpha_f - Cff; REigenVec[1][1] = ZERO; REigenVec[1][2] = u_alpha_s - Css; REigenVec[1][3] = u; REigenVec[1][4] = u_alpha_s + Css; REigenVec[1][5] = ZERO; REigenVec[1][6] = u_alpha_f + Cff; const real v_alpha_f = v*alpha_f; const real v_alpha_s = v*alpha_s; const real Qs_beta_y_star = Qs*beta_y_star; const real Qf_beta_y_star = Qf*beta_y_star; REigenVec[2][0] = v_alpha_f + Qs_beta_y_star; REigenVec[2][1] = -beta_z; REigenVec[2][2] = v_alpha_s - Qf_beta_y_star; REigenVec[2][3] = v; REigenVec[2][4] = v_alpha_s + Qf_beta_y_star; REigenVec[2][5] = beta_z; REigenVec[2][6] = v_alpha_f - Qs_beta_y_star; const real w_alpha_f = w*alpha_f; const real w_alpha_s = w*alpha_s; const real Qs_beta_z_star = Qs*beta_z_star; const real Qf_beta_z_star = Qf*beta_z_star; REigenVec[3][0] = w_alpha_f + Qs_beta_z_star; REigenVec[3][1] = beta_y; REigenVec[3][2] = w_alpha_s - Qf_beta_z_star; REigenVec[3][3] = w; REigenVec[3][4] = w_alpha_s + Qf_beta_z_star; REigenVec[3][5] = -beta_y; REigenVec[3][6] = w_alpha_f - Qs_beta_z_star; const real Hp = H - B2_Rho; // H_prime = H - B^2/rho const real u_Cf = u*Cf; const real u_Cs = u*Cs; const real v_by_w_bz = v*beta_y_star + w*beta_z_star; const real Qs_v_by_w_bz = Qs*v_by_w_bz; const real Qf_v_by_w_bz = Qf*v_by_w_bz; const real Bn_b2_Rho = Bn_star*beta_n_star2*_Rho; const real As_Bn_b2_Rho = As*Bn_b2_Rho; const real Af_Bn_b2_Rho = Af*Bn_b2_Rho; REigenVec[4][0] = alpha_f*( Hp - u_Cf ) + Qs_v_by_w_bz + As_Bn_b2_Rho; REigenVec[4][1] = -( v*beta_z - w*beta_y ); REigenVec[4][2] = alpha_s*( Hp - u_Cs ) - Qf_v_by_w_bz - Af_Bn_b2_Rho; REigenVec[4][3] = _TWO*V2 + X/Gamma_m1; REigenVec[4][4] = alpha_s*( Hp + u_Cs ) + Qf_v_by_w_bz - Af_Bn_b2_Rho; REigenVec[4][5] = -REigenVec[4][1]; REigenVec[4][6] = alpha_f*( Hp + u_Cf ) - Qs_v_by_w_bz + As_Bn_b2_Rho; const real beta_y_star_Rho = beta_y_star*_Rho; REigenVec[5][0] = As*beta_y_star_Rho; REigenVec[5][1] = -S*beta_z*_Rho_sqrt; REigenVec[5][2] = -Af*beta_y_star_Rho; REigenVec[5][3] = ZERO; REigenVec[5][4] = REigenVec[5][2]; REigenVec[5][5] = REigenVec[5][1]; REigenVec[5][6] = REigenVec[5][0]; const real beta_z_star_Rho = beta_z_star*_Rho; REigenVec[6][0] = As*beta_z_star_Rho; REigenVec[6][1] = S*beta_y*_Rho_sqrt; REigenVec[6][2] = -Af*beta_z_star_Rho; REigenVec[6][3] = ZERO; REigenVec[6][4] = REigenVec[6][2]; REigenVec[6][5] = REigenVec[6][1]; REigenVec[6][6] = REigenVec[6][0]; // left eigenvectors const real Qy_star = beta_y_star / beta_n_star2; // Eq. 
[B30] in ref-b const real Qz_star = beta_z_star / beta_n_star2; const real norm_hat = _TWO / a2; const real norm_bar = norm_hat*Gamma_m1; const real Cff_hat = norm_hat*Cff; const real Css_hat = norm_hat*Css; const real Af_hat = norm_hat*Af; const real As_hat = norm_hat*As; const real Qf_hat = norm_hat*Qf; const real Qs_hat = norm_hat*Qs; const real X_hat = norm_hat*X; const real alpha_f_bar = norm_bar*alpha_f; const real alpha_s_bar = norm_bar*alpha_s; const real alpha_f_V2_Hp = alpha_f_bar*( V2 - Hp); const real alpha_s_V2_Hp = alpha_s_bar*( V2 - Hp); const real v_Qy_w_Qz = v*Qy_star + w*Qz_star; const real Qf_v_Qy_w_Qz = Qf_hat*v_Qy_w_Qz; const real Qs_v_Qy_w_Qz = Qs_hat*v_Qy_w_Qz; const real Bn_star_Rho = Bn_star*_Rho; const real Af_Bn_star_Rho = Af_hat*Bn_star_Rho; const real As_Bn_star_Rho = As_hat*Bn_star_Rho; LEigenVec[0][0] = alpha_f_V2_Hp + Cff_hat*( Cf + u ) - Qs_v_Qy_w_Qz - As_Bn_star_Rho; LEigenVec[1][0] = _TWO*( v*beta_z - w*beta_y ); LEigenVec[2][0] = alpha_s_V2_Hp + Css_hat*( Cs + u ) + Qf_v_Qy_w_Qz + Af_Bn_star_Rho; LEigenVec[3][0] = ONE - norm_bar*V2 + TWO*X_hat; LEigenVec[4][0] = alpha_s_V2_Hp + Css_hat*( Cs - u ) - Qf_v_Qy_w_Qz + Af_Bn_star_Rho; LEigenVec[5][0] = -LEigenVec[1][0]; LEigenVec[6][0] = alpha_f_V2_Hp + Cff_hat*( Cf - u ) + Qs_v_Qy_w_Qz - As_Bn_star_Rho; const real u_alpha_f_bar = u*alpha_f_bar; const real u_alpha_s_bar = u*alpha_s_bar; LEigenVec[0][1] = -u_alpha_f_bar - Cff_hat; LEigenVec[1][1] = ZERO; LEigenVec[2][1] = -u_alpha_s_bar - Css_hat; LEigenVec[3][1] = TWO*norm_bar*u; LEigenVec[4][1] = -u_alpha_s_bar + Css_hat; LEigenVec[5][1] = ZERO; LEigenVec[6][1] = -u_alpha_f_bar + Cff_hat; const real v_alpha_f_bar = v*alpha_f_bar; const real v_alpha_s_bar = v*alpha_s_bar; const real Qs_Qy_star = Qs_hat*Qy_star; const real Qf_Qy_star = Qf_hat*Qy_star; LEigenVec[0][2] = -v_alpha_f_bar + Qs_Qy_star; LEigenVec[1][2] = -_TWO*beta_z; LEigenVec[2][2] = -v_alpha_s_bar - Qf_Qy_star; LEigenVec[3][2] = TWO*norm_bar*v; LEigenVec[4][2] = -v_alpha_s_bar + Qf_Qy_star; LEigenVec[5][2] = -LEigenVec[1][2]; LEigenVec[6][2] = -v_alpha_f_bar - Qs_Qy_star; const real w_alpha_f_bar = w*alpha_f_bar; const real w_alpha_s_bar = w*alpha_s_bar; const real Qs_Qz_star = Qs_hat*Qz_star; const real Qf_Qz_star = Qf_hat*Qz_star; LEigenVec[0][3] = -w_alpha_f_bar + Qs_Qz_star; LEigenVec[1][3] = _TWO*beta_y; LEigenVec[2][3] = -w_alpha_s_bar - Qf_Qz_star; LEigenVec[3][3] = TWO*norm_bar*w; LEigenVec[4][3] = -w_alpha_s_bar + Qf_Qz_star; LEigenVec[5][3] = -LEigenVec[1][3]; LEigenVec[6][3] = -w_alpha_f_bar - Qs_Qz_star; LEigenVec[0][4] = alpha_f_bar; LEigenVec[1][4] = ZERO; LEigenVec[2][4] = alpha_s_bar; LEigenVec[3][4] = -Gamma_m1 / a2; LEigenVec[4][4] = alpha_s_bar; LEigenVec[5][4] = ZERO; LEigenVec[6][4] = alpha_f_bar; LEigenVec[0][5] = As_hat*Qy_star - alpha_f_bar*By; LEigenVec[1][5] = -_TWO*S*beta_z*Rho_sqrt; LEigenVec[2][5] = -Af_hat*Qy_star - alpha_s_bar*By; LEigenVec[3][5] = TWO*norm_bar*By; LEigenVec[4][5] = LEigenVec[2][5]; LEigenVec[5][5] = LEigenVec[1][5]; LEigenVec[6][5] = LEigenVec[0][5]; LEigenVec[0][6] = As_hat*Qz_star - alpha_f_bar*Bz; LEigenVec[1][6] = _TWO*S*beta_y*Rho_sqrt; LEigenVec[2][6] = -Af_hat*Qz_star - alpha_s_bar*Bz; LEigenVec[3][6] = TWO*norm_bar*Bz; LEigenVec[4][6] = LEigenVec[2][6]; LEigenVec[5][6] = LEigenVec[1][6]; LEigenVec[6][6] = LEigenVec[0][6]; # else // #ifdef MHD const real REigenVec[NWAVE][NWAVE] = { { ONE, ONE, ZERO, ZERO, ONE }, { u-a, u, ZERO, ZERO, u+a }, { v, v, ONE, ZERO, v }, { w, w, ZERO, ONE, w }, { H-u*a, _TWO*V2, v, w, H+u*a } }; # endif // #ifdef 
MHD ... else ... // 7. evaluate the amplitudes along different characteristics (eigenvectors) // index mapping between arrays with size NWAVE and NCOMP_TOTAL_PLUS_MAG; # ifdef MHD const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4, MAG_OFFSET+1, MAG_OFFSET+2 }; # else const int idx_wave[NWAVE] = { 0, 1, 2, 3, 4 }; # endif real Jump[NWAVE], Amp[NWAVE]; for (int v=0; v<NWAVE; v++) Jump[v] = R[ idx_wave[v] ] - L[ idx_wave[v] ]; # ifdef MHD Amp[0] = LEigenVec[0][0]*Jump[0] + LEigenVec[0][1]*Jump[1] + LEigenVec[0][2]*Jump[2] + LEigenVec[0][3]*Jump[3]; Amp[1] = LEigenVec[1][0]*Jump[0] + LEigenVec[1][2]*Jump[2] + LEigenVec[1][3]*Jump[3]; Amp[2] = LEigenVec[2][0]*Jump[0] + LEigenVec[2][1]*Jump[1] + LEigenVec[2][2]*Jump[2] + LEigenVec[2][3]*Jump[3]; Amp[3] = LEigenVec[3][0]*Jump[0] + LEigenVec[3][1]*Jump[1] + LEigenVec[3][2]*Jump[2] + LEigenVec[3][3]*Jump[3] + LEigenVec[3][4]*Jump[4] + LEigenVec[3][5]*Jump[5] + LEigenVec[3][6]*Jump[6]; Amp[4] = LEigenVec[4][0]*Jump[0] + LEigenVec[4][1]*Jump[1] + LEigenVec[4][2]*Jump[2] + LEigenVec[4][3]*Jump[3]; Amp[5] = -Amp[1]; Amp[6] = LEigenVec[6][0]*Jump[0] + LEigenVec[6][1]*Jump[1] + LEigenVec[6][2]*Jump[2] + LEigenVec[6][3]*Jump[3]; const real tmp0 = LEigenVec[0][4]*Jump[4] + LEigenVec[0][5]*Jump[5] + LEigenVec[0][6]*Jump[6]; const real tmp1 = LEigenVec[1][5]*Jump[5] + LEigenVec[1][6]*Jump[6]; const real tmp2 = LEigenVec[2][4]*Jump[4] + LEigenVec[2][5]*Jump[5] + LEigenVec[2][6]*Jump[6]; Amp[0] += tmp0; Amp[1] += tmp1; Amp[2] += tmp2; Amp[4] += tmp2; Amp[5] += tmp1; Amp[6] += tmp0; # else // #ifdef MHD Amp[2] = Jump[2] - v*Jump[0]; Amp[3] = Jump[3] - w*Jump[0]; Amp[1] = Gamma_m1/a2*( Jump[0]*(H-SQR(u)) + u*Jump[1] - Jump[4] + v*Amp[2] + w*Amp[3] ); Amp[0] = _TWO/a*( Jump[0]*(u+a) - Jump[1] - a*Amp[1] ); Amp[4] = Jump[0] - Amp[0] - Amp[1]; # endif // #ifdef MHD ... else ... // 8. 
verify that the density and pressure in the intermediate states are positive # ifdef CHECK_INTERMEDIATE const bool CheckMinPres_No = false; real I_Pres, I_States[ NCOMP_FLUID + NCOMP_MAG ]; for (int v=0; v<NCOMP_FLUID; v++) I_States[ v ] = L[ v ]; # ifdef MHD for (int v=0; v<NCOMP_MAG; v++) I_States[ v + NCOMP_FLUID ] = L[ v + MAG_OFFSET ]; # endif for (int t=0; t<NWAVE-1; t++) { for (int v=0; v<NCOMP_FLUID; v++) I_States[ v ] += Amp[t]*REigenVec[v][t]; # ifdef MHD for (int v=NCOMP_FLUID; v<NWAVE; v++) I_States[ v + 1 ] += Amp[t]*REigenVec[v][t]; # endif if ( EigenVal[t+1] > EigenVal[t] ) // skip the degenerate states { # ifdef MHD const real Emag = _TWO*( SQR( I_States[NCOMP_FLUID+0] ) + SQR( I_States[NCOMP_FLUID+1] ) + SQR( I_States[NCOMP_FLUID+2] ) ); # else const real Emag = NULL_REAL; # endif I_Pres = Hydro_Con2Pres( I_States[0], I_States[1], I_States[2], I_States[3], I_States[4], Passive, CheckMinPres_No, NULL_REAL, Emag, EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL ); // if unphysical results occur, recalculate fluxes by a substitute Riemann solver if ( I_States[0] <= ZERO || I_Pres <= ZERO ) { # ifdef GAMER_DEBUG printf( "WARNING : intermediate states check failed (density %14.7e, pressure %14.7e) !!\n", I_States[0], I_Pres ); # endif # if ( CHECK_INTERMEDIATE == EXACT && !defined MHD ) Hydro_RiemannSolver_Exact( 0, Flux_Out, L, R, MinDens, MinPres, EoS_DensEint2Pres, EoS_DensPres2CSqr, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); # elif ( CHECK_INTERMEDIATE == HLLE ) Hydro_RiemannSolver_HLLE ( 0, Flux_Out, L, R, MinDens, MinPres, EoS_DensEint2Pres, EoS_DensPres2CSqr, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); # elif ( CHECK_INTERMEDIATE == HLLC && !defined MHD ) Hydro_RiemannSolver_HLLC ( 0, Flux_Out, L, R, MinDens, MinPres, EoS_DensEint2Pres, EoS_DensPres2CSqr, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); # elif ( CHECK_INTERMEDIATE == HLLD && defined MHD ) Hydro_RiemannSolver_HLLD ( 0, Flux_Out, L, R, MinDens, MinPres, EoS_DensEint2Pres, EoS_DensPres2CSqr, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); # else # error : ERROR : unsupported CHECK_INTERMEDIATE (EXACT/HLLE/HLLC/HLLD) !! # endif // CHECK_INTERMEDIATE Hydro_Rotate3D( Flux_Out, XYZ, false, MAG_OFFSET ); return; } // if ( I_States[0] <= ZERO || I_Pres <= ZERO ) } // if ( EigenVal[t+1] > EigenVal[t] ) } // for (int t=0; t<NWAVE-1; t++) # endif // #ifdef CHECK_INTERMEDIATE // 9. evaluate the Roe fluxes for (int v=0; v<NWAVE; v++) Amp[v] *= FABS( EigenVal[v] ); for (int v=0; v<NWAVE; v++) { const int vv = idx_wave[v]; Flux_Out[vv] = Flux_L[vv] + Flux_R[vv]; for (int t=0; t<NWAVE; t++) Flux_Out[vv] -= Amp[t]*REigenVec[v][t]; Flux_Out[vv] *= _TWO; } // longitudinal magnetic flux is always zero # ifdef MHD Flux_Out[MAG_OFFSET] = ZERO; # endif // 10. evaluate the fluxes for passive scalars # if ( NCOMP_PASSIVE > 0 ) if ( Flux_Out[FLUX_DENS] >= ZERO ) { const real vx = Flux_Out[FLUX_DENS]*_RhoL; for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = L[v]*vx; } else { const real vx = Flux_Out[FLUX_DENS]*_RhoR; for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = R[v]*vx; } # endif // 11. restore the correct order Hydro_Rotate3D( Flux_Out, XYZ, false, MAG_OFFSET ); } // FUNCTION : Hydro_RiemannSolver_Roe #endif // #if ( MODEL == HYDRO ) #endif // #ifndef __CUFLU_RIEMANNSOLVER_ROE__
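// ---------------------------------------------------------------------------
// Compact summary (added for clarity) of what the hydro-only path above
// computes in steps 2, 3, and 9; the notation is this note's own.
// With gamma = Gamma and q in {u, v, w, H}, the Roe averages are
//
//   \hat\rho = \sqrt{\rho_L \rho_R},
//   \hat q   = \frac{\sqrt{\rho_L}\, q_L + \sqrt{\rho_R}\, q_R}
//                   {\sqrt{\rho_L} + \sqrt{\rho_R}},
//   \hat a^2 = (\gamma - 1)\Bigl(\hat H - \tfrac{1}{2}(\hat u^2 + \hat v^2 + \hat w^2)\Bigr),
//
// giving eigenvalues \lambda = (\hat u - \hat a, \hat u, \hat u, \hat u, \hat u + \hat a)
// and the upwinded interface flux
//
//   F_{i+1/2} = \tfrac{1}{2}\bigl(F(U_L) + F(U_R)\bigr)
//             - \tfrac{1}{2}\sum_k |\lambda_k|\, \alpha_k\, R_k,
//
// where R_k are the right eigenvectors and \alpha_k the wave amplitudes from
// step 7. The MHD branch additionally carries the X, Y correction terms and
// the Alfven/slow/fast speeds of Stone et al. (2008), i.e. ref-b above.
// ---------------------------------------------------------------------------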
\brief Unit tests for warp-level wmma gemm */ #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED) #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/gemm/warp/default_mma_wmma_tensor_op.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "testbed.h" /// Test name format: SM[arch]_warp_wmma_[alayout]_[blayout]_[clayout]_[dtype].[threadblock_shape]_[warp_shape] //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// f16 accumulation point wmma.mma ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////// [START] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [START] ////////////////////// //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// // 4 tests for {N,T}x{N,T}=>{T} TEST(SM70_warp_wmma_row_col_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_row_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using 
WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_col_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } // 4 tests for {N,T}x{N,T}=>{N} TEST(SM70_warp_wmma_row_col_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// 
wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_row_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_col_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } /////////// [END] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [END] /////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 64x64x16_64x64x16_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x32x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = 
cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x32x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 128x128x16_64x64x16_16x16x16) { // Even though the test launches 128x128x16 CTA tile this test only verfies one warp // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<128, 128, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m32n8k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, 
ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m8n32k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 8x32x16_8x32x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m8n32k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_row_f16, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } TEST(SM70_warp_wmma_col_row_row_f16, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// f32 accumulation point wmma.mma ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m16n16k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most 
basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 64x64x16_64x64x16_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 64x64x32_64x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 128x128x16_64x64x16_16x16x16) { // Even though the test launches 128x128x16 CTA tile this test only verfies one warp // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<128, 128, 16> >().run(); } ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m32n8k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m8n32k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } #endif //CUTLASS_ARCH_WMMA_SM70_ENABLED
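////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Orientation note (illustrative, not part of the test suite): every test above ultimately drives one or more
// native wmma.mma.sync instructions through CUTLASS.  The sketch below shows the underlying nvcuda::wmma
// fragment API for the row x col => row m16n16k16 f16 case (requires sm_70+ and a launch of exactly one warp,
// e.g. <<<1, 32>>>); the kernel name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#include <mma.h>
#include <cuda_fp16.h>

__global__ void wmma_m16n16k16_f16_sketch(half const *A, half const *B, half *C) {
  using namespace nvcuda;
  // Fragments matching wmma.mma.sync.aligned.row.col.m16n16k16.f16.f16
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, half> c_frag;

  wmma::fill_fragment(c_frag, __float2half(0.0f));
  wmma::load_matrix_sync(a_frag, A, 16);   // leading dimension = 16
  wmma::load_matrix_sync(b_frag, B, 16);
  wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
  wmma::store_matrix_sync(C, c_frag, 16, wmma::mem_row_major);
}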
#define STR1(X) #X #define STR(X) STR1(X) #define STRINGIFY(X,Y) X ## Y #define CON(X,Y) STRINGIFY(X,Y) #define KDir kernels #include "includes/ourmacros.h" /* __device__ __inline__ double ld_gbl_cg(const double *addr) { double return_value; asm("ld.global.cg.f64 %0, [%1];" : "=d"(return_value) : "l"(addr)); return return_value; }*/ extern __shared__ type tile[]; __device__ __forceinline__ void fvimatchl32_main(const type * Atmp, type * Btmp,const int lda1,const int ldb1,const int size0, const int plain, const int sbp, const unsigned short * __restrict__ offset, type alpha, type beta) { const int id = threadIdx.x; const int i2 = id / 32; const int rest = id %32; const type *Adisp = Atmp +i2 * lda1; const int tile_disp = i2 * (sbp); int regs[8];int j = rest; #pragma unroll 8 for(int i = 0; i < 8; i++) { regs[i] = offset[j]; j+=32; if(j >= plain) break; } #pragma unroll for( int j = rest; j < plain; j+=32){ //tile[tile_disp+j] = ld_gbl_cg(&Adisp[j]); tile[tile_disp+j] = Adisp[j]; } __syncthreads(); type *Bdisp = &Btmp[i2 * ldb1]; const int tile_disp1 = i2 * size0; j = rest; #pragma unroll 8 for(int i = 0; i < 8; i++) { Bdisp[j] = beta*Bdisp[j] + alpha* tile[regs[i]+tile_disp1]; j+=32; if(j >= plain) break; } } __device__ __forceinline__ void fvimatchl32_rem(const type * Atmp, type * Btmp, const int lda1, const int ldb1,const int remainderx,const int remaindery,const int size0, const int plain, const int sbp, const int ilimit, const int olimit, const unsigned short int* __restrict__ offset, type alpha, type beta) { int id = threadIdx.x; int i2 = id / 32; int rest = id %32; if(i2 < remaindery){ const type *Adisp = &Atmp[i2 * lda1]; const int tile_disp = i2 * (sbp); #pragma unroll for( int j = rest; j < ilimit; j+=32){ tile[tile_disp+j] = Adisp[j]; } } __syncthreads(); if(i2 >= remainderx) return; type *Bdisp = &Btmp[i2 * ldb1]; int regs[8];int j = rest; #pragma unroll 8 for(int i = 0; i < 8; i++) { if(j >= olimit) break; regs[i] = offset[j]; j+=32; } const int tile_disp1 = i2 * size0; j = rest; #pragma unroll 8 for(int i = 0; i < 8; i++) { if(j >= olimit) break; Bdisp[j] = beta*Bdisp[j]+ alpha* tile[regs[i]+tile_disp1]; j+=32; } } __device__ __forceinline__ void fvimatchl32_main_coars(const type * Atmp, type * Btmp,const int lda1,const int ldb1,const int size0, const int plain, const int sbp, const unsigned short * __restrict__ offset, const int acoars, const int bcoars, const int size, type alpha, type beta) { const int id = threadIdx.x; const int i2 = id / 32; const int rest = id %32; const int tile_disp = i2 * (sbp); int regs[8]; for(int c = 0; c < size; c++) { int j = rest; const type *Adisp = Atmp +i2 * lda1 + c*acoars; #pragma unroll 8 for(int i = 0; i < 8; i++) { regs[i] = offset[j]; j+=32; if(j >= plain) break; } #pragma unroll for(j = rest; j < plain; j+=32){ tile[tile_disp+j] = Adisp[j]; } __syncthreads(); type *Bdisp = Btmp + i2 * ldb1 + c*bcoars; const int tile_disp1 = i2 * size0; j = rest; #pragma unroll 8 for(int i = 0; i < 8; i++) { Bdisp[j] = alpha* tile[regs[i]+tile_disp1] + beta*Bdisp[j]; j+=32; if(j >= plain) break; } __syncthreads(); } } __device__ __forceinline__ void fvimatchl32_rem_coars(const type * Atmp, type * Btmp, const int lda1, const int ldb1,const int remainderx,const int remaindery,const int size0, const int plain, const int sbp, const int ilimit, const int olimit, const unsigned short int* __restrict__ offset, const int acoars, const int bcoars, const int size, type alpha, type beta) { int id = threadIdx.x; int i2 = id / 32; int rest = id %32; for(int c = 0; c< 
size; c++) { __syncthreads(); if(i2 < remaindery){ const type *Adisp = Atmp + i2 * lda1 + c*acoars; const int tile_disp = i2 * (sbp); #pragma unroll for( int j = rest; j < ilimit; j+=32){ tile[tile_disp+j] = Adisp[j]; } } __syncthreads(); if(i2 >= remainderx) continue; type *Bdisp = Btmp + i2 * ldb1 + c*bcoars; int regs[8]; int j = rest; #pragma unroll 8 for(int i = 0; i < 8; i++) { if(j >= olimit) break; regs[i] = offset[j]; j+=32; } const int tile_disp1 = i2 * size0; j = rest; #pragma unroll 8 for(int i = 0; i < 8; i++) { if(j >= olimit) break; Bdisp[j] = alpha* tile[regs[i]+tile_disp1] + beta*Bdisp[j]; j+=32; } } } #define FNAME fvimatchl32.h #include "includes/macro.h" #undef FNAME #define FNAME fvimatchl32_coars.h #include "includes/macro.h" #undef FNAME void fvimatchl32CallerWrapper(const int ndim,const type * __restrict__ A, type * B,const int size0, const int param0, const int param1, const int numthreads1, const int numblocks, const int numthreads, const int shmsize , const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s , const int remainder1, const int remainder2, const int lda_kernel1, const int ldb_kernel1, unsigned short int* offset, const int ilimit, const int olimit, const int plain, const int sbp, const int ldal, const int ldap2l, const int acoars, const int bcoars, const int size,type alpha,type beta ) { #ifdef printd printf("ndim = %d, size = %d, numblocks = %d, numthreads = %d, acoars = %d, bcoars = %d\n", ndim, size, numblocks, numthreads, acoars, bcoars); #endif if(size > 0) { #ifdef printd printf("Coarsening... No. of blocks = %d\n", numblocks/size); #endif dim3 thread_blocks(numblocks/size, 1, 1); switch(ndim) { EXPANDDIMS(fvimatchl32_coars_kernel_, thread_blocks, numthreads, shmsize, (A, B,size0, ldal, ldap2l,param0, param1,plain,sbp, lda_s,ldb_s,idx_s,remainder1,remainder2,ilimit,olimit,lda_kernel1, ldb_kernel1,offset, acoars, bcoars, size, alpha, beta)) default: { } } } else { dim3 thread_blocks(numblocks, 1, 1); switch(ndim) { EXPANDDIMS(fvimatchl32_kernel_, thread_blocks, numthreads, shmsize, (A, B,size0, ldal, ldap2l,param0, param1,plain,sbp, lda_s,ldb_s,idx_s,remainder1,remainder2,ilimit,olimit,lda_kernel1, ldb_kernel1,offset, alpha, beta)) default: { } } } } void swap(int array[], int ind1, int ind2); int cancoarsen(int *lda, int newndim); extern "C" void fvimatchl32_transpose_kernel(int ndim, type *A, type *B, const int *lda, const int *ldb, const int* params, const int * rperm, type alpha, type beta) { //printf("l32 normal\n"); // int numBlocks = computeNumBlocksCode ; #ifdef printd printf("\nA Dims: %d \t %d \t %d\t %d\t %d\n", lda[0], lda[1], lda[2], lda[3], lda[4]); printf("\nParams: %d \t %d \t %d\t %d\t %d\t %d\t %d\n", params[0], params[1], params[2], params[3], params[4], params[5], params[6]); printf("\nB Dims: %d \t %d \t %d\t %d\t %d\n", ldb[0], ldb[1], ldb[2], ldb[3], ldb[4]); printf("\n R perm: %d \t %d \t %d\t %d\t %d\n", rperm[0], rperm[1], rperm[2], rperm[3], rperm[4]); #endif int numBlocks = params[6];//((size[1] + 8 -1)/8) * size[2] * ((size[3] + 8 -1)/8) * size[4] ; int *d_lda_s, *d_ldb_s, *d_idx_s; const int size0 = lda[0]; const int remainder1 = lda[1] % params[0]; const int remainder2 = lda[params[3]] % params[0]; int lda_s[20], ldb_s[20], idx_s[20], temp[20]; lda_s[0] = 1; ldb_s[0] = 1; int i, blockA=params[0]; int blockB = blockA; idx_s[1] = (lda[1] + blockA - 1) / blockA; lda_s[1] = lda_s[0] * lda[0]; ldb_s[1] = ldb_s[0] * ldb[0]; for(i = 2; i < ndim; i++) { if( i == params[3]) { idx_s[i] = 
(lda[i] + blockA - 1)/blockA; } else { idx_s[i] = lda[i]; } lda_s[i] = lda_s[i-1] * lda[i-1]; ldb_s[i] = ldb_s[i-1] * ldb[i-1]; } for(i = 1; i < ndim; i++) { temp[i] = ldb_s[rperm[i]]; } const int lda_kernel1 = lda_s[params[3]]; const int ldb_kernel1 = ldb_s[params[4]]; lda_s[1] *= blockA; lda_s[params[3]] *= blockA; temp[1] *= blockA; temp[params[3]] *= blockA; unsigned short int offset[9000]; unsigned short int *d_offset; unsigned short limit = lda[0] * params[0]; int tlimit = -1; for(i = 0; i < limit ; i++) { offset[i] = (i/lda[0]) * (lda[0] * params[0] + params[1]) +(i%lda[0]); if(i / lda[0] >= remainder2 && tlimit == -1) tlimit = i; } #ifdef printd printf("Offset memory size = %d \n", limit); #endif if(params[3] != 2) { swap(idx_s, 2, params[3]); swap(lda_s, 2, params[3]); swap(temp, 2, params[3]); } int newndim = ndim; int acoars = -1, bcoars = -1, size = -1; #ifndef NOCOARSEN int noblock = 3;// (params[3] != 2); int cd = cancoarsen(idx_s+ noblock, ndim- noblock); if(cd >= 0) { #ifdef printd printf("cd = %d, noblock = %d, ndim-noblock = %d\n", cd, noblock, ndim-noblock); #endif acoars = lda_s[noblock+cd]; bcoars = temp[noblock+cd]; size = idx_s[noblock+cd]; for(int j = cd+1; j < newndim; j++) { idx_s[noblock+j-1] = idx_s[noblock+j]; lda_s[noblock+j-1] = lda_s[noblock+j]; temp[noblock+j-1] = temp[noblock+j]; } newndim--; } #endif newndim--; SAFECUDAMALLOC(&d_offset,limit*sizeof(short)); SAFECUDAMEMCPY(d_offset, offset,limit*sizeof(short), cudaMemcpyHostToDevice); SAFECUDAMALLOC(&d_lda_s,newndim*sizeof(int)); SAFECUDAMALLOC(&d_ldb_s,newndim*sizeof(int)); SAFECUDAMALLOC(&d_idx_s,newndim*sizeof(int)); SAFECUDAMEMCPY(d_idx_s, idx_s+1,newndim*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(d_lda_s, lda_s+1,newndim*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(d_ldb_s, temp+1,newndim*sizeof(int), cudaMemcpyHostToDevice); const int ilimit = remainder1 * size0; const int olimit = remainder2 * size0; const int plain = params[0] * size0; const int sbp = plain+params[1]; const int ldal = (lda[1] - remainder1)/blockA; const int ldp2l = (lda[params[3]] - remainder2)/blockB; /* #ifdef MODEL printf("\t%d\t%d\t", plain, blockA); printf("\t%d\t%d\t", plain/32, plain%32); double f1, f2, f3, f4, f; int minlimit = min(ilimit, olimit); printf("\tf1=%lf\t", f1 = ((plain/32) + (double)(plain%32) /32)/ (int)((plain+31)/32)); printf("\tf2=%lf\t", f2 = ((ilimit/32) + (double)(ilimit%32) /32)/ (int)(max(1,(ilimit+31)/32))); printf("\tf3=%lf\t", f3 = ((olimit/32) + (double)(olimit%32) /32)/ (int)(max(1,(olimit+31)/32))); printf("\tf4=%lf\t", f4 = ((minlimit/32) + (double)(minlimit%32) /32)/ (int)(max(1,(minlimit+31)/32))); //printf("\t%d\t%d\t", lda[1], ldb[1]); int asize = lda[1]; int bsize = lda[1]; //printf("\t%d\t%d\t%d\t%d\t", asize/blockA, asize%blockA, bsize/blockB,bsize%blockB ); //int amax = min(blockA, 32); //int bmax = min(blockB, 32); int amax = blockA; int bmax = blockB; printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)(asize/amax) * (bsize%bmax > 0) *f3+ (double)(asize%amax>0) * (bsize/bmax)*f2 + (double)(asize%amax > 0) * (bsize%bmax > 0) *f4 )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax))); printf("\t%lf\t", f); #endif */ #ifdef NOHTIME #include "includes/nohtimestart.h" #endif fvimatchl32CallerWrapper(newndim, A, B,lda[0],params[0], params[1], params[3]-1, numBlocks, params[2], params[5]*sizeof(type) , d_lda_s,d_ldb_s,d_idx_s ,remainder1,remainder2,lda_kernel1, ldb_kernel1, d_offset, ilimit, olimit, plain, sbp, ldal, ldp2l, acoars, bcoars, size, alpha, 
beta); #ifdef NOHTIME #include "includes/nohtimestop.h" #endif {cudaError_t err = cudaGetLastError(); if(err != cudaSuccess){ printf("\nKernel ERROR in fvimatchl32: %s (line: %d)\n", cudaGetErrorString(err), __LINE__); //exit(-1); }} cudaFree(d_lda_s); cudaFree(d_ldb_s); cudaFree(d_idx_s); cudaFree(d_offset); }
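/* Illustrative note (not part of the library above): the kernels here stage a tile of A in shared memory and
   scatter it into B through a precomputed offset table, and the params[1] extra elements reserved per tile row
   serve the same purpose as the "+1" padding column in the classic tiled transpose sketched below -- keeping
   the column-wise shared-memory accesses free of bank conflicts.  TILE and the kernel name are illustrative;
   launch with dim3 block(TILE, TILE). */
#define TILE 32

__global__ void tiled_transpose_sketch(const float *in, float *out, int width, int height)
{
    __shared__ float tile_s[TILE][TILE + 1];        // +1 column avoids bank conflicts

    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    if (x < width && y < height)
        tile_s[threadIdx.y][threadIdx.x] = in[y * width + x];

    __syncthreads();

    // write the transposed tile: block coordinates are swapped
    x = blockIdx.y * TILE + threadIdx.x;            // new row length is 'height'
    y = blockIdx.x * TILE + threadIdx.y;
    if (x < height && y < width)
        out[y * height + x] = tile_s[threadIdx.x][threadIdx.y];
}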
#include <pycaUtils.h> #include <GMemOpers.h> // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { namespace Splatting{ __device__ void atomicSplatDistance(int* d_d, float x, float y, float z, int w, int h, int l) { int xInt = int(x); int yInt = int(y); int zInt = int(z); if (x < 0 && x != xInt) --xInt; if (y < 0 && y != yInt) --yInt; if (z < 0 && z != zInt) --zInt; float dx = 1.f - (x - xInt); float dy = 1.f - (y - yInt); float dz = 1.f - (z - zInt); uint nid = (zInt * h + yInt) * w + xInt; float dist; if (isInside3D(xInt, yInt, zInt, w, h, l)){ dist = dx * dy * dz; atomicAdd(&d_d[nid],S2p20(dist)); } if (isInside3D(xInt + 1, yInt, zInt, w, h, l)){ dist = (1.f-dx) * dy * dz; atomicAdd(&d_d[nid + 1], S2p20(dist)); } if (isInside3D(xInt, yInt+1, zInt, w, h, l)){ dist = dx * (1.f - dy) * dz; atomicAdd(&d_d[nid + w], S2p20(dist)); } if (isInside3D(xInt+1, yInt+1, zInt, w, h, l)){ dist = (1.f -dx) * (1.f - dy) * dz; atomicAdd(&d_d[nid + w + 1], S2p20(dist)); } nid += w*h; if (isInside3D(xInt, yInt, zInt + 1, w, h, l)){ dist = dx * dy * (1.f - dz); atomicAdd(&d_d[nid],S2p20(dist)); } if (isInside3D(xInt + 1, yInt, zInt+1, w, h, l)){ dist = (1.f-dx) * dy * (1.f -dz); atomicAdd(&d_d[nid + 1], S2p20(dist)); } if (isInside3D(xInt, yInt+1, zInt+1, w, h, l)){ dist = dx * (1.f - dy) * (1.f -dz); atomicAdd(&d_d[nid + w], S2p20(dist)); } if (isInside3D(xInt+1, yInt+1, zInt+1, w, h, l)){ dist = (1.f -dx) * (1.f - dy) * (1.f -dz); atomicAdd(&d_d[nid + w + 1], S2p20(dist)); } } __global__ void atomicSplatDistance_kernel( int* d_d, int w, int h, int l, const float* d_px, const float* d_py, const float* d_pz, uint nP) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id >= nP) return; float x = d_px[id]; float y = d_py[id]; float z = d_pz[id]; atomicSplatDistance(d_d, x, y, z, w, h, l); } void splatDistance(int* d_id, size_t sizeX, size_t sizeY, size_t sizeZ, const float* d_px , const float* d_py, const float* d_pz, size_t nP, StreamT stream) { //1.Init accumulate array 0 size_t nVox = sizeX * sizeY * sizeZ; GMemOpers<int>::SetMem(d_id, 0 , nVox, stream, false); //2.Splat value dim3 threads(256); dim3 grids=make_grid(iDivUp(nP, threads.x)); atomicSplatDistance_kernel<<<grids, threads, 0, stream>>> (d_id, sizeX, sizeY, sizeZ, d_px, d_py, d_pz, nP); } __global__ void atomicSplatPos_kernel(int* d_wd , int w, int h, int l, const float* d_w, const float* d_px, const float* d_py, const float* d_pz, int nP) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < nP){ float mass = d_w[id]; float x = d_px[id]; float y = d_py[id]; float z = d_pz[id]; Splatting::atomicSplat(d_wd, mass, x, y, z, w, h, l); } } void splat3D(int* d_iwd, size_t sizeX, size_t sizeY, size_t sizeZ, const float* d_w, const float* d_px , const float* d_py, const float* d_pz, size_t nP, StreamT stream) { //1.Init accumulate array 0 size_t nVox = sizeX * sizeY * sizeZ; GMemOpers<int>::SetMem(d_iwd, 0, nVox, stream, false); //2.Splat value dim3 threads(256); dim3 grids=make_grid(iDivUp(nP, threads.x)); atomicSplatPos_kernel<<<grids, threads, 0, stream>>>(d_iwd, sizeX, sizeY, sizeZ, d_w, d_px, d_py, d_pz, nP); } __global__ void atomicSplatPos_kernel( int* d_wd , int* d_wd1, int* d_wd2, int w, int h, int l, const float* d_w, const float* d_w1, const float* d_w2, const float* d_px,const float* d_py, const float* d_pz, uint nP) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < nP){ float mass = d_w[id], mass1 = d_w1[id], mass2 = 
d_w2[id]; float x = d_px[id]; float y = d_py[id]; float z = d_pz[id]; Splatting::atomicSplat(d_wd, d_wd1, d_wd2, mass, mass1, mass2, x, y, z, w, h, l); } } __global__ void atomicSplatPos_kernel( int* d_wd , int* d_wd1, int* d_wd2, int w, int h, int l, const float* d_w, const float* d_w1, const float* d_w2, const float* d_px,const float* d_py, const float* d_pz) { uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float mass = d_w[id], mass1 = d_w1[id], mass2 = d_w2[id]; float x = d_px[id]; float y = d_py[id]; float z = d_pz[id]; Splatting::atomicSplat(d_wd, d_wd1, d_wd2, mass, mass1, mass2, x, y, z, w, h, l); } } } void splat3D(int* d_iwdx, int* d_iwdy, int* d_iwdz, size_t sizeX, size_t sizeY, size_t sizeZ, const float* d_wx, const float* d_wy, const float* d_wz, const float* d_px , const float* d_py, const float* d_pz, size_t nP, StreamT stream) { //1.Init accumulate array 0 size_t nVox = sizeX * sizeY * sizeZ; GMemOpers<int>::SetMem(d_iwdx, 0, nVox, stream, false); GMemOpers<int>::SetMem(d_iwdy, 0, nVox, stream, false); GMemOpers<int>::SetMem(d_iwdz, 0, nVox, stream, false); //2.Splat value dim3 threads(256); dim3 grids=make_grid(iDivUp(nP, threads.x)); atomicSplatPos_kernel<<<grids, threads, 0, stream>>>(d_iwdx, d_iwdy, d_iwdz, sizeX, sizeY, sizeZ, d_wx, d_wy, d_wz, d_px, d_py, d_pz, nVox); } __device__ void atomicSplatWeightPos(int* d_wd, int* d_d, float mass, float x, float y, float z, int w, int h, int l) { int xInt = int(x); int yInt = int(y); int zInt = int(z); if (x < 0 && x != xInt) --xInt; if (y < 0 && y != yInt) --yInt; if (z < 0 && z != zInt) --zInt; float dx = 1.f - (x - xInt); float dy = 1.f - (y - yInt); float dz = 1.f - (z - zInt); uint nid = (zInt * h + yInt) * w + xInt; float dist; if (isInside3D(xInt, yInt, zInt, w, h, l)){ dist = dx * dy * dz; atomicAdd(&d_wd[nid],S2p20(mass * dist)); atomicAdd(&d_d[nid],S2p20(dist)); } if (isInside3D(xInt + 1, yInt, zInt, w, h, l)){ dist = (1.f-dx) * dy * dz; atomicAdd(&d_wd[nid + 1], S2p20(mass * dist)); atomicAdd(&d_d[nid + 1], S2p20(dist)); } if (isInside3D(xInt, yInt+1, zInt, w, h, l)){ dist = dx * (1.f - dy) * dz; atomicAdd(&d_wd[nid + w], S2p20(mass * dist)); atomicAdd(&d_d[nid + w], S2p20(dist)); } if (isInside3D(xInt+1, yInt+1, zInt, w, h, l)){ dist = (1.f -dx) * (1.f - dy) * dz; atomicAdd(&d_wd[nid + w + 1], S2p20(mass * dist)); atomicAdd(&d_d[nid + w + 1], S2p20(dist)); } nid += w*h; if (isInside3D(xInt, yInt, zInt + 1, w, h, l)){ dist = dx * dy * (1.f - dz); atomicAdd(&d_wd[nid],S2p20(mass * dist)); atomicAdd(&d_d[nid],S2p20(dist)); } if (isInside3D(xInt + 1, yInt, zInt+1, w, h, l)){ dist = (1.f-dx) * dy * (1.f -dz); atomicAdd(&d_wd[nid + 1], S2p20(mass * dist)); atomicAdd(&d_d[nid + 1], S2p20(dist)); } if (isInside3D(xInt, yInt+1, zInt+1, w, h, l)){ dist = dx * (1.f - dy) * (1.f -dz); atomicAdd(&d_wd[nid + w], S2p20(mass * dist)); atomicAdd(&d_d[nid + w], S2p20(dist)); } if (isInside3D(xInt+1, yInt+1, zInt+1, w, h, l)){ dist = (1.f -dx) * (1.f - dy) * (1.f -dz); atomicAdd(&d_wd[nid + w + 1], S2p20(mass * dist)); atomicAdd(&d_d[nid + w + 1], S2p20(dist)); } } __global__ void atomicSplatWeightPos_kernel( int* d_wd, int* d_d, int w, int h, int l, const float* d_w, const float* d_px, const float* d_py, const float* d_pz, uint nP) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id >= nP) return; float mass = d_w[id]; float x = d_px[id]; float y = d_py[id]; float z = d_pz[id]; 
atomicSplatWeightPos(d_wd, d_d, mass, x, y, z, w, h, l); } void splat3D(int* d_iwd, int* d_id, uint sizeX, uint sizeY, uint sizeZ, const float* d_w, const float* d_px , const float* d_py, const float* d_pz, uint nP, StreamT stream) { //1.Init accumulate array 0 size_t nVox = sizeX * sizeY * sizeZ; GMemOpers<int>::SetMem(d_iwd, 0, nVox, stream, false); GMemOpers<int>::SetMem(d_id, 0 , nVox, stream, false); //2.Splat value dim3 threads(256); dim3 grids=make_grid(iDivUp(nP, threads.x)); atomicSplatWeightPos_kernel<<<grids, threads, 0, stream>>> (d_iwd, d_id, sizeX, sizeY, sizeZ, d_w, d_px, d_py, d_pz, nP); } __global__ void atomicSplatV_kernel( int* d_wd , int* d_wd1, int* d_wd2, int w, int h, int l, const float* d_w, const float* d_w1, const float* d_w2, const float* d_vx,const float* d_vy, const float* d_vz, float iSpx, float iSpy, float iSpz) { uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float mass = d_w[id], mass1 = d_w1[id], mass2 = d_w2[id]; float x = i + d_vx[id] * iSpx; float y = j + d_vy[id] * iSpy; float z = k + d_vz[id] * iSpz; Splatting::atomicSplat(d_wd, d_wd1, d_wd2, mass, mass1, mass2, x, y, z, w, h, l); } } } __device__ void atomicSplatWeightPos(int* d_wd, int* d_wd1, int* d_wd2, int* d_d, float mass, float mass1, float mass2, float x, float y, float z, int w, int h, int l) { int xInt = int(x); int yInt = int(y); int zInt = int(z); if (x < 0 && x != xInt) --xInt; if (y < 0 && y != yInt) --yInt; if (z < 0 && z != zInt) --zInt; float dx = 1.f - (x - xInt); float dy = 1.f - (y - yInt); float dz = 1.f - (z - zInt); uint nid = (zInt * h + yInt) * w + xInt; int dist; float weight; if (isInside3D(xInt, yInt, zInt, w, h, l)){ weight = dx * dy * dz; atomicAdd(&d_d[nid],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid],dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid],dist); dist = S2p20(mass2 * weight); atomicAdd(&d_wd2[nid],dist); } if (isInside3D(xInt + 1, yInt, zInt, w, h, l)){ weight = (1.f-dx) * dy * dz; atomicAdd(&d_d[nid + 1],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid + 1], dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid + 1], dist); dist = S2p20(mass2 * weight); atomicAdd(&d_wd2[nid + 1], dist); } if (isInside3D(xInt, yInt+1, zInt, w, h, l)){ weight = dx * (1.f - dy) * dz; atomicAdd(&d_d[nid + w],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid + w], dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid + w], dist); dist = S2p20(mass2 * weight); atomicAdd(&d_wd2[nid + w], dist); } if (isInside3D(xInt+1, yInt+1, zInt, w, h, l)){ weight = (1.f -dx) * (1.f - dy) * dz; atomicAdd(&d_d[nid + 1 + w],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid + w + 1], dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid + w + 1], dist); dist = S2p20(mass2 * weight); atomicAdd(&d_wd2[nid + w + 1], dist); } nid += w*h; if (isInside3D(xInt, yInt, zInt + 1, w, h, l)){ weight = dx * dy * (1.f - dz); atomicAdd(&d_d[nid],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid],dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid],dist); dist = S2p20(mass2 * weight); atomicAdd(&d_wd2[nid],dist); } if (isInside3D(xInt + 1, yInt, zInt+1, w, h, l)){ weight = (1.f-dx) * dy * (1.f -dz); atomicAdd(&d_d[nid + 1],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid + 1], dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid + 1], dist); dist = S2p20(mass2 * 
weight); atomicAdd(&d_wd2[nid + 1], dist); } if (isInside3D(xInt, yInt+1, zInt+1, w, h, l)){ weight = dx * (1.f - dy) * (1.f -dz); atomicAdd(&d_d[nid + w],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid + w], dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid + w], dist); dist = S2p20(mass2 * weight); atomicAdd(&d_wd2[nid + w], dist); } if (isInside3D(xInt+1, yInt+1, zInt+1, w, h, l)){ weight = (1.f -dx) * (1.f - dy) * (1.f -dz); atomicAdd(&d_d[nid + 1 + w],S2p20(weight)); dist = S2p20(mass * weight); atomicAdd(&d_wd[nid + w + 1], dist); dist = S2p20(mass1 * weight); atomicAdd(&d_wd1[nid + w + 1], dist); dist = S2p20(mass2 * weight); atomicAdd(&d_wd2[nid + w + 1], dist); } } __global__ void atomicSplatWeightPos_kernel( int* d_wd , int* d_wd1, int* d_wd2, int * d_d, int w, int h, int l, const float* d_w, const float* d_w1, const float* d_w2, const float* d_px,const float* d_py, const float* d_pz, uint nP) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id >= nP) return; float mass = d_w[id], mass1 = d_w1[id], mass2 = d_w2[id]; float x = d_px[id]; float y = d_py[id]; float z = d_pz[id]; atomicSplatWeightPos(d_wd, d_wd1, d_wd2, d_d, mass, mass1, mass2, x, y, z, w, h, l); } void splat3D(int* d_iwdx, int* d_iwdy, int* d_iwdz, int* d_id, size_t sizeX, size_t sizeY, size_t sizeZ, const float* d_wx, const float* d_wy, const float* d_wz, const float* d_px , const float* d_py, const float* d_pz, size_t nP, StreamT stream) { //1.Init accumulate array 0 size_t nVox = sizeX * sizeY * sizeZ; GMemOpers<int>::SetMem(d_iwdx, 0, nVox, stream, false); GMemOpers<int>::SetMem(d_iwdy, 0, nVox, stream, false); GMemOpers<int>::SetMem(d_iwdz, 0, nVox, stream, false); GMemOpers<int>::SetMem(d_id, 0, nVox, stream, false); //2.Splat value dim3 threads(256); dim3 grids=make_grid(iDivUp(nP, threads.x)); atomicSplatWeightPos_kernel<<<grids, threads, 0, stream>>>(d_iwdx, d_iwdy, d_iwdz, d_id, sizeX, sizeY, sizeZ, d_wx, d_wy, d_wz, d_px, d_py, d_pz, nP); } //////////////////////////////////////////////////////////////////////////////// // The safe version do the normalization on the data first // so that the input of the data in the range of [0,1] // and scale the data back to the original range //////////////////////////////////////////////////////////////////////////////// template<bool inverse> __global__ void atomicSplatPos_kernel(int* d_wd , int w, int h, int l, const float* d_w, float max, const float* d_px, const float* d_py, const float* d_pz, int nP) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < nP){ float mass; if (inverse){ mass = d_w[id] * max; // normalized the mass } else { mass = d_w[id] / max; // normalized the mass } float x = d_px[id]; float y = d_py[id]; float z = d_pz[id]; Splatting::atomicSplat(d_wd, mass, x, y, z, w, h, l); } } //////////////////////////////////////////////////////////////////////////////// // Fixed point division // //////////////////////////////////////////////////////////////////////////////// // d_fwd : floating point weighted distance // d_fd : floating point total linear interpolation distance // d_iwd : fixed point weighted distance // d_ifd : fixed point total linear interpolation distance //////////////////////////////////////////////////////////////////////////////// __global__ void convertWeightedDistance_kernel(float* d_fwd, const int* d_iwd, const int* d_id, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) { d_fwd[id] = (d_id[id] == 0) ? 
0.f : float(d_iwd[id]) / float(d_id[id]); } } void convertWeightedDistance(float* d_fwd, const int* d_iwd, const int* d_id, size_t n, StreamT stream) { dim3 threads(256); dim3 grids = make_grid(iDivUp(n, threads.x)); convertWeightedDistance_kernel<<<grids, threads, 0, stream>>>(d_fwd, d_iwd, d_id, n); } __global__ void convertWeightedDistance_I_kernel(float* d_fwd, const int* d_id, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) { d_fwd[id] = (d_id[id] == 0) ? 0.f : float(__float_as_int(d_fwd[id])) / float(d_id[id]); } } void convertWeightedDistance_I(float* d_fwd, const int* d_id, size_t n, StreamT stream) { dim3 threads(256); dim3 grids = make_grid(iDivUp(n, threads.x)); convertWeightedDistance_I_kernel<<<grids, threads, 0, stream>>>(d_fwd, d_id, n); } __global__ void convertWeightedDistance_I_kernel(float* d_fx, float* d_fy, float* d_fz, const int* d_id, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) { if (d_id[id] == 0) { d_fx[id] = d_fy[id] = d_fz[id] = 0.f; } else { d_fx[id] = float(__float_as_int(d_fx[id])) / float(d_id[id]); d_fy[id] = float(__float_as_int(d_fy[id])) / float(d_id[id]); d_fz[id] = float(__float_as_int(d_fz[id])) / float(d_id[id]); } } } void convertWeightedDistance_I(float* d_fx, float* d_fy, float* d_fz, const int* d_id, size_t n, StreamT stream) { dim3 threads(256); dim3 grids = make_grid(iDivUp(n, threads.x)); convertWeightedDistance_I_kernel<<<grids, threads, 0, stream>>>(d_fx, d_fy, d_fz, d_id, n); } // __global__ void convertWeightedDistance_kernel(float* d_fwd, float* d_fd, // int* d_iwd, int* d_id, uint n) // { // uint blockId = get_blockID(); // uint id = get_threadID(blockId); // if (id < n){ // if (d_id[id] == 0) { // d_fwd[id] = 0.f; // d_fd[id] = 0.f; // } // else { // d_fwd[id] = float(d_iwd[id]) / float(d_id[id]); // d_fd[id] = S2n20(d_id[id]); // } // } // } // void convertWeightedDistance_fixed(float* d_fwd, float* d_fd, // int* d_iwd, int* d_id, uint n, StreamT stream) // { // dim3 threads(256); // dim3 grids(iDivUp(n, threads.x)); // checkConfig(grids); // convertWeightedDistance_kernel<<<grids, threads, 0, stream>>> // (d_fwd, d_fd, d_iwd, d_id, n); // } __global__ void atomicVelocitySplat_kernel_shared(int* d_wd, const float* d_w, const float* vx, const float* vy, const float* vz, int w, int h, int l) { __shared__ int s_0[16*16]; __shared__ int s_1[16*16]; __shared__ int s_2[16*16]; const uint wh = w * h; int xc = blockIdx.x * blockDim.x; int yc = blockIdx.y * blockDim.y; int i = xc + threadIdx.x; int j = yc + threadIdx.y; if (i < w && j < h){ uint id = i + j * w; s_0[threadIdx.y * blockDim.x + threadIdx.x] = 0; s_1[threadIdx.y * blockDim.x + threadIdx.x] = 0; int* s_p = s_0, *s_c = s_1, *s_n = s_2; for (int k=0; k < l; ++k, id+=wh) { // Initialize the new buffer with zero s_n[threadIdx.y * blockDim.x + threadIdx.x] = 0; //__syncthreads(); float mass = d_w[id]; float x = i + vx[id]; float y = j + vy[id]; float z = k + vz[id]; int xInt = int(x); int yInt = int(y); int zInt = int(z); if (x < 0 && x != xInt) --xInt; if (y < 0 && y != yInt) --yInt; if (z < 0 && z != zInt) --zInt; float dx = 1.f - (x - xInt); float dy = 1.f - (y - yInt); float dz = 1.f - (z - zInt); uint new_id = (zInt * h + yInt) * w + xInt; int dist; if (isInside3D(xInt - xc, yInt - yc, zInt + 1 - k, blockDim.x-1, blockDim.y-1, 2)) { int* s_l0, *s_l1; if (zInt == k){ s_l0 = s_c; s_l1 = s_n; } else { s_l0 = s_p; s_l1 = s_c; } uint sid = (xInt - xc) + (yInt-yc) * 16; dist = S2p20(mass * dx * dy * dz); 
atomicAdd(s_l0 + sid, dist); dist = S2p20(mass * (1.f-dx) * dy * dz); atomicAdd(s_l0 + sid + 1, dist); dist = S2p20(mass * dx * (1.f - dy) * dz); atomicAdd(s_l0 + sid + 16, dist); dist = S2p20(mass * (1.f -dx) * (1.f - dy) * dz); atomicAdd(s_l0 + sid + 16 +1, dist); dist = S2p20(mass * dx * dy * (1-dz)); atomicAdd(s_l1 + sid, dist); dist = S2p20(mass * (1.f-dx) * dy * (1-dz)); atomicAdd(s_l1 + sid + 1, dist); dist = S2p20(mass * dx * (1.f - dy) * (1-dz)); atomicAdd(s_l1 + sid + 16, dist); dist = S2p20(mass * (1.f -dx) * (1.f - dy) * (1-dz)); atomicAdd(s_l1 + sid + 16 +1, dist); }else #if 1 if (isInside3D(xInt, yInt, zInt, w-1, h-1, l-1)){ dist = S2p20(mass * dx * dy * dz); atomicAdd(&d_wd[new_id],dist); dist = S2p20(mass * (1.f-dx) * dy * dz); atomicAdd(&d_wd[new_id + 1], dist); dist = S2p20(mass * dx * (1.f - dy) * dz); atomicAdd(&d_wd[new_id + w], dist); dist = S2p20(mass * (1.f -dx) * (1.f - dy) * dz); atomicAdd(&d_wd[new_id + w + 1], dist); new_id += w*h; dist = S2p20(mass * dx * dy * (1.f - dz)); atomicAdd(&d_wd[new_id],dist); dist = S2p20(mass * (1.f-dx) * dy * (1.f -dz)); atomicAdd(&d_wd[new_id + 1], dist); dist = S2p20(mass * dx * (1.f - dy) * (1.f -dz)); atomicAdd(&d_wd[new_id + w], dist); dist = S2p20(mass * (1.f -dx) * (1.f - dy) * (1.f -dz)); atomicAdd(&d_wd[new_id + w + 1], dist); } #else { if (isInside3D(xInt, yInt, zInt, w, h, l)){ dist = S2p20(mass * dx * dy * dz); atomicAdd(&d_wd[new_id],dist); } if (isInside3D(xInt + 1, yInt, zInt, w, h, l)){ dist = S2p20(mass * (1.f-dx) * dy * dz); atomicAdd(&d_wd[new_id + 1], dist); } if (isInside3D(xInt, yInt+1, zInt, w, h, l)){ dist = S2p20(mass * dx * (1.f - dy) * dz); atomicAdd(&d_wd[new_id + w], dist); } if (isInside3D(xInt+1, yInt+1, zInt, w, h, l)){ dist = S2p20(mass * (1.f -dx) * (1.f - dy) * dz); atomicAdd(&d_wd[new_id + w + 1], dist); } new_id += w*h; if (isInside3D(xInt, yInt, zInt + 1, w, h, l)){ dist = S2p20(mass * dx * dy * (1.f - dz)); atomicAdd(&d_wd[new_id],dist); } if (isInside3D(xInt + 1, yInt, zInt+1, w, h, l)){ dist = S2p20(mass * (1.f-dx) * dy * (1.f -dz)); atomicAdd(&d_wd[new_id + 1], dist); } if (isInside3D(xInt, yInt+1, zInt+1, w, h, l)){ dist = S2p20(mass * dx * (1.f - dy) * (1.f -dz)); atomicAdd(&d_wd[new_id + w], dist); } if (isInside3D(xInt+1, yInt+1, zInt+1, w, h, l)){ dist = S2p20(mass * (1.f -dx) * (1.f - dy) * (1.f -dz)); atomicAdd(&d_wd[new_id + w + 1], dist); } } #endif __syncthreads(); //write out the previous layer if( k > 0){ atomicAdd(&d_wd[id - wh], s_p[threadIdx.x + threadIdx.y * 16]); } //write out the current layer if it is the last if ( k == l - 1){ atomicAdd(&d_wd[id], s_c[threadIdx.x + threadIdx.y * 16]); } int* temp = s_p; s_p = s_c; s_c = s_n; s_n = temp; } } } __global__ void FixedToFloating_kernel(float* d_o, const int* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = S2n20(d_i[id]); } void FixedToFloating(float* d_o, const int* d_i, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); FixedToFloating_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } __global__ void FixedToFloating_kernel(float* d_ox, float* d_oy, float* d_oz, const int* d_ix, const int* d_iy,const int* d_iz, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) { d_ox[id] = S2n20(d_ix[id]); d_oy[id] = S2n20(d_iy[id]); d_oz[id] = S2n20(d_iz[id]); } } void FixedToFloating(float* d_ox, float* d_oy, float* d_oz, const int* d_ix, const int* d_iy,const int* d_iz, size_t n, StreamT stream){ dim3 
threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); FixedToFloating_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, n); } __global__ void FixedToFloating_I_kernel(float* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) { int v = __float_as_int(d_o[id]); d_o[id] = S2n20(v); } } void FixedToFloating_I(float* d_o, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); FixedToFloating_I_kernel<<<grids, threads, 0, stream>>>(d_o, n); } __global__ void FixedToFloating_I_kernel(float* d_ox, float* d_oy, float* d_oz, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) { int vx = __float_as_int(d_ox[id]); int vy = __float_as_int(d_oy[id]); int vz = __float_as_int(d_oz[id]); d_ox[id] = S2n20(vx); d_oy[id] = S2n20(vy); d_oz[id] = S2n20(vz); } } void FixedToFloating_I(float* d_ox, float* d_oy, float* d_oz, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); FixedToFloating_I_kernel<<<grids, threads, 0, stream>>>(d_ox, d_oy, d_oz, n); } } // end namespace Splatting } // end namespace PyCA
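// ----------------------------------------------------------------------------
// Illustrative note (not part of PyCA): the splatting kernels above accumulate
// trilinear weights with integer atomicAdd in a fixed-point format.  Judging
// from the names, S2p20/S2n20 presumably convert to/from a signed value with
// 20 fractional bits; the helpers below are an assumed stand-in, and the real
// macros (in pycaUtils.h) may differ.  Accumulating in integers makes the sum
// independent of the order in which threads reach a voxel, which floating-point
// atomicAdd does not guarantee.
// ----------------------------------------------------------------------------
__device__ __forceinline__ int   to_fixed20(float x)  { return (int)(x * (float)(1 << 20)); }
__device__ __forceinline__ float from_fixed20(int v)  { return (float)v / (float)(1 << 20); }

// Minimal sketch: every thread splats its weight into one of 'nBins' cells.
__global__ void fixed_point_accumulate_sketch(int *acc, const float *w, int nP, int nBins)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nP)
        atomicAdd(&acc[i % nBins], to_fixed20(w[i]));
    // afterwards, acc[b] holds the fixed-point sum; convert back with from_fixed20()
}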
// TODO: Pass expected values to the device, and results back to the host for // comparison. At the moment, errors only tell you which check failed, not // what the actual and expected values were, nor what arguments the tested // function was invoked with. // Note: // Testcases are adapted from those used in the Public-Domain C Library. See: // https://rootdirectory.ddns.net/dokuwiki/doku.php?id=pdclib:start constexpr const std::size_t max_num_checks_per_test { 100 }; namespace kernels { __global__ void test_strcmp(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s1, const char* s2, bool (*predicate)(int) ) { *(result++) = predicate(kat::c_std_lib::strcmp(s1, s2)); }; const auto abcde = "abcde"; const auto abcdx = "abcdx"; const auto cmpabcde = "abcde"; const auto cmpabcd_ = "abcd\xfc"; const auto empty = ""; auto is_negative = [](int i) { return i < 0; }; auto is_positive = [](int i) { return i > 0; }; auto is_zero = [](int i) { return i == 0; }; constexpr int line_before_first_check = __LINE__; single_check(abcde, cmpabcde, is_zero ); single_check(abcde, abcdx, is_negative ); single_check(abcdx, abcde, is_positive ); single_check(empty, abcde, is_negative ); single_check(abcde, empty, is_positive ); single_check(abcde, cmpabcd_, is_negative ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strncmp(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s1, const char* s2, std::size_t n, bool (*predicate)(int) ) { *(result++) = predicate(kat::c_std_lib::strncmp(s1, s2, n)); }; const auto abcde = "abcde"; const auto abcdx = "abcdx"; const auto cmpabcde = "abcde"; const auto cmpabcd_ = "abcd\xfc"; const auto empty = ""; const auto x = "x"; auto is_negative = [](int i) { return i < 0; }; auto is_positive = [](int i) { return i > 0; }; auto is_zero = [](int i) { return i == 0; }; constexpr int line_before_first_check = __LINE__; single_check(abcde, cmpabcde, 5, is_zero); single_check(abcde, cmpabcde, 10, is_zero); single_check(abcde, abcdx, 5, is_negative); single_check(abcdx, abcde, 5, is_positive); single_check(empty, abcde, 5, is_negative); single_check(abcde, empty, 5, is_positive); single_check(abcde, abcdx, 4, is_zero); single_check(abcde, x, 0, is_zero); single_check(abcde, x, 1, is_negative); single_check(abcde, cmpabcd_, 10, is_negative); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_memcmp(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const void* s1, const void* s2, std::size_t n, bool (*predicate)(int) ) { *(result++) = predicate(kat::c_std_lib::memcmp(s1, s2, n)); }; const auto abcde = "abcde"; const auto abcdx = "abcdx"; const auto xxxxx = "xxxxx"; auto is_negative = [](int i) { return i < 0; }; auto is_positive = [](int i) { return i > 0; }; auto is_zero = [](int i) { return i == 0; }; constexpr int line_before_first_check = __LINE__; single_check(abcde, abcdx, 5, is_negative); single_check(abcde, abcdx, 4, is_zero); single_check(abcdx, xxxxx, 0, is_zero); single_check(xxxxx, abcde, 1, is_positive); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strcpy(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check_invocation = [&](char* dest, const 
char* src ) { auto ret = kat::c_std_lib::strcpy(dest, src); *(result++) = (ret == dest); }; auto single_check_char_value = [&](const char* strcpy_dest, std::size_t pos, char expected_value) { *(result++) = (strcpy_dest[pos] == expected_value); }; const auto abcde = "abcde"; char s[] = "xxxxx"; constexpr int line_before_first_check = __LINE__; single_check_invocation(s, "" ); single_check_char_value(s, 0, '\0' ); single_check_char_value(s, 1, 'x' ); single_check_invocation(s, abcde); single_check_char_value(s, 0, 'a' ); single_check_char_value(s, 4, 'e' ); single_check_char_value(s, 5, '\0' ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strncpy(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check_invocation = [&](char* dest, const char* src, std::size_t n ) { auto ret = kat::c_std_lib::strncpy(dest, src, n); *(result++) = (ret == dest); }; auto single_check_char_value = [&](const char* strncpy_dest, std::size_t pos, char expected_value) { *(result++) = (strncpy_dest[pos] == expected_value); }; const auto abcde = "abcde"; char s[] = "xxxxxxx"; constexpr int line_before_first_check = __LINE__; single_check_invocation( s, "", 1 ); single_check_char_value( s, 0, '\0' ); single_check_char_value( s, 1, 'x' ); single_check_invocation( s, abcde, 6 ); single_check_char_value( s, 0, 'a' ); single_check_char_value( s, 4, 'e' ); single_check_char_value( s, 5, '\0' ); single_check_char_value( s, 6, 'x' ); single_check_invocation( s, abcde, 7 ); single_check_char_value( s, 6, '\0' ); single_check_invocation( s, "xxxx", 3 ); single_check_char_value( s, 0, 'x' ); single_check_char_value( s, 2, 'x' ); single_check_char_value( s, 3, 'd' ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strlen(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s, std::size_t expected) { *(result++) = (kat::c_std_lib::strlen(s) == expected); }; constexpr int line_before_first_check = __LINE__; single_check( "abcde", 5 ); single_check( "", 0 ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strcat(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check_invocation = [&](char* dest, const char* src ) { auto ret = kat::c_std_lib::strcat(dest, src); *(result++) = (ret == dest); }; auto single_check_char_value = [&](const char* strcat_dest, std::size_t pos, char expected_value) { *(result++) = (strcat_dest[pos] == expected_value); }; const auto abcde = "abcde"; const auto abcdx = "abcdx"; char s[] = "xx\0xxxxxx"; constexpr int line_before_first_check = __LINE__; single_check_invocation(s, abcde); single_check_char_value(s, 2, 'a' ); single_check_char_value(s, 6, 'e' ); single_check_char_value(s, 7, '\0' ); single_check_char_value(s, 8, 'x' ); s[0] = '\0'; single_check_invocation(s, abcdx); single_check_char_value(s, 4, 'x' ); single_check_char_value(s, 5, '\0' ); single_check_invocation(s, "\0"); single_check_char_value(s, 5, '\0' ); single_check_char_value(s, 6, 'e' ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strncat(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check_invocation = [&](char* dest, const char* src, std::size_t n 
) { auto ret = kat::c_std_lib::strncat(dest, src, n); *(result++) = (ret == dest); }; auto single_check_char_value = [&](const char* strncat_dest, std::size_t pos, char expected_value) { *(result++) = (strncat_dest[pos] == expected_value); }; const auto abcde = "abcde"; const auto abcdx = "abcdx"; char s[] = "xx\0xxxxxx"; constexpr int line_before_first_check = __LINE__; single_check_invocation(s, abcde, 10); single_check_char_value(s, 2, 'a' ); single_check_char_value(s, 6, 'e' ); single_check_char_value(s, 7, '\0' ); single_check_char_value(s, 8, '\0' ); // Additional nulls must have been written, even beyond the end of the concatenation string s[0] = '\0'; single_check_invocation(s, abcdx, 10); single_check_char_value(s, 4, 'x' ); single_check_char_value(s, 5, '\0' ); single_check_invocation(s, "\0", 10); single_check_char_value(s, 5, '\0' ); single_check_char_value(s, 6, '\0' ); // Additional nulls must have been written, even beyond the end of the concatenation string single_check_invocation(s, abcde, 0); single_check_char_value(s, 4, 'x' ); single_check_char_value(s, 5, '\0' ); single_check_char_value(s, 6, '\0' ); // Additional nulls must have been written, even beyond the end of the concatenation string single_check_invocation(s, abcde, 3); single_check_char_value(s, 5, 'a' ); single_check_char_value(s, 7, 'c' ); single_check_char_value(s, 8, '\0' ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_memcpy(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check_invocation = [&](char* dest, const char* src, std::size_t n ) { auto ret = kat::c_std_lib::memcpy(dest, src, n); *(result++) = (ret == dest); }; auto single_check_char_value = [&](const char* memcpy_dest, std::size_t pos, char expected_value) { *(result++) = (memcpy_dest[pos] == expected_value); }; const auto abcde = "abcde"; char s[] = "xxxxxxxxxxx"; constexpr int line_before_first_check = __LINE__; single_check_invocation(s, abcde, 6); single_check_char_value(s, 4, 'e' ); single_check_char_value(s, 5, '\0' ); single_check_char_value(s, 6, 'x' ); single_check_invocation(s + 5, abcde, 5); single_check_char_value(s, 9, 'e' ); single_check_char_value(s, 10, 'x' ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_memset(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check_invocation = [&](void* s, int c, std::size_t n ) { auto ret = kat::c_std_lib::memset(s, c, n); *(result++) = (ret == s); }; auto single_check_char_value = [&](const char* memset_dest, std::size_t pos, char expected_value) { *(result++) = (memset_dest[pos] == expected_value); }; char s[] = "xxxxxxxxx"; constexpr int line_before_first_check = __LINE__; single_check_invocation(s, 'o', 10); single_check_char_value(s, 0, 'o' ); single_check_char_value(s, 9, 'o' ); single_check_invocation(s, '_', 0); single_check_char_value(s, 0, 'o' ); single_check_invocation(s, '_', 1); single_check_char_value(s, 0, '_' ); single_check_invocation(s, '\xfd', 3); single_check_char_value(s, 2, '\xfd' ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_memchr(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s, int c, std::size_t n, const char* expected ) { *(result++) = (kat::c_std_lib::memchr(s, c, n) == 
expected); }; const auto abcde = "abcde"; constexpr int line_before_first_check = __LINE__; single_check(abcde, 'c', 5, &abcde[2] ); single_check(abcde, 'a', 1, &abcde[0] ); single_check(abcde, 'a', 0, nullptr ); single_check(abcde, '\0', 5, nullptr ); single_check(abcde, '\0', 6, &abcde[5] ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strchr(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s, int c, const char* expected ) { *(result++) = (kat::c_std_lib::strchr(s, c) == expected); }; const auto abccd = "abccd"; constexpr int line_before_first_check = __LINE__; single_check(abccd, 'x', nullptr ); single_check(abccd, 'a', &abccd[0] ); single_check(abccd, 'd', &abccd[4] ); single_check(abccd, '\0', &abccd[5] ); single_check(abccd, 'c', &abccd[2] ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strrchr(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s, int c, const char* expected ) { *(result++) = (kat::c_std_lib::strrchr(s, c) == expected); }; const auto abcde = "abcde"; const auto abccd = "abccd"; constexpr int line_before_first_check = __LINE__; single_check(abcde, '\0', &abcde[5] ); single_check(abcde, 'e', &abcde[4] ); single_check(abcde, 'a', &abcde[0] ); single_check(abccd, 'c', &abccd[3] ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strpbrk(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s, const char* accept, const char* expected ) { *(result++) = (kat::c_std_lib::strpbrk(s, accept) == expected); }; const auto abcde = "abcde"; const auto abcdx = "abcdx"; constexpr int line_before_first_check = __LINE__; single_check(abcde, "x", nullptr ); single_check(abcde, "xyz", nullptr ); single_check(abcdx, "x", &abcdx[4] ); single_check(abcdx, "xyz", &abcdx[4] ); single_check(abcdx, "zyx", &abcdx[4] ); single_check(abcde, "a", &abcde[0] ); single_check(abcde, "abc", &abcde[0] ); single_check(abcde, "cba", &abcde[0] ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strspn(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s, const char* accept, std::size_t expected ) { *(result++) = (kat::c_std_lib::strspn(s, accept) == expected); }; const auto abcde = "abcde"; constexpr int line_before_first_check = __LINE__; single_check(abcde, "abc", 3 ); single_check(abcde, "b", 0 ); single_check(abcde, abcde, 5 ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strcspn(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* s, const char* reject, std::size_t expected ) { *(result++) = (kat::c_std_lib::strcspn(s, reject) == expected); }; const auto abcde = "abcde"; const auto abcdx = "abcdx"; constexpr int line_before_first_check = __LINE__; single_check(abcde, "x", 5 ); single_check(abcde, "xyz", 5 ); single_check(abcde, "zyx", 5 ); single_check(abcdx, "x", 4 ); single_check(abcdx, "xyz", 4 ); single_check(abcdx, "zyx", 4 ); single_check(abcde, "a", 0 ); single_check(abcde, "abc", 0 ); 
single_check(abcde, "cba", 0 ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strstr(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* haystack, const char* needle, const char* expected ) { // printf("Haystack: %s , Needle: %s , strstr pos: %d\n", haystack, needle, kat::c_std_lib::strstr(haystack, needle) == nullptr ? -1 : kat::c_std_lib::strstr(haystack, needle) - haystack); *(result++) = (kat::c_std_lib::strstr(haystack, needle) == expected); }; char s[] = "abcabcabcdabcde"; constexpr int line_before_first_check = __LINE__; single_check(s, "x", nullptr ); single_check(s, "xyz", nullptr ); single_check(s, "a", &s[0] ); single_check(s, "abc", &s[0] ); single_check(s, "abcd", &s[6] ); single_check(s, "abcde", &s[10] ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } __global__ void test_strrstr(bool* results, std::size_t* num_checks) { bool* result = results; auto single_check = [&](const char* haystack, const char* needle, const char* expected ) { *(result++) = (kat::c_std_lib::strrstr(haystack, needle) == expected); }; const auto s = "abcabcabcdabcde"; constexpr int line_before_first_check = __LINE__; single_check(s, "x", nullptr ); single_check(s, "xyz", nullptr ); single_check(s, "a", &s[10] ); single_check(s, "abc", &s[10] ); single_check(s, "abca", &s[3] ); single_check(s, "abcab", &s[3] ); single_check(s, "abcabca", &s[0] ); constexpr int line_after_last_check = __LINE__; *num_checks = line_after_last_check - line_before_first_check - 1; } } // namespace kernels TEST_SUITE("c_string") { using kernel_type = void (*)(bool*, std::size_t*); void conduct_test(kernel_type kernel, const char* kernel_name) { cuda::device_t device { cuda::device::current::get() }; auto block_size { 1 }; auto num_grid_blocks { 1 }; auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) }; auto device_side_results { cuda::memory::device::make_unique<bool[]>(device, max_num_checks_per_test) }; auto device_side_num_checks { cuda::memory::device::make_unique<std::size_t>(device) }; bool host_side_results[max_num_checks_per_test]; std::size_t host_side_num_checks; cuda::launch( kernel, launch_config, device_side_results.get(), device_side_num_checks.get() ); cuda::memory::copy(host_side_results, device_side_results.get(), sizeof(bool) * max_num_checks_per_test); cuda::memory::copy_single(&host_side_num_checks, device_side_num_checks.get()); for(std::size_t i = 0; i < host_side_num_checks; i++) { CHECK(host_side_results[i] == true); if (not host_side_results[i]) { auto width_4 { std::setw(4) }; auto i_plus_1 { i+1 }; CHECK_MESSAGE(false, kernel_name << " check " << width_4 << i_plus_1 << " (1-based) of " << host_side_num_checks << " failed."); } } } TEST_CASE("strcmp" ) { conduct_test(kernels::test_strcmp, "strcmp"); } TEST_CASE("strncmp") { conduct_test(kernels::test_strncmp, "strncmp"); } TEST_CASE("memcmp" ) { conduct_test(kernels::test_memcmp, "memcmp"); } TEST_CASE("strcpy" ) { conduct_test(kernels::test_strcpy, "strcpy"); } TEST_CASE("strncpy") { conduct_test(kernels::test_strncpy, "strncpy"); } TEST_CASE("strlen" ) { conduct_test(kernels::test_strlen, "strlen"); } TEST_CASE("strcat" ) { conduct_test(kernels::test_strcat, "strcat"); } TEST_CASE("strncat") { conduct_test(kernels::test_strncat, "strncat"); } TEST_CASE("memcpy" ) { conduct_test(kernels::test_memcpy, "memcpy"); } 
TEST_CASE("memset" ) { conduct_test(kernels::test_memset, "memset"); } TEST_CASE("memchr" ) { conduct_test(kernels::test_memchr, "memchr"); } TEST_CASE("strchr" ) { conduct_test(kernels::test_strchr, "strchr"); } TEST_CASE("strrchr") { conduct_test(kernels::test_strrchr, "strrchr"); } TEST_CASE("strpbrk") { conduct_test(kernels::test_strpbrk, "strpbrk"); } TEST_CASE("strspn" ) { conduct_test(kernels::test_strspn, "strspn"); } TEST_CASE("strcspn") { conduct_test(kernels::test_strcspn, "strcspn"); } TEST_CASE("strstr" ) { conduct_test(kernels::test_strstr, "strstr"); } TEST_CASE("strrstr") { conduct_test(kernels::test_strrstr, "strrstr"); } } // TEST_SUITE("c_string")
#ifdef WIN32 bool IsOpenGLAvailable(const char *appName) { return true; } #else #if (defined(__APPLE__) || defined(MACOSX)) bool IsOpenGLAvailable(const char *appName) { return true; } #else // check if this is a linux machine #include <X11/Xlib.h> bool IsOpenGLAvailable(const char *appName) { Display *Xdisplay = XOpenDisplay(NULL); if (Xdisplay == NULL) { return false; } else { XCloseDisplay(Xdisplay); return true; } } #endif #endif #if defined(__APPLE__) || defined(MACOSX) #include <GLUT/glut.h> #else #include <GL/glut.h> #endif #include "fluidsGL_kernels.cu" #define MAX_EPSILON_ERROR 1.0f const char *sSDKname = "fluidsGL"; // Define the files that are to be save and the reference images for validation const char *sOriginal[] = { "fluidsGL.ppm", NULL }; const char *sReference[] = { "ref_fluidsGL.ppm", NULL }; // CUDA example code that implements the frequency space version of // Jos Stam's paper 'Stable Fluids' in 2D. This application uses the // CUDA FFT library (CUFFT) to perform velocity diffusion and to // force non-divergence in the velocity field at each time step. It uses // CUDA-OpenGL interoperability to update the particle field directly // instead of doing a copy to system memory before drawing. Texture is // used for automatic bilinear interpolation at the velocity advection step. #ifdef __DEVICE_EMULATION__ #define DIM 64 // Square size of solver domain #else #define DIM 512 // Square size of solver domain #endif #define DS (DIM*DIM) // Total domain size #define CPADW (DIM/2+1) // Padded width for real->complex in-place FFT #define RPADW (2*(DIM/2+1)) // Padded width for real->complex in-place FFT #define PDS (DIM*CPADW) // Padded total domain size #define DT 0.09f // Delta T for interative solver #define VIS 0.0025f // Viscosity constant #define FORCE (5.8f*DIM) // Force scale factor #define FR 4 // Force update radius #define TILEX 64 // Tile width #define TILEY 64 // Tile height #define TIDSX 64 // Tids in X #define TIDSY 4 // Tids in Y void cleanup(void); void reshape(int x, int y); // CUFFT plan handle static cufftHandle planr2c; static cufftHandle planc2r; static cData *vxfield = NULL; static cData *vyfield = NULL; cData *hvfield = NULL; cData *dvfield = NULL; static int wWidth = max(512,DIM); static int wHeight = max(512,DIM); static int clicked = 0; static int fpsCount = 0; static int fpsLimit = 1; unsigned int timer; // Particle data GLuint vbo = 0; // OpenGL vertex buffer object struct cudaGraphicsResource *cuda_vbo_resource; // handles OpenGL-CUDA exchange static cData *particles = NULL; // particle positions in host memory static int lastx = 0, lasty = 0; // Texture pitch size_t tPitch = 0; // Now this is compatible with gcc in 64-bit bool g_bQAReadback = false; bool g_bQAAddTestForce = true; int g_iFrameToCompare = 100; int g_TotalErrors = 0; // CheckFBO/BackBuffer class objects CheckRender *g_CheckRender = NULL; void autoTest(); void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r) { dim3 tids(2*r+1, 2*r+1); addForces_k<<<1, tids>>>(v, dx, dy, spx, spy, fx, fy, r, tPitch); cutilCheckMsg("addForces_k failed."); } void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateTexture(v, DIM*sizeof(cData), DIM, tPitch); advectVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY); cutilCheckMsg("advectVelocity_k failed."); } void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, 
float visc) { // Forward FFT cufftExecR2C(planr2c, (cufftReal*)vx, (cufftComplex*)vx); cufftExecR2C(planr2c, (cufftReal*)vy, (cufftComplex*)vy); uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1), 1); uint3 tids = make_uint3(TIDSX, TIDSY, 1); diffuseProject_k<<<grid, tids>>>(vx, vy, dx, dy, dt, visc, TILEY/TIDSY); cutilCheckMsg("diffuseProject_k failed."); // Inverse FFT cufftExecC2R(planc2r, (cufftComplex*)vx, (cufftReal*)vx); cufftExecC2R(planc2r, (cufftComplex*)vy, (cufftReal*)vy); } void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, TILEY/TIDSY, tPitch); cutilCheckMsg("updateVelocity_k failed."); } void advectParticles(GLuint vbo, cData *v, int dx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); cData *p; cutilSafeCall(cudaGraphicsMapResources(1, &cuda_vbo_resource, 0)); size_t num_bytes; cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&p, &num_bytes, cuda_vbo_resource)); cutilCheckMsg("cudaGraphicsResourceGetMappedPointer failed"); advectParticles_k<<<grid, tids>>>(p, v, dx, dy, dt, TILEY/TIDSY, tPitch); cutilCheckMsg("advectParticles_k failed."); cutilSafeCall(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); cutilCheckMsg("cudaGraphicsUnmapResources failed"); } void simulateFluids(void) { // simulate fluid advectVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM, DT); diffuseProject(vxfield, vyfield, CPADW, DIM, DT, VIS); updateVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM); advectParticles(vbo, dvfield, DIM, DIM, DT); } void display(void) { if (!g_bQAReadback) { cutilCheckError(cutStartTimer(timer)); simulateFluids(); } // render points from vertex buffer glClear(GL_COLOR_BUFFER_BIT); glColor4f(0,1,0,0.5f); glPointSize(1); glEnable(GL_POINT_SMOOTH); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glEnableClientState(GL_VERTEX_ARRAY); glDisable(GL_DEPTH_TEST); glDisable(GL_CULL_FACE); glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo); glVertexPointer(2, GL_FLOAT, 0, NULL); glDrawArrays(GL_POINTS, 0, DS); glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0); glDisableClientState(GL_VERTEX_ARRAY); glDisableClientState(GL_TEXTURE_COORD_ARRAY); glDisable(GL_TEXTURE_2D); if (g_bQAReadback) { return; } // Finish timing before swap buffers to avoid refresh sync cutilCheckError(cutStopTimer(timer)); glutSwapBuffers(); fpsCount++; if (fpsCount == fpsLimit) { char fps[256]; float ifps = 1.f / (cutGetAverageTimerValue(timer) / 1000.f); sprintf(fps, "Cuda/GL Stable Fluids (%d x %d): %3.1f fps", DIM, DIM, ifps); glutSetWindowTitle(fps); fpsCount = 0; fpsLimit = (int)max(ifps, 1.f); cutilCheckError(cutResetTimer(timer)); } glutPostRedisplay(); } void autoTest(char **argv) { CFrameBufferObject *fbo = new CFrameBufferObject(wWidth, wHeight, 4, false, GL_TEXTURE_2D); g_CheckRender = new CheckFBO(wWidth, wHeight, 4, fbo); g_CheckRender->setPixelFormat(GL_RGBA); g_CheckRender->setExecPath(argv[0]); g_CheckRender->EnableQAReadback(true); fbo->bindRenderPath(); reshape(wWidth, wHeight); for(int count=0;count<g_iFrameToCompare;count++) { simulateFluids(); // add in a little force so the automated testing is interesing. 
if(g_bQAReadback && g_bQAAddTestForce) { int x = wWidth/(count+1); int y = wHeight/(count+1); float fx = (x / (float)wWidth); float fy = (y / (float)wHeight); int nx = (int)(fx * DIM); int ny = (int)(fy * DIM); int ddx = 35; int ddy = 35; fx = ddx / (float)wWidth; fy = ddy / (float)wHeight; int spy = ny-FR; int spx = nx-FR; addForces(dvfield, DIM, DIM, spx, spy, FORCE * DT * fx, FORCE * DT * fy, FR); lastx = x; lasty = y; //g_bQAAddTestForce = false; // only add it once } } display(); fbo->unbindRenderPath(); // compare to offical reference image, printing PASS or FAIL. printf("> (Frame %d) Readback BackBuffer\n", 100); g_CheckRender->readback( wWidth, wHeight ); g_CheckRender->savePPM(sOriginal[0], true, NULL); if (!g_CheckRender->PPMvsPPM(sOriginal[0], sReference[0], MAX_EPSILON_ERROR, 0.25f)) { g_TotalErrors++; } } // very simple von neumann middle-square prng. can't use rand() in -qatest // mode because its implementation varies across platforms which makes testing // for consistency in the important parts of this program difficult. float myrand(void) { static int seed = 72191; char sq[22]; if (g_bQAReadback) { seed *= seed; sprintf(sq, "%010d", seed); // pull the middle 5 digits out of sq sq[8] = 0; seed = atoi(&sq[3]); return seed/99999.f; } else { return rand()/(float)RAND_MAX; } } void initParticles(cData *p, int dx, int dy) { int i, j; for (i = 0; i < dy; i++) { for (j = 0; j < dx; j++) { p[i*dx+j].x = (j+0.5f+(myrand() - 0.5f))/dx; p[i*dx+j].y = (i+0.5f+(myrand() - 0.5f))/dy; } } } void keyboard( unsigned char key, int x, int y) { switch( key) { case 27: exit (0); break; case 'r': memset(hvfield, 0, sizeof(cData) * DS); cudaMemcpy(dvfield, hvfield, sizeof(cData) * DS, cudaMemcpyHostToDevice); initParticles(particles, DIM, DIM); cudaGraphicsUnregisterResource(cuda_vbo_resource); cutilCheckMsg("cudaGraphicsUnregisterBuffer failed"); glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo); glBufferDataARB(GL_ARRAY_BUFFER_ARB, sizeof(cData) * DS, particles, GL_DYNAMIC_DRAW_ARB); glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0); cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, cudaGraphicsMapFlagsNone); cutilCheckMsg("cudaGraphicsGLRegisterBuffer failed"); break; default: break; } } void click(int button, int updown, int x, int y) { lastx = x; lasty = y; clicked = !clicked; } void motion (int x, int y) { // Convert motion coordinates to domain float fx = (lastx / (float)wWidth); float fy = (lasty / (float)wHeight); int nx = (int)(fx * DIM); int ny = (int)(fy * DIM); if (clicked && nx < DIM-FR && nx > FR-1 && ny < DIM-FR && ny > FR-1) { int ddx = x - lastx; int ddy = y - lasty; fx = ddx / (float)wWidth; fy = ddy / (float)wHeight; int spy = ny-FR; int spx = nx-FR; addForces(dvfield, DIM, DIM, spx, spy, FORCE * DT * fx, FORCE * DT * fy, FR); lastx = x; lasty = y; } glutPostRedisplay(); } void reshape(int x, int y) { wWidth = x; wHeight = y; glViewport(0, 0, x, y); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(0, 1, 1, 0, 0, 1); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glutPostRedisplay(); } void cleanup(void) { cudaGraphicsUnregisterResource(cuda_vbo_resource); cutilCheckMsg("cudaGLUnregisterResource failed"); unbindTexture(); deleteTexture(); // Free all host and device resources free(hvfield); free(particles); cudaFree(dvfield); cudaFree(vxfield); cudaFree(vyfield); cufftDestroy(planr2c); cufftDestroy(planc2r); glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0); glDeleteBuffersARB(1, &vbo); cutilCheckError(cutDeleteTimer(timer)); } int initGL(int *argc, char **argv) { if (IsOpenGLAvailable(sSDKname)) 
{ fprintf( stderr, " OpenGL device is Available\n"); } else { fprintf( stderr, " OpenGL device is NOT Available, [%s] exiting...\n PASSED\n", sSDKname ); return CUTFalse; } glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(wWidth, wHeight); glutCreateWindow("Compute Stable Fluids"); glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(click); glutMotionFunc(motion); glutReshapeFunc(reshape); glewInit(); if (! glewIsSupported( "GL_ARB_vertex_buffer_object" )) { fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush( stderr); return CUTFalse; } return CUTTrue; } int main(int argc, char** argv) { int devID; cudaDeviceProp deviceProps; printf("[%s] - [OpenGL/CUDA simulation] starting...\n", sSDKname); // First initialize OpenGL context, so we can properly set the GL for CUDA. // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop. if (CUTFalse == initGL(&argc, argv)) { cutilExit(argc, argv); exit(0); } // use command-line specified CUDA device, otherwise use device with highest Gflops/s if (cutCheckCmdLineFlag(argc, (const char**)argv, "device")) { devID = cutilGLDeviceInit(argc, argv); if (devID < 0) { printf("no device. exiting...\n"); cutilExit(argc, argv); exit(0); } } else { devID = cutGetMaxGflopsDeviceId(); cutilSafeCall(cudaGLSetGLDevice(devID)); } // get number of SMs on this GPU cutilSafeCall(cudaGetDeviceProperties(&deviceProps, devID)); printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount); // automated build testing harness if (cutCheckCmdLineFlag(argc, (const char **)argv, "qatest") || cutCheckCmdLineFlag(argc, (const char **)argv, "noprompt")) { g_bQAReadback = true; } // Allocate and initialize host data GLint bsize; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutResetTimer(timer)); hvfield = (cData*)malloc(sizeof(cData) * DS); memset(hvfield, 0, sizeof(cData) * DS); // Allocate and initialize device data cudaMallocPitch((void**)&dvfield, &tPitch, sizeof(cData)*DIM, DIM); cudaMemcpy(dvfield, hvfield, sizeof(cData) * DS, cudaMemcpyHostToDevice); // Temporary complex velocity field data cudaMalloc((void**)&vxfield, sizeof(cData) * PDS); cudaMalloc((void**)&vyfield, sizeof(cData) * PDS); setupTexture(DIM, DIM); bindTexture(); // Create particle array particles = (cData*)malloc(sizeof(cData) * DS); memset(particles, 0, sizeof(cData) * DS); initParticles(particles, DIM, DIM); // Create CUFFT transform plan configuration cufftPlan2d(&planr2c, DIM, DIM, CUFFT_R2C); cufftPlan2d(&planc2r, DIM, DIM, CUFFT_C2R); // TODO: update kernels to use the new unpadded memory layout for perf // rather than the old FFTW-compatible layout cufftSetCompatibilityMode(planr2c, CUFFT_COMPATIBILITY_FFTW_PADDING); cufftSetCompatibilityMode(planc2r, CUFFT_COMPATIBILITY_FFTW_PADDING); glGenBuffersARB(1, &vbo); glBindBufferARB(GL_ARRAY_BUFFER_ARB, vbo); glBufferDataARB(GL_ARRAY_BUFFER_ARB, sizeof(cData) * DS, particles, GL_DYNAMIC_DRAW_ARB); glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &bsize); if (bsize != (sizeof(cData) * DS)) goto EXTERR; glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0); cutilSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, cudaGraphicsMapFlagsNone)); cutilCheckMsg("cudaGraphicsGLRegisterBuffer failed"); if (g_bQAReadback) { autoTest(argv); printf("[fluidsGL] - Test Results: %d Failures\n", g_TotalErrors); printf((g_TotalErrors == 0) ? 
"PASSED" : "FAILED"); printf("\n"); cleanup(); } else { atexit(cleanup); glutMainLoop(); } cudaThreadExit(); cutilExit(argc, argv); return 0; EXTERR: printf("Failed to initialize GL extensions.\n"); cudaThreadExit(); return 1; }
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include "common.cuh" #include <kat/containers/array.hpp> #include <array> #include <memory> #include <kat/tuple.hpp> #include <stdio.h> #include <doctest.h> #include <cuda/api_wrappers.hpp> #include <type_traits> #include <cstdint> #include <vector> #include <algorithm> // We can use doctest's CHECK() macro on the GPU. Instead, // whenever we check some boolean expression, we'll keep // the raw result and the line number where we checked in one of // these, and send it back to the host // // Note: Not including the function name for now, but // maybe we should add it. It's not simple, obviously, // unless we use a plain array and limit the length struct result_of_check { bool result; kat::size_t line_number; }; #define GPU_CHECK(check_expression) \ do { \ results[check_index++] = result_of_check{ ( check_expression ) , __LINE__ }; \ } while(false); namespace kernels { template <typename F, typename... Ts> __global__ void run_on_gpu( F function, Ts... args ) { function(std::forward<Ts>(args)...); } template <typename F>//, typename... Ts> __global__ void run_simple_test( F test_function, result_of_check* __restrict results, kat::size_t num_checks // , Ts... args ) { test_function(results, num_checks);//, std::forward<Ts>(args)...); } } // namespace kernels #if __cplusplus >= 201703L template<typename T, typename U> struct require_same; template<typename T> struct require_same<T, T> { using type = void; }; template<typename T, typename U> typename require_same<T, U>::type check_type(U&) { } #endif // TODO: Don't pass the number of checks, have the device function return // a dynamically-allocated std-vector-like object, and carefull copy it to // the host side (first its size, then its data after corresponding allocation // on the host side). Perhaps with thrust device_vector? Or roll my own? 
template <typename F> auto execute_simple_testcase_on_gpu( F testcase_device_function, size_t num_checks = 0) { cuda::device_t device { cuda::device::current::get() }; auto host_side_results { std::vector<result_of_check>(num_checks) }; if (num_checks == 0) { cuda::launch( kernels::run_simple_test<F>, single_thread_launch_config(), testcase_device_function, nullptr, num_checks ); } else { auto device_side_results { cuda::memory::device::make_unique<result_of_check[]>(device, num_checks) }; cuda::memory::device::zero(device_side_results.get(), num_checks * sizeof(result_of_check)); // just to be on the safe side cuda::launch( kernels::run_simple_test<F>, single_thread_launch_config(), testcase_device_function, device_side_results.get(), num_checks ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(result_of_check) * num_checks); } device.synchronize(); // Probably unnecessary, but let's just be on the safe side return host_side_results; } void check_results( std::string test_or_testcase_name, const result_of_check* results, kat::size_t num_checks) { std::stringstream ss; // Note that it's possible for there to be _no_ results for(kat::size_t i = 0; i < num_checks; i++) { ss.str(""); ss << test_or_testcase_name << " failed check #" << (i+1) << " (1-based) at source line " << results[i].line_number; auto message = ss.str(); CHECK_MESSAGE(results[i].result, message); } } template <typename F> void execute_simple_testcase_on_gpu_and_check( std::string testcase_name, F testcase_device_function, size_t num_checks) { auto results = execute_simple_testcase_on_gpu(testcase_device_function, num_checks); check_results(testcase_name, results.data(), results.size()); } constexpr const auto checks { 1 }; // Notes: // The test suites prefixed with libstdcxx are derived from the // stdlibc++ tests for std::array. They are therefore subject to the // same license as the code for kat::array itself - see // <kat/array.hpp> for details. // // ... however... we can't use those unit tests which are expected to // fail, nor do we process exit's/abort's. At least, it seems that's // not possible with doctest and in the same file as other tests. So, // in particular, none of the tests ending with "-neg" are used. #if __cplusplus >= 201703L struct unswappable_type { }; void swap(unswappable_type&, unswappable_type&) = delete; // Not swappable, and pair not swappable via the generic std::swap. 
struct immovable_type { immovable_type(immovable_type&&) = delete; }; #endif namespace test_hdfs { namespace capacity { struct empty { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; GPU_CHECK( not a.empty() ); } { const size_t len = 0; typedef kat::array<int, len> array_type; array_type a; GPU_CHECK( a.empty() ); } } }; struct max_size { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; GPU_CHECK( a.max_size() == len ); } { const size_t len = 0; typedef kat::array<int, len> array_type; array_type a; GPU_CHECK( a.max_size() == len ); } } }; struct size { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; GPU_CHECK( a.size() == len ); } { const size_t len = 0; typedef kat::array<int, len> array_type; array_type a; GPU_CHECK( a.size() == len ); } } }; } // namespace capacity namespace comparison_operators { struct equal { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3 } }; GPU_CHECK( a == b ); GPU_CHECK( !(a == c) ); } }; struct greater { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; GPU_CHECK( !(a > b) ); GPU_CHECK( c > a ); } }; struct greater_or_equal { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; GPU_CHECK( a >= b ); GPU_CHECK( c >= a ); } }; struct less { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; GPU_CHECK( !(a < b) ); GPU_CHECK( a < c ); } }; struct less_or_equal { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; GPU_CHECK( a <= b ); GPU_CHECK( a <= c ); } }; struct not_equal { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3 } }; GPU_CHECK( !(a != b) ); GPU_CHECK( a != c ); } }; } // namespace comparison_operators namespace cons { struct aggregate_init { KAT_HD void operator()( result_of_check*, kat::size_t) { typedef kat::array<int, 5> array_type; 
array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3 } }; a = b; b = a; } }; #if __cplusplus >= 201703L struct deduction { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::array a1{ 1, 2, 3 }; check_type<kat::array<int, 3>>(a1); int y = 2; const int z = 3; kat::array a2{ 1, y, z }; check_type<kat::array<int, 3>>(a2); kat::array a3{ 'a', 'b', 'c', 'd', 'e' }; check_type<kat::array<char, 5>>(a3); kat::array copy = a1; check_type<decltype(a1)>(copy); kat::array move = std::move(a1); check_type<decltype(a1)>(move); } }; #endif } // namespace cons namespace element_access { struct tc54338 { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; struct A { bool valid = true; KAT_HD ~A() { valid = false; } }; #pragma push #pragma diag_suppress = missing_default_constructor_on_const const kat::array<A, 1> a; const A& aa = a.at(0); GPU_CHECK(aa.valid); #pragma pop } }; struct back { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; { array_type a = { { 0, 1, 2, 3, 4 } }; int& ri = a.back(); GPU_CHECK( ri == 4 ); } { const array_type ca = { { 4, 3, 2, 1, 0 } }; const int& cri = ca.back(); GPU_CHECK( cri == 0 ); } } }; struct data { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; { array_type a = { { 0, 1, 2, 3, 4 } }; int* pi = a.data(); GPU_CHECK( *pi == 0 ); } { const array_type ca = { { 4, 3, 2, 1, 0 } }; const int* pci = ca.data(); GPU_CHECK( *pci == 4 ); } } }; struct front { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; { array_type a = { { 0, 1, 2, 3, 4 } }; int& ri = a.front(); GPU_CHECK( ri == 0 ); } { const array_type ca = { { 4, 3, 2, 1, 0 } }; const int& cri = ca.front(); GPU_CHECK( cri == 4 ); } } }; } // namespace element_access namespace iterators { struct end_is_one_past { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type::iterator b = a.begin(); array_type::iterator e = a.end(); GPU_CHECK( e != (b + a.size() - 1) ); } }; } // namespace iterators namespace requirements { struct contiguous { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; // &a[n] == &a[0] + n for all 0 <= n < N. 
for (size_t i = 0; i < len; ++i) { GPU_CHECK( &a[i] == &a[0] + i ); } } }; struct fill { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 3; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2 } }; const int value = 5; a.fill(value); GPU_CHECK( a[0] == value ); GPU_CHECK( a[1] == value ); GPU_CHECK( a[2] == value ); } }; struct member_swap { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; const array_type a_ref = a; array_type b = { { 4, 3, 2, 1, 0 } }; const array_type b_ref = b; a.swap(b); GPU_CHECK( a == b_ref ); GPU_CHECK( b == a_ref ); } }; struct zero_sized_arrays { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 0; typedef kat::array<int, len> array_type; // 1: ? array_type a = { }; // 2 array_type b; // 3 // begin() == end() GPU_CHECK( a.begin() == a.end() ); GPU_CHECK( b.begin() == b.end() ); } }; } // namespace requirements namespace specialized_algorithms { struct swap { KAT_HD void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; const array_type a_ref = a; array_type b = { { 4, 3, 2, 1, 0 } }; const array_type b_ref = b; kat::swap(a, b); GPU_CHECK( a == b_ref ); GPU_CHECK( b == a_ref ); } }; #if __cplusplus >= 201703L struct swap_cpp17 { KAT_HD void operator()( result_of_check*, kat::size_t ) { static_assert( not std::is_swappable<kat::array<unswappable_type, 42>>::value ); static_assert( not std::is_swappable<kat::array<immovable_type, 42>>::value ); } }; #endif } // namespace specialized_algorithms struct range_access { KAT_DEV void operator()( result_of_check* results, kat::size_t num_checks) { kat::size_t check_index = 0; kat::array<int, 3> a{{1, 2, 3}}; auto b = kat::begin(a); GPU_CHECK(&(*b) == &(a[0])); auto e = kat::end(a); GPU_CHECK(&(*e) == &a[a.size()]); printf("*b is %d", (int) *b); } }; } // namespace test_hdfs TEST_SUITE("device-side-libstdcxx") { TEST_CASE("capacity") { SUBCASE("empty") { execute_simple_testcase_on_gpu_and_check("empty", test_hdfs::capacity::empty{}, 2 * checks); } SUBCASE("max_size") { execute_simple_testcase_on_gpu_and_check("max_size", test_hdfs::capacity::max_size{}, 2 * checks); } // SUBCASE("size") { execute_simple_testcase_on_gpu_and_check("size", test_hdfs::capacity::size{}, 2 * checks); } } // TEST_CASE("capacity") /* TEST_CASE("comparison operators") { SUBCASE("equal") { execute_simple_testcase_on_gpu_and_check("empty", test_hdfs::comparison_operators::equal{}, 2 * checks); } SUBCASE("greater") { execute_simple_testcase_on_gpu_and_check("greater", test_hdfs::comparison_operators::greater{}, 2 * checks); } SUBCASE("greater_or_equal") { execute_simple_testcase_on_gpu_and_check("greater_or_equal", test_hdfs::comparison_operators::greater_or_equal{}, 2 * checks); } SUBCASE("less") { execute_simple_testcase_on_gpu_and_check("less", test_hdfs::comparison_operators::less{}, 2 * checks); } SUBCASE("less_or_equal") { execute_simple_testcase_on_gpu_and_check("less_or_equal", test_hdfs::comparison_operators::less_or_equal{}, 2 * checks); } SUBCASE("not_equal") { execute_simple_testcase_on_gpu_and_check("not_equal", test_hdfs::comparison_operators::not_equal{}, 2 * checks); } } 
TEST_CASE("cons") { SUBCASE("aggregate_init") { execute_simple_testcase_on_gpu_and_check("aggregate_init", test_hdfs::cons::aggregate_init{}, 0 * checks); } #if __cplusplus >= 201703L SUBCASE("deduction") { execute_simple_testcase_on_gpu_and_check("deduction", test_hdfs::cons::deduction{}, 0 * checks); } #endif } TEST_CASE("element_access") { SUBCASE("54338") { execute_simple_testcase_on_gpu_and_check("54338", test_hdfs::element_access::tc54338{}, 1 * checks); } // Not testing at_invalid_index, as for device_side, that's a test where the kernel will fail // Not including test in 60497.cc - it involes std::debug:: SUBCASE("back") { execute_simple_testcase_on_gpu_and_check("back", test_hdfs::element_access::back{}, 2 * checks); } SUBCASE("data") { execute_simple_testcase_on_gpu_and_check("data", test_hdfs::element_access::data{}, 2 * checks); } SUBCASE("front") { execute_simple_testcase_on_gpu_and_check("front", test_hdfs::element_access::front{}, 2 * checks); } } // Note: Not including anything from the debug/ folder TEST_CASE("iterators") { SUBCASE("end_is_one_past") { execute_simple_testcase_on_gpu_and_check("end_is_one_past", test_hdfs::iterators::end_is_one_past{}, 1 * checks); } } TEST_CASE("requirements") { SUBCASE("contiguous") { execute_simple_testcase_on_gpu_and_check("contiguous", test_hdfs::requirements::contiguous{}, 5 * checks); } SUBCASE("fill") { execute_simple_testcase_on_gpu_and_check("fill", test_hdfs::requirements::fill{}, 3 * checks); } SUBCASE("member_swap") { execute_simple_testcase_on_gpu_and_check("member_swap", test_hdfs::requirements::member_swap{}, 2 * checks); } // Not including the non_default_constructible test - where's the test in there? SUBCASE("zero_sized_arrays"){ execute_simple_testcase_on_gpu_and_check("zero_sized_arrays",test_hdfs::requirements::zero_sized_arrays{}, 2 * checks); } } TEST_CASE("specialized algorithms") { SUBCASE("swap") { execute_simple_testcase_on_gpu_and_check("swap", test_hdfs::specialized_algorithms::swap{}, 2 * checks); } #if __cplusplus >= 201703L SUBCASE("swap_cpp17") { execute_simple_testcase_on_gpu_and_check("swap_cpp17", test_hdfs::specialized_algorithms::swap_cpp17{}, 0 * checks); } #endif } TEST_CASE("range_access") { execute_simple_testcase_on_gpu_and_check("range_access", test_hdfs::range_access{}, 0 * checks); } */ } // TEST_SUITE("device-side-libstdcxx") TEST_SUITE("host-side-libstdcxx") { TEST_CASE("capacity") { SUBCASE("empty") { SUBCASE("") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; CHECK_FALSE( a.empty() ); } SUBCASE("") { const size_t len = 0; typedef kat::array<int, len> array_type; array_type a; CHECK( a.empty() ); } } SUBCASE("max_size") { SUBCASE("") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; CHECK( a.max_size() == len ); } SUBCASE("") { const size_t len = 0; typedef kat::array<int, len> array_type; array_type a; CHECK( a.max_size() == len ); } } SUBCASE("size") { SUBCASE("") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; CHECK( a.size() == len ); } SUBCASE("") { const size_t len = 0; typedef kat::array<int, len> array_type; array_type a; CHECK( a.size() == len ); } } } // TEST_CASE("capacity") TEST_CASE("comparison operators") { SUBCASE("equal") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3 } }; CHECK( a == b ); 
CHECK( !(a == c) ); } SUBCASE("greater") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; CHECK( !(a > b) ); CHECK( c > a ); } SUBCASE("greater_or_equal") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; CHECK( a >= b ); CHECK( c >= a ); } SUBCASE("less") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; CHECK( !(a < b) ); CHECK( a < c ); } SUBCASE("less_or_equal") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3, 7 } }; CHECK( a <= b ); CHECK( a <= c ); } SUBCASE("not_equal") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3, 4 } }; array_type c = { { 0, 1, 2, 3 } }; CHECK( !(a != b) ); CHECK( a != c ); } } TEST_CASE("cons") { SUBCASE("aggregate initialization") { typedef kat::array<int, 5> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type b = { { 0, 1, 2, 3 } }; a = b; b = a; } #if __cplusplus >= 201703L SUBCASE("deduction") { kat::array a1{ 1, 2, 3 }; check_type<kat::array<int, 3>>(a1); int y = 2; const int z = 3; kat::array a2{ 1, y, z }; check_type<kat::array<int, 3>>(a2); kat::array a3{ 'a', 'b', 'c', 'd', 'e' }; check_type<kat::array<char, 5>>(a3); kat::array copy = a1; check_type<decltype(a1)>(copy); kat::array move = std::move(a1); check_type<decltype(a1)>(move); } // doctest doesn't support testcases which you expect to fail to _compile_. #if 0 SUBCASE("deduction" * should_fail(true)) { kat::array a1{}; kat::array a2{1, 2u, 3}; } #endif #endif } // Using nothing from the "constrution" and "debug" testcase directory TEST_CASE("element access") { SUBCASE("54338") { struct A { bool valid = true; ~A() { valid = false; } }; #pragma push #pragma diag_suppress = missing_default_constructor_on_const const kat::array<A, 1> a; const A& aa = a.at(0); CHECK(aa.valid); #pragma pop } // Not including test in 60497.cc - it involes std::debug:: // Not testing what happens when accessing an array at an invalid // index, because the intended behavior is execution termination, // and we don't support testing that. #if 0 SUBCASE("at_invalid_index") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; try { a.at(len); CHECK( false ); } catch(std::out_of_range& obj) { // Expected. CHECK( true ); } catch(...) { // Failed. 
CHECK( false ); } } #endif SUBCASE("back") { const size_t len = 5; typedef kat::array<int, len> array_type; { array_type a = { { 0, 1, 2, 3, 4 } }; int& ri = a.back(); CHECK( ri == 4 ); } { const array_type ca = { { 4, 3, 2, 1, 0 } }; const int& cri = ca.back(); CHECK( cri == 0 ); } } SUBCASE("") { const size_t len = 5; typedef kat::array<int, len> array_type; { array_type a = { { 0, 1, 2, 3, 4 } }; int* pi = a.data(); CHECK( *pi == 0 ); } { const array_type ca = { { 4, 3, 2, 1, 0 } }; const int* pci = ca.data(); CHECK( *pci == 4 ); } } SUBCASE("") { const size_t len = 5; typedef kat::array<int, len> array_type; { array_type a = { { 0, 1, 2, 3, 4 } }; int& ri = a.front(); CHECK( ri == 0 ); } { const array_type ca = { { 4, 3, 2, 1, 0 } }; const int& cri = ca.front(); CHECK( cri == 4 ); } } } TEST_CASE("iterators") { SUBCASE("end is one past") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; array_type::iterator b = a.begin(); array_type::iterator e = a.end(); CHECK( e != (b + a.size() - 1) ); } } TEST_CASE("requirements") { SUBCASE("contiguous") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; // &a[n] == &a[0] + n for all 0 <= n < N. for (size_t i = 0; i < len; ++i) { CHECK( &a[i] == &a[0] + i ); } } SUBCASE("fill") { const size_t len = 3; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2 } }; const int value = 5; a.fill(value); CHECK( a[0] == value ); CHECK( a[1] == value ); CHECK( a[2] == value ); } SUBCASE("member swap") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; const array_type a_ref = a; array_type b = { { 4, 3, 2, 1, 0 } }; const array_type b_ref = b; a.swap(b); CHECK( a == b_ref ); CHECK( b == a_ref ); } // Not include the non_default_constructible test - where's the test in there? SUBCASE("zero-sized arrays") { const size_t len = 0; typedef kat::array<int, len> array_type; // 1: ? 
array_type a = { }; // 2 array_type b; // 3 // begin() == end() CHECK( a.begin() == a.end() ); CHECK( b.begin() == b.end() ); } } TEST_CASE("specialized algorithms") { SUBCASE("swap") { const size_t len = 5; typedef kat::array<int, len> array_type; array_type a = { { 0, 1, 2, 3, 4 } }; const array_type a_ref = a; array_type b = { { 4, 3, 2, 1, 0 } }; const array_type b_ref = b; kat::swap(a, b); CHECK( a == b_ref ); CHECK( b == a_ref ); } #if __cplusplus >= 201703L SUBCASE("swap C++17") { static_assert( not std::is_swappable<kat::array<unswappable_type, 42>>::value ); static_assert( not std::is_swappable<kat::array<immovable_type, 42>>::value ); } #endif } TEST_CASE("range access") { SUBCASE("") { kat::array<int, 3> a{{1, 2, 3}}; auto b = kat::begin(a); CHECK(&(*b) == &(a[0])); auto e = kat::end(a); CHECK(&(*e) == &(a[a.size()])); } } } // TEST_SUITE("host-side-libstdcxx") TEST_SUITE("libstdcxx-constexpr") { TEST_CASE("capacity") { SUBCASE("constexpr functions") { constexpr const auto s { 60 }; constexpr const kat::array<long, s> arr{}; constexpr const auto size = arr.size(); (void) size; constexpr const auto max_size = arr.max_size(); (void) max_size; constexpr const auto is_empty = arr.empty(); (void) is_empty; } } TEST_CASE("comparison operators") { #if __cplusplus >= 202001L SUBCASE("constexpr") { constexpr const kat::array<const int, 3> a1{{1, 2, 3}}; constexpr const kat::array<const int, 3> a2{{4, 5, 6}}; constexpr const kat::array<const int, 3> a3{{1, 2, 4}}; constexpr const kat::array<const int, 3> a4{{1, 3, 3}}; static_assert(a1 == a1); static_assert(a1 != a2); static_assert(a1 < a3); static_assert(a4 > a1); static_assert(a1 <= a3); static_assert(a4 >= a1); static_assert(std::is_eq(a1 <=> a1)); static_assert(std::is_neq(a1 <=> a2)); static_assert(std::is_lt(a1 <=> a3)); static_assert(std::is_gt(a4 <=> a1)); constexpr const kat::array<unsigned char, 3> a5{{1, 2, 3}}; constexpr const kat::array<unsigned char, 3> a6{{4, 5, 6}}; constexpr const kat::array<unsigned char, 3> a7{{1, 2, 4}}; constexpr const kat::array<unsigned char, 3> a8{{1, 3, 3}}; static_assert(a5 == a5); static_assert(a5 != a6); static_assert(a5 < a7); static_assert(a8 > a5); static_assert(a5 <= a7); static_assert(a8 >= a5); static_assert(std::is_eq(a5 <=> a5)); static_assert(std::is_neq(a5 <=> a6)); static_assert(std::is_lt(a5 <=> a7)); static_assert(std::is_gt(a8 <=> a5)); } #endif } TEST_CASE("construction") { SUBCASE("constexpr") { #if __cplusplus >= 202001L SUBCASE("") { const char x[6]{}; kat::array<char, 6> y = std::to_array(x); constexpr char x2[] = "foo"; constexpr kat::array<char, 4> y2 = std::to_array(x2); static_assert( std::equal(y2.begin(), y2.end(), x2) ); } #endif #if __cplusplus >= 202001L SUBCASE("") { struct MoveOnly { constexpr MoveOnly(int i = 0) : i(i) { } constexpr MoveOnly(MoveOnly&& m) : i(m.i + 100) { } int i; }; struct X { MoveOnly m[3]; }; X x; kat::array<MoveOnly, 3> y = std::to_array(std::move(x).m); constexpr kat::array<MoveOnly, 3> y2 = std::to_array(X{{1, 2, 3}}.m); static_assert( y2[0].i == 101 && y2[1].i == 102 && y2[2].i == 103 ); } #endif } } TEST_CASE("element access") { SUBCASE("constexpr element access") { typedef kat::array<std::size_t, 6> array_type; constexpr array_type a = { { 0, 55, 66, 99, 4115, 2 } }; constexpr auto v1 = a[1]; (void) v1; #if __cplusplus >= 201703L constexpr auto v2 = a.at(2); (void) v2; #endif constexpr auto v3 = a.front(); (void) v3; constexpr auto v4 = a.back(); (void) v4; } } constexpr bool test_fill() { auto ok = true; kat::array<float,3> fa{}; 
fa.fill(3.333f); ok = ok && (fa[0] == fa[2]); return ok; } constexpr int test_iter() { constexpr kat::array<int, 3> a1{{1, 2, 3}}; static_assert(1 == *a1.begin()); auto n = a1[0] * a1[1]* a1[2]; static_assert(1 == *a1.cbegin()); kat::array<int, 3> a2{{0, 0, 0}}; auto a1i = a1.begin(); auto a1e = a1.end(); auto a2i = a2.begin(); while (a1i != a1e) *a2i++ = *a1i++; return n; } #if __cplusplus >= 202001L constexpr bool test_swap() { auto ok = true; kat::array<float,3> fa{{1.1f, 2.2f, 3.3f}}; kat::array<float,3> fb{{4.4f, 5.5f, 6.6f}}; fb.swap(fa); ok = ok && (fa[0] == 4.4f); std::swap(fa, fb); ok = ok && (fa[0] == 1.1f); return ok; } #endif TEST_CASE("requirements") { // Not including citerators test // Note: Not really constexpr SUBCASE("constexpr fill") { static_assert(test_fill()); } SUBCASE("constexpr iter") { static_assert(test_iter()); } #if __cplusplus >= 202001L SUBCASE("constexpr swap") { static_assert(test_swap()); } #endif } // Modified from the libstdc++ test - which seems like it should work // The following segment is part of the tuple interface testcase, below, // but must appear in file scope, not within a function. namespace testcase_tuple_interface { namespace subcase_get { kat::array<int, 5> ai; const kat::array<int, 5> cai(ai); constexpr const int& cri = kat::get<0>(cai); constexpr int& ri = kat::get<0>(ai); constexpr int&& rri = kat::get<0>(std::move(ai)); } //namespace subcase_get } // namespace testcase_tuple_interface TEST_CASE("tuple interface") { SUBCASE("tuple_element") { using kat::array; using std::tuple_element; using std::is_same; const std::size_t len = 3; typedef array<int, len> array_type; static_assert(is_same<tuple_element<0, array_type>::type, int>::value, "" ); static_assert(is_same<tuple_element<1, array_type>::type, int>::value, "" ); static_assert(is_same<tuple_element<2, array_type>::type, int>::value, ""); static_assert(is_same<tuple_element<0, const array_type>::type, const int>::value, ""); static_assert(is_same<tuple_element<1, const array_type>::type, const int>::value, ""); static_assert(is_same<tuple_element<2, const array_type>::type, const int>::value, ""); static_assert(is_same<tuple_element<0, volatile array_type>::type, volatile int>::value, ""); static_assert(is_same<tuple_element<1, volatile array_type>::type, volatile int>::value, ""); static_assert( (is_same<tuple_element<2, volatile array_type>::type, volatile int>::value == true), "" ); static_assert(is_same<tuple_element<0, const volatile array_type>::type, const volatile int>::value, ""); static_assert(is_same<tuple_element<1, const volatile array_type>::type, const volatile int>::value, ""); static_assert(is_same<tuple_element<2, const volatile array_type>::type, const volatile int>::value, ""); } #if __cplusplus >= 201402L SUBCASE("tuple_element C++14") { using std::is_same; using kat::tuple_element; using kat::tuple_element_t; const size_t len = 3; typedef kat::array<int, len> array_type; static_assert(is_same<tuple_element_t<0, array_type>, int>::value, ""); static_assert(is_same<tuple_element_t<1, array_type>, int>::value, ""); static_assert(is_same<tuple_element_t<2, array_type>, int>::value, ""); static_assert(is_same<tuple_element_t<0, const array_type>, const int>::value, ""); static_assert(is_same<tuple_element_t<1, const array_type>, const int>::value, ""); static_assert(is_same<tuple_element_t<2, const array_type>, const int>::value, ""); static_assert(is_same<tuple_element_t<0, volatile array_type>, volatile int>::value, ""); static_assert(is_same<tuple_element_t<1, 
volatile array_type>, volatile int>::value, ""); static_assert(is_same<tuple_element_t<2, volatile array_type>, volatile int>::value, ""); static_assert(is_same<tuple_element_t<0, const volatile array_type>, const volatile int>::value, ""); static_assert(is_same<tuple_element_t<1, const volatile array_type>, const volatile int>::value, ""); static_assert(is_same<tuple_element_t<2, const volatile array_type>, const volatile int>::value, ""); } #endif SUBCASE("get") { // see above } SUBCASE("tuple_size") { using kat::array; using std::tuple_size; using std::size_t; // This relies on the fact that <utility> includes <type_traits>: using std::is_same; SUBCASE("") { const size_t len = 5; typedef array<int, len> array_type; static_assert(tuple_size<array_type>::value == 5, ""); static_assert(tuple_size<const array_type>::value == 5, ""); static_assert(tuple_size<volatile array_type>::value == 5, ""); static_assert(tuple_size<const volatile array_type>::value == 5, ""); } SUBCASE("") { const size_t len = 0; typedef array<float, len> array_type; static_assert(tuple_size<array_type>::value == 0, ""); static_assert(tuple_size<const array_type>::value == 0, ""); static_assert(tuple_size<volatile array_type>::value == 0, ""); static_assert(tuple_size<const volatile array_type>::value == 0, ""); } } } // TESTCASE("tuple interface") } // TEST_SUITE("libstdcxx-constexpr") //TEST_SUITE("span-device-side") { //} // TEST_SUITE("span-device-side")
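#if __cplusplus >= 201703L
// Illustrative addition (not part of the original libstdc++-derived suite): since the
// tuple interface exercised above is complete for kat::array (std::tuple_size,
// std::tuple_element and kat::get are all usable), C++17 structured bindings should
// decompose it like std::array. A minimal sketch, with a placeholder function name:
inline void kat_array_structured_binding_sketch()
{
    kat::array<int, 3> arr{{1, 2, 3}};
    auto [p, q, r] = arr;            // tuple-like protocol: tuple_size + get<I> found via ADL
    (void) p; (void) q; (void) r;
}
#endif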
//#include <openssl/sha.h> #include <stdint.h> #include <miner.h> #include <cuda_helper.h> static uint32_t *d_hash[MAX_GPUS]; static uint32_t* d_hash_br1[MAX_GPUS]; static uint32_t* d_hash_br2[MAX_GPUS]; extern void quark_skein512_cpu_init(int thr_id, uint32_t threads); extern void quark_skein512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); extern void x11_luffa512_cpu_init(int thr_id, uint32_t threads); extern void x11_luffa512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); extern void x13_hamsi512_cpu_init(int thr_id, uint32_t threads); extern void x13_hamsi512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); extern void x13_fugue512_cpu_init(int thr_id, uint32_t threads); extern void x13_fugue512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); extern void x13_fugue512_cpu_free(int thr_id); extern void x14_shabal512_cpu_init(int thr_id, uint32_t threads); extern void x14_shabal512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); extern void x15_whirlpool_cpu_init(int thr_id, uint32_t threads, int mode); extern void x15_whirlpool_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); extern void x15_whirlpool_cpu_free(int thr_id); extern void x11_echo512_cpu_init(int thr_id, uint32_t threads); extern void x11_echo512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); extern void bastion_init(const int thr_id, const uint32_t threads); extern void bastion_free(const int thr_id); extern uint32_t bastion_filter2(const int thr_id, const uint32_t threads, const uint32_t *inpHashes, uint32_t* d_hash1, uint32_t* d_hash2); extern void bastion_merge2(const int thr_id, const uint32_t threads, uint32_t *outpHashes, uint32_t* d_hash1, uint32_t* d_hash2); extern void hefty_cpu_hash(int thr_id, uint32_t threads, int startNounce); extern void hefty_cpu_setBlock(int thr_id, uint32_t threads, void *data, int len); extern void hefty_cpu_init(int thr_id, uint32_t threads); extern void hefty_cpu_free(int thr_id); extern void hefty_copy_hashes(int thr_id, uint32_t threads, uint32_t* d_outputhash); #define TRACE(algo) {} static bool init[MAX_GPUS] = { 0 }; int scanhash_bastion(int thr_id, struct work *work, uint32_t max_nonce, unsigned long *hashes_done) { uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; // CUDA will process thousands of threads. 
uint32_t throughput = cuda_default_throughput(thr_id, 1U << 20); if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce); if (opt_benchmark) ptarget[7] = 0x00ff; if (!init[thr_id]) { cudaSetDevice(device_map[thr_id]); if (opt_cudaschedule == -1 && gpu_threads == 1) { cudaDeviceReset(); // reduce cpu usage cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); CUDA_LOG_ERROR(); } gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput); CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput)); CUDA_SAFE_CALL(cudaMalloc(&d_hash_br1[thr_id], (size_t) 64 * throughput)); CUDA_SAFE_CALL(cudaMalloc(&d_hash_br2[thr_id], (size_t) 64 * throughput)); bastion_init(thr_id, throughput); hefty_cpu_init(thr_id, throughput); x11_luffa512_cpu_init(thr_id, throughput); quark_skein512_cpu_init(thr_id, throughput); x13_hamsi512_cpu_init(thr_id, throughput); x13_fugue512_cpu_init(thr_id, throughput); x14_shabal512_cpu_init(thr_id, throughput); x15_whirlpool_cpu_init(thr_id, throughput, 0); x11_echo512_cpu_init(thr_id, throughput); cuda_check_cpu_init(thr_id, throughput); init[thr_id] = true; } uint32_t endiandata[20]; for (int k=0; k < 20; k++) be32enc(&endiandata[k], pdata[k]); hefty_cpu_setBlock(thr_id, throughput, endiandata, 80); cuda_check_cpu_setTarget(ptarget); do { uint32_t branchNonces; int order = 0; // hefty hefty_cpu_hash(thr_id, throughput, pdata[19]); hefty_copy_hashes(thr_id, throughput, d_hash[thr_id]); TRACE("hefty :"); x11_luffa512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("luffa :"); // fugue or skein branchNonces = bastion_filter2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); x13_fugue512_cpu_hash_64(thr_id, branchNonces, pdata[19], NULL, d_hash_br1[thr_id], order++); quark_skein512_cpu_hash_64(thr_id, throughput-branchNonces, pdata[19], NULL, d_hash_br2[thr_id], order++); bastion_merge2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); TRACE("perm1 :"); x15_whirlpool_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("whirl :"); x13_fugue512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); // echo or luffa branchNonces = bastion_filter2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); x11_echo512_cpu_hash_64(thr_id, branchNonces, pdata[19], NULL, d_hash_br1[thr_id], order++); x11_luffa512_cpu_hash_64(thr_id, throughput-branchNonces, pdata[19], NULL, d_hash_br2[thr_id], order++); bastion_merge2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); TRACE("perm2 :"); x14_shabal512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); quark_skein512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); // shabal or whirlpool branchNonces = bastion_filter2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); x14_shabal512_cpu_hash_64(thr_id, branchNonces, pdata[19], NULL, d_hash_br1[thr_id], order++); x15_whirlpool_cpu_hash_64(thr_id, throughput-branchNonces, pdata[19], NULL, d_hash_br2[thr_id], order++); bastion_merge2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); TRACE("perm3 :"); x14_shabal512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); // hamsi or luffa branchNonces = bastion_filter2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); 
x13_hamsi512_cpu_hash_64(thr_id, branchNonces, pdata[19], NULL, d_hash_br1[thr_id], order++); x11_luffa512_cpu_hash_64(thr_id, throughput-branchNonces, pdata[19], NULL, d_hash_br2[thr_id], order++); bastion_merge2(thr_id, throughput, d_hash[thr_id], d_hash_br1[thr_id], d_hash_br2[thr_id]); TRACE("perm4 :"); *hashes_done = pdata[19] - first_nonce + throughput; CUDA_LOG_ERROR(); work->nonces[0] = cuda_check_hash(thr_id, throughput, pdata[19], d_hash[thr_id]); if (work->nonces[0] != UINT32_MAX) { uint32_t _ALIGN(64) vhash[8]; const uint32_t Htarg = ptarget[7]; endiandata[19] = work->nonces[0]; bastionhash(vhash, (uchar*) endiandata); if (vhash[7] <= Htarg && fulltest(vhash, ptarget)) { work->valid_nonces = 1; work_set_target_ratio(work, vhash); work->nonces[0] = swab32(work->nonces[0]); work->nonces[1] = cuda_check_hash_suppl(thr_id, throughput, pdata[19], d_hash[thr_id], 1); if (work->nonces[1] != 0) { endiandata[19] = work->nonces[1]; bastionhash(vhash, (uchar*) endiandata); bn_set_target_ratio(work, vhash, 1); work->valid_nonces++; work->nonces[1] = swab32(work->nonces[1]); pdata[19] = max(work->nonces[0], work->nonces[1])+1; } else { pdata[19] = work->nonces[0]+1; // cursor } return work->valid_nonces; } else if (vhash[7] > Htarg) { gpu_increment_reject(thr_id); if (!opt_quiet) gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", work->nonces[0]); pdata[19] = swab32(work->nonces[0]) + 1; continue; } } if ((uint64_t)throughput + pdata[19] >= max_nonce) { pdata[19] = max_nonce; break; } pdata[19] += throughput; } while (!work_restart[thr_id].restart); *hashes_done = pdata[19] - first_nonce; CUDA_LOG_ERROR(); return 0; } // cleanup extern "C" void free_bastion(int thr_id) { if (!init[thr_id]) return; cudaThreadSynchronize(); cudaFree(d_hash[thr_id]); cudaFree(d_hash_br1[thr_id]); cudaFree(d_hash_br2[thr_id]); hefty_cpu_free(thr_id); x13_fugue512_cpu_free(thr_id); x15_whirlpool_cpu_free(thr_id); bastion_free(thr_id); cuda_check_cpu_free(thr_id); init[thr_id] = false; cudaDeviceSynchronize(); } #undef SPH_C32 #undef SPH_T32 #undef SPH_C64 #undef SPH_T64 extern "C" { #include "hefty1.h" #include "sph/sph_luffa.h" #include "sph/sph_fugue.h" #include "sph/sph_skein.h" #include "sph/sph_whirlpool.h" #include "sph/sph_shabal.h" #include "sph/sph_echo.h" #include "sph/sph_hamsi.h" } __host__ void bastionhash(void* output, const uchar* input) { unsigned char _ALIGN(128) hash[64] = { 0 }; sph_echo512_context ctx_echo; sph_luffa512_context ctx_luffa; sph_fugue512_context ctx_fugue; sph_whirlpool_context ctx_whirlpool; sph_shabal512_context ctx_shabal; sph_skein512_context ctx_skein; sph_hamsi512_context ctx_hamsi; HEFTY1(input, 80, hash); sph_luffa512_init(&ctx_luffa); sph_luffa512(&ctx_luffa, hash, 64); sph_luffa512_close(&ctx_luffa, hash); if (hash[0] & 0x8) { sph_fugue512_init(&ctx_fugue); sph_fugue512(&ctx_fugue, hash, 64); sph_fugue512_close(&ctx_fugue, hash); } else { sph_skein512_init(&ctx_skein); sph_skein512(&ctx_skein, hash, 64); sph_skein512_close(&ctx_skein, hash); } sph_whirlpool_init(&ctx_whirlpool); sph_whirlpool(&ctx_whirlpool, hash, 64); sph_whirlpool_close(&ctx_whirlpool, hash); sph_fugue512_init(&ctx_fugue); sph_fugue512(&ctx_fugue, hash, 64); sph_fugue512_close(&ctx_fugue, hash); if (hash[0] & 0x8) { sph_echo512_init(&ctx_echo); sph_echo512(&ctx_echo, hash, 64); sph_echo512_close(&ctx_echo, hash); } else { sph_luffa512_init(&ctx_luffa); sph_luffa512(&ctx_luffa, hash, 64); sph_luffa512_close(&ctx_luffa, hash); } sph_shabal512_init(&ctx_shabal); 
sph_shabal512(&ctx_shabal, hash, 64); sph_shabal512_close(&ctx_shabal, hash); sph_skein512_init(&ctx_skein); sph_skein512(&ctx_skein, hash, 64); sph_skein512_close(&ctx_skein, hash); if (hash[0] & 0x8) { sph_shabal512_init(&ctx_shabal); sph_shabal512(&ctx_shabal, hash, 64); sph_shabal512_close(&ctx_shabal, hash); } else { sph_whirlpool_init(&ctx_whirlpool); sph_whirlpool(&ctx_whirlpool, hash, 64); sph_whirlpool_close(&ctx_whirlpool, hash); } sph_shabal512_init(&ctx_shabal); sph_shabal512(&ctx_shabal, hash, 64); sph_shabal512_close(&ctx_shabal, hash); if (hash[0] & 0x8) { sph_hamsi512_init(&ctx_hamsi); sph_hamsi512(&ctx_hamsi, hash, 64); sph_hamsi512_close(&ctx_hamsi, hash); } else { sph_luffa512_init(&ctx_luffa); sph_luffa512(&ctx_luffa, hash, 64); sph_luffa512_close(&ctx_luffa, hash); } memcpy(output, hash, 32); }
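/* Summary of the CPU reference chain implemented by bastionhash() above, which the
   GPU path mirrors with its bastion_filter2()/bastion_merge2() branch splits:
     HEFTY1 -> luffa -> (fugue | skein) -> whirlpool -> fugue -> (echo | luffa)
            -> shabal -> skein -> (shabal | whirlpool) -> shabal -> (hamsi | luffa),
   each branch selected by bit 3 of the current hash's first byte (hash[0] & 0x8),
   with the final 256-bit digest taken from the first 32 bytes of the 512-bit state. */

/* Illustrative usage sketch (the function name is a placeholder, not part of the miner):
   computing the CPU reference digest for an 80-byte big-endian block header, the same
   call pattern used for validation inside scanhash_bastion(). */
static void bastionhash_usage_sketch(const uchar* header80)
{
    uint32_t digest[8];                 // 256-bit result, like vhash[8] above
    bastionhash(digest, header80);      // header80 must point to 80 encoded bytes
    (void) digest;
}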
#include <random> #include <vector> #include <raft/cudart_utils.h> #include <raft/cuda_utils.cuh> #include <raft/handle.hpp> #include <raft/random/rng.hpp> #include "test_utils.h" #include <cuml/common/logger.hpp> #include <linalg/block.cuh> namespace MLCommon { namespace LinAlg { using namespace std; /* GEMM */ template <typename T> struct BlockGemmInputs { int m, k, n; bool transa, transb; int batch_size; int vec_len; T eps; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const BlockGemmInputs<T>& dims) { return os; } template <typename Policy, typename T> __global__ void block_gemm_test_kernel( bool transa, bool transb, int m, int n, int k, T alpha, const T* a, const T* b, T* c) { __shared__ MLCommon::LinAlg::GemmStorage<Policy, T> gemm_storage; _block_gemm<Policy>(transa, transb, m, n, k, alpha, a + m * k * blockIdx.x, b + k * n * blockIdx.x, c + m * n * blockIdx.x, gemm_storage); } template <typename Policy, typename T> class BlockGemmTest : public ::testing::TestWithParam<BlockGemmInputs<T>> { protected: void basicTest() { raft::handle_t handle; params = ::testing::TestWithParam<BlockGemmInputs<T>>::GetParam(); rmm::device_uvector<T> a(params.m * params.k * params.batch_size, handle.get_stream()); rmm::device_uvector<T> b(params.k * params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> c(params.m * params.n * params.batch_size, handle.get_stream()); std::vector<T> h_a(params.m * params.k * params.batch_size); std::vector<T> h_b(params.k * params.n * params.batch_size); std::vector<T> h_c_ref(params.m * params.n * params.batch_size); /* Generate random data on device */ raft::random::Rng r(params.seed); r.uniform(a.data(), params.m * params.k * params.batch_size, (T)-2, (T)2, handle.get_stream()); r.uniform(b.data(), params.k * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); /* Generate random alpha */ std::default_random_engine generator(params.seed); std::uniform_real_distribution<T> distribution(-2.0, 2.0); T alpha = distribution(generator); /* Copy to host */ raft::update_host( h_a.data(), a.data(), params.m * params.k * params.batch_size, handle.get_stream()); raft::update_host( h_b.data(), b.data(), params.k * params.n * params.batch_size, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); /* Compute using tested prims */ block_gemm_test_kernel<Policy> <<<params.batch_size, Policy::BlockSize, 0, handle.get_stream()>>>(params.transa, params.transb, params.m, params.n, params.k, alpha, a.data(), b.data(), c.data()); /* Compute reference results */ for (int bid = 0; bid < params.batch_size; bid++) { for (int i = 0; i < params.m; i++) { for (int j = 0; j < params.n; j++) { T acc = (T)0; for (int h = 0; h < params.k; h++) { T _a = params.transa ? h_a[bid * params.m * params.k + i * params.k + h] : h_a[bid * params.m * params.k + h * params.m + i]; T _b = params.transb ? 
h_b[bid * params.k * params.n + h * params.n + j] : h_b[bid * params.k * params.n + j * params.k + h]; acc += _a * _b; } h_c_ref[bid * params.m * params.n + j * params.m + i] = alpha * acc; } } } /* Check results */ match = devArrMatchHost(h_c_ref.data(), c.data(), params.m * params.n * params.batch_size, raft::CompareApprox<T>(params.eps), handle.get_stream()); } void SetUp() override { basicTest(); } void TearDown() override {} protected: BlockGemmInputs<T> params; testing::AssertionResult match = testing::AssertionFailure(); }; const std::vector<BlockGemmInputs<float>> gemm_inputsf = { {42, 42, 42, false, false, 20, 1, 1e-4, 12345U}, {65, 10, 20, false, true, 50, 1, 1e-4, 12345U}, {5, 80, 31, true, false, 80, 1, 1e-4, 12345U}, {11, 50, 41, true, true, 100, 1, 1e-4, 12345U}, }; const std::vector<BlockGemmInputs<double>> gemm_inputsd = { {42, 42, 42, false, false, 20, 1, 1e-4, 12345U}, {65, 10, 20, false, true, 50, 1, 1e-4, 12345U}, {5, 80, 31, true, false, 80, 1, 1e-4, 12345U}, {11, 50, 41, true, true, 100, 1, 1e-4, 12345U}, }; const std::vector<BlockGemmInputs<float>> gemm_inputsf_vec2 = { {30, 34, 16, false, false, 20, 2, 1e-4, 12345U}, {10, 42, 20, false, true, 20, 2, 1e-4, 12345U}, {14, 8, 22, true, false, 20, 2, 1e-4, 12345U}, {56, 72, 28, true, true, 20, 2, 1e-4, 12345U}, }; const std::vector<BlockGemmInputs<double>> gemm_inputsd_vec2 = { {30, 34, 16, false, false, 20, 2, 1e-4, 12345U}, {10, 42, 20, false, true, 20, 2, 1e-4, 12345U}, {14, 8, 22, true, false, 20, 2, 1e-4, 12345U}, {56, 72, 28, true, true, 20, 2, 1e-4, 12345U}, }; typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 4, 16, 4>, float> BlockGemmTestF_1_16_1_4_16_4; TEST_P(BlockGemmTestF_1_16_1_4_16_4, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 4, 16, 4>, double> BlockGemmTestD_1_16_1_4_16_4; TEST_P(BlockGemmTestD_1_16_1_4_16_4, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 4, 32, 8>, float> BlockGemmTestF_1_32_1_4_32_8; TEST_P(BlockGemmTestF_1_32_1_4_32_8, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 4, 32, 8>, double> BlockGemmTestD_1_32_1_4_32_8; TEST_P(BlockGemmTestD_1_32_1_4_32_8, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 16, 64, 4>, float> BlockGemmTestF_1_32_1_16_64_4; TEST_P(BlockGemmTestF_1_32_1_16_64_4, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<1, 32, 1, 16, 64, 4>, double> BlockGemmTestD_1_32_1_16_64_4; TEST_P(BlockGemmTestD_1_32_1_16_64_4, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 16, 128, 2>, float> BlockGemmTestF_1_16_1_16_128_2; TEST_P(BlockGemmTestF_1_16_1_16_128_2, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<1, 16, 1, 16, 128, 2>, double> BlockGemmTestD_1_16_1_16_128_2; TEST_P(BlockGemmTestD_1_16_1_16_128_2, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<2, 32, 2, 2, 16, 16>, float> BlockGemmTestF_2_32_2_2_16_16; TEST_P(BlockGemmTestF_2_32_2_2_16_16, Result) { EXPECT_TRUE(match); } typedef BlockGemmTest<BlockGemmPolicy<2, 32, 2, 2, 16, 16>, double> BlockGemmTestD_2_32_2_2_16_16; TEST_P(BlockGemmTestD_2_32_2_2_16_16, Result) { EXPECT_TRUE(match); } INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestF_1_16_1_4_16_4, ::testing::ValuesIn(gemm_inputsf)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestD_1_16_1_4_16_4, ::testing::ValuesIn(gemm_inputsd)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestF_1_32_1_4_32_8, 
::testing::ValuesIn(gemm_inputsf)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestD_1_32_1_4_32_8, ::testing::ValuesIn(gemm_inputsd)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestF_1_32_1_16_64_4, ::testing::ValuesIn(gemm_inputsf)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestD_1_32_1_16_64_4, ::testing::ValuesIn(gemm_inputsd)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestF_1_16_1_16_128_2, ::testing::ValuesIn(gemm_inputsf)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestD_1_16_1_16_128_2, ::testing::ValuesIn(gemm_inputsd)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestF_2_32_2_2_16_16, ::testing::ValuesIn(gemm_inputsf_vec2)); INSTANTIATE_TEST_CASE_P(BlockGemmTests, BlockGemmTestD_2_32_2_2_16_16, ::testing::ValuesIn(gemm_inputsd_vec2)); /* GEMV */ template <typename T> struct BlockGemvInputs { bool preload; int m, n; int batch_size; T eps; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const BlockGemvInputs<T>& dims) { return os; } template <typename Policy, typename T> __global__ void block_gemv_test_kernel( int m, int n, T alpha, const T* a, const T* x, T* y, bool preload) { __shared__ MLCommon::LinAlg::GemvStorage<Policy, T> gemv_storage; extern __shared__ char dyna_shared_mem[]; T* shared_vec = (T*)dyna_shared_mem; if (preload) { _block_gemv<Policy, true>(m, n, alpha, a + m * n * blockIdx.x, x + n * blockIdx.x, y + m * blockIdx.x, gemv_storage, shared_vec); } else { for (int i = threadIdx.x; i < n; i += Policy::BlockSize) { shared_vec[i] = x[n * blockIdx.x + i]; } __syncthreads(); _block_gemv<Policy, false>( m, n, alpha, a + m * n * blockIdx.x, shared_vec, y + m * blockIdx.x, gemv_storage); } } template <typename Policy, typename T> class BlockGemvTest : public ::testing::TestWithParam<BlockGemvInputs<T>> { protected: void basicTest() { raft::handle_t handle; params = ::testing::TestWithParam<BlockGemvInputs<T>>::GetParam(); rmm::device_uvector<T> a(params.m * params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> y(params.m * params.batch_size, handle.get_stream()); std::vector<T> h_a(params.m * params.n * params.batch_size); std::vector<T> h_x(params.n * params.batch_size); std::vector<T> h_y_ref(params.m * params.batch_size); /* Generate random data on device */ raft::random::Rng r(params.seed); r.uniform(a.data(), params.m * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); /* Generate random alpha */ std::default_random_engine generator(params.seed); std::uniform_real_distribution<T> distribution(-2.0, 2.0); T alpha = distribution(generator); /* Copy to host */ raft::update_host( h_a.data(), a.data(), params.m * params.n * params.batch_size, handle.get_stream()); raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); /* Compute using tested prims */ int shared_mem_size = params.n * sizeof(T); block_gemv_test_kernel<Policy> <<<params.batch_size, Policy::BlockSize, shared_mem_size, handle.get_stream()>>>( params.m, params.n, alpha, a.data(), x.data(), y.data(), params.preload); /* Compute reference results */ for (int bid = 0; bid < params.batch_size; bid++) { for (int i = 0; i < params.m; i++) { T acc = (T)0; for (int j = 0; j < params.n; j++) { acc += h_a[bid * params.m * params.n + j * params.m 
+ i] * h_x[bid * params.n + j]; } h_y_ref[bid * params.m + i] = alpha * acc; } } /* Check results */ match = devArrMatchHost(h_y_ref.data(), y.data(), params.m * params.batch_size, raft::CompareApprox<T>(params.eps), handle.get_stream()); } void SetUp() override { basicTest(); } void TearDown() override {} protected: BlockGemvInputs<T> params; testing::AssertionResult match = testing::AssertionFailure(); }; const std::vector<BlockGemvInputs<float>> gemv_inputsf = {{true, 42, 42, 20, 1e-4, 12345U}, {true, 65, 10, 50, 1e-4, 12345U}, {false, 5, 80, 100, 1e-4, 12345U}}; const std::vector<BlockGemvInputs<double>> gemv_inputsd = {{true, 42, 42, 20, 1e-4, 12345U}, {true, 65, 10, 50, 1e-4, 12345U}, {false, 5, 80, 100, 1e-4, 12345U}}; typedef BlockGemvTest<BlockGemvPolicy<16, 4>, float> BlockGemvTestF_16_4; TEST_P(BlockGemvTestF_16_4, Result) { EXPECT_TRUE(match); } typedef BlockGemvTest<BlockGemvPolicy<16, 4>, double> BlockGemvTestD_16_4; TEST_P(BlockGemvTestD_16_4, Result) { EXPECT_TRUE(match); } typedef BlockGemvTest<BlockGemvPolicy<32, 8>, float> BlockGemvTestF_32_8; TEST_P(BlockGemvTestF_32_8, Result) { EXPECT_TRUE(match); } typedef BlockGemvTest<BlockGemvPolicy<32, 8>, double> BlockGemvTestD_32_8; TEST_P(BlockGemvTestD_32_8, Result) { EXPECT_TRUE(match); } typedef BlockGemvTest<BlockGemvPolicy<128, 2>, float> BlockGemvTestF_128_2; TEST_P(BlockGemvTestF_128_2, Result) { EXPECT_TRUE(match); } typedef BlockGemvTest<BlockGemvPolicy<128, 2>, double> BlockGemvTestD_128_2; TEST_P(BlockGemvTestD_128_2, Result) { EXPECT_TRUE(match); } INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_16_4, ::testing::ValuesIn(gemv_inputsf)); INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_16_4, ::testing::ValuesIn(gemv_inputsd)); INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_32_8, ::testing::ValuesIn(gemv_inputsf)); INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_32_8, ::testing::ValuesIn(gemv_inputsd)); INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestF_128_2, ::testing::ValuesIn(gemv_inputsf)); INSTANTIATE_TEST_CASE_P(BlockGemvTests, BlockGemvTestD_128_2, ::testing::ValuesIn(gemv_inputsd)); /* DOT */ template <typename T> struct BlockDotInputs { bool broadcast; int n; int batch_size; T eps; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const BlockDotInputs<T>& dims) { return os; } template <int BlockSize, bool Broadcast, typename T> __global__ void block_dot_test_kernel(int n, const T* x, const T* y, T* d_dot) { __shared__ ReductionStorage<BlockSize, T> reduction_storage; T dot_ = _block_dot<BlockSize, Broadcast>(n, x + n * blockIdx.x, y + n * blockIdx.x, reduction_storage); if (!Broadcast && threadIdx.x == 0) d_dot[blockIdx.x] = dot_; else if (Broadcast && threadIdx.x == BlockSize - 1) d_dot[blockIdx.x] = dot_; } template <typename T> class BlockDotTest : public ::testing::TestWithParam<BlockDotInputs<T>> { protected: void basicTest() { raft::handle_t handle; params = ::testing::TestWithParam<BlockDotInputs<T>>::GetParam(); rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> y(params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> dot_dev(params.batch_size, handle.get_stream()); std::vector<T> h_x(params.n * params.batch_size); std::vector<T> h_y(params.n * params.batch_size); std::vector<T> h_dot_ref(params.batch_size, (T)0); /* Generate random data on device */ raft::random::Rng r(params.seed); r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, 
handle.get_stream()); r.uniform(y.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); /* Copy to host */ raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream()); raft::update_host(h_y.data(), y.data(), params.n * params.batch_size, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); /* Compute using tested prims */ constexpr int BlockSize = 64; if (params.broadcast) block_dot_test_kernel<BlockSize, true> <<<params.batch_size, BlockSize, 0, handle.get_stream()>>>( params.n, x.data(), y.data(), dot_dev.data()); else block_dot_test_kernel<BlockSize, false> <<<params.batch_size, BlockSize, 0, handle.get_stream()>>>( params.n, x.data(), y.data(), dot_dev.data()); /* Compute reference results */ for (int bid = 0; bid < params.batch_size; bid++) { for (int i = 0; i < params.n; i++) { h_dot_ref[bid] += h_x[bid * params.n + i] * h_y[bid * params.n + i]; } } /* Check results */ match = devArrMatchHost(h_dot_ref.data(), dot_dev.data(), params.batch_size, raft::CompareApprox<T>(params.eps), handle.get_stream()); } void SetUp() override { basicTest(); } void TearDown() override {} protected: BlockDotInputs<T> params; testing::AssertionResult match = testing::AssertionFailure(); }; const std::vector<BlockDotInputs<float>> dot_inputsf = {{true, 9, 20, 1e-4, 12345U}, {true, 65, 50, 1e-4, 12345U}, {true, 200, 100, 1e-4, 12345U}, {false, 200, 100, 1e-4, 12345U}}; const std::vector<BlockDotInputs<double>> dot_inputsd = {{true, 9, 20, 1e-4, 12345U}, {true, 65, 50, 1e-4, 12345U}, {true, 200, 100, 1e-4, 12345U}, {false, 200, 100, 1e-4, 12345U}}; typedef BlockDotTest<float> BlockDotTestF; TEST_P(BlockDotTestF, Result) { EXPECT_TRUE(match); } typedef BlockDotTest<double> BlockDotTestD; TEST_P(BlockDotTestD, Result) { EXPECT_TRUE(match); } INSTANTIATE_TEST_CASE_P(BlockDotTests, BlockDotTestF, ::testing::ValuesIn(dot_inputsf)); INSTANTIATE_TEST_CASE_P(BlockDotTests, BlockDotTestD, ::testing::ValuesIn(dot_inputsd)); /* x*A*x' */ template <typename T> struct BlockXaxtInputs { bool broadcast; bool preload; int n; int batch_size; T eps; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const BlockXaxtInputs<T>& dims) { return os; } template <int BlockSize, bool Broadcast, typename T> __global__ void block_xAxt_test_kernel(int n, const T* x, const T* A, T* d_res, bool preload) { extern __shared__ char dyna_shared_mem[]; T* shared_vec = (T*)dyna_shared_mem; __shared__ ReductionStorage<BlockSize, T> reduction_storage; T res_; if (preload) { res_ = _block_xAxt<BlockSize, Broadcast, true>( n, x + n * blockIdx.x, A + n * n * blockIdx.x, reduction_storage, shared_vec); } else { for (int i = threadIdx.x; i < n; i += BlockSize) { shared_vec[i] = x[n * blockIdx.x + i]; } __syncthreads(); res_ = _block_xAxt<BlockSize, Broadcast, false>( n, shared_vec, A + n * n * blockIdx.x, reduction_storage); } if (!Broadcast && threadIdx.x == 0) d_res[blockIdx.x] = res_; else if (Broadcast && threadIdx.x == BlockSize - 1) d_res[blockIdx.x] = res_; } template <typename T> class BlockXaxtTest : public ::testing::TestWithParam<BlockXaxtInputs<T>> { protected: void basicTest() { raft::handle_t handle; params = ::testing::TestWithParam<BlockXaxtInputs<T>>::GetParam(); rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> A(params.n * params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> res_dev(params.batch_size, handle.get_stream()); std::vector<T> 
h_x(params.n * params.batch_size); std::vector<T> h_A(params.n * params.n * params.batch_size); std::vector<T> h_res_ref(params.batch_size, (T)0); /* Generate random data on device */ raft::random::Rng r(params.seed); r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); r.uniform(A.data(), params.n * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); /* Copy to host */ raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream()); raft::update_host( h_A.data(), A.data(), params.n * params.n * params.batch_size, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); /* Compute using tested prims */ constexpr int BlockSize = 64; int shared_mem_size = params.n * sizeof(T); if (params.broadcast) block_xAxt_test_kernel<BlockSize, true> <<<params.batch_size, BlockSize, shared_mem_size, handle.get_stream()>>>( params.n, x.data(), A.data(), res_dev.data(), params.preload); else block_xAxt_test_kernel<BlockSize, false> <<<params.batch_size, BlockSize, shared_mem_size, handle.get_stream()>>>( params.n, x.data(), A.data(), res_dev.data(), params.preload); /* Compute reference results */ for (int bid = 0; bid < params.batch_size; bid++) { for (int i = 0; i < params.n; i++) { T acc = 0; for (int j = 0; j < params.n; j++) { acc += h_A[bid * params.n * params.n + j * params.n + i] * h_x[bid * params.n + j]; } h_res_ref[bid] += acc * h_x[bid * params.n + i]; } } /* Check results */ match = devArrMatchHost(h_res_ref.data(), res_dev.data(), params.batch_size, raft::CompareApprox<T>(params.eps), handle.get_stream()); } void SetUp() override { basicTest(); } void TearDown() override {} protected: BlockXaxtInputs<T> params; testing::AssertionResult match = testing::AssertionFailure(); }; const std::vector<BlockXaxtInputs<float>> xAxt_inputsf = {{true, true, 9, 20, 1e-2, 12345U}, {true, true, 65, 50, 1e-2, 12345U}, {true, true, 200, 100, 1e-2, 12345U}, {false, true, 200, 100, 1e-2, 12345U}, {true, false, 200, 100, 1e-2, 12345U}}; const std::vector<BlockXaxtInputs<double>> xAxt_inputsd = {{true, true, 9, 20, 1e-4, 12345U}, {true, true, 65, 50, 1e-4, 12345U}, {true, true, 200, 100, 1e-4, 12345U}, {false, true, 200, 100, 1e-4, 12345U}, {true, false, 200, 100, 1e-2, 12345U}}; typedef BlockXaxtTest<float> BlockXaxtTestF; TEST_P(BlockXaxtTestF, Result) { EXPECT_TRUE(match); } typedef BlockXaxtTest<double> BlockXaxtTestD; TEST_P(BlockXaxtTestD, Result) { EXPECT_TRUE(match); } INSTANTIATE_TEST_CASE_P(BlockXaxtTests, BlockXaxtTestF, ::testing::ValuesIn(xAxt_inputsf)); INSTANTIATE_TEST_CASE_P(BlockXaxtTests, BlockXaxtTestD, ::testing::ValuesIn(xAxt_inputsd)); /* y=alpha*x */ template <typename T> struct BlockAxInputs { int n; int batch_size; T eps; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const BlockAxInputs<T>& dims) { return os; } template <typename T> __global__ void block_ax_test_kernel(int n, T alpha, const T* x, T* y) { _block_ax(n, alpha, x + n * blockIdx.x, y + n * blockIdx.x); } template <typename T> class BlockAxTest : public ::testing::TestWithParam<BlockAxInputs<T>> { protected: void basicTest() { raft::handle_t handle; params = ::testing::TestWithParam<BlockAxInputs<T>>::GetParam(); rmm::device_uvector<T> x(params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> y(params.n * params.batch_size, handle.get_stream()); std::vector<T> h_x(params.n * params.batch_size); std::vector<T> h_y_ref(params.n * params.batch_size, (T)0); /* Generate 
random data on device */ raft::random::Rng r(params.seed); r.uniform(x.data(), params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); /* Generate random alpha */ std::default_random_engine generator(params.seed); std::uniform_real_distribution<T> distribution(-2.0, 2.0); T alpha = distribution(generator); /* Copy to host */ raft::update_host(h_x.data(), x.data(), params.n * params.batch_size, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); /* Compute using tested prims */ constexpr int BlockSize = 64; block_ax_test_kernel<<<params.batch_size, BlockSize, 0, handle.get_stream()>>>( params.n, alpha, x.data(), y.data()); /* Compute reference results */ for (int bid = 0; bid < params.batch_size; bid++) { for (int i = 0; i < params.n; i++) { h_y_ref[bid * params.n + i] = alpha * h_x[bid * params.n + i]; } } /* Check results */ match = devArrMatchHost(h_y_ref.data(), y.data(), params.n * params.batch_size, raft::CompareApprox<T>(params.eps), handle.get_stream()); } void SetUp() override { basicTest(); } void TearDown() override {} protected: BlockAxInputs<T> params; testing::AssertionResult match = testing::AssertionFailure(); }; const std::vector<BlockAxInputs<float>> ax_inputsf = { {9, 20, 1e-4, 12345U}, {65, 50, 1e-4, 12345U}, {200, 100, 1e-4, 12345U}}; const std::vector<BlockAxInputs<double>> ax_inputsd = { {9, 20, 1e-4, 12345U}, {65, 50, 1e-4, 12345U}, {200, 100, 1e-4, 12345U}}; typedef BlockAxTest<float> BlockAxTestF; TEST_P(BlockAxTestF, Result) { EXPECT_TRUE(match); } typedef BlockAxTest<double> BlockAxTestD; TEST_P(BlockAxTestD, Result) { EXPECT_TRUE(match); } INSTANTIATE_TEST_CASE_P(BlockAxTests, BlockAxTestF, ::testing::ValuesIn(ax_inputsf)); INSTANTIATE_TEST_CASE_P(BlockAxTests, BlockAxTestD, ::testing::ValuesIn(ax_inputsd)); /* Covariance stability */ template <typename T> struct BlockCovStabilityInputs { int n; int batch_size; T eps; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const BlockCovStabilityInputs<T>& dims) { return os; } template <typename CovPolicy, typename T> __global__ void block_cov_stability_test_kernel(int n, const T* in, T* out) { __shared__ CovStabilityStorage<CovPolicy, T> cov_stability_storage; _block_covariance_stability<CovPolicy>( n, in + n * n * blockIdx.x, out + n * n * blockIdx.x, cov_stability_storage); } template <typename CovPolicy, typename T> class BlockCovStabilityTest : public ::testing::TestWithParam<BlockCovStabilityInputs<T>> { protected: void basicTest() { raft::handle_t handle; params = ::testing::TestWithParam<BlockCovStabilityInputs<T>>::GetParam(); rmm::device_uvector<T> d_in(params.n * params.n * params.batch_size, handle.get_stream()); rmm::device_uvector<T> d_out(params.n * params.n * params.batch_size, handle.get_stream()); std::vector<T> h_in(params.n * params.n * params.batch_size); std::vector<T> h_out(params.n * params.n * params.batch_size); /* Generate random data on device */ raft::random::Rng r(params.seed); r.uniform( d_in.data(), params.n * params.n * params.batch_size, (T)-2, (T)2, handle.get_stream()); /* Copy to host */ raft::update_host( h_in.data(), d_in.data(), params.n * params.n * params.batch_size, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); /* Compute using tested prims */ block_cov_stability_test_kernel<CovPolicy> <<<params.batch_size, CovPolicy::BlockSize, 0, handle.get_stream()>>>( params.n, d_in.data(), d_out.data()); /* Compute reference results */ for (int bid = 0; bid < 
params.batch_size; bid++) { for (int i = 0; i < params.n - 1; i++) { for (int j = i + 1; j < params.n; j++) { T val = 0.5 * (h_in[bid * params.n * params.n + j * params.n + i] + h_in[bid * params.n * params.n + i * params.n + j]); h_out[bid * params.n * params.n + j * params.n + i] = val; h_out[bid * params.n * params.n + i * params.n + j] = val; } } for (int i = 0; i < params.n; i++) { h_out[bid * params.n * params.n + i * params.n + i] = abs(h_in[bid * params.n * params.n + i * params.n + i]); } } /* Check results */ match = devArrMatchHost(h_out.data(), d_out.data(), params.n * params.n * params.batch_size, raft::CompareApprox<T>(params.eps), handle.get_stream()); } void SetUp() override { basicTest(); } void TearDown() override {} protected: BlockCovStabilityInputs<T> params; testing::AssertionResult match = testing::AssertionFailure(); }; const std::vector<BlockCovStabilityInputs<float>> cs_inputsf = { {15, 4, 1e-4, 12345U}, {33, 10, 1e-4, 12345U}, {220, 130, 1e-4, 12345U}, }; const std::vector<BlockCovStabilityInputs<double>> cs_inputsd = { {15, 4, 1e-4, 12345U}, {33, 10, 1e-4, 12345U}, {220, 130, 1e-4, 12345U}, }; typedef BlockCovStabilityTest<BlockPolicy<1, 1, 8, 4>, float> BlockCovStabilityTestF_1_1_8_4; TEST_P(BlockCovStabilityTestF_1_1_8_4, Result) { EXPECT_TRUE(match); } typedef BlockCovStabilityTest<BlockPolicy<1, 1, 8, 4>, double> BlockCovStabilityTestD_1_1_8_4; TEST_P(BlockCovStabilityTestD_1_1_8_4, Result) { EXPECT_TRUE(match); } typedef BlockCovStabilityTest<BlockPolicy<1, 4, 32, 8>, float> BlockCovStabilityTestF_1_4_32_8; TEST_P(BlockCovStabilityTestF_1_4_32_8, Result) { EXPECT_TRUE(match); } typedef BlockCovStabilityTest<BlockPolicy<1, 4, 32, 8>, double> BlockCovStabilityTestD_1_4_32_8; TEST_P(BlockCovStabilityTestD_1_4_32_8, Result) { EXPECT_TRUE(match); } INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests, BlockCovStabilityTestF_1_1_8_4, ::testing::ValuesIn(cs_inputsf)); INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests, BlockCovStabilityTestD_1_1_8_4, ::testing::ValuesIn(cs_inputsd)); INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests, BlockCovStabilityTestF_1_4_32_8, ::testing::ValuesIn(cs_inputsf)); INSTANTIATE_TEST_CASE_P(BlockCovStabilityTests, BlockCovStabilityTestD_1_4_32_8, ::testing::ValuesIn(cs_inputsd)); } // namespace LinAlg } // namespace MLCommon
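/* Reference math shared by the GEMM checks above (column-major, batched):
     C_b(i, j) = alpha * sum_h op(A_b)(i, h) * op(B_b)(h, j)
   A minimal single-batch host sketch of that reduction, mirroring the indexing used in
   BlockGemmTest::basicTest(); the function name is illustrative only: */
template <typename T>
void host_gemm_reference_sketch(
  bool transa, bool transb, int m, int n, int k, T alpha, const T* a, const T* b, T* c)
{
  for (int i = 0; i < m; i++) {
    for (int j = 0; j < n; j++) {
      T acc = (T)0;
      for (int h = 0; h < k; h++) {
        T _a = transa ? a[i * k + h] : a[h * m + i];  // column-major A, optionally transposed
        T _b = transb ? b[h * n + j] : b[j * k + h];  // column-major B, optionally transposed
        acc += _a * _b;
      }
      c[j * m + i] = alpha * acc;  // column-major output
    }
  }
}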
#define real float // Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW) // we assume BHWD format in inputImages // we assume BHW(YX) format on grids __device__ void getTopLeft(float x, int width, int& point, float& weight) { /* for interpolation : stores in point and weight : - the x-coordinate of the pixel on the left (or y-coordinate of the upper pixel) - the weight for interpolating */ float xcoord = (x + 1) * (width - 1) / 2; point = floor(xcoord); weight = 1 - (xcoord - point); } __device__ bool between(int value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } __device__ void sumReduceShMem(volatile float s[]) { /* obviously only works for 32 elements */ /* sums up a shared memory array of 32 elements, stores it in s[0] */ /* whole warp can then read first element (broadcasting) */ if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; } if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; } if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; } if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; } if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; } } // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) __global__ void bilinearSamplingFromGrid(const int nthreads, float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth, float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth, float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth, int inputImages_channels, int inputImages_height, int inputImages_width, int output_channels, int output_height, int output_width, int output_batchsize, int roiPerImage) { CUDA_KERNEL_LOOP(index, nthreads) { const int xOut = index % output_width; const int yOut = (index / output_width) % output_height; const int cOut = (index / output_width / output_height) % output_channels; const int b = index / output_width / output_height / output_channels; const int width = inputImages_width; const int height = inputImages_height; const int b_input = b / roiPerImage; float yf = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth]; float xf = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + 1]; int yInTopLeft, xInTopLeft; float yWeightTopLeft, xWeightTopLeft; getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft); getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft); // const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut; const int outAddress = output_strideBatch * b + output_strideChannels * cOut + output_strideHeight * yOut + xOut; const int inTopLeftAddress = inputImages_strideBatch * b_input + inputImages_strideChannels * cOut + inputImages_strideHeight * yInTopLeft + xInTopLeft; const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth; const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight; const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth; float v=0; float inTopLeft=0; float inTopRight=0; float inBottomLeft=0; float inBottomRight=0; bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, 
height-1); bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1); bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1); bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1); if (!topLeftIsIn && !topRightIsIn && !bottomLeftIsIn && !bottomRightIsIn) continue; if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress]; if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress]; if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress]; if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress]; v = xWeightTopLeft * yWeightTopLeft * inTopLeft + (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight + xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft + (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight; output_data[outAddress] = v; } } __global__ void backwardBilinearSampling(const int nthreads, float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth, float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideHeight, int gradInputImages_strideWidth, float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth, float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideHeight, int gradGrids_strideWidth, float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideHeight, int gradOutput_strideWidth, int inputImages_channels, int inputImages_height, int inputImages_width, int gradOutput_channels, int gradOutput_height, int gradOutput_width, int gradOutput_batchsize, int roiPerImage) { CUDA_KERNEL_LOOP(index, nthreads) { const int xOut = index % gradOutput_width; const int yOut = (index / gradOutput_width) % gradOutput_height; const int cOut = (index / gradOutput_width / gradOutput_height) % gradOutput_channels; const int b = index / gradOutput_width / gradOutput_height / gradOutput_channels; const int b_input = b / roiPerImage; const int width = inputImages_width; const int height = inputImages_height; float yf = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth]; float xf = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + 1]; int yInTopLeft, xInTopLeft; float yWeightTopLeft, xWeightTopLeft; getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft); getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft); const int inTopLeftAddress = inputImages_strideBatch * b_input + inputImages_strideChannels * cOut + inputImages_strideHeight * yInTopLeft + xInTopLeft; const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth; const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight; const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth; const int gradInputImagesTopLeftAddress = gradInputImages_strideBatch * b_input + gradInputImages_strideChannels * cOut + gradInputImages_strideHeight * yInTopLeft + xInTopLeft; const int gradInputImagesTopRightAddress = gradInputImagesTopLeftAddress + gradInputImages_strideWidth; const int gradInputImagesBottomLeftAddress = gradInputImagesTopLeftAddress + gradInputImages_strideHeight; const int gradInputImagesBottomRightAddress = gradInputImagesBottomLeftAddress + gradInputImages_strideWidth; const int 
gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideChannels * cOut + gradOutput_strideHeight * yOut + xOut; float topLeftDotProduct = 0; float topRightDotProduct = 0; float bottomLeftDotProduct = 0; float bottomRightDotProduct = 0; bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1); bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1); bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1); bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1); float gradOutValue = gradOutput_data[gradOutputAddress]; // bool between(int value, int lowerBound, int upperBound) if(topLeftIsIn) { float inTopLeft = inputImages_data[inTopLeftAddress]; topLeftDotProduct += inTopLeft * gradOutValue; atomicAdd(&gradInputImages_data[gradInputImagesTopLeftAddress], xWeightTopLeft * yWeightTopLeft * gradOutValue); } if(topRightIsIn) { float inTopRight = inputImages_data[inTopRightAddress]; topRightDotProduct += inTopRight * gradOutValue; atomicAdd(&gradInputImages_data[gradInputImagesTopRightAddress], (1 - xWeightTopLeft) * yWeightTopLeft * gradOutValue); } if(bottomLeftIsIn) { float inBottomLeft = inputImages_data[inBottomLeftAddress]; bottomLeftDotProduct += inBottomLeft * gradOutValue; atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftAddress], xWeightTopLeft * (1 - yWeightTopLeft) * gradOutValue); } if(bottomRightIsIn) { float inBottomRight = inputImages_data[inBottomRightAddress]; bottomRightDotProduct += inBottomRight * gradOutValue; atomicAdd(&gradInputImages_data[gradInputImagesBottomRightAddress], (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * gradOutValue); } } } #ifdef __cplusplus extern "C" { #endif int BilinearSamplerBHWD_updateOutput_cuda_kernel(/*output->size[1]*/int oc, /*output->size[3]*/int ow, /*output->size[2]*/int oh, /*output->size[0]*/int ob, /*THCudaTensor_size(state, inputImages, 1)*/int ic, /*THCudaTensor_size(state, inputImages, 2)*/int ih, /*THCudaTensor_size(state, inputImages, 3)*/int iw, /*THCudaTensor_size(state, inputImages, 0)*/int ib, /*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int ish, int isw, /*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsh, int gsw, /*THCudaTensor *output*/float *output, int osb, int osc, int osh, int osw, /*THCState_getCurrentStream(state)*/cudaStream_t stream) { const int kThreadsPerBlock = 1024; int output_size = ob * oh * ow * oc; cudaError_t err; int roiPerImage = ob / ib; // printf("forward pass\n"); bilinearSamplingFromGrid<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, /*THCudaTensor_data(state, inputImages)*/inputImages, /*THCudaTensor_stride(state, inputImages, 0)*/isb, /*THCudaTensor_stride(state, inputImages, 3)*/isc, /*THCudaTensor_stride(state, inputImages, 1)*/ish, /*THCudaTensor_stride(state, inputImages, 2)*/isw, /*THCudaTensor_data(state, grids)*/grids, /*THCudaTensor_stride(state, grids, 0)*/gsb, /*THCudaTensor_stride(state, grids, 3)*/gsc, /*THCudaTensor_stride(state, grids, 1)*/gsh, /*THCudaTensor_stride(state, grids, 2)*/gsw, /*THCudaTensor_data(state, output)*/output, /*THCudaTensor_stride(state, output, 0)*/osb, /*THCudaTensor_stride(state, output, 3)*/osc, /*THCudaTensor_stride(state, output, 1)*/osh, /*THCudaTensor_stride(state, output, 2)*/osw, /*THCudaTensor_size(state, inputImages, 3)*/ic, /*THCudaTensor_size(state, inputImages, 1)*/ih, /*THCudaTensor_size(state, 
inputImages, 2)*/iw, /*THCudaTensor_size(state, output, 3)*/oc, /*THCudaTensor_size(state, output, 1)*/oh, /*THCudaTensor_size(state, output, 2)*/ow, /*THCudaTensor_size(state, output, 0)*/ob, /*Number of rois per image*/roiPerImage); // check for errors err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return 0; } return 1; } int BilinearSamplerBHWD_updateGradInput_cuda_kernel(/*gradOutput->size[1]*/int goc, /*gradOutput->size[3]*/int gow, /*gradOutput->size[2]*/int goh, /*gradOutput->size[0]*/int gob, /*THCudaTensor_size(state, inputImages, 1)*/int ic, /*THCudaTensor_size(state, inputImages, 2)*/int ih, /*THCudaTensor_size(state, inputImages, 3)*/int iw, /*THCudaTensor_size(state, inputImages, 0)*/int ib, /*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int ish, int isw, /*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsh, int gsw, /*THCudaTensor *gradInputImages*/float *gradInputImages, int gisb, int gisc, int gish, int gisw, /*THCudaTensor *gradGrids*/float *gradGrids, int ggsb, int ggsc, int ggsh, int ggsw, /*THCudaTensor *gradOutput*/float *gradOutput, int gosb, int gosc, int gosh, int gosw, /*THCState_getCurrentStream(state)*/cudaStream_t stream) { const int kThreadsPerBlock = 1024; int output_size = gob * goh * gow * goc; cudaError_t err; int roiPerImage = gob / ib; // printf("%d %d %d %d\n", gob, goh, gow, goc); // printf("%d %d %d %d\n", ib, ih, iw, ic); // printf("backward pass\n"); backwardBilinearSampling<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, /*THCudaTensor_data(state, inputImages)*/inputImages, /*THCudaTensor_stride(state, inputImages, 0)*/isb, /*THCudaTensor_stride(state, inputImages, 3)*/isc, /*THCudaTensor_stride(state, inputImages, 1)*/ish, /*THCudaTensor_stride(state, inputImages, 2)*/isw, /*THCudaTensor_data(state, gradInputImages)*/gradInputImages, /*THCudaTensor_stride(state, gradInputImages, 0)*/gisb, /*THCudaTensor_stride(state, gradInputImages, 3)*/gisc, /*THCudaTensor_stride(state, gradInputImages, 1)*/gish, /*THCudaTensor_stride(state, gradInputImages, 2)*/gisw, /*THCudaTensor_data(state, grids)*/grids, /*THCudaTensor_stride(state, grids, 0)*/gsb, /*THCudaTensor_stride(state, grids, 3)*/gsc, /*THCudaTensor_stride(state, grids, 1)*/gsh, /*THCudaTensor_stride(state, grids, 2)*/gsw, /*THCudaTensor_data(state, gradGrids)*/gradGrids, /*THCudaTensor_stride(state, gradGrids, 0)*/ggsb, /*THCudaTensor_stride(state, gradGrids, 3)*/ggsc, /*THCudaTensor_stride(state, gradGrids, 1)*/ggsh, /*THCudaTensor_stride(state, gradGrids, 2)*/ggsw, /*THCudaTensor_data(state, gradOutput)*/gradOutput, /*THCudaTensor_stride(state, gradOutput, 0)*/gosb, /*THCudaTensor_stride(state, gradOutput, 3)*/gosc, /*THCudaTensor_stride(state, gradOutput, 1)*/gosh, /*THCudaTensor_stride(state, gradOutput, 2)*/gosw, /*THCudaTensor_size(state, inputImages, 3)*/ic, /*THCudaTensor_size(state, inputImages, 1)*/ih, /*THCudaTensor_size(state, inputImages, 2)*/iw, /*THCudaTensor_size(state, gradOutput, 3)*/goc, /*THCudaTensor_size(state, gradOutput, 1)*/goh, /*THCudaTensor_size(state, gradOutput, 2)*/gow, /*THCudaTensor_size(state, gradOutput, 0)*/gob, /*Number of rois per image*/roiPerImage); // check for errors err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in BilinearSampler.updateGradInput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return 0; } return 1; } #ifdef __cplusplus } 
#endif
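/* Note on the sampling convention used above: grid values are normalized to [-1, 1]
   and getTopLeft() maps them to pixel space as xcoord = (x + 1) * (width - 1) / 2,
   so x = -1 lands on pixel 0 and x = +1 on pixel width - 1. A host-side sketch of the
   same mapping, for reference only (it mirrors the __device__ helper above): */
#include <math.h>  // floorf, used only by the sketch below
static inline void getTopLeftHostSketch(float x, int width, int& point, float& weight)
{
    float xcoord = (x + 1.0f) * (width - 1) / 2.0f; // normalized [-1, 1] -> [0, width-1]
    point  = (int)floorf(xcoord);                   // left / top integer sample index
    weight = 1.0f - (xcoord - point);               // bilinear weight of that sample
}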
template<typename scalar_t> __device__ scalar_t sign(scalar_t x0, scalar_t y0, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2) { return (x0 - x2) * (y1 - y2) - (x1 - x2) * (y0 - y2); } template<typename scalar_t> __device__ bool PointInTriangle(scalar_t xp, scalar_t yp, scalar_t x0, scalar_t y0, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2) { scalar_t d1, d2, d3; bool has_neg, has_pos; d1 = sign<scalar_t>(xp, yp, x0, y0, x1, y1); d2 = sign<scalar_t>(xp, yp, x1, y1, x2, y2); d3 = sign<scalar_t>(xp, yp, x2, y2, x0, y0); has_neg = (d1 < 0) || (d2 < 0) || (d3 < 0); has_pos = (d1 > 0) || (d2 > 0) || (d3 > 0); return !(has_neg && has_pos); } template <typename scalar_t> __global__ void preprocess_baycentric_kernel(const scalar_t* rearranged_mesh_vertex_xy, const int bs, const int nf, scalar_t* baycentric_coeffs) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= bs * nf) return; const scalar_t* triangle = &rearranged_mesh_vertex_xy[i*3*2]; scalar_t* baycentric_coeff = &baycentric_coeffs[i*9]; scalar_t p[3][2]; for (int num = 0; num < 3; num++) { p[num][0] = triangle[2 * num]; p[num][1] = triangle[2 * num + 1]; } // compute face_inv scalar_t face_inv[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; scalar_t face_inv_denominator = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); for (int k = 0; k < 9; k++) { face_inv[k] /= face_inv_denominator; } // set to global memory for (int k = 0; k < 9; k++) { baycentric_coeff[k] = face_inv[k]; } } template<typename scalar_t> __global__ void register_triangle_to_pixels_kernel(const scalar_t* rearranged_mesh_vertex_xy, const int bs, const int nf, const int image_H, const int image_W, const int nMaxTrianglesPerPixel, int* map_pixel_to_triangles) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= bs * nf) return; const int bn = i / nf; const int fn = i % nf; const int is_H = image_H; const int is_W = image_W; const scalar_t* triangle = &rearranged_mesh_vertex_xy[i * 3 * 2]; const scalar_t x0=triangle[0]; const scalar_t y0=triangle[1]; const scalar_t x1=triangle[2]; const scalar_t y1=triangle[3]; const scalar_t x2=triangle[4]; const scalar_t y2=triangle[5]; const int x_tl_i = min(min(x0, x1), x2); const int x_br_i = max(max(x0, x1), x2); const int y_tl_i = min(min(y0, y1), y2); const int y_br_i = max(max(y0, y1), y2); // iterate over all pixels within bounding box, to check if they are inside triangle for(int yi = y_tl_i - 1; yi < y_br_i + 2; yi++) { for(int xi = x_tl_i - 1; xi < x_br_i + 2; xi++) { if (xi > is_W-1 || yi > is_H-1 || xi < 0 || yi < 0) continue; if (!PointInTriangle<scalar_t>(xi - 1e-5, yi - 1e-4, x0, y0, x1, y1, x2, y2)) continue; // register to global memory int index = bn * is_H * is_W + yi * is_W + xi; int start_index = 0; while (atomicCAS(&map_pixel_to_triangles[index * nMaxTrianglesPerPixel + start_index], -1, fn) != -1) { start_index = start_index + 1; if (start_index == nMaxTrianglesPerPixel) { printf("register faces %d to pixels reaches maximum ...\n", i); break; } } } } } template<typename scalar_t> __global__ void forward_flow_renderer_kernel(const scalar_t* rearranged_mesh_vertex_xy, const scalar_t* rearranged_mesh_vertex_texture, const scalar_t* baycentric_coeffs, const int* map_pixel_to_triangles, const int bs, const int n_triangles_per_batch, const int image_H, 
const int image_W, const int nMaxTrianglesPerPixel, const int nchan_texture, scalar_t* rendered_image, scalar_t* rendered_weight, int * rendered_face_index, float* rendered_mask) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= bs * image_H * image_W) return; const int is_H = image_H; const int is_W = image_W; const int bn = i / (is_H * is_W); const int pn = i % (is_H * is_W); const int yi = pn / is_W; const int xi = pn % is_W; const int* fns = &map_pixel_to_triangles[i * nMaxTrianglesPerPixel]; for(int k = 0; k < nMaxTrianglesPerPixel; k++) { const int fn = fns[k]; if (fn == -1) break; const scalar_t* triangle_texture = &rearranged_mesh_vertex_texture[bn*n_triangles_per_batch*3*nchan_texture + fn*3*nchan_texture]; const scalar_t* baycentric_coeff = &baycentric_coeffs[bn*n_triangles_per_batch*9 + fn*9]; const scalar_t fi00 = baycentric_coeff[0]; const scalar_t fi01 = baycentric_coeff[1]; const scalar_t fi02 = baycentric_coeff[2]; const scalar_t fi10 = baycentric_coeff[3]; const scalar_t fi11 = baycentric_coeff[4]; const scalar_t fi12 = baycentric_coeff[5]; const scalar_t fi20 = baycentric_coeff[6]; const scalar_t fi21 = baycentric_coeff[7]; const scalar_t fi22 = baycentric_coeff[8]; scalar_t w[3]; w[0] = fi00 * xi + fi01 * yi + fi02; w[1] = fi10 * xi + fi11 * yi + fi12; w[2] = fi20 * xi + fi21 * yi + fi22; for(int j = 0; j < nchan_texture; j++) { rendered_image[nchan_texture*i+j] = w[0]*triangle_texture[j] + w[1]*triangle_texture[j+nchan_texture] + w[2]*triangle_texture[j+2*nchan_texture]; } if(rendered_mask != NULL) { rendered_mask[i] = 1.0; } rendered_face_index[i]=fn; rendered_weight[3*i]=w[0]; rendered_weight[3*i+1]=w[1]; rendered_weight[3*i+2]=w[2]; } } template <typename scalar_t> __global__ void backward_rgb_map_cuda_kernel(const scalar_t* rearranged_mesh_vertex_xy, const scalar_t* rearranged_mesh_vertex_texture, const scalar_t* rendered_image, const int32_t* rendered_face_index, const scalar_t* rendered_weight, const scalar_t* grad_rendered_image, const int nchan_texture, const size_t batch_size, const size_t n_triangles_per_batch, const int image_size_H, const int image_size_W, scalar_t* grad_rearranged_mesh_vertex_xy, scalar_t* grad_rearranged_mesh_vertex_texture) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size_H * image_size_W) { return; } const int fn = rendered_face_index[i]; if (0 <= fn) { const int nf = n_triangles_per_batch; const int is_H = image_size_H; const int is_W = image_size_W; const int bn = i / (is_H * is_W); const scalar_t* v_xy = &rearranged_mesh_vertex_xy[(bn*nf + fn)*6]; const scalar_t* v_rgb = &rearranged_mesh_vertex_texture[(bn*nf + fn)*3*nchan_texture]; const scalar_t* weight = &rendered_weight[i * 3]; scalar_t* grad_pos = &grad_rearranged_mesh_vertex_xy[(bn*nf+fn)*6]; scalar_t* grad_color = &grad_rearranged_mesh_vertex_texture[(bn*nf+fn)*3*nchan_texture]; const scalar_t* grad_rgb = &grad_rendered_image[i*nchan_texture]; /* derivative wrt rgb */ for (int k =0; k < 3; k++) { for(int j=0; j < nchan_texture; j++) { atomicAdd(&grad_color[nchan_texture * k + j], grad_rgb[j] * weight[k]); } } /* derivative wrt x, y */ const scalar_t x0 = v_xy[0]; const scalar_t y0 = v_xy[1]; const scalar_t x1 = v_xy[2]; const scalar_t y1 = v_xy[3]; const scalar_t x2 = v_xy[4]; const scalar_t y2 = v_xy[5]; const int pn = i % (is_H * is_W); const int y = pn / is_W; const int x = pn % is_W; const scalar_t dD_dx0 = y1 - y2; const scalar_t dD_dy0 = x2 - x1; const scalar_t dD_dx1 = y2 - y0; const scalar_t dD_dy1 = x0 - x2; const 
scalar_t dD_dx2 = y0 - y1; const scalar_t dD_dy2 = x1 - x0; const scalar_t dF0_dx1 = y2 - y; const scalar_t dF0_dy1 = x - x2; const scalar_t dF0_dx2 = y - y1; const scalar_t dF0_dy2 = x1 - x; const scalar_t dF1_dx0 = y - y2; const scalar_t dF1_dy0 = x2 - x; const scalar_t dF1_dx2 = y0 - y; const scalar_t dF1_dy2 = x - x0; const scalar_t dF2_dx0 = y1 - y; const scalar_t dF2_dy0 = x - x1; const scalar_t dF2_dx1 = y - y0; const scalar_t dF2_dy1 = x0 - x; const scalar_t D = x0 * (y1 - y2) + x1 * (y2 - y0) + x2 * (y0 - y1); const scalar_t dw0_dx0 = -weight[0] * dD_dx0 / D; const scalar_t dw0_dy0 = -weight[0] * dD_dy0 / D; const scalar_t dw0_dx1 = (dF0_dx1 - weight[0] * dD_dx1) / D; const scalar_t dw0_dy1 = (dF0_dy1 - weight[0] * dD_dy1) / D; const scalar_t dw0_dx2 = (dF0_dx2 - weight[0] * dD_dx2) / D; const scalar_t dw0_dy2 = (dF0_dy2 - weight[0] * dD_dy2) / D; const scalar_t dw1_dx0 = (dF1_dx0 - weight[1] * dD_dx0) / D; const scalar_t dw1_dy0 = (dF1_dy0 - weight[1] * dD_dy0) / D; const scalar_t dw1_dx1 = (-weight[1] * dD_dx1) / D; const scalar_t dw1_dy1 = (-weight[1] * dD_dy1) / D; const scalar_t dw1_dx2 = (dF1_dx2 - weight[1] * dD_dx2) / D; const scalar_t dw1_dy2 = (dF1_dy2 - weight[1] * dD_dy2) / D; const scalar_t dw2_dx0 = (dF2_dx0 - weight[2] * dD_dx0) / D; const scalar_t dw2_dy0 = (dF2_dy0 - weight[2] * dD_dy0) / D; const scalar_t dw2_dx1 = (dF2_dx1 - weight[2] * dD_dx1) / D; const scalar_t dw2_dy1 = (dF2_dy1 - weight[2] * dD_dy1) / D; const scalar_t dw2_dx2 = (-weight[2] * dD_dx2) / D; const scalar_t dw2_dy2 = (-weight[2] * dD_dy2) / D; for(int j=0; j < nchan_texture; j++) { scalar_t dIj_dx0 = v_rgb[j] * dw0_dx0 + v_rgb[j+nchan_texture] * dw1_dx0 + v_rgb[j+2*nchan_texture] * dw2_dx0; scalar_t dIj_dy0 = v_rgb[j] * dw0_dy0 + v_rgb[j+nchan_texture] * dw1_dy0 + v_rgb[j+2*nchan_texture] * dw2_dy0; scalar_t dIj_dx1 = v_rgb[j] * dw0_dx1 + v_rgb[j+nchan_texture] * dw1_dx1 + v_rgb[j+2*nchan_texture] * dw2_dx1; scalar_t dIj_dy1 = v_rgb[j] * dw0_dy1 + v_rgb[j+nchan_texture] * dw1_dy1 + v_rgb[j+2*nchan_texture] * dw2_dy1; scalar_t dIj_dx2 = v_rgb[j] * dw0_dx2 + v_rgb[j+nchan_texture] * dw1_dx2 + v_rgb[j+2*nchan_texture] * dw2_dx2; scalar_t dIj_dy2 = v_rgb[j] * dw0_dy2 + v_rgb[j+nchan_texture] * dw1_dy2 + v_rgb[j+2*nchan_texture] * dw2_dy2; atomicAdd(&grad_pos[0], grad_rgb[j] * dIj_dx0); atomicAdd(&grad_pos[1], grad_rgb[j] * dIj_dy0); atomicAdd(&grad_pos[2], grad_rgb[j] * dIj_dx1); atomicAdd(&grad_pos[3], grad_rgb[j] * dIj_dy1); atomicAdd(&grad_pos[4], grad_rgb[j] * dIj_dx2); atomicAdd(&grad_pos[5], grad_rgb[j] * dIj_dy2); } } } void forward_flow_renderer(const float* rearranged_mesh_vertex_xy, const float* rearranged_mesh_vertex_texture, const int batchSize, const int nTrianglesPerBatch, const int imageH, const int imageW, const int nMaxTrianglesPerPixel, const int nChannelsTexture, float* bufferBaycentricCoeffs, int* bufferMapPixelToTriangles, float* rendered_image, float* rendered_weight, int * rendered_face_index, float* rendered_mask) { const int threads = 512; const dim3 blocks_1 ((batchSize * nTrianglesPerBatch - 1) / threads +1); preprocess_baycentric_kernel<float><<<blocks_1, threads>>>( rearranged_mesh_vertex_xy, batchSize, nTrianglesPerBatch, bufferBaycentricCoeffs); cudaDeviceSynchronize(); register_triangle_to_pixels_kernel<float><<<blocks_1, threads>>>( rearranged_mesh_vertex_xy, batchSize, nTrianglesPerBatch, imageH, imageW, nMaxTrianglesPerPixel, bufferMapPixelToTriangles); cudaDeviceSynchronize(); const dim3 blocks_3 ((batchSize * imageH * imageW - 1) / threads +1); 
forward_flow_renderer_kernel<float><<<blocks_3, threads>>>( rearranged_mesh_vertex_xy, rearranged_mesh_vertex_texture, bufferBaycentricCoeffs, bufferMapPixelToTriangles, batchSize, nTrianglesPerBatch, imageH, imageW, nMaxTrianglesPerPixel, nChannelsTexture, rendered_image, rendered_weight, rendered_face_index, rendered_mask); cudaDeviceSynchronize(); } void backward_flow_renderer(const float* rearranged_mesh_vertex_xy, const float* rearranged_mesh_vertex_texture, const float* rendered_image, const int* rendered_face_index, const float* rendered_weight, const float* grad_rendered_image, const int nchan_texture, const int batch_size, const int n_triangles_per_batch, const int image_size_H, const int image_size_W, float* grad_rearranged_mesh_vertex_xy, float* grad_rearranged_mesh_vertex_texture) { const int threads = 512; const dim3 blocks ((batch_size * image_size_H * image_size_W - 1) / threads + 1); backward_rgb_map_cuda_kernel<float><<<blocks, threads>>>(rearranged_mesh_vertex_xy, rearranged_mesh_vertex_texture, rendered_image, rendered_face_index, rendered_weight, grad_rendered_image, nchan_texture, batch_size, n_triangles_per_batch, image_size_H, image_size_W, grad_rearranged_mesh_vertex_xy, grad_rearranged_mesh_vertex_texture); cudaDeviceSynchronize(); }
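// --- Illustrative usage sketch (not part of the original renderer source) ---
// A minimal host-side driver for forward_flow_renderer() above. Buffer sizes are
// inferred from how the kernels index memory; the helper name and its arguments
// (bs, nf, H, W, K = nMaxTrianglesPerPixel, C = texture channels) are assumptions
// for illustration only.
#include <cuda_runtime.h>

void forward_flow_renderer_example(const float* d_vertex_xy,      // [bs, nf, 3, 2], device memory
                                   const float* d_vertex_texture, // [bs, nf, 3, C], device memory
                                   int bs, int nf, int H, int W, int K, int C) {
  float *d_bary = nullptr, *d_image = nullptr, *d_weight = nullptr, *d_mask = nullptr;
  int *d_pix2tri = nullptr, *d_face_idx = nullptr;
  cudaMalloc(&d_bary,     sizeof(float) * bs * nf * 9);
  cudaMalloc(&d_pix2tri,  sizeof(int)   * bs * H * W * K);
  cudaMalloc(&d_image,    sizeof(float) * bs * H * W * C);
  cudaMalloc(&d_weight,   sizeof(float) * bs * H * W * 3);
  cudaMalloc(&d_face_idx, sizeof(int)   * bs * H * W);
  cudaMalloc(&d_mask,     sizeof(float) * bs * H * W);
  // register_triangle_to_pixels_kernel atomicCAS-es against -1 and the backward
  // pass skips pixels with a negative face index, so both int buffers need a -1
  // fill (0xFF in every byte == -1 for 32-bit ints); the outputs start cleared.
  cudaMemset(d_pix2tri,  0xFF, sizeof(int)   * bs * H * W * K);
  cudaMemset(d_face_idx, 0xFF, sizeof(int)   * bs * H * W);
  cudaMemset(d_image,    0,    sizeof(float) * bs * H * W * C);
  cudaMemset(d_mask,     0,    sizeof(float) * bs * H * W);
  forward_flow_renderer(d_vertex_xy, d_vertex_texture, bs, nf, H, W, K, C,
                        d_bary, d_pix2tri, d_image, d_weight, d_face_idx, d_mask);
  // ... cudaMemcpy the rendered buffers back as needed, then release everything ...
  cudaFree(d_bary); cudaFree(d_pix2tri); cudaFree(d_image);
  cudaFree(d_weight); cudaFree(d_face_idx); cudaFree(d_mask);
}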
#pragma once #include <gunrock/oprtr/1D_oprtr/for_all.cuh> #include <gunrock/oprtr/1D_oprtr/for_each.cuh> namespace gunrock { namespace oprtr { /** * @brief Add the source vector to the destination vector with the same length * * @tparam T datatype of the vector. * * @param[in] d_dst Destination device-side vector * @param[in] d_src Source device-side vector * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Set_Kernel(ValueT *d_dst, T *d_src, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_dst[idx] = d_src[idx]; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Set(ValueT *elements, T *values, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, values, [] __host__ __device__(ValueT & element, T value) { element = value; }, length, target, stream); } /** * @brief Add the source vector to the destination vector with the same length * * @tparam T datatype of the vector. * * @param[in] d_dst Destination device-side vector * @param[in] d_src Source device-side vector * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Add_Kernel(ValueT *d_dst, T *d_src, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_dst[idx] += d_src[idx]; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Add(ValueT *elements, T *values, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, values, [] __host__ __device__(ValueT & element, T value) { element += value; }, length, target, stream); } /** * @brief Minus the source vector to the destination vector with the same length * * @tparam T datatype of the vector. * * @param[in] d_dst Destination device-side vector * @param[in] d_src Source device-side vector * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Minus_Kernel(ValueT *d_dst, T *d_src, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_dst[idx] -= d_src[idx]; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Minus(ValueT *elements, T *values, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, values, [] __host__ __device__(ValueT & element, T value) { element -= value; }, length, target, stream); } /** * @brief Multiply the source vector to the destination vector with the same * length * * @tparam T datatype of the vector. 
* * @param[in] d_dst Destination device-side vector * @param[in] d_src Source device-side vector * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Mul_Kernel(ValueT *d_dst, T *d_src, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_dst[idx] *= d_src[idx]; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Mul(ValueT *elements, T *values, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, values, [] __host__ __device__(ValueT & element, T value) { element *= value; }, length, target, stream); } /** * @brief Divide the source vector to the destination vector with the same * length * TODO: divide by zero check * * @tparam T datatype of the vector. * * @param[in] d_dst Destination device-side vector * @param[in] d_src Source device-side vector * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Div_Kernel(ValueT *d_dst, T *d_src, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_dst[idx] /= d_src[idx]; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Div(ValueT *elements, T *values, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, values, [] __host__ __device__(ValueT & element, T value) { element /= value; }, length, target, stream); } /** * @brief Add the source vector to the destination vector with the same length * * @tparam T datatype of the vector. * * @param[in] d_dst Destination device-side vector * @param[in] d_src1 Source device-side vector 1 * @param[in] d_src2 Source device-side vector 2 * @param[in] scale Scale factor * @param[in] length Vector length */ template <typename ValueT, typename T1, typename T2, typename T3, typename SizeT> __global__ void Mad_Kernel(ValueT *d_dst, T1 *d_src1, T2 *d_src2, T3 scale, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_dst[idx] = d_src1[idx] * scale + d_src2[idx]; } } template <typename ValueT, typename T1, typename T2, typename T3, typename SizeT> cudaError_t Mad(ValueT *elements, T1 *src1s, T2 *src2s, T3 scale, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, src1s, src2s, [scale] __host__ __device__(ValueT & element, T1 src1, T2 src2) { element = src1 * scale + src2; }, length, target, stream); } } // namespace oprtr namespace util { template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename SizeT_in, typename ValueT_in, ArrayFlag FLAG_in, unsigned int cudaHostRegisterFlag_in> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Set( Array1D<SizeT_in, ValueT_in, FLAG_in, cudaHostRegisterFlag_in> &array_in, SizeT length, Location target, cudaStream_t stream) { return ForEach( array_in, [] __host__ __device__(ValueT & element, ValueT_in element_in) { element = element_in; }); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename SizeT_in, typename ValueT_in, ArrayFlag FLAG_in, unsigned int cudaHostRegisterFlag_in> cudaError_t Array1D<SizeT, ValueT, FLAG, 
cudaHostRegisterFlag>::Add( Array1D<SizeT_in, ValueT_in, FLAG_in, cudaHostRegisterFlag_in> &array_in, SizeT length, Location target, cudaStream_t stream) { return ForEach( array_in, [] __host__ __device__(ValueT & element, ValueT_in element_in) { element += element_in; }); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename SizeT_in, typename ValueT_in, ArrayFlag FLAG_in, unsigned int cudaHostRegisterFlag_in> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Minus( Array1D<SizeT_in, ValueT_in, FLAG_in, cudaHostRegisterFlag_in> &array_in, SizeT length, Location target, cudaStream_t stream) { return ForEach( array_in, [] __host__ __device__(ValueT & element, ValueT_in element_in) { element -= element_in; }); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename SizeT_in, typename ValueT_in, ArrayFlag FLAG_in, unsigned int cudaHostRegisterFlag_in> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Mul( Array1D<SizeT_in, ValueT_in, FLAG_in, cudaHostRegisterFlag_in> &array_in, SizeT length, Location target, cudaStream_t stream) { return ForEach( array_in, [] __host__ __device__(ValueT & element, ValueT_in element_in) { element *= element_in; }); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename SizeT_in, typename ValueT_in, ArrayFlag FLAG_in, unsigned int cudaHostRegisterFlag_in> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Div( Array1D<SizeT_in, ValueT_in, FLAG_in, cudaHostRegisterFlag_in> &array_in, SizeT length, Location target, cudaStream_t stream) { return ForEach( array_in, [] __host__ __device__(ValueT & element, ValueT_in element_in) { element /= element_in; }); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename SizeT_in1, typename ValueT_in1, ArrayFlag FLAG_in1, unsigned int cudaHostRegisterFlag_in1, typename SizeT_in2, typename ValueT_in2, ArrayFlag FLAG_in2, unsigned int cudaHostRegisterFlag_in2, typename T> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Mad( Array1D<SizeT_in1, ValueT_in1, FLAG_in1, cudaHostRegisterFlag_in1> &array_in1, Array1D<SizeT_in2, ValueT_in2, FLAG_in2, cudaHostRegisterFlag_in2> &array_in2, T scale, SizeT length, Location target, cudaStream_t stream) { return ForEach( array_in1, array_in2, [scale] __host__ __device__(ValueT & element, ValueT_in1 element_in1, ValueT_in2 element_in2) { element = element_in1 * scale + element_in2; }); } } // namespace util } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
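// --- Illustrative usage sketch (not part of the original header) ---
// Shows how the raw-pointer element-wise wrappers above might be called on
// device buffers; the function name and the assumption that d_y, d_a and d_b
// were allocated with cudaMalloc and each hold `length` floats are illustrative.
#include <cuda_runtime.h>

inline cudaError_t mad_example(float *d_y, float *d_a, float *d_b, float scale,
                               int length, cudaStream_t stream = 0) {
  // Computes d_y[i] = d_a[i] * scale + d_b[i] for i in [0, length) on the GPU,
  // via the Mad() wrapper defined above (which dispatches through ForEach).
  return gunrock::oprtr::Mad(d_y, d_a, d_b, scale, length,
                             gunrock::util::DEVICE, stream);
}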
#include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/interpolate.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/cuda/utils/nd_index.cuh> #include <nbla/variable.hpp> namespace nbla { inline float compute_scale(int isize, int osize, bool align_corners) { return (osize <= 1) ? 0.0f : (align_corners ? float(isize - 1) / (osize - 1) : float(isize) / osize); } inline float compute_scale_for_nn(int isize, int osize, bool align_corners, bool half_pixel_for_nn) { return half_pixel_for_nn ? isize / static_cast<float>(osize) : compute_scale(isize, osize, align_corners); } __device__ __forceinline__ float get_src_index(float scale, int dst_index, bool half_pixel) { return half_pixel ? fmaxf(0.0f, scale * (float(dst_index) + 0.5f) - 0.5f) : scale * dst_index; } __device__ __forceinline__ float get_src_index_for_nn(float scale, int dst_index, bool half_pixel, bool half_pixel_for_nn) { return half_pixel_for_nn ? scale * (dst_index + 0.5f) : get_src_index(scale, dst_index, half_pixel); } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_1d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_x1 = make_int2(x1, oc); const auto nd_idx_x2 = make_int2(x2, oc); const auto idx_lx0 = device_2d_to_flat(nd_idx_x1, istride); const auto idx_lx1 = device_2d_to_flat(nd_idx_x2, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { const T val0 = lx0 * src[idx_lx0]; const T val1 = lx1 * src[idx_lx1]; dst[index] = val0 + val1; } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_2d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? 
nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_y1x1 = make_int3(y1, x1, oc); const auto nd_idx_y1x2 = make_int3(y1, x2, oc); const auto nd_idx_y2x1 = make_int3(y2, x1, oc); const auto nd_idx_y2x2 = make_int3(y2, x2, oc); const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride); const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride); const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride); const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { const T val0 = lx0 * src[idx_ly0x0]; const T val1 = lx1 * src[idx_ly0x1]; const T val2 = lx0 * src[idx_ly1x0]; const T val3 = lx1 * src[idx_ly1x1]; dst[index] = ly0 * (val0 + val1) + ly1 * (val2 + val3); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_3d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index(sz, oz, half_pixel); const auto z1 = static_cast<int>(fz); const auto z2 = min(z1 + 1, id - 1); const auto lz1 = static_cast<T>(fz - z1); const auto lz0 = static_cast<T>(1) - lz1; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc); const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc); const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc); const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc); const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc); const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc); const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc); const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc); const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride); const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride); const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride); const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride); const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride); const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride); const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride); const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride); for (; outer_size--; src += src_inner_size, dst 
+= dst_inner_size) { const T val0 = lx0 * src[idx_lz0y0x0]; const T val1 = lx1 * src[idx_lz0y0x1]; const T val2 = lx0 * src[idx_lz0y1x0]; const T val3 = lx1 * src[idx_lz0y1x1]; const T val4 = lx0 * src[idx_lz1y0x0]; const T val5 = lx1 * src[idx_lz1y0x1]; const T val6 = lx0 * src[idx_lz1y1x0]; const T val7 = lx1 * src[idx_lz1y1x1]; const T val8 = ly0 * (val0 + val1) + ly1 * (val2 + val3); const T val9 = ly0 * (val4 + val5) + ly1 * (val6 + val7); dst[index] = lz0 * val8 + lz1 * val9; } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_1d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_x1 = make_int2(x1, oc); const auto nd_idx_x2 = make_int2(x2, oc); const auto idx_lx1 = device_2d_to_flat(nd_idx_x1, istride); const auto idx_lx2 = device_2d_to_flat(nd_idx_x2, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_lx1, lx0 * g); atomic_add(g_x + idx_lx2, lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_2d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? 
nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_y1x1 = make_int3(y1, x1, oc); const auto nd_idx_y1x2 = make_int3(y1, x2, oc); const auto nd_idx_y2x1 = make_int3(y2, x1, oc); const auto nd_idx_y2x2 = make_int3(y2, x2, oc); const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride); const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride); const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride); const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_ly0x0, ly0 * lx0 * g); atomic_add(g_x + idx_ly0x1, ly0 * lx1 * g); atomic_add(g_x + idx_ly1x0, ly1 * lx0 * g); atomic_add(g_x + idx_ly1x1, ly1 * lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_3d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index(sz, oz, half_pixel); const auto z1 = static_cast<int>(fz); const auto z2 = min(z1 + 1, id - 1); const auto lz1 = static_cast<T>(fz - z1); const auto lz0 = static_cast<T>(1) - lz1; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc); const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc); const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc); const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc); const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc); const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc); const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc); const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc); const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride); const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride); const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride); const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride); const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride); const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride); const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride); const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride); for (; outer_size--; g_x += 
g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_lz0y0x0, lz0 * ly0 * lx0 * g); atomic_add(g_x + idx_lz0y0x1, lz0 * ly0 * lx1 * g); atomic_add(g_x + idx_lz0y1x0, lz0 * ly1 * lx0 * g); atomic_add(g_x + idx_lz0y1x1, lz0 * ly1 * lx1 * g); atomic_add(g_x + idx_lz1y0x0, lz1 * ly0 * lx0 * g); atomic_add(g_x + idx_lz1y0x1, lz1 * ly0 * lx1 * g); atomic_add(g_x + idx_lz1y1x0, lz1 * ly1 * lx0 * g); atomic_add(g_x + idx_lz1y1x1, lz1 * ly1 * lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_1d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_x = make_int2(ix, oc); const auto idx_x = device_2d_to_flat(nd_idx_x, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_x]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_2d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_yx = make_int3(iy, ix, oc); const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_yx]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_3d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? 
nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index_for_nn(sz, oz, half_pixel, half_pixel_for_nn); const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iz = min(static_cast<int>(fz), id - 1); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_zyx = make_int4(iz, iy, ix, oc); const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_zyx]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_1d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_x = make_int2(ix, oc); const auto idx_x = device_2d_to_flat(nd_idx_x, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_x, g_y[index]); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_2d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_yx = make_int3(iy, ix, oc); const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_yx, g_y[index]); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_3d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? 
nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index_for_nn(sz, oz, half_pixel, half_pixel_for_nn); const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iz = min(static_cast<int>(fz), id - 1); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_zyx = make_int4(iz, iy, ix, oc); const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_zyx, g_y[index]); } } } template <typename T> void InterpolateCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); const int ndim = inputs[0]->ndim(); if (this->output_size_.size() == 1) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw : iw; const int dst_inner_size = this->channel_last_ ? oc * ow : ow; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = iw; const auto istride = this->channel_last_ ? ic : 1; const auto ostride = this->channel_last_ ? oc : 1; if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_1d<Tcu, true> : kernel_linear_interpolate_1d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_1d<Tcu, true> : kernel_nearest_interpolate_1d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 2) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih; const int dst_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = make_int2(ih, iw); const auto istride = this->channel_last_ ? 
make_int2(iw * ic, ic) : make_int2(iw, 1); const auto ostride = this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_2d<Tcu, true> : kernel_linear_interpolate_2d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_2d<Tcu, true> : kernel_nearest_interpolate_2d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 3) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 4]; const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 4]; const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw * ih * id : iw * ih * id; const int dst_inner_size = this->channel_last_ ? oc * ow * oh * od : ow * oh * od; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = make_int3(id, ih, iw); const auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic) : make_int3(ih * iw, iw, 1); const auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc) : make_int3(oh * ow, ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); const float sz = compute_scale(id, od, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_3d<Tcu, true> : kernel_linear_interpolate_3d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); const float sz = compute_scale_for_nn(id, od, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? 
kernel_nearest_interpolate_3d<Tcu, true> : kernel_nearest_interpolate_3d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_, this->half_pixel_for_nn_); } } } template <typename T> void InterpolateCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0])) { return; } cuda_set_device(this->device_); auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); const int ndim = inputs[0]->ndim(); if (this->output_size_.size() == 1) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw : iw; const int g_y_inner_size = this->channel_last_ ? oc * ow : ow; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = iw; const auto istride = this->channel_last_ ? ic : 1; const auto ostride = this->channel_last_ ? oc : 1; if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_1d_backward<Tcu, true> : kernel_linear_interpolate_1d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_1d_backward<Tcu, true> : kernel_nearest_interpolate_1d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 2) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih; const int g_y_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = make_int2(ih, iw); const auto istride = this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1); const auto ostride = this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); auto kernel = this->channel_last_ ? 
kernel_linear_interpolate_2d_backward<Tcu, true> : kernel_linear_interpolate_2d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_2d_backward<Tcu, true> : kernel_nearest_interpolate_2d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 3) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 4]; const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 4]; const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw * ih * id : iw * ih * id; const int g_y_inner_size = this->channel_last_ ? oc * ow * oh * od : ow * oh * od; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = make_int3(id, ih, iw); const auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic) : make_int3(ih * iw, iw, 1); const auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc) : make_int3(oh * ow, ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); const float sz = compute_scale(id, od, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_3d_backward<Tcu, true> : kernel_linear_interpolate_3d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); const float sz = compute_scale_for_nn(id, od, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_3d_backward<Tcu, true> : kernel_nearest_interpolate_3d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_, this->half_pixel_for_nn_); } } } }
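// --- Illustrative host-side reference (not part of the original file) ---
// Mirrors the 1-D linear path above: the same scale rule as compute_scale()
// and the same half-pixel source-index rule as get_src_index(), applied on the
// host. Intended only as a sanity check for the kernel's coordinate mapping;
// the function name and the use of std::vector are assumptions.
#include <algorithm>
#include <vector>

inline std::vector<float> linear_interp_1d_reference(const std::vector<float> &in,
                                                     int ow, bool align_corners,
                                                     bool half_pixel) {
  const int iw = static_cast<int>(in.size());
  // Same formula as compute_scale(iw, ow, align_corners) above.
  const float sx = (ow <= 1) ? 0.0f
                             : (align_corners ? float(iw - 1) / (ow - 1)
                                              : float(iw) / ow);
  std::vector<float> out(ow);
  for (int ox = 0; ox < ow; ++ox) {
    // Same rule as get_src_index(sx, ox, half_pixel).
    const float fx = half_pixel ? std::max(0.0f, sx * (ox + 0.5f) - 0.5f)
                                : sx * ox;
    const int x1 = static_cast<int>(fx);
    const int x2 = std::min(x1 + 1, iw - 1);
    const float lx1 = fx - x1;
    const float lx0 = 1.0f - lx1;
    out[ox] = lx0 * in[x1] + lx1 * in[x2];
  }
  return out;
}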
#include "gpu_treeshap.h" #include "tree_shap.h" const float inf = std::numeric_limits<tfloat>::infinity(); struct ShapSplitCondition { ShapSplitCondition() = default; ShapSplitCondition(tfloat feature_lower_bound, tfloat feature_upper_bound, bool is_missing_branch) : feature_lower_bound(feature_lower_bound), feature_upper_bound(feature_upper_bound), is_missing_branch(is_missing_branch) { assert(feature_lower_bound <= feature_upper_bound); } /*! Feature values >= lower and < upper flow down this path. */ tfloat feature_lower_bound; tfloat feature_upper_bound; /*! Do missing values flow down this path? */ bool is_missing_branch; // Does this instance flow down this path? __host__ __device__ bool EvaluateSplit(float x) const { // is nan if (isnan(x)) { return is_missing_branch; } return x > feature_lower_bound && x <= feature_upper_bound; } // Combine two split conditions on the same feature __host__ __device__ void Merge(const ShapSplitCondition &other) { // Combine duplicate features feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound); feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound); is_missing_branch = is_missing_branch && other.is_missing_branch; } }; // Inspired by: https://en.cppreference.com/w/cpp/iterator/size // Limited implementation of std::size fo arrays template <class T, size_t N> constexpr size_t array_size(const T (&array)[N]) noexcept { return N; } void RecurseTree( unsigned pos, const TreeEnsemble &tree, std::vector<gpu_treeshap::PathElement<ShapSplitCondition>> *tmp_path, std::vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths, size_t *path_idx, int num_outputs) { if (tree.is_leaf(pos)) { for (auto j = 0ull; j < num_outputs; j++) { auto v = tree.values[pos * num_outputs + j]; if (v == 0.0) { // The tree has no output for this class, don't bother adding the path continue; } // Go back over path, setting v, path_idx for (auto &e : *tmp_path) { e.v = v; e.group = j; e.path_idx = *path_idx; } paths->insert(paths->end(), tmp_path->begin(), tmp_path->end()); // Increment path index (*path_idx)++; } return; } // Add left split to the path unsigned left_child = tree.children_left[pos]; double left_zero_fraction = tree.node_sample_weights[left_child] / tree.node_sample_weights[pos]; // Encode the range of feature values that flow down this path tmp_path->emplace_back(0, tree.features[pos], 0, ShapSplitCondition{-inf, tree.thresholds[pos], false}, left_zero_fraction, 0.0f); RecurseTree(left_child, tree, tmp_path, paths, path_idx, num_outputs); // Add left split to the path tmp_path->back() = gpu_treeshap::PathElement<ShapSplitCondition>( 0, tree.features[pos], 0, ShapSplitCondition{tree.thresholds[pos], inf, false}, 1.0 - left_zero_fraction, 0.0f); RecurseTree(tree.children_right[pos], tree, tmp_path, paths, path_idx, num_outputs); tmp_path->pop_back(); } std::vector<gpu_treeshap::PathElement<ShapSplitCondition>> ExtractPaths(const TreeEnsemble &trees) { std::vector<gpu_treeshap::PathElement<ShapSplitCondition>> paths; size_t path_idx = 0; for (auto i = 0; i < trees.tree_limit; i++) { TreeEnsemble tree; trees.get_tree(tree, i); std::vector<gpu_treeshap::PathElement<ShapSplitCondition>> tmp_path; tmp_path.reserve(tree.max_depth); tmp_path.emplace_back(0, -1, 0, ShapSplitCondition{-inf, inf, false}, 1.0, 0.0f); RecurseTree(0, tree, &tmp_path, &paths, &path_idx, tree.num_outputs); } return paths; } class DeviceExplanationDataset { thrust::device_vector<tfloat> data; thrust::device_vector<bool> missing; size_t num_features; 
size_t num_rows; public: DeviceExplanationDataset(const ExplanationDataset &host_data, bool background_dataset = false) { num_features = host_data.M; if (background_dataset) { num_rows = host_data.num_R; data = thrust::device_vector<tfloat>( host_data.R, host_data.R + host_data.num_R * host_data.M); missing = thrust::device_vector<bool>(host_data.R_missing, host_data.R_missing + host_data.num_R * host_data.M); } else { num_rows = host_data.num_X; data = thrust::device_vector<tfloat>( host_data.X, host_data.X + host_data.num_X * host_data.M); missing = thrust::device_vector<bool>(host_data.X_missing, host_data.X_missing + host_data.num_X * host_data.M); } } class DenseDatasetWrapper { const tfloat *data; const bool *missing; int num_rows; int num_cols; public: DenseDatasetWrapper() = default; DenseDatasetWrapper(const tfloat *data, const bool *missing, int num_rows, int num_cols) : data(data), missing(missing), num_rows(num_rows), num_cols(num_cols) { } __device__ tfloat GetElement(size_t row_idx, size_t col_idx) const { auto idx = row_idx * num_cols + col_idx; if (missing[idx]) { return std::numeric_limits<tfloat>::quiet_NaN(); } return data[idx]; } __host__ __device__ size_t NumRows() const { return num_rows; } __host__ __device__ size_t NumCols() const { return num_cols; } }; DenseDatasetWrapper GetDeviceAccessor() { return DenseDatasetWrapper(data.data().get(), missing.data().get(), num_rows, num_features); } }; inline void dense_tree_path_dependent_gpu( const TreeEnsemble &trees, const ExplanationDataset &data, tfloat *out_contribs, tfloat transform(const tfloat, const tfloat)) { auto paths = ExtractPaths(trees); DeviceExplanationDataset device_data(data); DeviceExplanationDataset::DenseDatasetWrapper X = device_data.GetDeviceAccessor(); thrust::device_vector<float> phis((X.NumCols() + 1) * X.NumRows() * trees.num_outputs); gpu_treeshap::GPUTreeShap(X, paths.begin(), paths.end(), trees.num_outputs, phis.begin(), phis.end()); // Add the base offset term to bias thrust::device_vector<double> base_offset( trees.base_offset, trees.base_offset + trees.num_outputs); auto counting = thrust::make_counting_iterator(size_t(0)); auto d_phis = phis.data().get(); auto d_base_offset = base_offset.data().get(); size_t num_groups = trees.num_outputs; thrust::for_each(counting, counting + X.NumRows() * trees.num_outputs, [=] __device__(size_t idx) { size_t row_idx = idx / num_groups; size_t group = idx % num_groups; auto phi_idx = gpu_treeshap::IndexPhi( row_idx, num_groups, group, X.NumCols(), X.NumCols()); d_phis[phi_idx] += d_base_offset[group]; }); // Shap uses a slightly different layout for multiclass thrust::device_vector<float> transposed_phis(phis.size()); auto d_transposed_phis = transposed_phis.data(); thrust::for_each( counting, counting + phis.size(), [=] __device__(size_t idx) { size_t old_shape[] = {X.NumRows(), num_groups, (X.NumCols() + 1)}; size_t old_idx[array_size(old_shape)]; gpu_treeshap::FlatIdxToTensorIdx(idx, old_shape, old_idx); // Define new tensor format, switch num_groups axis to end size_t new_shape[] = {X.NumRows(), (X.NumCols() + 1), num_groups}; size_t new_idx[] = {old_idx[0], old_idx[2], old_idx[1]}; size_t transposed_idx = gpu_treeshap::TensorIdxToFlatIdx(new_shape, new_idx); d_transposed_phis[transposed_idx] = d_phis[idx]; }); thrust::copy(transposed_phis.begin(), transposed_phis.end(), out_contribs); } inline void dense_tree_independent_gpu(const TreeEnsemble &trees, const ExplanationDataset &data, tfloat *out_contribs, tfloat transform(const tfloat, const tfloat)) 
{ auto paths = ExtractPaths(trees); DeviceExplanationDataset device_data(data); DeviceExplanationDataset::DenseDatasetWrapper X = device_data.GetDeviceAccessor(); DeviceExplanationDataset background_device_data(data, true); DeviceExplanationDataset::DenseDatasetWrapper R = background_device_data.GetDeviceAccessor(); thrust::device_vector<float> phis((X.NumCols() + 1) * X.NumRows() * trees.num_outputs); gpu_treeshap::GPUTreeShapInterventional(X, R, paths.begin(), paths.end(), trees.num_outputs, phis.begin(), phis.end()); // Add the base offset term to bias thrust::device_vector<double> base_offset( trees.base_offset, trees.base_offset + trees.num_outputs); auto counting = thrust::make_counting_iterator(size_t(0)); auto d_phis = phis.data().get(); auto d_base_offset = base_offset.data().get(); size_t num_groups = trees.num_outputs; thrust::for_each(counting, counting + X.NumRows() * trees.num_outputs, [=] __device__(size_t idx) { size_t row_idx = idx / num_groups; size_t group = idx % num_groups; auto phi_idx = gpu_treeshap::IndexPhi( row_idx, num_groups, group, X.NumCols(), X.NumCols()); d_phis[phi_idx] += d_base_offset[group]; }); // Shap uses a slightly different layout for multiclass thrust::device_vector<float> transposed_phis(phis.size()); auto d_transposed_phis = transposed_phis.data(); thrust::for_each( counting, counting + phis.size(), [=] __device__(size_t idx) { size_t old_shape[] = {X.NumRows(), num_groups, (X.NumCols() + 1)}; size_t old_idx[array_size(old_shape)]; gpu_treeshap::FlatIdxToTensorIdx(idx, old_shape, old_idx); // Define new tensor format, switch num_groups axis to end size_t new_shape[] = {X.NumRows(), (X.NumCols() + 1), num_groups}; size_t new_idx[] = {old_idx[0], old_idx[2], old_idx[1]}; size_t transposed_idx = gpu_treeshap::TensorIdxToFlatIdx(new_shape, new_idx); d_transposed_phis[transposed_idx] = d_phis[idx]; }); thrust::copy(transposed_phis.begin(), transposed_phis.end(), out_contribs); } inline void dense_tree_path_dependent_interactions_gpu( const TreeEnsemble &trees, const ExplanationDataset &data, tfloat *out_contribs, tfloat transform(const tfloat, const tfloat)) { auto paths = ExtractPaths(trees); DeviceExplanationDataset device_data(data); DeviceExplanationDataset::DenseDatasetWrapper X = device_data.GetDeviceAccessor(); thrust::device_vector<float> phis((X.NumCols() + 1) * (X.NumCols() + 1) * X.NumRows() * trees.num_outputs); gpu_treeshap::GPUTreeShapInteractions(X, paths.begin(), paths.end(), trees.num_outputs, phis.begin(), phis.end()); // Add the base offset term to bias thrust::device_vector<double> base_offset( trees.base_offset, trees.base_offset + trees.num_outputs); auto counting = thrust::make_counting_iterator(size_t(0)); auto d_phis = phis.data().get(); auto d_base_offset = base_offset.data().get(); size_t num_groups = trees.num_outputs; thrust::for_each(counting, counting + X.NumRows() * num_groups, [=] __device__(size_t idx) { size_t row_idx = idx / num_groups; size_t group = idx % num_groups; auto phi_idx = gpu_treeshap::IndexPhiInteractions( row_idx, num_groups, group, X.NumCols(), X.NumCols(), X.NumCols()); d_phis[phi_idx] += d_base_offset[group]; }); // Shap uses a slightly different layout for multiclass thrust::device_vector<float> transposed_phis(phis.size()); auto d_transposed_phis = transposed_phis.data(); thrust::for_each( counting, counting + phis.size(), [=] __device__(size_t idx) { size_t old_shape[] = {X.NumRows(), num_groups, (X.NumCols() + 1), (X.NumCols() + 1)}; size_t old_idx[array_size(old_shape)]; 
gpu_treeshap::FlatIdxToTensorIdx(idx, old_shape, old_idx); // Define new tensor format, switch num_groups axis to end size_t new_shape[] = {X.NumRows(), (X.NumCols() + 1), (X.NumCols() + 1), num_groups}; size_t new_idx[] = {old_idx[0], old_idx[2], old_idx[3], old_idx[1]}; size_t transposed_idx = gpu_treeshap::TensorIdxToFlatIdx(new_shape, new_idx); d_transposed_phis[transposed_idx] = d_phis[idx]; }); thrust::copy(transposed_phis.begin(), transposed_phis.end(), out_contribs); } void dense_tree_shap_gpu(const TreeEnsemble &trees, const ExplanationDataset &data, tfloat *out_contribs, const int feature_dependence, unsigned model_transform, bool interactions) { // see what transform (if any) we have transform_f transform = get_transform(model_transform); // dispatch to the correct algorithm handler switch (feature_dependence) { case FEATURE_DEPENDENCE::independent: if (interactions) { std::cerr << "FEATURE_DEPENDENCE::independent with interactions not yet " "supported\n"; } else { dense_tree_independent_gpu(trees, data, out_contribs, transform); } return; case FEATURE_DEPENDENCE::tree_path_dependent: if (interactions) { dense_tree_path_dependent_interactions_gpu(trees, data, out_contribs, transform); } else { dense_tree_path_dependent_gpu(trees, data, out_contribs, transform); } return; case FEATURE_DEPENDENCE::global_path_dependent: std::cerr << "FEATURE_DEPENDENCE::global_path_dependent not supported\n"; return; default: std::cerr << "Unknown feature dependence option\n"; return; } }
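// --- Illustrative indexing helper (not part of the original source) ---
// The GPU paths above transpose the per-sample SHAP values into the layout
// [num_rows, num_features + 1, num_outputs] before copying them to
// out_contribs, and the base offset appears to be added in the extra (bias)
// feature slot. This hypothetical helper shows how a single contribution
// would be read back from that flat layout.
inline tfloat read_contribution(const tfloat *out_contribs, size_t num_features,
                                size_t num_outputs, size_t row, size_t feature,
                                size_t output) {
  // feature == num_features addresses the bias / expected-value slot.
  return out_contribs[(row * (num_features + 1) + feature) * num_outputs + output];
}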
namespace anakin{ namespace saber{ template <typename dtype> __global__ void decode_bbox_corner_variance_kernel(const int count, \ const dtype* loc_data, const dtype* prior_data, const dtype* variance, \ const int num_priors, const bool share_location, const int num_loc_classes, \ const int background_label_id, dtype* bbox_data) { CUDA_KERNEL_LOOP(index, count) { const int c = index % num_loc_classes; const int idx_p = (index % num_priors) * 4; const int idx = index * 4; if (!share_location && c == background_label_id) { //! Ignore background class if not share_location. return; } //! variance is encoded in target, we simply need to add the offset predictions. bbox_data[idx] = prior_data[idx_p] + loc_data[idx]; bbox_data[idx + 1] = prior_data[idx_p + 1] + loc_data[idx + 1]; bbox_data[idx + 2] = prior_data[idx_p + 2] + loc_data[idx + 2]; bbox_data[idx + 3] = prior_data[idx_p + 3] + loc_data[idx + 3]; } } template <typename dtype> __global__ void decode_bbox_corner_no_variance_kernel(const int count, \ const dtype* loc_data, const dtype* prior_data, const dtype* variance, \ const int num_priors, const bool share_location, const int num_loc_classes, \ const int background_label_id, dtype* bbox_data) { CUDA_KERNEL_LOOP(index, count) { const int c = index % num_loc_classes; const int idx_p = (index % num_priors) * 4; const int idx = index * 4; if (!share_location && c == background_label_id) { //! Ignore background class if not share_location. return; } //! variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[idx] = prior_data[idx_p] + loc_data[idx] * variance[idx_p]; bbox_data[idx + 1] = prior_data[idx_p + 1] + loc_data[idx + 1] * variance[idx_p + 1]; bbox_data[idx + 2] = prior_data[idx_p + 2] + loc_data[idx + 2] * variance[idx_p + 2]; bbox_data[idx + 3] = prior_data[idx_p + 3] + loc_data[idx + 3] * variance[idx_p + 3]; } } template <typename dtype> __global__ void decode_bbox_center_variance_kernel(const int count, \ const dtype* loc_data, const dtype* prior_data, const dtype* variance, \ const int num_priors, const bool share_location, const int num_loc_classes, \ const int background_label_id, dtype* bbox_data) { CUDA_KERNEL_LOOP(index, count) { const int c = index % num_loc_classes; const int idx_p = (index % num_priors) * 4; const int idx = index * 4; if (!share_location && c == background_label_id) { //! Ignore background class if not share_location. return; } const dtype p_xmin = prior_data[idx_p]; const dtype p_ymin = prior_data[idx_p + 1]; const dtype p_xmax = prior_data[idx_p + 2]; const dtype p_ymax = prior_data[idx_p + 3]; const dtype prior_width = p_xmax - p_xmin; const dtype prior_height = p_ymax - p_ymin; const dtype prior_center_x = (p_xmin + p_xmax) / 2.; const dtype prior_center_y = (p_ymin + p_ymax) / 2.; const dtype xmin = loc_data[idx]; const dtype ymin = loc_data[idx + 1]; const dtype xmax = loc_data[idx + 2]; const dtype ymax = loc_data[idx + 3]; //! variance is encoded in target, we simply need to retore the offset predictions. 
dtype decode_bbox_center_x = xmin * prior_width + prior_center_x; dtype decode_bbox_center_y = ymin * prior_height + prior_center_y; dtype decode_bbox_width = exp(xmax) * prior_width; dtype decode_bbox_height = exp(ymax) * prior_height; bbox_data[idx] = decode_bbox_center_x - decode_bbox_width / 2.f; bbox_data[idx + 1] = decode_bbox_center_y - decode_bbox_height / 2.f; bbox_data[idx + 2] = decode_bbox_center_x + decode_bbox_width / 2.f; bbox_data[idx + 3] = decode_bbox_center_y + decode_bbox_height / 2.f; } } template <typename dtype> __global__ void decode_bbox_center_no_variance_kernel(const int count, \ const dtype* loc_data, const dtype* prior_data, const dtype* variance, \ const int num_priors, const bool share_location, const int num_loc_classes, \ const int background_label_id, dtype* bbox_data) { CUDA_KERNEL_LOOP(index, count) { const int c = index % num_loc_classes; const int idx_p = (index % num_priors) * 4; const int idx = index * 4; if (!share_location && c == background_label_id) { //! Ignore background class if not share_location. return; } const dtype p_xmin = prior_data[idx_p]; const dtype p_ymin = prior_data[idx_p + 1]; const dtype p_xmax = prior_data[idx_p + 2]; const dtype p_ymax = prior_data[idx_p + 3]; const dtype prior_width = p_xmax - p_xmin; const dtype prior_height = p_ymax - p_ymin; const dtype prior_center_x = (p_xmin + p_xmax) / 2.; const dtype prior_center_y = (p_ymin + p_ymax) / 2.; const dtype xmin = loc_data[idx]; const dtype ymin = loc_data[idx + 1]; const dtype xmax = loc_data[idx + 2]; const dtype ymax = loc_data[idx + 3]; //! variance is encoded in bbox, we need to scale the offset accordingly. dtype decode_bbox_center_x = variance[idx_p] * xmin * prior_width + prior_center_x; dtype decode_bbox_center_y = variance[idx_p + 1] * ymin * prior_height + prior_center_y; dtype decode_bbox_width = exp(variance[idx_p + 2] * xmax) * prior_width; dtype decode_bbox_height = exp(variance[idx_p + 3] * ymax) * prior_height; bbox_data[idx] = decode_bbox_center_x - decode_bbox_width / 2.f; bbox_data[idx + 1] = decode_bbox_center_y - decode_bbox_height / 2.f; bbox_data[idx + 2] = decode_bbox_center_x + decode_bbox_width / 2.f; bbox_data[idx + 3] = decode_bbox_center_y + decode_bbox_height / 2.f; } } template <typename dtype> __global__ void decode_bbox_corner_size_variance_kernel(const int count, \ const dtype* loc_data, const dtype* prior_data, const dtype* variance, \ const int num_priors, const bool share_location, const int num_loc_classes, \ const int background_label_id, dtype* bbox_data) { CUDA_KERNEL_LOOP(index, count) { const int c = index % num_loc_classes; const int idx_p = (index % num_priors) * 4; const int idx = index * 4; if (!share_location && c == background_label_id) { //! Ignore background class if not share_location. return; } const dtype p_xmin = prior_data[idx_p]; const dtype p_ymin = prior_data[idx_p + 1]; const dtype p_xmax = prior_data[idx_p + 2]; const dtype p_ymax = prior_data[idx_p + 3]; const dtype prior_width = p_xmax - p_xmin; const dtype prior_height = p_ymax - p_ymin; //! variance is encoded in target, we simply need to add the offset predictions. 
bbox_data[idx] = p_xmin + loc_data[idx] * prior_width; bbox_data[idx + 1] = p_ymin + loc_data[idx + 1] * prior_height; bbox_data[idx + 2] = p_xmax + loc_data[idx + 2] * prior_width; bbox_data[idx + 3] = p_ymax + loc_data[idx + 3] * prior_height; } } template <typename dtype> __global__ void decode_bbox_corner_size_no_variance_kernel(const int count, \ const dtype* loc_data, const dtype* prior_data, const dtype* variance, \ const int num_priors, const bool share_location, const int num_loc_classes, \ const int background_label_id, dtype* bbox_data) { CUDA_KERNEL_LOOP(index, count) { const int c = index % num_loc_classes; const int idx_p = (index % num_priors) * 4; const int idx = index * 4; if (!share_location && c == background_label_id) { //! Ignore background class if not share_location. return; } const dtype p_xmin = prior_data[idx_p]; const dtype p_ymin = prior_data[idx_p + 1]; const dtype p_xmax = prior_data[idx_p + 2]; const dtype p_ymax = prior_data[idx_p + 3]; const dtype prior_width = p_xmax - p_xmin; const dtype prior_height = p_ymax - p_ymin; //! variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[idx] = p_xmin + loc_data[idx] * variance[idx_p] * prior_width; bbox_data[idx + 1] = p_ymin + loc_data[idx + 1] * variance[idx_p + 1] * prior_height; bbox_data[idx + 2] = p_xmax + loc_data[idx + 2] * variance[idx_p + 2] * prior_width; bbox_data[idx + 3] = p_ymax + loc_data[idx + 3] * variance[idx_p + 3] * prior_height; } } template <typename Dtype> void decode_bboxes(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, Dtype* bbox_data, cudaStream_t stream) { int count = nthreads / 4; const Dtype* variance_data = prior_data + 4 * num_priors; if (code_type == CORNER) { if (variance_encoded_in_target) { decode_bbox_corner_variance_kernel<Dtype>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>\ (count, loc_data, prior_data, variance_data, num_priors, share_location, \ num_loc_classes, background_label_id, bbox_data); } else { decode_bbox_corner_no_variance_kernel<Dtype>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>\ (count, loc_data, prior_data, variance_data, num_priors, share_location, \ num_loc_classes, background_label_id, bbox_data); } } else if (code_type == CENTER_SIZE) { if (variance_encoded_in_target) { decode_bbox_center_variance_kernel<Dtype>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>\ (count, loc_data, prior_data, variance_data, num_priors, share_location, \ num_loc_classes, background_label_id, bbox_data); } else { decode_bbox_center_no_variance_kernel<Dtype>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>\ (count, loc_data, prior_data, variance_data, num_priors, share_location, \ num_loc_classes, background_label_id, bbox_data); } } else if (code_type == CORNER_SIZE) { if (variance_encoded_in_target) { decode_bbox_corner_size_variance_kernel<Dtype>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>\ (count, loc_data, prior_data, variance_data, num_priors, share_location, \ num_loc_classes, background_label_id, bbox_data); } else { decode_bbox_corner_size_no_variance_kernel<Dtype>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>\ (count, loc_data, prior_data, variance_data, num_priors, share_location, \ num_loc_classes, background_label_id, bbox_data); } } } template void decode_bboxes<float>(const 
int nthreads, const float* loc_data, const float* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, float* bbox_data, cudaStream_t stream); } //namespace saber } //namespace anakin
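// --- Illustrative sketch (not part of the Anakin source above) --------------
// Host-side reference of the CENTER_SIZE decode performed per thread by
// decode_bbox_center_no_variance_kernel: the prior box carries a per-coordinate
// variance that scales the predicted offsets before converting back to corner
// form. The Box struct and function name below are placeholders for this
// example only.
#include <cmath>

struct Box { float xmin, ymin, xmax, ymax; };

inline Box decode_center_size(const float loc[4], const float prior[4],
                              const float variance[4]) {
  const float prior_w  = prior[2] - prior[0];
  const float prior_h  = prior[3] - prior[1];
  const float prior_cx = 0.5f * (prior[0] + prior[2]);
  const float prior_cy = 0.5f * (prior[1] + prior[3]);
  // Same arithmetic as the kernel: scale offsets by the variance, then
  // convert (center, size) back to (xmin, ymin, xmax, ymax).
  const float cx = variance[0] * loc[0] * prior_w + prior_cx;
  const float cy = variance[1] * loc[1] * prior_h + prior_cy;
  const float w  = std::exp(variance[2] * loc[2]) * prior_w;
  const float h  = std::exp(variance[3] * loc[3]) * prior_h;
  return {cx - 0.5f * w, cy - 0.5f * h, cx + 0.5f * w, cy + 0.5f * h};
}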
// GLEW #define GLEW_STATIC #include <GL/glew.h> // GLFW #include <GLFW/glfw3.h> // CUDA 8.0, only for test now #include "cuda_runtime.h" #include "device_launch_parameters.h" // PBF #include "include/arcball_camera.h" #include "include/boundary_gpu.h" #include "include/config.h" #include "include/constants.h" #include "include/gl_fix.h" #include "include/obj_model.h" #include "include/obj_models_helpers.h" #include "include/particle_system.h" #include "include/pbf_solver.h" #include "include/pbf_solver_gpu.h" #include "include/point_drawer.h" #include "include/renderer.h" #include "include/shader_wrapper.h" #include "include/shared_math.h" #include "include/spatial_hash.h" //////////////////////////////////////////////////// // TODO(k-ye): These global variables should be cleaned up // Window dimensions // const GLuint WIDTH = 1024, HEIGHT = 768; const GLuint WIDTH = 768; const GLuint HEIGHT = 900; float delta_time = 0.0f; glm::vec3 world_size_dim{0.0f}; // Camera instance pbf::ArcballCamera camera; // Particle System instance pbf::ParticleSystem ps; pbf::BoundaryConstraintGpu boundary_constraint; // PBF Solver instance // pbf::PbfSolver solver; pbf::PbfSolverGpu solver; // SceneRender instance pbf::SceneRenderer render; //////////////////////////////////////////////////// // Configure the parameters of the world void Configure(pbf::Config &config); void InitParticles(const pbf::Config &config); void InitDependencies(); //////////////////////////////////////////////////// // Callback function declarations bool is_paused = false; void KeyCallback(GLFWwindow *window, int key, int scancode, int action, int mode); bool left_btn_pressed = false; void MouseCallback(GLFWwindow *window, double xpos, double ypos); float max_arcball_radius = 100.0f; void ScrollCallback(GLFWwindow *window, double xoffset, double yoffset); //////////////////////////////////////////////////// // A class that moves the x hi boundary back and forth class MoveXBoundaryDriver { public: MoveXBoundaryDriver(pbf::BoundaryConstraintBase *bc) : bc_(bc) {} void Configure(const pbf::Config &config) { x_hi_index_ = 1; x_vel_ = 8.0f; const float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X); x_lo_ = world_size_x * 0.6f; x_hi_ = world_size_x - 0.5f; } void Update(float dt) { auto *bp = bc_->Get(x_hi_index_); bp->position.x += (bp->velocity.x * dt); if (bp->position.x < x_lo_) { bp->position.x = x_lo_ + kFloatEpsilon; bp->velocity.x = x_vel_; } else if (bp->position.x > x_hi_) { bp->position.x = x_hi_ - kFloatEpsilon; bp->velocity.x = -x_vel_; } } private: pbf::BoundaryConstraintBase *bc_; float x_vel_; float x_lo_; float x_hi_; size_t x_hi_index_; }; //////////////////////////////////////////////////// // The MAIN function, from here we start the application and run the game loop int main() { std::cout << "Starting GLFW context, OpenGL 3.3" << std::endl; // Init GLFW glfwInit(); // Set all the required options for GLFW glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); glfwWindowHint(GLFW_RESIZABLE, GL_FALSE); GLFW_FORWARD_COMPATIBLE(); // Create a GLFWwindow object that we can use for GLFW's functions GLFWwindow *window = glfwCreateWindow(WIDTH, HEIGHT, "PBF", nullptr, nullptr); glfwMakeContextCurrent(window); // Initialize PBF pbf::Config config; config.Load("Config/config.txt"); Configure(config); InitParticles(config); // Once loaded, |obj_models| should never change its size. 
std::vector<pbf::ObjModel> obj_models = pbf::LoadModelsFromConfigFile("Config/model_defs.txt"); { float interval = config.Get<float>(pbf::PARTICLE_INTERVAL); std::vector<pbf::point_t> obj_models_points = pbf::FillPointsInObjModels(obj_models, world_size_dim, interval); std::cout << "found " << obj_models_points.size() << " particles from the object models" << std::endl; for (const auto& pt : obj_models_points) { glm::vec3 vel; vel.x = 0.0f; // pbf::GenRandom(-0.05f, 0.05f); vel.y = pbf::GenRandom(-0.05f, 0.05f); vel.z = 0.0f; // pbf::GenRandom(-0.05f, 0.05f); ps.Add(pt, vel);; } } InitDependencies(); MoveXBoundaryDriver boundary_driver{&boundary_constraint}; boundary_driver.Configure(config); // Set the required callback functions glfwSetKeyCallback(window, KeyCallback); glfwSetCursorPosCallback(window, MouseCallback); glfwSetScrollCallback(window, ScrollCallback); // Set this to true so GLEW knows to use a modern approach to retrieving // function pointers and extensions glewExperimental = GL_TRUE; // Initialize GLEW to setup the OpenGL Function pointers glewInit(); // Define the viewport dimensions int width, height; glfwGetFramebufferSize(window, &width, &height); glViewport(0, 0, width, height); render.InitShaders("Shaders/vertex.vert", "Shaders/fragment.frag"); render.InitSpriteShaders("Shaders/sprite_vertex.vert", "Shaders/sprite_fragment.frag"); for (const pbf::ObjModel &obj_model : obj_models) { // render.RegisterObjModel(&obj_model); } render.InitScene(); is_paused = true; // Game loop while (!glfwWindowShouldClose(window)) { // Check if any events have been activiated (key pressed, mouse moved etc.) // and call corresponding response functions glfwPollEvents(); if (!is_paused) { // boundary_driver.Update(delta_time); solver.Update(delta_time); } render.Render(); // Swap the screen buffers glfwSwapBuffers(window); } // Terminate GLFW, clearing any resources allocated by GLFW. 
glfwTerminate(); return 0; } //////////////////////////////////////////////////// void ConfigureCamera(const pbf::Config &config) { // config camera camera.SetStageSize(WIDTH, HEIGHT); float radius = config.Get<float>(pbf::INIT_ARCBALL_RADIUS); camera.SetArcballRadius(radius); float sensitivity = 2.0f; config.GetOptional(pbf::CAMERA_SENSITIVITY, &sensitivity); camera.SetSensitivity(sensitivity); max_arcball_radius = config.Get<float>(pbf::MAX_ARCBALL_RADIUS); } void ConfigureBoundaryConstraint(const pbf::Config &config) { using pbf::vec_t; const float world_size_x = world_size_dim.x; const float world_size_y = world_size_dim.y; const float world_size_z = world_size_dim.z; pbf::BoundaryPlane bp; // X lo bp.position = vec_t{0.0f, 0.0f, 0.0f}; bp.velocity = vec_t{0.0f}; bp.normal = vec_t{1.0f, 0.0f, 0.0f}; boundary_constraint.Add(bp); // X hi bp.position = vec_t{world_size_x, 0.0f, world_size_z}; bp.velocity = vec_t{0.0f}; bp.normal = vec_t{-1.0f, 0.0f, 0.0f}; boundary_constraint.Add(bp); // Z lo bp.position = vec_t{world_size_x, 0.0f, 0.0f}; bp.velocity = vec_t{0.0f}; bp.normal = vec_t{0.0f, 0.0f, 1.0f}; boundary_constraint.Add(bp); // Z hi bp.position = vec_t{0.0f, 0.0f, world_size_z}; bp.velocity = vec_t{0.0f}; bp.normal = vec_t{0.0f, 0.0f, -1.0f}; boundary_constraint.Add(bp); // Y lo bp.position = vec_t{world_size_x, 0.0f, 0.0f}; bp.velocity = vec_t{0.0f}; bp.normal = vec_t{0.0f, 1.0f, 0.0f}; boundary_constraint.Add(bp); // No Y hi, top not covered } void ConfigureSolver(const pbf::Config &config) { pbf::PbfSolverConfig solver_config; solver_config.h = config.Get<float>(pbf::H_KERNEL); solver_config.mass = config.Get<float>(pbf::PARTICLE_MASS); solver_config.rho_0 = config.Get<float>(pbf::RHO_0); solver_config.epsilon = config.Get<float>(pbf::EPSILON); solver_config.num_iters = config.Get<unsigned>(pbf::NUM_ITERATIONS); solver_config.corr_delta_q_coeff = config.Get<float>(pbf::CORR_DELTA_Q_COEFF); solver_config.corr_k = config.Get<float>(pbf::CORR_K); solver_config.corr_n = config.Get<unsigned>(pbf::CORR_N); solver_config.vorticity_epsilon = config.Get<float>(pbf::VORTICITY_EPSILON); solver_config.xsph_c = config.Get<float>(pbf::XSPH_C); solver_config.world_size_x = world_size_dim.x; solver_config.world_size_y = world_size_dim.y; solver_config.world_size_z = world_size_dim.z; solver_config.spatial_hash_cell_size = config.Get<float>(pbf::SH_CELL_SIZE); solver.Configure(solver_config); } void ConfigureRenderer(const pbf::Config &config) { render.SetWorldSize(world_size_dim); float fov = 45.0f; config.GetOptional(pbf::FOV, &fov); float aspect = (float)WIDTH / (float)HEIGHT; float near = 0.1f; config.GetOptional(pbf::PROJECTION_NEAR, &near); float far = config.Get<float>(pbf::PROJECTION_FAR); render.SetPespectiveProjection(fov, aspect, near, far); } void Configure(pbf::Config &config) { delta_time = config.Get<float>(pbf::DELTA_TIME); float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X); float world_size_y = config.Get<float>(pbf::WORLD_SIZE_Y); float world_size_z = config.Get<float>(pbf::WORLD_SIZE_Z); world_size_dim = {world_size_x, world_size_y, world_size_z}; ConfigureCamera(config); ConfigureBoundaryConstraint(config); ConfigureSolver(config); ConfigureRenderer(config); } void InitParticles(const pbf::Config &config) { srand(time(nullptr)); unsigned num_x = config.Get<unsigned>(pbf::NUM_PTCS_WIDTH); unsigned num_z = config.Get<unsigned>(pbf::NUM_PTCS_HEIGHT); unsigned num_y = config.Get<unsigned>(pbf::NUM_PTC_LAYERS); float world_size_x = config.Get<float>(pbf::WORLD_SIZE_X); float 
world_size_y = config.Get<float>(pbf::WORLD_SIZE_Y); float world_size_z = config.Get<float>(pbf::WORLD_SIZE_Z); float interval = config.Get<float>(pbf::PARTICLE_INTERVAL); auto ComputeMargin = [=](float world_sz_dim, unsigned num_dim) -> float { return (world_sz_dim - ((num_dim - 1) * interval)) * 0.5f; }; // float margin_y = ComputeMargin(world_size_y, num_y); float margin_y = interval * 0.5; float margin_z = ComputeMargin(world_size_z, num_z); float margin_x = ComputeMargin(world_size_x, num_x); for (unsigned y = 0; y < num_y; ++y) { for (unsigned z = 0; z < num_z; ++z) { for (unsigned x = 0; x < num_x; ++x) { float xf = margin_x + x * interval; // float yf = world_size_y - margin_y - y * interval; float yf = margin_y + y * interval; float zf = margin_z + z * interval; const glm::vec3 pos{xf, yf, zf}; float vx = pbf::GenRandom(-0.5f, 0.5f); float vy = pbf::GenRandom(0.0f, 1.0f); float vz = pbf::GenRandom(-0.5f, 0.5f); const glm::vec3 vel{vx, vy, vz}; ps.Add(pos, vel); } } } } void InitDependencies() { solver.InitParticleSystems(&ps); solver.SetBoundaryConstraint(&boundary_constraint); render.SetCamera(&camera); render.SetParticleSystem(&ps); render.boundary_constraint_ = &boundary_constraint; for (size_t i = 0; i < boundary_constraint.NumBoundaries(); ++i) { pbf::SceneRenderer::BoundaryRecord brec; brec.index = i; if (i == 0 || i == 1) { brec.v1_len = world_size_dim.z; brec.v2_len = world_size_dim.y; } else if (i == 2 || i == 3) { brec.v1_len = world_size_dim.x; brec.v2_len = world_size_dim.y; } else { brec.v1_len = world_size_dim.z; brec.v2_len = world_size_dim.x; } render.boundary_records_.push_back(brec); } } //////////////////////////////////////////////////// // Is called whenever a key is pressed/released via GLFW void KeyCallback(GLFWwindow *window, int key, int scancode, int action, int mode) { if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS) glfwSetWindowShouldClose(window, GL_TRUE); if (key == GLFW_KEY_SPACE && action == GLFW_PRESS) is_paused = !is_paused; } void MouseCallback(GLFWwindow *window, double xpos, double ypos) { int action = glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT); if (action == GLFW_PRESS) { if (!left_btn_pressed) { std::cout << "mouse left button just pressed" << std::endl; left_btn_pressed = true; camera.OnMouseLeftClick(xpos, ypos); } else { std::cout << "mouse left button dragging" << std::endl; camera.OnMouseLeftDragging(xpos, ypos); } } else { if (left_btn_pressed) { left_btn_pressed = false; camera.OnMouseLeftRelease(xpos, ypos); std::cout << "mouse left button released" << std::endl; } } } void ScrollCallback(GLFWwindow *window, double xoffset, double yoffset) { float arcball_radius = camera.GetArcballRadius(); arcball_radius += yoffset * 0.25f; std::cout << "scroll! yoffset: " << yoffset << ", radius: " << arcball_radius << std::endl; if (arcball_radius > 0 && arcball_radius < max_arcball_radius) { camera.SetArcballRadius(arcball_radius); } }
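// --- Illustrative sketch (separate from the PBF demo above) -----------------
// InitParticles() above seeds the fluid on a regular lattice: particles are
// spaced by |interval| and centered in X/Z via a margin, while Y starts half an
// interval above the floor. This standalone helper reproduces that placement;
// the Vec3 type and names are local to this example.
#include <cstddef>
#include <vector>

struct Vec3 { float x, y, z; };

inline float centered_margin(float world_len, unsigned count, float interval) {
  // Space left after (count - 1) gaps, split evenly between the two sides.
  return (world_len - (count - 1) * interval) * 0.5f;
}

inline std::vector<Vec3> lattice_positions(float wx, float wz,
                                           unsigned nx, unsigned ny, unsigned nz,
                                           float interval) {
  const float mx = centered_margin(wx, nx, interval);
  const float mz = centered_margin(wz, nz, interval);
  const float my = interval * 0.5f;  // bottom-aligned in Y, as in InitParticles()
  std::vector<Vec3> pos;
  pos.reserve(static_cast<std::size_t>(nx) * ny * nz);
  for (unsigned y = 0; y < ny; ++y)
    for (unsigned z = 0; z < nz; ++z)
      for (unsigned x = 0; x < nx; ++x)
        pos.push_back({mx + x * interval, my + y * interval, mz + z * interval});
  return pos;
}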
This sample has two kernels, one doing the rendering every frame, and another one used to generate the mip map levels at startup. For rendering we use a "virtual" texturing approach, where one 2d texture stores pointers to the actual textures used. This can be achieved by the new cudaTextureObject introduced in CUDA 5.0 and requiring sm3+ hardware. The mipmap generation kernel uses cudaSurfaceObject and cudaTextureObject passed as kernel arguments to compute the higher mip map level based on the lower. */ #ifndef _BINDLESSTEXTURE_KERNEL_CU_ #define _BINDLESSTEXTURE_KERNEL_CU_ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <vector> #include <helper_cuda.h> #include <helper_math.h> #include "bindlessTexture.h" // set this to just see the mipmap chain of first image //#define SHOW_MIPMAPS // local references to resources Image atlasImage; std::vector<Image> contentImages; float highestLod = 1.0f; #ifndef MAX #define MAX(a, b) ((a > b) ? a : b) #endif ////////////////////////////////////////////////////////////////////////// __host__ __device__ __inline__ uint2 encodeTextureObject( cudaTextureObject_t obj) { return make_uint2((uint)(obj & 0xFFFFFFFF), (uint)(obj >> 32)); } __host__ __device__ __inline__ cudaTextureObject_t decodeTextureObject( uint2 obj) { return (((cudaTextureObject_t)obj.x) | ((cudaTextureObject_t)obj.y) << 32); } __device__ __inline__ float4 to_float4(uchar4 vec) { return make_float4(vec.x, vec.y, vec.z, vec.w); } __device__ __inline__ uchar4 to_uchar4(float4 vec) { return make_uchar4((uchar)vec.x, (uchar)vec.y, (uchar)vec.z, (uchar)vec.w); } ////////////////////////////////////////////////////////////////////////// // Rendering // the atlas texture stores the 64 bit cudaTextureObjects // we use it for "virtual" texturing __global__ void d_render(uchar4 *d_output, uint imageW, uint imageH, float lod, cudaTextureObject_t atlasTexture) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)imageW; float v = y / (float)imageH; if ((x < imageW) && (y < imageH)) { // read from 2D atlas texture and decode texture object uint2 texCoded = tex2D<uint2>(atlasTexture, u, v); cudaTextureObject_t tex = decodeTextureObject(texCoded); // read from cuda texture object, use template to specify what data will be // returned. tex2DLod allows us to pass the lod (mip map level) directly. // There is other functions with CUDA 5, e.g. tex2DGrad, that allow you // to pass derivatives to perform automatic mipmap/anisotropic filtering. float4 color = tex2DLod<float4>(tex, u, 1 - v, lod); // In our sample tex is always valid, but for something like your own // sparse texturing you would need to make sure to handle the zero case. // write output color uint i = y * imageW + x; d_output[i] = to_uchar4(color * 255.0); } } extern "C" void renderAtlasImage(dim3 gridSize, dim3 blockSize, uchar4 *d_output, uint imageW, uint imageH, float lod) { // psuedo animate lod lod = fmodf(lod, highestLod * 2); lod = highestLod - fabs(lod - highestLod); #ifdef SHOW_MIPMAPS lod = 0.0f; #endif d_render<<<gridSize, blockSize>>>(d_output, imageW, imageH, lod, atlasImage.textureObject); checkCudaErrors(cudaGetLastError()); } ////////////////////////////////////////////////////////////////////////// // MipMap Generation // A key benefit of using the new surface objects is that we don't need any // global binding points anymore. We can directly pass them as function // arguments. 
__global__ void d_mipmap(cudaSurfaceObject_t mipOutput, cudaTextureObject_t mipInput, uint imageW, uint imageH) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; float px = 1.0 / float(imageW); float py = 1.0 / float(imageH); if ((x < imageW) && (y < imageH)) { // take the average of 4 samples // we are using the normalized access to make sure non-power-of-two textures // behave well when downsized. float4 color = (tex2D<float4>(mipInput, (x + 0) * px, (y + 0) * py)) + (tex2D<float4>(mipInput, (x + 1) * px, (y + 0) * py)) + (tex2D<float4>(mipInput, (x + 1) * px, (y + 1) * py)) + (tex2D<float4>(mipInput, (x + 0) * px, (y + 1) * py)); color /= 4.0; color *= 255.0; color = fminf(color, make_float4(255.0)); surf2Dwrite(to_uchar4(color), mipOutput, x * sizeof(uchar4), y); } } void generateMipMaps(cudaMipmappedArray_t mipmapArray, cudaExtent size) { size_t width = size.width; size_t height = size.height; #ifdef SHOW_MIPMAPS cudaArray_t levelFirst; checkCudaErrors(cudaGetMipmappedArrayLevel(&levelFirst, mipmapArray, 0)); #endif uint level = 0; while (width != 1 || height != 1) { width /= 2; width = MAX((size_t)1, width); height /= 2; height = MAX((size_t)1, height); cudaArray_t levelFrom; checkCudaErrors(cudaGetMipmappedArrayLevel(&levelFrom, mipmapArray, level)); cudaArray_t levelTo; checkCudaErrors( cudaGetMipmappedArrayLevel(&levelTo, mipmapArray, level + 1)); cudaExtent levelToSize; checkCudaErrors(cudaArrayGetInfo(NULL, &levelToSize, NULL, levelTo)); checkHost(levelToSize.width == width); checkHost(levelToSize.height == height); checkHost(levelToSize.depth == 0); // generate texture object for reading cudaTextureObject_t texInput; cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = levelFrom; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = 1; texDescr.filterMode = cudaFilterModeLinear; texDescr.addressMode[0] = cudaAddressModeClamp; texDescr.addressMode[1] = cudaAddressModeClamp; texDescr.addressMode[2] = cudaAddressModeClamp; texDescr.readMode = cudaReadModeNormalizedFloat; checkCudaErrors( cudaCreateTextureObject(&texInput, &texRes, &texDescr, NULL)); // generate surface object for writing cudaSurfaceObject_t surfOutput; cudaResourceDesc surfRes; memset(&surfRes, 0, sizeof(cudaResourceDesc)); surfRes.resType = cudaResourceTypeArray; surfRes.res.array.array = levelTo; checkCudaErrors(cudaCreateSurfaceObject(&surfOutput, &surfRes)); // run mipmap kernel dim3 blockSize(16, 16, 1); dim3 gridSize(((uint)width + blockSize.x - 1) / blockSize.x, ((uint)height + blockSize.y - 1) / blockSize.y, 1); d_mipmap<<<gridSize, blockSize>>>(surfOutput, texInput, (uint)width, (uint)height); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDestroySurfaceObject(surfOutput)); checkCudaErrors(cudaDestroyTextureObject(texInput)); #ifdef SHOW_MIPMAPS // we blit the current mipmap back into first level cudaMemcpy3DParms copyParams = {0}; copyParams.dstArray = levelFirst; copyParams.srcArray = levelTo; copyParams.extent = make_cudaExtent(width, height, 1); copyParams.kind = cudaMemcpyDeviceToDevice; checkCudaErrors(cudaMemcpy3D(&copyParams)); #endif level++; } } uint getMipMapLevels(cudaExtent size) { size_t sz = MAX(MAX(size.width, size.height), size.depth); uint levels = 0; while (sz) { sz /= 2; levels++; } return levels; } 
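// --- Illustrative sketch (not part of the CUDA sample above) ----------------
// CPU reference of what one d_mipmap launch computes: every texel of the next
// mip level is the average of a 2x2 footprint in the previous level. The RGBA8
// image is a flat array here; the clamped, normalized texture sampling of the
// real kernel is approximated by clamping the source coordinates.
#include <algorithm>
#include <cstdint>
#include <vector>

inline std::vector<uint8_t> downsample_level(const std::vector<uint8_t> &src,
                                             int w, int h) {
  const int ow = std::max(1, w / 2);
  const int oh = std::max(1, h / 2);
  std::vector<uint8_t> dst(static_cast<size_t>(ow) * oh * 4);
  for (int y = 0; y < oh; ++y) {
    for (int x = 0; x < ow; ++x) {
      for (int c = 0; c < 4; ++c) {
        int sum = 0;
        for (int dy = 0; dy < 2; ++dy) {
          for (int dx = 0; dx < 2; ++dx) {
            const int sx = std::min(2 * x + dx, w - 1);
            const int sy = std::min(2 * y + dy, h - 1);
            sum += src[(static_cast<size_t>(sy) * w + sx) * 4 + c];
          }
        }
        dst[(static_cast<size_t>(y) * ow + x) * 4 + c] =
            static_cast<uint8_t>(sum / 4);
      }
    }
  }
  return dst;
}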
////////////////////////////////////////////////////////////////////////// // Initalization extern "C" void randomizeAtlas() { uint2 *h_data = (uint2 *)atlasImage.h_data; // assign random texture object handles to our atlas image tiles for (size_t i = 0; i < atlasImage.size.width * atlasImage.size.height; i++) { #ifdef SHOW_MIPMAPS h_data[i] = encodeTextureObject(contentImages[0].textureObject); #else h_data[i] = encodeTextureObject( contentImages[rand() % contentImages.size()].textureObject); #endif } // copy data to atlas array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr( atlasImage.h_data, atlasImage.size.width * sizeof(uint2), atlasImage.size.width, atlasImage.size.height); copyParams.dstArray = atlasImage.dataArray; copyParams.extent = atlasImage.size; copyParams.extent.depth = 1; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors(cudaMemcpy3D(&copyParams)); }; extern "C" void deinitAtlasAndImages() { for (size_t i = 0; i < contentImages.size(); i++) { Image &image = contentImages[i]; if (image.h_data) { free(image.h_data); } if (image.textureObject) { checkCudaErrors(cudaDestroyTextureObject(image.textureObject)); } if (image.mipmapArray) { checkCudaErrors(cudaFreeMipmappedArray(image.mipmapArray)); } } if (atlasImage.h_data) { free(atlasImage.h_data); } if (atlasImage.textureObject) { checkCudaErrors(cudaDestroyTextureObject(atlasImage.textureObject)); } if (atlasImage.dataArray) { checkCudaErrors(cudaFreeArray(atlasImage.dataArray)); } } extern "C" void initAtlasAndImages(const Image *images, size_t numImages, cudaExtent atlasSize) { // create individual textures contentImages.resize(numImages); for (size_t i = 0; i < numImages; i++) { Image &image = contentImages[i]; image.size = images[i].size; image.size.depth = 0; image.type = cudaResourceTypeMipmappedArray; // how many mipmaps we need uint levels = getMipMapLevels(image.size); highestLod = MAX(highestLod, (float)levels - 1); cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>(); checkCudaErrors(cudaMallocMipmappedArray(&image.mipmapArray, &desc, image.size, levels)); // upload level 0 cudaArray_t level0; checkCudaErrors(cudaGetMipmappedArrayLevel(&level0, image.mipmapArray, 0)); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(images[i].h_data, image.size.width * sizeof(uchar4), image.size.width, image.size.height); copyParams.dstArray = level0; copyParams.extent = image.size; copyParams.extent.depth = 1; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors(cudaMemcpy3D(&copyParams)); // compute rest of mipmaps based on level 0 generateMipMaps(image.mipmapArray, image.size); // generate bindless texture object cudaResourceDesc resDescr; memset(&resDescr, 0, sizeof(cudaResourceDesc)); resDescr.resType = cudaResourceTypeMipmappedArray; resDescr.res.mipmap.mipmap = image.mipmapArray; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = 1; texDescr.filterMode = cudaFilterModeLinear; texDescr.mipmapFilterMode = cudaFilterModeLinear; texDescr.addressMode[0] = cudaAddressModeClamp; texDescr.addressMode[1] = cudaAddressModeClamp; texDescr.addressMode[2] = cudaAddressModeClamp; texDescr.maxMipmapLevelClamp = float(levels - 1); texDescr.readMode = cudaReadModeNormalizedFloat; checkCudaErrors(cudaCreateTextureObject(&image.textureObject, &resDescr, &texDescr, NULL)); } // create atlas array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uint2>(); checkCudaErrors(cudaMallocArray(&atlasImage.dataArray, 
&channelDesc, atlasSize.width, atlasSize.height)); atlasImage.h_data = malloc(atlasSize.width * atlasSize.height * sizeof(uint2)); atlasImage.type = cudaResourceTypeArray; atlasImage.size = atlasSize; cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = atlasImage.dataArray; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = true; texDescr.filterMode = cudaFilterModePoint; texDescr.addressMode[0] = cudaAddressModeClamp; texDescr.addressMode[1] = cudaAddressModeClamp; texDescr.addressMode[2] = cudaAddressModeClamp; texDescr.readMode = cudaReadModeElementType; checkCudaErrors(cudaCreateTextureObject(&atlasImage.textureObject, &texRes, &texDescr, NULL)); randomizeAtlas(); } #endif // #ifndef _BINDLESSTEXTURE_KERNEL_CU_
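// --- Illustrative sketch (not part of the CUDA sample above) ----------------
// Round-trip check for the 64-bit handle packing done by encodeTextureObject /
// decodeTextureObject: the low 32 bits land in .x, the high 32 bits in .y.
// Plain integers stand in for a real cudaTextureObject_t here, and UInt2 is a
// local stand-in for uint2.
#include <cassert>
#include <cstdint>

struct UInt2 { uint32_t x, y; };

inline UInt2 encode_handle(uint64_t obj) {
  return {static_cast<uint32_t>(obj & 0xFFFFFFFFu),
          static_cast<uint32_t>(obj >> 32)};
}

inline uint64_t decode_handle(UInt2 packed) {
  return static_cast<uint64_t>(packed.x) |
         (static_cast<uint64_t>(packed.y) << 32);
}

inline void handle_roundtrip_check() {
  const uint64_t fake_handle = 0x0123456789ABCDEFull;  // placeholder value
  assert(decode_handle(encode_handle(fake_handle)) == fake_handle);
}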
namespace xlib { template<unsigned ITEMS_PER_BLOCK, typename T> __global__ void mergePathLBPartition(const T* __restrict__ d_prefixsum, int prefixsum_size, T last_value, int num_merge, int* __restrict__ d_partitions, int num_partitions) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = id; i <= num_partitions; i += stride) { T diagonal = ::min(i * ITEMS_PER_BLOCK, num_merge); auto value = xlib::merge_path_search(d_prefixsum, prefixsum_size, NaturalIterator(), last_value, diagonal); d_partitions[i] = value.y; //if (i < 10) // printf("%d\t%d\t\t%d\n", value.x, value.y, d_prefixsum[value.x]); } } //============================================================================== template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD, typename T> __device__ __forceinline__ void blockMergePathLB(const T* d_prefixsum, int2 block_coord_start, int y_size, T* smem_prefix, int smem_size, T* smem_buffer) { auto smem_tmp = smem_prefix + threadIdx.x; auto d_tmp = d_prefixsum + block_coord_start.x + threadIdx.x; for (int i = threadIdx.x; i < smem_size; i += BLOCK_SIZE) { *smem_tmp = *d_tmp; smem_tmp += BLOCK_SIZE; d_tmp += BLOCK_SIZE; } /*#pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { if (i * BLOCK_SIZE + threadIdx.x < smem_size) smem_tmp[i * BLOCK_SIZE] = d_tmp[i * BLOCK_SIZE]; }*/ xlib::sync<BLOCK_SIZE>(); T diagonal = threadIdx.x * ITEMS_PER_THREAD; NaturalIterator natural_iterator(block_coord_start.y); auto thread_coord = xlib::merge_path_search(smem_prefix, smem_size, natural_iterator, y_size, diagonal); const auto MAX = xlib::numeric_limits<int>::max; int first = (threadIdx.x == 0); thread_coord.x = max(thread_coord.x - first, 0); int next = (thread_coord.x < smem_size) ? smem_prefix[thread_coord.x] : MAX; int y_value = block_coord_start.y + thread_coord.y; /*#pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { assert(y_value <= next); if (y_value < next) { assert(thread_coord.y < y_size || blockIdx.x == gridDim.x - 1); assert(thread_coord.x >= 0); assert(thread_coord.x - 1 < smem_size); smem_buffer[thread_coord.y] = thread_coord.x - 1; thread_coord.y++; y_value++; } else { thread_coord.x++; next = (thread_coord.x < smem_size) ? smem_prefix[thread_coord.x] : MAX; } }*/ /*if (blockIdx.x == 0 && threadIdx.x == 0) { //printf("%d\t%d\n", smem_size, y_size); xlib::printfArray(smem_prefix, smem_size); printf("\n"); xlib::printfArray(smem_buffer, y_size); }*/ #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { bool pred = (y_value < next); if (pred) smem_buffer[thread_coord.y] = thread_coord.x - 1; y_value = (pred) ? y_value + 1 : y_value; thread_coord.y = (pred) ? thread_coord.y + 1 : thread_coord.y; thread_coord.x = (pred) ? thread_coord.x : thread_coord.x + 1; next = (thread_coord.x < smem_size) ? smem_prefix[thread_coord.x] : MAX; } xlib::sync<BLOCK_SIZE>(); } //============================================================================== template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_BLOCK, typename T, typename Lambda, typename... TArgs> __device__ __forceinline__ void mergePathLBAux(const int* d_partitions, int num_partitions, const T* d_prefixsum, int prefixsum_size, void* smem, const Lambda& lambda, unsigned block_idx, TArgs* ... 
forward_args) { const unsigned ITEMS_PER_THREAD = ITEMS_PER_BLOCK / BLOCK_SIZE; int block_diag0 = block_idx * ITEMS_PER_BLOCK; int block_diag1 = block_diag0 + ITEMS_PER_BLOCK; int block_start_pos = d_partitions[ block_idx ]; int block_end_pos = d_partitions[ block_idx + 1 ]; int min_value = ::min(block_diag1 - block_end_pos, prefixsum_size); int2 block_coord_start { block_diag0 - block_start_pos, block_start_pos }; int2 block_coord_end { min_value, block_end_pos }; int smem_size = block_coord_end.x - block_coord_start.x; int y_size = block_coord_end.y - block_coord_start.y; auto smem_prefix = static_cast<T*>(smem); auto smem_buffer = static_cast<T*>(smem) + smem_size; assert(block_idx == gridDim.x - 1 || (block_coord_end.x - block_coord_start.x) + (block_coord_end.y - block_coord_start.y) == ITEMS_PER_BLOCK); assert(block_coord_start.x + smem_size <= prefixsum_size); /*if (block_idx < 10 && threadIdx.x == 0) printf("%d\t%d\t%d\t%d\n", block_coord_start.x, block_coord_end.x, block_coord_start.y, block_coord_end.y);*/ blockMergePathLB<BLOCK_SIZE, ITEMS_PER_THREAD> (d_prefixsum, block_coord_start, y_size, smem_prefix, smem_size, smem_buffer); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { int index = threadIdx.x + i * BLOCK_SIZE; if (index < y_size) { int reg_pos = smem_buffer[index]; int reg_index = block_coord_start.y + index; int reg_offset = reg_index - smem_prefix[reg_pos]; lambda(reg_pos + block_coord_start.x, reg_offset, reg_index, forward_args...); } } } template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_BLOCK, typename T, typename Lambda> __device__ __forceinline__ void mergePathLB(const int* d_partitions, int num_partitions, const T* d_prefixsum, int prefixsum_size, void* smem, const Lambda& lambda) { mergePathLBAux<BLOCK_SIZE, ITEMS_PER_BLOCK> (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda, blockIdx.x); } //============================================================================== template<int ITEMS_PER_BLOCK, int BLOCK_SIZE, typename T, typename Lambda, typename... TArgs> __global__ void mergePathWrapper(const int* __restrict__ d_partitions, int num_partitions, const T* __restrict__ d_prefixsum, int prefixsum_size, const Lambda& lambda, TArgs* __restrict__ ... forward_args) { __shared__ int smem[ITEMS_PER_BLOCK]; mergePathLBAux<BLOCK_SIZE, ITEMS_PER_BLOCK> (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem, lambda, blockIdx.x, forward_args...); } template<int ITEMS_PER_BLOCK, int BLOCK_SIZE, typename T, typename Lambda, typename... TArgs> __global__ void mergePathKernelFusionWrapper(int* __restrict__ d_partitions, int num_partitions, const T* __restrict__ d_prefixsum, int prefixsum_size, T last_value, const Lambda& lambda, TArgs* __restrict__ ... 
forward_args) { __shared__ int smem[ITEMS_PER_BLOCK]; int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; T diagonal = ::min(id * ITEMS_PER_BLOCK, last_value + prefixsum_size); auto value = xlib::merge_path_search(d_prefixsum, prefixsum_size, NaturalIterator(), last_value, diagonal); d_partitions[id] = value.y; cooperative_groups::this_grid().sync(); const int* __restrict__ d_partitions_aux = d_partitions; for (int i = id; i < num_partitions; i += stride) { mergePathLBAux<BLOCK_SIZE, ITEMS_PER_BLOCK> (d_partitions_aux, num_partitions, d_prefixsum, prefixsum_size, smem, lambda, i, forward_args...); } } //============================================================================== namespace merge_path_lb { template<typename T, typename Lambda, typename... TArgs> void run(const T* d_prefixsum, int prefixsum_size, T last_value, const Lambda& lambda, TArgs*... forward_args) noexcept { T num_merges = prefixsum_size + last_value; int num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); int* d_partitions; cuMalloc(d_partitions, num_merge_blocks + 1); merge_path_lb::run(d_prefixsum, prefixsum_size, last_value, lambda, forward_args...); cuFree(d_partitions); } template<typename T, typename Lambda, typename... TArgs> void run_kernel_fusion(const T* d_prefixsum, int prefixsum_size, T last_value, const Lambda& lambda, TArgs*... forward_args) noexcept { T num_merges = prefixsum_size + last_value; int num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); int* d_partitions; cuMalloc(d_partitions, num_merge_blocks + 1); merge_path_lb::run_kernel_fusion(d_prefixsum, prefixsum_size, last_value, lambda, forward_args...); cuFree(d_partitions); } template<typename T, typename Lambda, typename... TArgs> void run(const T* d_prefixsum, int prefixsum_size, T last_value, int* d_partitions, const Lambda& lambda, TArgs*... forward_args) noexcept { T num_merges = prefixsum_size + last_value; int num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); int num_partition_blocks = xlib::ceil_div<BLOCK_SIZE>(num_merge_blocks); mergePathLBPartition <ITEMS_PER_BLOCK> <<< num_partition_blocks, BLOCK_SIZE >>> (d_prefixsum, prefixsum_size, last_value, num_merges, d_partitions, num_merge_blocks); mergePathWrapper <ITEMS_PER_BLOCK, BLOCK_SIZE> <<< num_merge_blocks, BLOCK_SIZE >>> (d_partitions, num_merge_blocks, d_prefixsum, prefixsum_size, lambda, forward_args...); } template<typename T, typename Lambda, typename... TArgs> void run_kernel_fusion(const T* d_prefixsum, int prefixsum_size, T last_value, int* d_partitions, const Lambda& lambda, TArgs*... 
forward_args) noexcept { T num_merges = prefixsum_size + last_value; int num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); int max_resident_blocks = xlib::kernel_occupancy( mergePathWrapper<ITEMS_PER_BLOCK, BLOCK_SIZE, true, Lambda>, BLOCK_SIZE); int num_actual_blocks = std::min(num_merge_blocks, max_resident_blocks); mergePathKernelFusionWrapper <ITEMS_PER_BLOCK, BLOCK_SIZE> <<< num_actual_blocks, BLOCK_SIZE >>> (d_partitions, num_merge_blocks, d_prefixsum, prefixsum_size, lambda, forward_args...); } } // namespace merge_path_lb //============================================================================== template<typename T> MergePathLB<T>::MergePathLB(int max_prefixsum_size, T max_last_value) noexcept { init(max_prefixsum_size, max_last_value); } template<typename T> MergePathLB<T>::MergePathLB(const T* d_prefixsum, int prefixsum_size, T last_value) noexcept { init(d_prefixsum, prefixsum_size, last_value); } template<typename T> MergePathLB<T>::~MergePathLB() noexcept { cuFree(_d_partitions); } template<typename T> void MergePathLB<T>::init(int max_prefixsum_size, T max_last_value) noexcept { assert(_d_prefixsum == nullptr && _d_partitions == nullptr && _num_merge_blocks == 0); T num_merges = max_prefixsum_size + max_last_value; _num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); cuMalloc(_d_partitions, std::max(static_cast<unsigned>(_num_merge_blocks + 1), GPU_MAX_BLOCKS)); } template<typename T> void MergePathLB<T>::init(const T* d_prefixsum, int prefixsum_size, T last_value) noexcept { assert(_d_prefixsum != nullptr && _d_partitions == nullptr && _num_merge_blocks == 0); _d_prefixsum = d_prefixsum; _prefixsum_size = prefixsum_size; T num_merges = prefixsum_size + last_value; _num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); int num_partition_blocks = xlib::ceil_div<BLOCK_SIZE>(_num_merge_blocks); cuMalloc(_d_partitions, _num_merge_blocks + 1); mergePathLBPartition <ITEMS_PER_BLOCK> <<< num_partition_blocks, BLOCK_SIZE >>> (d_prefixsum, prefixsum_size, last_value, num_merges, _d_partitions, _num_merge_blocks); } template<typename T> template<typename Lambda> void MergePathLB<T>::run(const T* d_prefixsum, int prefixsum_size, T last_value, const Lambda& lambda) const noexcept { assert(d_prefixsum != nullptr && _d_partitions != nullptr && _d_prefixsum == nullptr && prefixsum_size > 0 && last_value > 0 && _num_merge_blocks != 0); T num_merges = prefixsum_size + last_value; int num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); int num_partition_blocks = xlib::ceil_div<BLOCK_SIZE>(num_merge_blocks); assert(num_merge_blocks <= _num_merge_blocks); mergePathLBPartition <ITEMS_PER_BLOCK> <<< num_partition_blocks, BLOCK_SIZE >>> (d_prefixsum, prefixsum_size, last_value, num_merges, _d_partitions, num_merge_blocks); mergePathWrapper <ITEMS_PER_BLOCK, BLOCK_SIZE> <<< num_merge_blocks, BLOCK_SIZE >>> (_d_partitions, num_merge_blocks, d_prefixsum, prefixsum_size, lambda); } template<typename T> template<typename Lambda> void MergePathLB<T>::run_kernel_fusion(const T* d_prefixsum, int prefixsum_size, T last_value, const Lambda& lambda) const noexcept { assert(d_prefixsum != nullptr && _d_partitions != nullptr && _d_prefixsum == nullptr && prefixsum_size > 0 && last_value > 0 && _num_merge_blocks != 0); T num_merges = prefixsum_size + last_value; int num_merge_blocks = xlib::ceil_div<ITEMS_PER_BLOCK>(num_merges); int max_resident_blocks = xlib::kernel_occupancy( mergePathWrapper<ITEMS_PER_BLOCK, BLOCK_SIZE, true, Lambda>, BLOCK_SIZE); int 
num_actual_blocks = std::min(num_merge_blocks, max_resident_blocks); mergePathKernelFusionWrapper <ITEMS_PER_BLOCK, BLOCK_SIZE> <<< num_actual_blocks, BLOCK_SIZE >>> (_d_partitions, num_merge_blocks, d_prefixsum, prefixsum_size, lambda); } template<typename T> template<typename Lambda> void MergePathLB<T>::run(const Lambda& lambda) const noexcept { assert(_d_prefixsum != nullptr && _d_partitions != nullptr && _num_merge_blocks >= 0 && _prefixsum_size >= 0); mergePathWrapper <ITEMS_PER_BLOCK, BLOCK_SIZE> <<< _num_merge_blocks, BLOCK_SIZE >>> (_d_partitions, _num_merge_blocks, _d_prefixsum, _prefixsum_size, lambda); } } // namespace xlib
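// --- Illustrative sketch (not part of the xlib source above) ----------------
// CPU reference for the mapping that mergePathLB computes on the GPU: assuming
// d_prefixsum is an exclusive scan of segment sizes with prefixsum[0] == 0,
// every work item i in [0, total_work) is mapped to (segment index, offset
// inside that segment, i), which is what the user lambda receives. The GPU path
// derives the same mapping with merge-path partitioning; a per-item binary
// search is enough for a reference.
#include <algorithm>

template <typename Lambda>
void merge_path_lb_reference(const int *prefixsum, int prefixsum_size,
                             int total_work, const Lambda &lambda) {
  for (int i = 0; i < total_work; ++i) {
    // First prefix-sum entry strictly greater than i marks the next segment.
    const int *ub = std::upper_bound(prefixsum, prefixsum + prefixsum_size, i);
    const int seg = static_cast<int>(ub - prefixsum) - 1;  // segment index
    const int offset = i - prefixsum[seg];                 // offset in segment
    lambda(seg, offset, i);
  }
}

// Example with segment sizes {3, 0, 2}: items 0..2 map to segment 0, items 3..4
// map to segment 2 (the empty segment 1 is skipped).
//   int offsets[] = {0, 3, 3};
//   merge_path_lb_reference(offsets, 3, 5,
//                           [](int seg, int off, int i) { /* ... */ });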
* Radix Sorting API * ******************************************************************************/ #pragma once #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <b40c/KernelCommon/b40c_error_synchronize.cu> #include <b40c/LsbRadixSort/radixsort_multi_cta.cu> #include <b40c/LsbRadixSort/kernel/radixsort_singlegrid_kernel.cu> namespace b40c { /** * Single-grid sorting enactor class. * * This sorting implementation is specifically designed for small problems * that are not large enough to saturate the GPU (e.g., problems < 1M elements.) * It performs multiple digit-place passes over the input problem all within * a single kernel launch. It does so by implementing software global-barriers * across threadblocks. * * NOTE: This enactor can NOT be used to sort: * - Problems having structured value-types (i.e., keys that are paired with * satellite values that are structs or classes). This is because * the compiler cannot be told how to copy structures from global * memory using volatile or cache-global load modifiers. * * It also allows the caller to specify the lower-order bits over which the * keys should be sorted (e.g., the lower 17 bits out of 32-bit keys). This * reduces the number of overall sorting passes (4-bits per pass) for * scenarios in which the keyspace can be restricted in this manner. * * To use, simply create a specialized instance of this class with your * key-type K (and optionally value-type V if sorting with satellite * values). E.g., for sorting signed ints: * * SingleGridRadixSortingEnactor<int> sorting_enactor; * * or for sorting floats paired with unsigned ints: * * SingleGridRadixSortingEnactor<float, unsigned int> sorting_enactor; * * The enactor itself manages a small amount of device state for use when * performing sorting operations. To minimize GPU allocation overhead, * enactors can be re-used over multiple sorting operations. * * The problem-storage for a sorting operation is independent of the sorting * enactor. A single enactor can be reused to sort multiple instances of the * same type of problem storage. The MultiCtaRadixSortStorage structure * is used to manage the input/output/temporary buffers needed to sort * a problem of a given size. This enactor will lazily allocate any NULL * buffers contained within a problem-storage structure. * * Sorting is invoked upon a problem-storage as follows: * * sorting_enactor.EnactSort(device_storage); * * or * * sorting_enactor.EnactSort<17>(device_storage); * * in the case where the caller knows that only the lower 17 bits * are used to differentiate keys. N.B.: for use within templated * functions, the proper syntax is: * * sorting_enactor.template EnactSort<17>(device_storage); * * This enactor will update the selector within the problem storage * to indicate which buffer contains the sorted output. E.g., * * device_storage.d_keys[device_storage.selector]; * * Please see the overview of MultiCtaRadixSortStorage for more details. * * * @template-param K * Type of keys to be sorted * @template-param V * Type of values to be sorted. * @template-param ConvertedKeyType * Leave as default to effect necessary enactor specialization for * signed and floating-point types */ template <typename K, typename V = KeysOnlyType> class SingleGridRadixSortingEnactor; /** * Template-specialized structure for invoking the single-grid kernel a * specific number of times. We extract this so as to avoid unnecessary * kernel generation. 
*/ template<int INVOCATIONS, typename K, typename V, int RADIX_BITS, int PASSES> struct SingleGridKernelInvoker; /** * Single-grid sorting enactor class. */ template <typename K, typename V> class SingleGridRadixSortingEnactor : public MultiCtaRadixSortingEnactor<K, V> { private: // Typedef for base class typedef MultiCtaRadixSortingEnactor<K, V> Base; protected: // Array of global synchronization counters, one for each threadblock int *d_sync; public: // Unsigned integer type suitable for radix sorting of keys typedef typename KeyConversion<K>::UnsignedBits ConvertedKeyType; /** * Utility function: Returns the maximum problem size this enactor can sort on the device * it was initialized for. */ static long long MaxProblemSize(const CudaProperties &props) { long long element_size = (Base::KeysOnly()) ? sizeof(K) : sizeof(K) + sizeof(V); // Begin with device memory, subtract 128MB for video/spine/etc. Factor in // three vectors for keys (and values, if present) long long available_bytes = props.device_props.totalGlobalMem - 128; return available_bytes / (element_size * 3); } protected: // Radix bits per pass static const int RADIX_BITS = 4; /** * Utility function: Returns the default maximum number of threadblocks * this enactor class can launch. */ static int MaxGridSize(const CudaProperties &props, int max_grid_size = 0) { if (max_grid_size == 0) { // No override: Fully populate all SMs max_grid_size = props.device_props.multiProcessorCount * B40C_RADIXSORT_SG_OCCUPANCY(props.kernel_ptx_version); } return max_grid_size; } protected: /** * Determines the actual number of CTAs to launch for the given problem size * * @return The actual number of CTAs that should be launched */ int GridSize(int num_elements) { // Initially assume that each threadblock will do only one // tile worth of work (and that the last one will do any remainder), // but then clamp it by the "max" restriction int grid_size = (num_elements + this->tile_elements - 1) / this->tile_elements; if (grid_size == 0) { // Always at least one block to process the remainder grid_size = 1; } else if (grid_size > this->max_grid_size) { grid_size = this->max_grid_size; } return grid_size; } /** * Post-sorting logic. */ virtual cudaError_t PostSort(MultiCtaRadixSortStorage<K, V> &problem_storage, int passes) { problem_storage.selector = passes & 0x1; return Base::PostSort(problem_storage, passes); } public: /** * Constructor. */ SingleGridRadixSortingEnactor( int max_grid_size = 0, const CudaProperties &props = CudaProperties()) : Base::MultiCtaRadixSortingEnactor( MaxGridSize(props, max_grid_size), B40C_RADIXSORT_SG_TILE_ELEMENTS(props.kernel_ptx_version , ConvertedKeyType, V), RADIX_BITS, props), d_sync(NULL) { // Allocate and initialize synchronization counters cudaMalloc((void**) &d_sync, sizeof(int) * this->max_grid_size); InitSync<void><<<this->max_grid_size, 32, 0>>>(d_sync); } /** * Destructor */ virtual ~SingleGridRadixSortingEnactor() { if (d_sync) cudaFree(d_sync); } /** * Enacts a radix sorting operation on the specified device data. 
* * @return cudaSuccess on success, error enumeration otherwise */ template <int LOWER_KEY_BITS> cudaError_t EnactSort(MultiCtaRadixSortStorage<K, V> &problem_storage) { const int PASSES = (LOWER_KEY_BITS + RADIX_BITS - 1) / RADIX_BITS; // Compute work distribution CtaDecomposition work_decomposition; int grid_size = GridSize(problem_storage.num_elements); GetWorkDecomposition(problem_storage.num_elements, grid_size, work_decomposition); // Compute number of spine elements to scan during this pass int spine_elements = grid_size * (1 << RADIX_BITS); int spine_tiles = (spine_elements + B40C_RADIXSORT_SPINE_TILE_ELEMENTS - 1) / B40C_RADIXSORT_SPINE_TILE_ELEMENTS; spine_elements = spine_tiles * B40C_RADIXSORT_SPINE_TILE_ELEMENTS; // Perform any lazy allocation PreSort(problem_storage, PASSES); if (RADIXSORT_DEBUG) { printf("\ndevice_sm_version: %d, kernel_ptx_version: %d\n", this->cuda_props.device_sm_version, this->cuda_props.kernel_ptx_version); printf("%d-bit bottom-level reduction & scan kernels:\n\tgrid_size: %d, \n\tthreads: %d, \n\ttile_elements: %d, \n\tnum_big_blocks: %d, \n\tbig_block_elements: %d, \n\tnormal_block_elements: %d\n\textra_elements_last_block: %d\n\n", LOWER_KEY_BITS, grid_size, B40C_RADIXSORT_THREADS, this->tile_elements, work_decomposition.num_big_blocks, work_decomposition.big_block_elements, work_decomposition.normal_block_elements, work_decomposition.extra_elements_last_block); printf("Top-level spine scan:\n\tgrid_size: %d, \n\tthreads: %d, \n\tspine_block_elements: %d\n\n", grid_size, B40C_RADIXSORT_SPINE_THREADS, spine_elements); } // Invoke kernel SingleGridKernelInvoker<(PASSES + 8 - 1) / 8, K, V, RADIX_BITS, PASSES>::Invoke( grid_size, this->d_sync, this->d_spine, problem_storage, work_decomposition, spine_elements); // Perform any post-mortem PostSort(problem_storage, PASSES); return cudaSuccess; } /** * Enacts a radix sorting operation on the specified device data. * * @return cudaSuccess on success, error enumeration otherwise */ cudaError_t EnactSort(MultiCtaRadixSortStorage<K, V> &problem_storage) { return EnactSort<sizeof(K) * 8>(problem_storage); // mooch } }; /** * Template specialization for one invocation of the sorting kernel (which * performs up to 8 passes). */ template<typename K, typename V, int RADIX_BITS, int PASSES> struct SingleGridKernelInvoker <1, K, V, RADIX_BITS, PASSES> { typedef typename KeyConversion<K>::UnsignedBits ConvertedKeyType; static void Invoke( int grid_size, int *d_sync, int *d_spine, MultiCtaRadixSortStorage<K, V> &problem_storage, CtaDecomposition &work_decomposition, int spine_elements) { LsbSingleGridSortingKernel<ConvertedKeyType, V, RADIX_BITS, PASSES, 0, PreprocessKeyFunctor<K>, PostprocessKeyFunctor<K> ><<<grid_size, B40C_RADIXSORT_THREADS, 0>>>( d_sync, d_spine, (ConvertedKeyType *) problem_storage.d_keys[0], (ConvertedKeyType *) problem_storage.d_keys[1], problem_storage.d_values[0], problem_storage.d_values[1], work_decomposition, spine_elements); synchronize_if_enabled("ScanScatterDigits"); } }; /** * Template specialization for two invocations of the sorting kernel (which * performs up to 8 passes). 
*/ template<typename K, typename V, int RADIX_BITS, int PASSES> struct SingleGridKernelInvoker <2, K, V, RADIX_BITS, PASSES> { typedef typename KeyConversion<K>::UnsignedBits ConvertedKeyType; static void Invoke( int grid_size, int *d_sync, int *d_spine, MultiCtaRadixSortStorage<K, V> &problem_storage, CtaDecomposition &work_decomposition, int spine_elements) { LsbSingleGridSortingKernel<ConvertedKeyType, V, RADIX_BITS, PASSES, 0, PreprocessKeyFunctor<K>, PostprocessKeyFunctor<K> ><<<grid_size, B40C_RADIXSORT_THREADS, 0>>>( d_sync, d_spine, (ConvertedKeyType *) problem_storage.d_keys[0], (ConvertedKeyType *) problem_storage.d_keys[1], problem_storage.d_values[0], problem_storage.d_values[1], work_decomposition, spine_elements); synchronize_if_enabled("ScanScatterDigits"); LsbSingleGridSortingKernel<ConvertedKeyType, V, RADIX_BITS, PASSES, 8, PreprocessKeyFunctor<K>, PostprocessKeyFunctor<K> ><<<grid_size, B40C_RADIXSORT_THREADS, 0>>>( d_sync, d_spine, (ConvertedKeyType *) problem_storage.d_keys[0], (ConvertedKeyType *) problem_storage.d_keys[1], problem_storage.d_values[0], problem_storage.d_values[1], work_decomposition, spine_elements); synchronize_if_enabled("ScanScatterDigits"); } }; }// namespace b40c
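// --- Illustrative sketch (not part of the b40c source above) ----------------
// The single-grid enactor derives its pass count as
// PASSES = (LOWER_KEY_BITS + RADIX_BITS - 1) / RADIX_BITS, i.e. one stable
// 4-bit counting-sort pass per digit place. The plain CPU LSD radix sort below
// shows what those passes do, including the key-buffer ping-pong; it is only a
// reference, not the b40c kernel.
#include <cstdint>
#include <vector>

constexpr int kRadixBits = 4;
constexpr int kBuckets = 1 << kRadixBits;

inline int num_passes(int lower_key_bits) {
  return (lower_key_bits + kRadixBits - 1) / kRadixBits;  // e.g. 17 bits -> 5 passes
}

inline void lsd_radix_sort(std::vector<uint32_t> &keys, int lower_key_bits) {
  std::vector<uint32_t> buffer(keys.size());
  for (int pass = 0; pass < num_passes(lower_key_bits); ++pass) {
    const int shift = pass * kRadixBits;
    int count[kBuckets] = {0};
    for (uint32_t k : keys) ++count[(k >> shift) & (kBuckets - 1)];
    int offset[kBuckets];
    int running = 0;
    for (int b = 0; b < kBuckets; ++b) { offset[b] = running; running += count[b]; }
    // Stable scatter into the other buffer, then swap (cf. d_keys[0]/d_keys[1]).
    for (uint32_t k : keys) buffer[offset[(k >> shift) & (kBuckets - 1)]++] = k;
    keys.swap(buffer);
  }
}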
#define TPB52 256 #define TPB50 128 __constant__ const uint2 keccak_round_constants[24] = { { 0x00000001, 0x00000000 }, { 0x00008082, 0x00000000 }, { 0x0000808a, 0x80000000 }, { 0x80008000, 0x80000000 }, { 0x0000808b, 0x00000000 }, { 0x80000001, 0x00000000 }, { 0x80008081, 0x80000000 }, { 0x00008009, 0x80000000 }, { 0x0000008a, 0x00000000 }, { 0x00000088, 0x00000000 }, { 0x80008009, 0x00000000 }, { 0x8000000a, 0x00000000 }, { 0x8000808b, 0x00000000 }, { 0x0000008b, 0x80000000 }, { 0x00008089, 0x80000000 }, { 0x00008003, 0x80000000 }, { 0x00008002, 0x80000000 }, { 0x00000080, 0x80000000 }, { 0x0000800a, 0x00000000 }, { 0x8000000a, 0x80000000 }, { 0x80008081, 0x80000000 }, { 0x00008080, 0x80000000 }, { 0x80000001, 0x00000000 }, { 0x80008008, 0x80000000 } }; __device__ static void macro1(uint2 *const __restrict__ p){ p[0] += p[1];p[2] += p[3];p[4] += p[5];p[6] += p[7];p[1] = ROL2(p[1],46) ^ p[0];p[3] = ROL2(p[3],36) ^ p[2];p[5] = ROL2(p[5],19) ^ p[4];p[7] = ROL2(p[7], 37) ^ p[6]; p[2] += p[1];p[4] += p[7];p[6] += p[5];p[0] += p[3];p[1] = ROL2(p[1],33) ^ p[2];p[7] = ROL2(p[7],27) ^ p[4];p[5] = ROL2(p[5],14) ^ p[6];p[3] = ROL2(p[3], 42) ^ p[0]; p[4] += p[1];p[6] += p[3];p[0] += p[5];p[2] += p[7];p[1] = ROL2(p[1],17) ^ p[4];p[3] = ROL2(p[3],49) ^ p[6];p[5] = ROL2(p[5],36) ^ p[0];p[7] = ROL2(p[7], 39) ^ p[2]; p[6] += p[1];p[0] += p[7];p[2] += p[5];p[4] += p[3];p[1] = ROL2(p[1],44) ^ p[6];p[7] = ROL2(p[7], 9) ^ p[0];p[5] = ROL2(p[5],54) ^ p[2];p[3] = ROR8(p[3]) ^ p[4]; } __device__ static void macro2(uint2 *const __restrict__ p){ p[0] += p[1];p[2] += p[3];p[4] += p[5];p[6] += p[7];p[1] = ROL2(p[1], 39) ^ p[0];p[3] = ROL2(p[3], 30) ^ p[2];p[5] = ROL2(p[5], 34) ^ p[4];p[7] = ROL24(p[7]) ^ p[6]; p[2] += p[1];p[4] += p[7];p[6] += p[5];p[0] += p[3];p[1] = ROL2(p[1], 13) ^ p[2];p[7] = ROL2(p[7], 50) ^ p[4];p[5] = ROL2(p[5], 10) ^ p[6];p[3] = ROL2(p[3], 17) ^ p[0]; p[4] += p[1];p[6] += p[3];p[0] += p[5];p[2] += p[7];p[1] = ROL2(p[1], 25) ^ p[4];p[3] = ROL2(p[3], 29) ^ p[6];p[5] = ROL2(p[5], 39) ^ p[0];p[7] = ROL2(p[7], 43) ^ p[2]; p[6] += p[1];p[0] += p[7];p[2] += p[5];p[4] += p[3];p[1] = ROL8(p[1]) ^ p[6];p[7] = ROL2(p[7], 35) ^ p[0];p[5] = ROR8(p[5]) ^ p[2];p[3] = ROL2(p[3], 22) ^ p[4]; } __constant__ const uint2 buffer[152] = { {0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC33,0xAE18A40B}, {0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC73,0x9E18A40B},{0x98173EC5,0xCAB2076D}, {0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC73,0x9E18A40B},{0x98173F04,0xCAB2076D},{0x749C51D0,0x4903ADFF}, {0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173F04,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF06,0x0D95DE39}, {0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79BD2,0x8FD19341}, {0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CB6,0x9A255629}, 
{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7B6,0x5DB62599}, {0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C3FB,0xEABE394C}, {0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B52B,0x991112C7}, {0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC3C,0xAE18A40B}, {0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC73,0x9E18A40B},{0x98173ece,0xcab2076d}, {0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC73,0x9E18A40B},{0x98173F04,0xCAB2076D},{0x749C51D9,0x4903ADFF}, {0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173F04,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF0F,0x0D95DE39}, {0xDF6CA7B0,0x5DB62599},{0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x3903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79BDB,0x8FD19341}, {0xA9D5C3F4,0xEABE394C},{0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF43,0xFD95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CBF,0x9A255629}, {0x1A75B523,0x991112C7},{0x660FCC33,0xAE18A40B},{0x98173EC4,0xCAB2076D},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79C0E,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7BF,0x5DB62599}, {0x660FCC33,0xAE18A40B},{0x98173ec4,0xcab2076d},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x8A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C404,0xEABE394C}, {0x98173ec4,0xcab2076d},{0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7F0,0x4DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B534,0x991112C7}, {0x749C51CE,0x4903ADFF},{0x9746DF03,0x0D95DE39},{0x27C79BCE,0x8FD19341},{0xFF352CB1,0x9A255629},{0xDF6CA7B0,0x5DB62599},{0xA9D5C434,0xEABE394C},{0x1A75B523,0x891112C7},{0x660FCC45,0xAE18A40B} }; __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(TPB52,3) #else __launch_bounds__(TPB50,7) #endif void quark_keccakskein512_gpu_hash_64(uint32_t threads, uint2 *g_hash,const uint32_t * g_nonceVector) { uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint2 t[5], u[5], v, w; uint2 s[25]; if (thread < threads) { const uint32_t hashPosition = (g_nonceVector == NULL) ? 
thread : g_nonceVector[thread]; uint2x4* phash = (uint2x4 *)&g_hash[hashPosition * 8]; *(uint2x4*)&s[ 0] = __ldg4(&phash[ 0]); *(uint2x4*)&s[ 4] = __ldg4(&phash[ 1]); s[8] = make_uint2(1,0x80000000); /*theta*/ t[ 0] = s[ 0]^s[ 5]; t[ 1] = s[ 1]^s[ 6]; t[ 2] = s[ 2]^s[ 7]; t[ 3] = s[ 3]^s[ 8]; t[ 4] = s[4]; /*theta*/ #pragma unroll 5 for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[ 0]); s[24] = s[19] = s[14] = s[ 9] = t[ 3] ^ u[ 0]; s[ 0] = xor3x(s[ 0], t[4], u[ 1]); s[ 5] = xor3x(s[ 5], t[4], u[ 1]); s[20] = s[15] = s[10] = t[4] ^ u[ 1]; s[ 1] = xor3x(s[ 1], t[0], u[ 2]); s[ 6] = xor3x(s[ 6], t[0], u[ 2]); s[21] = s[16] = s[11] = t[0] ^ u[ 2]; s[ 2] = xor3x(s[ 2], t[1], u[ 3]); s[ 7] = xor3x(s[ 7], t[1], u[ 3]); s[22] = s[17] = s[12] = t[1] ^ u[ 3]; s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]); s[23] = s[18] = s[13] = t[2] ^ u[ 4]; /* rho pi: b[..] = rotl(a[..], ..) */ v = s[1]; s[1] = ROL2(s[6], 44); s[6] = ROL2(s[9], 20); s[9] = ROL2(s[22], 61); s[22] = ROL2(s[14], 39); s[14] = ROL2(s[20], 18); s[20] = ROL2(s[2], 62); s[2] = ROL2(s[12], 43); s[12] = ROL2(s[13], 25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15], 41); s[15] = ROL2(s[4], 27); s[4] = ROL2(s[24], 14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[8], 55); s[8] = ROL2(s[16], 45); s[16] = ROL2(s[5], 36); s[5] = ROL2(s[3], 28); s[3] = ROL2(s[18], 21); s[18] = ROL2(s[17], 15); s[17] = ROL2(s[11], 10); s[11] = ROL2(s[7], 6); s[7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll 5 for(int j=0;j<25;j+=5){ v=s[j];w=s[j + 1];s[j] = chi(v,w,s[j+2]);s[j+1] = chi(w,s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[ 0]; for (int i = 1; i < 23; i++) { /*theta*/ #pragma unroll for(int j=0;j<5;j++){ t[ j] = vectorize(xor5(devectorize(s[ j]),devectorize(s[j+5]),devectorize(s[j+10]),devectorize(s[j+15]),devectorize(s[j+20]))); } /*theta*/ #pragma unroll for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[ 0]);s[ 9] = xor3x(s[ 9], t[3], u[ 0]);s[14] = xor3x(s[14], t[3], u[ 0]);s[19] = xor3x(s[19], t[3], u[ 0]);s[24] = xor3x(s[24], t[3], u[ 0]); s[ 0] = xor3x(s[ 0], t[4], u[ 1]);s[ 5] = xor3x(s[ 5], t[4], u[ 1]);s[10] = xor3x(s[10], t[4], u[ 1]);s[15] = xor3x(s[15], t[4], u[ 1]);s[20] = xor3x(s[20], t[4], u[ 1]); s[ 1] = xor3x(s[ 1], t[0], u[ 2]);s[ 6] = xor3x(s[ 6], t[0], u[ 2]);s[11] = xor3x(s[11], t[0], u[ 2]);s[16] = xor3x(s[16], t[0], u[ 2]);s[21] = xor3x(s[21], t[0], u[ 2]); s[ 2] = xor3x(s[ 2], t[1], u[ 3]);s[ 7] = xor3x(s[ 7], t[1], u[ 3]);s[12] = xor3x(s[12], t[1], u[ 3]);s[17] = xor3x(s[17], t[1], u[ 3]);s[22] = xor3x(s[22], t[1], u[ 3]); s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]);s[13] = xor3x(s[13], t[2], u[ 4]);s[18] = xor3x(s[18], t[2], u[ 4]);s[23] = xor3x(s[23], t[2], u[ 4]); /* rho pi: b[..] = rotl(a[..], ..) 
*/ v = s[1]; s[1] = ROL2(s[6], 44); s[6] = ROL2(s[9], 20); s[9] = ROL2(s[22], 61); s[22] = ROL2(s[14], 39); s[14] = ROL2(s[20], 18); s[20] = ROL2(s[2], 62); s[2] = ROL2(s[12], 43); s[12] = ROL2(s[13], 25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15], 41); s[15] = ROL2(s[4], 27); s[4] = ROL2(s[24], 14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[8], 55); s[8] = ROL2(s[16], 45); s[16] = ROL2(s[5], 36); s[5] = ROL2(s[3], 28); s[3] = ROL2(s[18], 21); s[18] = ROL2(s[17], 15); s[17] = ROL2(s[11], 10); s[11] = ROL2(s[7], 6); s[7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll for(int j=0;j<25;j+=5){ v=s[j];w=s[j + 1];s[j] = chi(v,w,s[j+2]);s[j+1] = chi(w,s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[i]; } /*theta*/ #pragma unroll for(int j=0;j<5;j++){ t[ j] = xor3x(xor3x(s[j+0],s[j+5],s[j+10]),s[j+15],s[j+20]); } /*theta*/ #pragma unroll for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 9] = xor3x(s[ 9], t[3], u[ 0]); s[24] = xor3x(s[24], t[3], u[ 0]); s[ 0] = xor3x(s[ 0], t[4], u[ 1]); s[10] = xor3x(s[10], t[4], u[ 1]); s[ 6] = xor3x(s[ 6], t[0], u[ 2]); s[16] = xor3x(s[16], t[0], u[ 2]); s[12] = xor3x(s[12], t[1], u[ 3]); s[22] = xor3x(s[22], t[1], u[ 3]); s[ 3] = xor3x(s[ 3], t[2], u[ 4]); s[18] = xor3x(s[18], t[2], u[ 4]); /* rho pi: b[..] = rotl(a[..], ..) */ s[ 1] = ROL2(s[ 6], 44); s[ 2] = ROL2(s[12], 43); s[ 5] = ROL2(s[ 3], 28); s[ 7] = ROL2(s[10], 3); s[ 3] = ROL2(s[18], 21); s[ 4] = ROL2(s[24], 14); s[ 6] = ROL2(s[ 9], 20); s[ 8] = ROL2(s[16], 45); s[ 9] = ROL2(s[22], 61); uint2 p[8],h[9]; uint32_t t0; uint2 t1,t2; t0 = 8; t1 = vectorize(0xFF00000000000000); t2 = t1+t0; /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ p[ 0] = chi(s[ 0],s[ 1],s[ 2]); p[ 1] = chi(s[ 1],s[ 2],s[ 3]); p[ 2] = chi(s[ 2],s[ 3],s[ 4]); p[ 3] = chi(s[ 3],s[ 4],s[ 0]); p[ 4] = chi(s[ 4],s[ 0],s[ 1]); p[ 5] = chi(s[ 5],s[ 6],s[ 7]); p[ 6] = chi(s[ 6],s[ 7],s[ 8]); p[ 7] = chi(s[ 7],s[ 8],s[ 9]); /* iota: a[0,0] ^= round constant */ p[0] ^= keccak_round_constants[23]; h[ 0] = p[0]; h[ 1] = p[1]; h[ 2] = p[2]; h[ 3] = p[3]; h[ 4] = p[4]; h[ 5] = p[5]; h[ 6] = p[6]; h[ 7] = p[7]; p[0] += buffer[0]; p[1] += buffer[1]; p[2] += buffer[2]; p[3] += buffer[3]; p[4] += buffer[4]; p[5] += buffer[5]; p[6] += buffer[6]; p[7] += buffer[7]; macro1(p); p[0] += buffer[8]; p[1] += buffer[9]; p[2] += buffer[10]; p[3] += buffer[11]; p[4] += buffer[12]; p[5] += buffer[13]; p[6] += buffer[14]; p[7] += buffer[15]; macro2(p); p[0] += buffer[16]; p[1] += buffer[17]; p[2] += buffer[18]; p[3] += buffer[19]; p[4] += buffer[20]; p[5] += buffer[21]; p[6] += buffer[22]; p[7] += buffer[23]; macro1(p); p[0] += buffer[24]; p[1] += buffer[25]; p[2] += buffer[26]; p[3] += buffer[27]; p[4] += buffer[28]; p[5] += buffer[29]; p[6] += buffer[30]; p[7] += buffer[31]; macro2(p); p[0] += buffer[32]; p[1] += buffer[33]; p[2] += buffer[34]; p[3] += buffer[35]; p[4] += buffer[36]; p[5] += buffer[37]; p[6] += buffer[38]; p[7] += buffer[39]; macro1(p); p[0] += buffer[40]; p[1] += buffer[41]; p[2] += buffer[42]; p[3] += buffer[43]; p[4] += buffer[44]; p[5] += buffer[45]; p[6] += buffer[46]; p[7] += buffer[47]; macro2(p); p[0] += buffer[48]; p[1] += buffer[49]; p[2] += buffer[50]; p[3] += buffer[51]; p[4] += buffer[52]; p[5] += buffer[53]; p[6] += buffer[54]; p[7] += buffer[55]; macro1(p); p[0] += buffer[56]; p[1] += buffer[57]; p[2] += buffer[58]; p[3] += buffer[59]; p[4] += 
buffer[60]; p[5] += buffer[61]; p[6] += buffer[62]; p[7] += buffer[63]; macro2(p); p[0] += buffer[64]; p[1] += buffer[65]; p[2] += buffer[66]; p[3] += buffer[67]; p[4] += buffer[68]; p[5] += buffer[69]; p[6] += buffer[70]; p[7] += buffer[71]; macro1(p); p[0] += buffer[72]; p[1] += buffer[73]; p[2] += buffer[74]; p[3] += buffer[75]; p[4] += buffer[76]; p[5] += buffer[77]; p[6] += buffer[78]; p[7] += buffer[79]; macro2(p); p[0] += buffer[80]; p[1] += buffer[81]; p[2] += buffer[82]; p[3] += buffer[83]; p[4] += buffer[84]; p[5] += buffer[85]; p[6] += buffer[86]; p[7] += buffer[87]; macro1(p); p[0] += buffer[88]; p[1] += buffer[89]; p[2] += buffer[90]; p[3] += buffer[91]; p[4] += buffer[92]; p[5] += buffer[93]; p[6] += buffer[94]; p[7] += buffer[95]; macro2(p); p[0] += buffer[96]; p[1] += buffer[97]; p[2] += buffer[98]; p[3] += buffer[99]; p[4] += buffer[100]; p[5] += buffer[101]; p[6] += buffer[102]; p[7] += buffer[103]; macro1(p); p[0] += buffer[104]; p[1] += buffer[105]; p[2] += buffer[106]; p[3] += buffer[107]; p[4] += buffer[108]; p[5] += buffer[109]; p[6] += buffer[110]; p[7] += buffer[111]; macro2(p); p[0] += buffer[112]; p[1] += buffer[113]; p[2] += buffer[114]; p[3] += buffer[115]; p[4] += buffer[116]; p[5] += buffer[117]; p[6] += buffer[118]; p[7] += buffer[119]; macro1(p); p[0] += buffer[120]; p[1] += buffer[121]; p[2] += buffer[122]; p[3] += buffer[123]; p[4] += buffer[124]; p[5] += buffer[125]; p[6] += buffer[126]; p[7] += buffer[127]; macro2(p); p[0] += buffer[128]; p[1] += buffer[129]; p[2] += buffer[130]; p[3] += buffer[131]; p[4] += buffer[132]; p[5] += buffer[133]; p[6] += buffer[134]; p[7] += buffer[135]; macro1(p); p[0] += buffer[136]; p[1] += buffer[137]; p[2] += buffer[138]; p[3] += buffer[139]; p[4] += buffer[140]; p[5] += buffer[141]; p[6] += buffer[142]; p[7] += buffer[143]; macro2(p); p[0] += buffer[144]; p[1] += buffer[145]; p[2] += buffer[146]; p[3] += buffer[147]; p[4] += buffer[148]; p[5] += buffer[149]; p[6] += buffer[150]; p[7] += buffer[151]; #define h0 p[0] #define h1 p[1] #define h2 p[2] #define h3 p[3] #define h4 p[4] #define h5 p[5] #define h6 p[6] #define h7 p[7] h0 ^= h[0]; h1 ^= h[1]; h2 ^= h[2]; h3 ^= h[3]; h4 ^= h[4]; h5 ^= h[5]; h6 ^= h[6]; h7 ^= h[7]; uint2 skein_h8 = h0 ^ h1 ^ h2 ^ h3 ^ h4 ^ h5 ^ h6 ^ h7 ^ vectorize(0x1BD11BDAA9FC1A22); uint2 hash64[8]; hash64[5] = h5 + 8U; hash64[0] = h0 + h1; hash64[1] = ROL2(h1, 46) ^ hash64[0]; hash64[2] = h2 + h3; hash64[3] = ROL2(h3, 36) ^ hash64[2]; hash64[4] = h4 + hash64[5]; hash64[5] = ROL2(hash64[5], 19) ^ hash64[4]; hash64[6] = h6 + h7 + t1; hash64[7] = ROL2(h7, 37) ^ hash64[6]; hash64[2]+= hash64[1]; hash64[1] = ROL2(hash64[1], 33) ^ hash64[2]; hash64[4]+= hash64[7]; hash64[7] = ROL2(hash64[7], 27) ^ hash64[4]; hash64[6]+= hash64[5]; hash64[5] = ROL2(hash64[5], 14) ^ hash64[6]; hash64[0]+= hash64[3]; hash64[3] = ROL2(hash64[3], 42) ^ hash64[0]; hash64[4]+= hash64[1]; hash64[1] = ROL2(hash64[1], 17) ^ hash64[4]; hash64[6]+= hash64[3]; hash64[3] = ROL2(hash64[3], 49) ^ hash64[6]; hash64[0]+= hash64[5]; hash64[5] = ROL2(hash64[5], 36) ^ hash64[0]; hash64[2]+= hash64[7]; hash64[7] = ROL2(hash64[7], 39) ^ hash64[2]; hash64[6]+= hash64[1]; hash64[1] = ROL2(hash64[1], 44) ^ hash64[6]; hash64[0]+= hash64[7]; hash64[7] = ROL2(hash64[7], 9) ^ hash64[0]; hash64[2]+= hash64[5]; hash64[5] = ROL2(hash64[5], 54) ^ hash64[2]; hash64[4]+= hash64[3]; hash64[3] = ROR8(hash64[3]) ^ hash64[4]; hash64[0]+= h1; hash64[1]+= h2; hash64[2]+= h3; hash64[3]+= h4; hash64[4]+= h5; hash64[5]+= h6 + t1; hash64[6]+= h7 + t2; hash64[7]+= 
skein_h8 + 1U; macro2(hash64); hash64[0]+= h2; hash64[1]+= h3; hash64[2]+= h4; hash64[3]+= h5; hash64[4]+= h6; hash64[5]+= h7 + t2; hash64[6]+= skein_h8+t0;hash64[7]+= h0 + 2U; macro1(hash64); hash64[0]+= h3; hash64[1]+= h4; hash64[2]+= h5; hash64[3]+= h6; hash64[4]+= h7; hash64[5]+= skein_h8 + t0; hash64[6]+= h0 + t1; hash64[7]+= h1 + 3U; macro2(hash64); hash64[0]+= h4; hash64[1]+= h5; hash64[2]+= h6; hash64[3]+= h7; hash64[4]+= skein_h8; hash64[5]+= h0 + t1; hash64[6]+= h1 + t2; hash64[7]+= h2 + 4U; macro1(hash64); hash64[0]+= h5; hash64[1]+= h6; hash64[2]+= h7; hash64[3]+= skein_h8; hash64[4]+= h0; hash64[5]+= h1 + t2; hash64[6]+= h2 + t0; hash64[7]+= h3 + 5U; macro2(hash64); hash64[0]+= h6; hash64[1]+= h7; hash64[2]+= skein_h8; hash64[3]+= h0; hash64[4]+= h1; hash64[5]+= h2 + t0; hash64[6]+= h3 + t1; hash64[7]+= h4 + 6U; macro1(hash64); hash64[0]+= h7; hash64[1]+= skein_h8; hash64[2]+= h0; hash64[3]+= h1; hash64[4]+= h2; hash64[5]+= h3 + t1; hash64[6]+= h4 + t2; hash64[7]+= h5 + 7U; macro2(hash64); hash64[0]+= skein_h8; hash64[1]+= h0; hash64[2]+= h1; hash64[3]+= h2; hash64[4]+= h3; hash64[5]+= h4 + t2; hash64[6]+= h5 + t0; hash64[7]+= h6 + 8U; macro1(hash64); hash64[0]+= h0; hash64[1]+= h1; hash64[2]+= h2; hash64[3]+= h3; hash64[4]+= h4; hash64[5]+= h5 + t0; hash64[6]+= h6 + t1; hash64[7]+= h7 + 9U; macro2(hash64); hash64[0]+= h1; hash64[1]+= h2; hash64[2]+= h3; hash64[3]+= h4; hash64[4]+= h5; hash64[5]+= h6 + t1; hash64[6]+= h7 + t2; hash64[7]+= skein_h8 + 10U; macro1(hash64); hash64[0]+= h2; hash64[1]+= h3; hash64[2]+= h4; hash64[3]+= h5; hash64[4]+= h6; hash64[5]+= h7 + t2; hash64[6]+= skein_h8+t0;hash64[7]+= h0 + 11U; macro2(hash64); hash64[0]+= h3; hash64[1]+= h4; hash64[2]+= h5; hash64[3]+= h6; hash64[4]+= h7; hash64[5]+= skein_h8 + t0; hash64[6]+= h0 + t1; hash64[7]+= h1 + 12U; macro1(hash64); hash64[0]+= h4; hash64[1]+= h5; hash64[2]+= h6; hash64[3]+= h7; hash64[4]+= skein_h8; hash64[5]+= h0 + t1; hash64[6]+= h1 + t2; hash64[7]+= h2 + 13U; macro2(hash64); hash64[0]+= h5; hash64[1]+= h6; hash64[2]+= h7; hash64[3]+= skein_h8; hash64[4]+= h0; hash64[5]+= h1 + t2; hash64[6]+= h2 + t0; hash64[7]+= h3 + 14U; macro1(hash64); hash64[0]+= h6; hash64[1]+= h7; hash64[2]+= skein_h8; hash64[3]+= h0; hash64[4]+= h1; hash64[5]+= h2 + t0; hash64[6]+= h3 + t1; hash64[7]+= h4 + 15U; macro2(hash64); hash64[0]+= h7; hash64[1]+= skein_h8; hash64[2]+= h0; hash64[3]+= h1; hash64[4]+= h2; hash64[5]+= h3 + t1; hash64[6]+= h4 + t2; hash64[7]+= h5 + 16U; macro1(hash64); hash64[0]+= skein_h8; hash64[1]+= h0; hash64[2]+= h1; hash64[3]+= h2; hash64[4]+= h3; hash64[5]+= h4 + t2; hash64[6]+= h5 + t0; hash64[7]+= h6 + 17U; macro2(hash64); hash64[0]+= h0; hash64[1]+= h1; hash64[2]+= h2; hash64[3]+= h3; hash64[4]+= h4; hash64[5]+= h5 + t0; hash64[6]+= h6 + t1; hash64[7]+= h7 + 18U; phash[0] = *(uint2x4*)&hash64[0]; phash[1] = *(uint2x4*)&hash64[4]; #undef h0 #undef h1 #undef h2 #undef h3 #undef h4 #undef h5 #undef h6 #undef h7 } } __host__ void quark_keccak_skein512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t *d_nonceVector, uint32_t *d_hash) { // berechne wie viele Thread Blocks wir brauchen const uint32_t dev_id = device_map[thr_id]; const uint32_t tpb = (device_sm[dev_id] > 500) ? 
TPB52 : TPB50; dim3 grid((threads + tpb - 1) / tpb); dim3 block(tpb); quark_keccakskein512_gpu_hash_64 << <grid, block >> >(threads, (uint2 *)d_hash, d_nonceVector); } __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(TPB52,3) #else __launch_bounds__(TPB50,7) #endif void quark_keccakskein512_gpu_hash_64_final(uint32_t threads, uint2 *g_hash,const uint32_t * g_nonceVector, uint32_t *resNonce,const uint64_t highTarget) { uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint2 t[5], u[5], v, w; uint2 s[25]; if (thread < threads) { const uint32_t hashPosition = (g_nonceVector == NULL) ? thread : g_nonceVector[thread]; uint2x4* phash = (uint2x4 *)&g_hash[hashPosition * 8]; *(uint2x4*)&s[ 0] = __ldg4(&phash[ 0]); *(uint2x4*)&s[ 4] = __ldg4(&phash[ 1]); s[8] = make_uint2(1,0x80000000); /*theta*/ t[ 0] = s[ 0]^s[ 5]; t[ 1] = s[ 1]^s[ 6]; t[ 2] = s[ 2]^s[ 7]; t[ 3] = s[ 3]^s[ 8]; t[ 4] = s[4]; /*theta*/ #pragma unroll 5 for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[ 0]); s[24] = s[19] = s[14] = s[ 9] = t[ 3] ^ u[ 0]; s[ 0] = xor3x(s[ 0], t[4], u[ 1]); s[ 5] = xor3x(s[ 5], t[4], u[ 1]); s[20] = s[15] = s[10] = t[4] ^ u[ 1]; s[ 1] = xor3x(s[ 1], t[0], u[ 2]); s[ 6] = xor3x(s[ 6], t[0], u[ 2]); s[21] = s[16] = s[11] = t[0] ^ u[ 2]; s[ 2] = xor3x(s[ 2], t[1], u[ 3]); s[ 7] = xor3x(s[ 7], t[1], u[ 3]); s[22] = s[17] = s[12] = t[1] ^ u[ 3]; s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]); s[23] = s[18] = s[13] = t[2] ^ u[ 4]; /* rho pi: b[..] = rotl(a[..], ..) */ v = s[1]; s[1] = ROL2(s[6], 44); s[6] = ROL2(s[9], 20); s[9] = ROL2(s[22], 61); s[22] = ROL2(s[14], 39); s[14] = ROL2(s[20], 18); s[20] = ROL2(s[2], 62); s[2] = ROL2(s[12], 43); s[12] = ROL2(s[13], 25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15], 41); s[15] = ROL2(s[4], 27); s[4] = ROL2(s[24], 14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[8], 55); s[8] = ROL2(s[16], 45); s[16] = ROL2(s[5], 36); s[5] = ROL2(s[3], 28); s[3] = ROL2(s[18], 21); s[18] = ROL2(s[17], 15); s[17] = ROL2(s[11], 10); s[11] = ROL2(s[7], 6); s[7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll 5 for(int j=0;j<25;j+=5){ v=s[j];w=s[j + 1];s[j] = chi(v,w,s[j+2]);s[j+1] = chi(w,s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[ 0]; for (int i = 1; i < 23; i++) { /*theta*/ #pragma unroll for(int j=0;j<5;j++){ t[ j] = vectorize(xor5(devectorize(s[ j]),devectorize(s[j+5]),devectorize(s[j+10]),devectorize(s[j+15]),devectorize(s[j+20]))); // t[ j] = s[ j] ^ s[j+5] ^ s[j+10] ^ s[j+15] ^ s[j+20]; } /*theta*/ #pragma unroll for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[ 0]);s[ 9] = xor3x(s[ 9], t[3], u[ 0]);s[14] = xor3x(s[14], t[3], u[ 0]);s[19] = xor3x(s[19], t[3], u[ 0]);s[24] = xor3x(s[24], t[3], u[ 0]); s[ 0] = xor3x(s[ 0], t[4], u[ 1]);s[ 5] = xor3x(s[ 5], t[4], u[ 1]);s[10] = xor3x(s[10], t[4], u[ 1]);s[15] = xor3x(s[15], t[4], u[ 1]);s[20] = xor3x(s[20], t[4], u[ 1]); s[ 1] = xor3x(s[ 1], t[0], u[ 2]);s[ 6] = xor3x(s[ 6], t[0], u[ 2]);s[11] = xor3x(s[11], t[0], u[ 2]);s[16] = xor3x(s[16], t[0], u[ 2]);s[21] = xor3x(s[21], t[0], u[ 2]); s[ 2] = xor3x(s[ 2], t[1], u[ 3]);s[ 7] = xor3x(s[ 7], t[1], u[ 3]);s[12] = xor3x(s[12], t[1], u[ 3]);s[17] = xor3x(s[17], t[1], u[ 3]);s[22] = xor3x(s[22], t[1], u[ 3]); s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]);s[13] = xor3x(s[13], t[2], u[ 
4]);s[18] = xor3x(s[18], t[2], u[ 4]);s[23] = xor3x(s[23], t[2], u[ 4]); /* s[ 4] = s[ 4]^t[3]^u[ 0];s[ 9] = s[ 9]^t[3]^u[ 0];s[14] = s[14]^t[3]^u[ 0];s[19] = s[19]^t[3]^u[ 0];s[24] = s[24]^t[3]^u[ 0]; s[ 0] = s[ 0]^t[4]^u[ 1];s[ 5] = s[ 5]^t[4]^u[ 1];s[10] = s[10]^t[4]^u[ 1];s[15] = s[15]^t[4]^u[ 1];s[20] = s[20]^t[4]^u[ 1]; s[ 1] = s[ 1]^t[0]^u[ 2];s[ 6] = s[ 6]^t[0]^u[ 2];s[11] = s[11]^t[0]^u[ 2];s[16] = s[16]^t[0]^u[ 2];s[21] = s[21]^t[0]^u[ 2]; s[ 2] = s[ 2]^t[1]^u[ 3];s[ 7] = s[ 7]^t[1]^u[ 3];s[12] = s[12]^t[1]^u[ 3];s[17] = s[17]^t[1]^u[ 3];s[22] = s[22]^t[1]^u[ 3]; s[ 3] = s[ 3]^t[2]^u[ 4];s[ 8] = s[ 8]^t[2]^u[ 4];s[13] = s[13]^t[2]^u[ 4];s[18] = s[18]^t[2]^u[ 4];s[23] = s[23]^t[2]^u[ 4];*/ /* rho pi: b[..] = rotl(a[..], ..) */ v = s[1]; s[1] = ROL2(s[6], 44); s[6] = ROL2(s[9], 20); s[9] = ROL2(s[22], 61); s[22] = ROL2(s[14], 39); s[14] = ROL2(s[20], 18); s[20] = ROL2(s[2], 62); s[2] = ROL2(s[12], 43); s[12] = ROL2(s[13], 25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15], 41); s[15] = ROL2(s[4], 27); s[4] = ROL2(s[24], 14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[8], 55); s[8] = ROL2(s[16], 45); s[16] = ROL2(s[5], 36); s[5] = ROL2(s[3], 28); s[3] = ROL2(s[18], 21); s[18] = ROL2(s[17], 15); s[17] = ROL2(s[11], 10); s[11] = ROL2(s[7], 6); s[7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll for(int j=0;j<25;j+=5){ v=s[j];w=s[j + 1];s[j] = chi(v,w,s[j+2]);s[j+1] = chi(w,s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[i]; } /*theta*/ #pragma unroll for(int j=0;j<5;j++){ t[ j] = xor3x(xor3x(s[j+0],s[j+5],s[j+10]),s[j+15],s[j+20]); } /*theta*/ #pragma unroll for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 9] = xor3x(s[ 9], t[3], u[ 0]); s[24] = xor3x(s[24], t[3], u[ 0]); s[ 0] = xor3x(s[ 0], t[4], u[ 1]); s[10] = xor3x(s[10], t[4], u[ 1]); s[ 6] = xor3x(s[ 6], t[0], u[ 2]); s[16] = xor3x(s[16], t[0], u[ 2]); s[12] = xor3x(s[12], t[1], u[ 3]); s[22] = xor3x(s[22], t[1], u[ 3]); s[ 3] = xor3x(s[ 3], t[2], u[ 4]); s[18] = xor3x(s[18], t[2], u[ 4]); /* rho pi: b[..] = rotl(a[..], ..) 
*/ s[ 1] = ROL2(s[ 6], 44); s[ 2] = ROL2(s[12], 43); s[ 5] = ROL2(s[ 3], 28); s[ 7] = ROL2(s[10], 3); s[ 3] = ROL2(s[18], 21); s[ 4] = ROL2(s[24], 14); s[ 6] = ROL2(s[ 9], 20); s[ 8] = ROL2(s[16], 45); s[ 9] = ROL2(s[22], 61); uint2 p[8],h[9]; uint32_t t0; uint2 t1,t2; t0 = 8; t1 = vectorize(0xFF00000000000000); t2 = t1+t0; /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ p[ 0] = chi(s[ 0],s[ 1],s[ 2]); p[ 1] = chi(s[ 1],s[ 2],s[ 3]); p[ 2] = chi(s[ 2],s[ 3],s[ 4]); p[ 3] = chi(s[ 3],s[ 4],s[ 0]); p[ 4] = chi(s[ 4],s[ 0],s[ 1]); p[ 5] = chi(s[ 5],s[ 6],s[ 7]); p[ 6] = chi(s[ 6],s[ 7],s[ 8]); p[ 7] = chi(s[ 7],s[ 8],s[ 9]); /* iota: a[0,0] ^= round constant */ p[0] ^= keccak_round_constants[23]; h[ 0] = p[0]; h[ 1] = p[1]; h[ 2] = p[2]; h[ 3] = p[3]; h[ 4] = p[4]; h[ 5] = p[5]; h[ 6] = p[6]; h[ 7] = p[7]; p[0] += buffer[0]; p[1] += buffer[1]; p[2] += buffer[2]; p[3] += buffer[3]; p[4] += buffer[4]; p[5] += buffer[5]; p[6] += buffer[6]; p[7] += buffer[7]; macro1(p); p[0] += buffer[8]; p[1] += buffer[9]; p[2] += buffer[10]; p[3] += buffer[11]; p[4] += buffer[12]; p[5] += buffer[13]; p[6] += buffer[14]; p[7] += buffer[15]; macro2(p); p[0] += buffer[16]; p[1] += buffer[17]; p[2] += buffer[18]; p[3] += buffer[19]; p[4] += buffer[20]; p[5] += buffer[21]; p[6] += buffer[22]; p[7] += buffer[23]; macro1(p); p[0] += buffer[24]; p[1] += buffer[25]; p[2] += buffer[26]; p[3] += buffer[27]; p[4] += buffer[28]; p[5] += buffer[29]; p[6] += buffer[30]; p[7] += buffer[31]; macro2(p); p[0] += buffer[32]; p[1] += buffer[33]; p[2] += buffer[34]; p[3] += buffer[35]; p[4] += buffer[36]; p[5] += buffer[37]; p[6] += buffer[38]; p[7] += buffer[39]; macro1(p); p[0] += buffer[40]; p[1] += buffer[41]; p[2] += buffer[42]; p[3] += buffer[43]; p[4] += buffer[44]; p[5] += buffer[45]; p[6] += buffer[46]; p[7] += buffer[47]; macro2(p); p[0] += buffer[48]; p[1] += buffer[49]; p[2] += buffer[50]; p[3] += buffer[51]; p[4] += buffer[52]; p[5] += buffer[53]; p[6] += buffer[54]; p[7] += buffer[55]; macro1(p); p[0] += buffer[56]; p[1] += buffer[57]; p[2] += buffer[58]; p[3] += buffer[59]; p[4] += buffer[60]; p[5] += buffer[61]; p[6] += buffer[62]; p[7] += buffer[63]; macro2(p); p[0] += buffer[64]; p[1] += buffer[65]; p[2] += buffer[66]; p[3] += buffer[67]; p[4] += buffer[68]; p[5] += buffer[69]; p[6] += buffer[70]; p[7] += buffer[71]; macro1(p); p[0] += buffer[72]; p[1] += buffer[73]; p[2] += buffer[74]; p[3] += buffer[75]; p[4] += buffer[76]; p[5] += buffer[77]; p[6] += buffer[78]; p[7] += buffer[79]; macro2(p); p[0] += buffer[80]; p[1] += buffer[81]; p[2] += buffer[82]; p[3] += buffer[83]; p[4] += buffer[84]; p[5] += buffer[85]; p[6] += buffer[86]; p[7] += buffer[87]; macro1(p); p[0] += buffer[88]; p[1] += buffer[89]; p[2] += buffer[90]; p[3] += buffer[91]; p[4] += buffer[92]; p[5] += buffer[93]; p[6] += buffer[94]; p[7] += buffer[95]; macro2(p); p[0] += buffer[96]; p[1] += buffer[97]; p[2] += buffer[98]; p[3] += buffer[99]; p[4] += buffer[100]; p[5] += buffer[101]; p[6] += buffer[102]; p[7] += buffer[103]; macro1(p); p[0] += buffer[104]; p[1] += buffer[105]; p[2] += buffer[106]; p[3] += buffer[107]; p[4] += buffer[108]; p[5] += buffer[109]; p[6] += buffer[110]; p[7] += buffer[111]; macro2(p); p[0] += buffer[112]; p[1] += buffer[113]; p[2] += buffer[114]; p[3] += buffer[115]; p[4] += buffer[116]; p[5] += buffer[117]; p[6] += buffer[118]; p[7] += buffer[119]; macro1(p); p[0] += buffer[120]; p[1] += buffer[121]; p[2] += buffer[122]; p[3] += buffer[123]; p[4] += buffer[124]; p[5] += buffer[125]; p[6] += buffer[126]; p[7] += buffer[127]; 
macro2(p); p[0] += buffer[128]; p[1] += buffer[129]; p[2] += buffer[130]; p[3] += buffer[131]; p[4] += buffer[132]; p[5] += buffer[133]; p[6] += buffer[134]; p[7] += buffer[135]; macro1(p); p[0] += buffer[136]; p[1] += buffer[137]; p[2] += buffer[138]; p[3] += buffer[139]; p[4] += buffer[140]; p[5] += buffer[141]; p[6] += buffer[142]; p[7] += buffer[143]; macro2(p); p[0] += buffer[144]; p[1] += buffer[145]; p[2] += buffer[146]; p[3] += buffer[147]; p[4] += buffer[148]; p[5] += buffer[149]; p[6] += buffer[150]; p[7] += buffer[151]; #define h0 p[0] #define h1 p[1] #define h2 p[2] #define h3 p[3] #define h4 p[4] #define h5 p[5] #define h6 p[6] #define h7 p[7] h0 ^= h[0]; h1 ^= h[1]; h2 ^= h[2]; h3 ^= h[3]; h4 ^= h[4]; h5 ^= h[5]; h6 ^= h[6]; h7 ^= h[7]; uint2 skein_h8 = h0 ^ h1 ^ h2 ^ h3 ^ h4 ^ h5 ^ h6 ^ h7 ^ vectorize(0x1BD11BDAA9FC1A22); uint2 hash64[8]; hash64[5] = h5 + 8U; hash64[0] = h0 + h1; hash64[1] = ROL2(h1, 46) ^ hash64[0]; hash64[2] = h2 + h3; hash64[3] = ROL2(h3, 36) ^ hash64[2]; hash64[4] = h4 + hash64[5]; hash64[5] = ROL2(hash64[5], 19) ^ hash64[4]; hash64[6] = h6 + h7 + t1; hash64[7] = ROL2(h7, 37) ^ hash64[6]; hash64[2]+= hash64[1]; hash64[1] = ROL2(hash64[1], 33) ^ hash64[2]; hash64[4]+= hash64[7]; hash64[7] = ROL2(hash64[7], 27) ^ hash64[4]; hash64[6]+= hash64[5]; hash64[5] = ROL2(hash64[5], 14) ^ hash64[6]; hash64[0]+= hash64[3]; hash64[3] = ROL2(hash64[3], 42) ^ hash64[0]; hash64[4]+= hash64[1]; hash64[1] = ROL2(hash64[1], 17) ^ hash64[4]; hash64[6]+= hash64[3]; hash64[3] = ROL2(hash64[3], 49) ^ hash64[6]; hash64[0]+= hash64[5]; hash64[5] = ROL2(hash64[5], 36) ^ hash64[0]; hash64[2]+= hash64[7]; hash64[7] = ROL2(hash64[7], 39) ^ hash64[2]; hash64[6]+= hash64[1]; hash64[1] = ROL2(hash64[1], 44) ^ hash64[6]; hash64[0]+= hash64[7]; hash64[7] = ROL2(hash64[7], 9) ^ hash64[0]; hash64[2]+= hash64[5]; hash64[5] = ROL2(hash64[5], 54) ^ hash64[2]; hash64[4]+= hash64[3]; hash64[3] = ROR8(hash64[3]) ^ hash64[4]; hash64[0]+= h1; hash64[1]+= h2; hash64[2]+= h3; hash64[3]+= h4; hash64[4]+= h5; hash64[5]+= h6 + t1; hash64[6]+= h7 + t2; hash64[7]+= skein_h8 + 1U; macro2(hash64); hash64[0]+= h2; hash64[1]+= h3; hash64[2]+= h4; hash64[3]+= h5; hash64[4]+= h6; hash64[5]+= h7 + t2; hash64[6]+= skein_h8+t0;hash64[7]+= h0 + 2U; macro1(hash64); hash64[0]+= h3; hash64[1]+= h4; hash64[2]+= h5; hash64[3]+= h6; hash64[4]+= h7; hash64[5]+= skein_h8 + t0; hash64[6]+= h0 + t1; hash64[7]+= h1 + 3U; macro2(hash64); hash64[0]+= h4; hash64[1]+= h5; hash64[2]+= h6; hash64[3]+= h7; hash64[4]+= skein_h8; hash64[5]+= h0 + t1; hash64[6]+= h1 + t2; hash64[7]+= h2 + 4U; macro1(hash64); hash64[0]+= h5; hash64[1]+= h6; hash64[2]+= h7; hash64[3]+= skein_h8; hash64[4]+= h0; hash64[5]+= h1 + t2; hash64[6]+= h2 + t0; hash64[7]+= h3 + 5U; macro2(hash64); hash64[0]+= h6; hash64[1]+= h7; hash64[2]+= skein_h8; hash64[3]+= h0; hash64[4]+= h1; hash64[5]+= h2 + t0; hash64[6]+= h3 + t1; hash64[7]+= h4 + 6U; macro1(hash64); hash64[0]+= h7; hash64[1]+= skein_h8; hash64[2]+= h0; hash64[3]+= h1; hash64[4]+= h2; hash64[5]+= h3 + t1; hash64[6]+= h4 + t2; hash64[7]+= h5 + 7U; macro2(hash64); hash64[0]+= skein_h8; hash64[1]+= h0; hash64[2]+= h1; hash64[3]+= h2; hash64[4]+= h3; hash64[5]+= h4 + t2; hash64[6]+= h5 + t0; hash64[7]+= h6 + 8U; macro1(hash64); hash64[0]+= h0; hash64[1]+= h1; hash64[2]+= h2; hash64[3]+= h3; hash64[4]+= h4; hash64[5]+= h5 + t0; hash64[6]+= h6 + t1; hash64[7]+= h7 + 9U; macro2(hash64); hash64[0]+= h1; hash64[1]+= h2; hash64[2]+= h3; hash64[3]+= h4; hash64[4]+= h5; hash64[5]+= h6 + t1; hash64[6]+= h7 + t2; 
hash64[7]+= skein_h8 + 10U; macro1(hash64); hash64[0]+= h2; hash64[1]+= h3; hash64[2]+= h4; hash64[3]+= h5; hash64[4]+= h6; hash64[5]+= h7 + t2; hash64[6]+= skein_h8+t0;hash64[7]+= h0 + 11U; macro2(hash64); hash64[0]+= h3; hash64[1]+= h4; hash64[2]+= h5; hash64[3]+= h6; hash64[4]+= h7; hash64[5]+= skein_h8 + t0; hash64[6]+= h0 + t1; hash64[7]+= h1 + 12U; macro1(hash64); hash64[0]+= h4; hash64[1]+= h5; hash64[2]+= h6; hash64[3]+= h7; hash64[4]+= skein_h8; hash64[5]+= h0 + t1; hash64[6]+= h1 + t2; hash64[7]+= h2 + 13U; macro2(hash64); hash64[0]+= h5; hash64[1]+= h6; hash64[2]+= h7; hash64[3]+= skein_h8; hash64[4]+= h0; hash64[5]+= h1 + t2; hash64[6]+= h2 + t0; hash64[7]+= h3 + 14U; macro1(hash64); hash64[0]+= h6; hash64[1]+= h7; hash64[2]+= skein_h8; hash64[3]+= h0; hash64[4]+= h1; hash64[5]+= h2 + t0; hash64[6]+= h3 + t1; hash64[7]+= h4 + 15U; macro2(hash64); hash64[0]+= h7; hash64[1]+= skein_h8; hash64[2]+= h0; hash64[3]+= h1; hash64[4]+= h2; hash64[5]+= h3 + t1; hash64[6]+= h4 + t2; hash64[7]+= h5 + 16U; macro1(hash64); hash64[0]+= skein_h8; hash64[1]+= h0; hash64[2]+= h1; hash64[3]+= h2; hash64[4]+= h3; hash64[5]+= h4 + t2; hash64[6]+= h5 + t0; hash64[7]+= h6 + 17U; // macro2(hash64); hash64[0] += hash64[1]; hash64[2] += hash64[3]; hash64[4] += hash64[5]; hash64[6] += hash64[7]; hash64[1] = ROL2(hash64[1], 39) ^ hash64[0]; hash64[3] = ROL2(hash64[3], 30) ^ hash64[2]; hash64[5] = ROL2(hash64[5], 34) ^ hash64[4]; hash64[7] = ROL24(hash64[7]) ^ hash64[6]; hash64[2] += hash64[1]; hash64[4] += hash64[7]; hash64[6] += hash64[5]; hash64[0] += hash64[3]; hash64[1] = ROL2(hash64[1], 13) ^ hash64[2]; hash64[3] = ROL2(hash64[3], 17) ^ hash64[0]; hash64[4] += hash64[1]; hash64[6] += hash64[3]; hash64[3] = ROL2(hash64[3], 29) ^ hash64[6]; hash64[4] += hash64[3]; hash64[3] = ROL2(hash64[3], 22) ^ hash64[4]; if(devectorize(hash64[3]+h3)<=highTarget){ uint32_t tmp = atomicExch(&resNonce[0], thread); if (tmp != UINT32_MAX) resNonce[1] = tmp; } // phash[0] = *(uint2x4*)&hash64[0]; // phash[1] = *(uint2x4*)&hash64[4]; #undef h0 #undef h1 #undef h2 #undef h3 #undef h4 #undef h5 #undef h6 #undef h7 } } __host__ void quark_keccak_skein512_cpu_hash_64_final(int thr_id, uint32_t threads, uint32_t *d_nonceVector, uint32_t *d_hash,uint32_t *d_resNonce,const uint64_t highTarget){ // berechne wie viele Thread Blocks wir brauchen const uint32_t dev_id = device_map[thr_id]; const uint32_t tpb = (device_sm[dev_id] > 500) ? TPB52 : TPB50; dim3 grid((threads + tpb - 1) / tpb); dim3 block(tpb); quark_keccakskein512_gpu_hash_64_final <<<grid, block >>>(threads, (uint2 *)d_hash, d_nonceVector, d_resNonce, highTarget); }
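/*
 * Minimal host-side sketch (added for illustration; not part of the original
 * file above).  It relies only on what the code shows: the signature of
 * quark_keccak_skein512_cpu_hash_64_final(), that a NULL nonce vector makes
 * hashPosition equal to the thread index, and that the kernel compares the
 * resNonce entry against UINT32_MAX, which implies the host seeds the two-word
 * result buffer with UINT32_MAX before the launch.  Buffer names and the lack
 * of error handling here are assumptions.
 */
static void example_run_keccak_skein_final(int thr_id, uint32_t threads,
	uint32_t *d_hash, uint64_t highTarget)
{
	uint32_t h_resNonce[2] = { UINT32_MAX, UINT32_MAX };
	uint32_t *d_resNonce = NULL;

	cudaMalloc((void**)&d_resNonce, 2 * sizeof(uint32_t));
	cudaMemcpy(d_resNonce, h_resNonce, 2 * sizeof(uint32_t), cudaMemcpyHostToDevice);

	// d_nonceVector == NULL: each thread hashes g_hash[thread * 8 .. thread * 8 + 7]
	quark_keccak_skein512_cpu_hash_64_final(thr_id, threads, NULL, d_hash,
		d_resNonce, highTarget);

	cudaMemcpy(h_resNonce, d_resNonce, 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost);
	if (h_resNonce[0] != UINT32_MAX) {
		// h_resNonce[0] holds the index of a thread whose final Skein word
		// satisfied devectorize(hash64[3] + h3) <= highTarget; h_resNonce[1]
		// holds the previously stored candidate if more than one thread
		// passed the atomicExch().
	}
	cudaFree(d_resNonce);
}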
#include <stdio.h> #include <assert.h> #include <string.h> #include <limits.h> #include <math.h> #include <iostream> #include <algorithm> #include <iterator> #include <vector> #include <map> #include <cuda.h> #define RUN_CPU_SORTS //#define GET_DETAILED_PERFORMANCE #define gpucheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Types: typedef unsigned int uint; #ifdef min #undef min #endif #ifdef max #undef max #endif /// return a timestamp with sub-second precision /** QueryPerformanceCounter and clock_gettime have an undefined starting point (null/zero) * and can wrap around, i.e. be nulled again. **/ double seconds() { struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now); return now.tv_sec + now.tv_nsec / 1000000000.0; } bool parseArgs(int argc, char** argv, unsigned int* test_iterations, unsigned int* widthReSz, unsigned int* heightReSz) { const char sUsageString[512] = "Usage: Quicksort [num test iterations] [SurfWidth(^2 only)] [SurfHeight(^2 only)]"; if (argc != 4) { printf(sUsageString); return false; } else { *test_iterations = atoi (argv[1]); *widthReSz = atoi (argv[2]); *heightReSz = atoi (argv[3]); return true; } } #include "Quicksort.h" #include "QuicksortKernels.cuh" template <class T> T* partition(T* left, T* right, T pivot) { // move pivot to the end T temp = *right; *right = pivot; *left = temp; T* store = left; for(T* p = left; p != right; p++) { if (*p < pivot) { temp = *store; *store = *p; *p = temp; store++; } } temp = *store; *store = pivot; *right = temp; return store; } template <class T> void quicksort(T* data, int left, int right) { T* store = partition(data + left, data + right, data[left]); int nright = store-data; int nleft = nright+1; if (left < nright) { if (nright - left > 32) { quicksort(data, left, nright); } else std::sort(data + left, data + nright + 1); } if (nleft < right) { if (right - nleft > 32) { quicksort(data, nleft, right); } else { std::sort(data + nleft, data + right + 1); } } } template <class T> void gqsort(T *db, T *dnb, std::vector<block_record<T>>& blocks, std::vector<parent_record>& parents, std::vector<work_record<T>>& news, bool reset) { news.resize(blocks.size()*2); #ifdef GET_DETAILED_PERFORMANCE static double absoluteTotal = 0.0; static uint count = 0; if (reset) { absoluteTotal = 0.0; count = 0; } double beginClock, endClock; beginClock = seconds(); #endif block_record<T> *blocksb; parent_record *parentsb; work_record<T> *newsb; gpucheck(cudaMalloc((void**)&blocksb, sizeof(block_record<T>)*blocks.size())); gpucheck(cudaMalloc((void**)&parentsb, sizeof(parent_record)*parents.size())); gpucheck(cudaMalloc((void**)&newsb, sizeof(work_record<T>)*news.size())); gpucheck(cudaMemcpy(blocksb, blocks.data(), sizeof(block_record<T>)*blocks.size(), cudaMemcpyHostToDevice)); gpucheck(cudaMemcpy(parentsb, parents.data(), sizeof(parent_record)*parents.size(), cudaMemcpyHostToDevice)); gpucheck(cudaMemcpy(newsb, news.data(), sizeof(work_record<T>)*news.size(), cudaMemcpyHostToDevice)); gqsort_kernel<<<dim3(blocks.size()), dim3(GQSORT_LOCAL_WORKGROUP_SIZE)>>>( db, dnb, blocksb, parentsb, newsb); gpucheck( cudaPeekAtLastError() ); gpucheck( cudaDeviceSynchronize() ); gpucheck(cudaMemcpy(news.data(), newsb, sizeof(work_record<T>)*news.size(), cudaMemcpyDeviceToHost)); gpucheck(cudaFree(blocksb)); gpucheck(cudaFree(parentsb)); 
gpucheck(cudaFree(newsb)); #ifdef GET_DETAILED_PERFORMANCE endClock = seconds(); double totalTime = endClock - beginClock; absoluteTotal += totalTime; std::cout << ++count << ": gqsort time " << absoluteTotal * 1000 << " ms" << std::endl; #endif #ifdef DEBUG printf("\noutput news\n"); for (int i = 0; i < news.size(); i++) { printf("%u %u %u %u\n", news[i].start, news[i].end, news[i].pivot, news[i].direction); } #endif } template <class T> void lqsort(T *db, T *dnb, std::vector<work_record<T>>& done) { #ifdef GET_DETAILED_PERFORMANCE double beginClock, endClock; beginClock = seconds(); #endif work_record<T>* doneb; //std::cout << "done size is " << done.size() << std::endl; gpucheck(cudaMalloc((void**)&doneb, sizeof(work_record<T>)*done.size())); gpucheck(cudaMemcpy(doneb, done.data(), sizeof(work_record<T>)*done.size(), cudaMemcpyHostToDevice)); lqsort_kernel<<<dim3(done.size()), dim3(LQSORT_LOCAL_WORKGROUP_SIZE)>>>(db, dnb, doneb); gpucheck( cudaPeekAtLastError() ); gpucheck( cudaDeviceSynchronize() ); // Lets do phase 2 pass gpucheck(cudaFree(doneb)); #ifdef GET_DETAILED_PERFORMANCE endClock = seconds(); double totalTime = endClock - beginClock; std::cout << "lqsort time " << totalTime * 1000 << " ms" << std::endl; #endif } size_t optp(size_t s, double k, size_t m) { return (size_t)pow(2, floor(log(s*k + m)/log(2.0) + 0.5)); } template <class T> void GPUQSort(size_t size, T* d, T* dn) { // allocate buffers T *db, *dnb; cudaMalloc((void**)&db, ((sizeof(T)*size)/64 + 1)*64); cudaMemcpy(db, d, ((sizeof(T)*size)/64 + 1)*64, cudaMemcpyHostToDevice); cudaMalloc((void**)&dnb, ((sizeof(T)*size)/64 + 1)*64); cudaMemcpy(dnb, dn, ((sizeof(T)*size)/64 + 1)*64, cudaMemcpyHostToDevice); const size_t MAXSEQ = optp(size, 0.00009516, 203); const size_t MAX_SIZE = 12*std::max(MAXSEQ, (size_t)QUICKSORT_BLOCK_SIZE); //std::cout << "MAXSEQ = " << MAXSEQ << std::endl; uint startpivot = median_host(d[0], d[size/2], d[size-1]); std::vector<work_record<T>> work, done, news; work.reserve(MAX_SIZE); done.reserve(MAX_SIZE); news.reserve(MAX_SIZE); std::vector<parent_record> parent_records; parent_records.reserve(MAX_SIZE); std::vector<block_record<T>> blocks; blocks.reserve(MAX_SIZE); work.push_back(work_record<T>(0, size, startpivot, 1)); bool reset = true; while(!work.empty() /*&& work.size() + done.size() < MAXSEQ*/) { size_t blocksize = 0; for(auto it = work.begin(); it != work.end(); ++it) { blocksize += std::max((it->end - it->start)/MAXSEQ, (size_t)1); } for(auto it = work.begin(); it != work.end(); ++it) { uint start = it->start; uint end = it->end; uint pivot = it->pivot; uint direction = it->direction; uint blockcount = (end - start + blocksize - 1)/blocksize; parent_record prnt(start, end, start, end, blockcount-1); parent_records.push_back(prnt); for(uint i = 0; i < blockcount - 1; i++) { uint bstart = start + blocksize*i; block_record<T> br(bstart, bstart+blocksize, pivot, direction, parent_records.size()-1); blocks.push_back(br); } block_record<T> br(start + blocksize*(blockcount - 1), end, pivot, direction, parent_records.size()-1); blocks.push_back(br); } //std::cout << " blocks = " << blocks.size() << " parent records = " << parent_records.size() << " news = " << news.size() << std::endl; gqsort<T>(db, dnb, blocks, parent_records, news, reset); reset = false; work.clear(); parent_records.clear(); blocks.clear(); for(auto it = news.begin(); it != news.end(); ++it) { if (it->direction != EMPTY_RECORD) { if (it->end - it->start <= QUICKSORT_BLOCK_SIZE /*size/MAXSEQ*/) { if (it->end - it->start > 0) 
done.push_back(*it); } else { work.push_back(*it); } } } news.clear(); } for(auto it = work.begin(); it != work.end(); ++it) { if (it->end - it->start > 0) done.push_back(*it); } lqsort<T>(db, dnb, done); cudaMemcpy(d, db, ((sizeof(T)*size)/64 + 1)*64, cudaMemcpyDeviceToHost); cudaFree(db); cudaFree(dnb); } template <class T> int test(uint arraySize, unsigned int NUM_ITERATIONS, const std::string& type_name) { double totalTime, quickSortTime, stdSortTime; double beginClock, endClock; printf("\n\n\n--------------------------------------------------------------------\n"); printf("Allocating array size of %d\n", arraySize); T* pArray = (T*)aligned_alloc (4096, ((arraySize*sizeof(T))/64 + 1)*64); T* pArrayCopy = (T*)aligned_alloc (4096, ((arraySize*sizeof(T))/64 + 1)*64); std::generate(pArray, pArray + arraySize, [](){static T i = 0; return ++i; }); std::random_shuffle(pArray, pArray + arraySize); #ifdef RUN_CPU_SORTS std::cout << "Sorting the regular way..." << std::endl; std::copy(pArray, pArray + arraySize, pArrayCopy); beginClock = seconds(); std::sort(pArrayCopy, pArrayCopy + arraySize); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; stdSortTime = totalTime; std::cout << "quicksort on the cpu: " << std::endl; std::copy(pArray, pArray + arraySize, pArrayCopy); beginClock = seconds(); quicksort(pArrayCopy, 0, arraySize-1); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; quickSortTime = totalTime; #ifdef TRUST_BUT_VERIFY { std::vector<uint> verify(arraySize); std::copy(pArray, pArray + arraySize, verify.begin()); std::cout << "verifying: "; std::sort(verify.begin(), verify.end()); bool correct = std::equal(verify.begin(), verify.end(), pArrayCopy); unsigned int num_discrepancies = 0; if (!correct) { for(size_t i = 0; i < arraySize; i++) { if (verify[i] != pArrayCopy[i]) { //std:: cout << "discrepancy at " << i << " " << pArrayCopy[i] << " expected " << verify[i] << std::endl; num_discrepancies++; } } } std::cout << std::boolalpha << correct << std::endl; if (!correct) { char y; std::cout << "num_discrepancies: " << num_discrepancies << std::endl; std::cin >> y; } } #endif #endif // RUN_CPU_SORTS std::cout << "Sorting with GPU quicksort: " << std::endl; std::vector<uint> original(arraySize); std::copy(pArray, pArray + arraySize, original.begin()); std::vector<double> times; times.resize(NUM_ITERATIONS); double AverageTime = 0.0; uint num_failures = 0; for(uint k = 0; k < NUM_ITERATIONS; k++) { std::copy(original.begin(), original.end(), pArray); std::vector<uint> seqs; std::vector<uint> verify(arraySize); std::copy(pArray, pArray + arraySize, verify.begin()); beginClock = seconds(); GPUQSort(arraySize, pArray, pArrayCopy); endClock = seconds(); totalTime = endClock - beginClock; std::cout << "Time to sort: " << totalTime * 1000 << " ms" << std::endl; times[k] = totalTime; AverageTime += totalTime; #ifdef TRUST_BUT_VERIFY std::cout << "verifying: "; std::sort(verify.begin(), verify.end()); bool correct = std::equal(verify.begin(), verify.end(), pArray); unsigned int num_discrepancies = 0; if (!correct) { for(size_t i = 0; i < arraySize; i++) { if (verify[i] != pArray[i]) { std:: cout << "discrepancy at " << i << " " << pArray[i] << " expected " << verify[i] << std::endl; num_discrepancies++; } } } std::cout << std::boolalpha << correct << std::endl; if (!correct) { std::cout << "num_discrepancies: " << num_discrepancies << 
std::endl; num_failures ++; } #endif } std::cout << " Number of failures: " << num_failures << " out of " << NUM_ITERATIONS << std::endl; AverageTime = AverageTime/NUM_ITERATIONS; std::cout << "Average Time: " << AverageTime * 1000 << " ms" << std::endl; double stdDev = 0.0, minTime = 1000000.0, maxTime = 0.0; for(uint k = 0; k < NUM_ITERATIONS; k++) { stdDev += (AverageTime - times[k])*(AverageTime - times[k]); minTime = std::min(minTime, times[k]); maxTime = std::max(maxTime, times[k]); } if (NUM_ITERATIONS > 1) { stdDev = sqrt(stdDev/(NUM_ITERATIONS - 1)); std::cout << "Standard Deviation: " << stdDev * 1000 << std::endl; std::cout << "%error (3*stdDev)/Average: " << 3*stdDev / AverageTime * 100 << "%" << std::endl; std::cout << "min time: " << minTime * 1000 << " ms" << std::endl; std::cout << "max time: " << maxTime * 1000 << " ms" << std::endl; } #ifdef RUN_CPU_SORTS std::cout << "Average speedup over CPU quicksort: " << quickSortTime/AverageTime << std::endl; std::cout << "Average speedup over CPU std::sort: " << stdSortTime/AverageTime << std::endl; #endif // RUN_CPU_SORTS printf("-------done--------------------------------------------------------\n"); free(pArray); free(pArrayCopy); return 0; } int main(int argc, char** argv) { unsigned int NUM_ITERATIONS; uint heightReSz, widthReSz; bool success = parseArgs (argc, argv, &NUM_ITERATIONS, &widthReSz, &heightReSz); if (!success) return -1; uint arraySize = widthReSz*heightReSz; test<uint>(arraySize, NUM_ITERATIONS, "uint"); test<float>(arraySize, NUM_ITERATIONS, "float"); test<double>(arraySize, NUM_ITERATIONS, "double"); return 0; }
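/*
 * Worked example of the optp() heuristic above (added note, not part of the
 * original benchmark).  optp(s, k, m) rounds s*k + m to the nearest power of
 * two, so for a 1M-element input:
 *   1048576 * 0.00009516 + 203 ~= 302.8,  log2(302.8) ~= 8.24,  round -> 8,
 *   MAXSEQ = 2^8 = 256.
 * The first gqsort pass therefore cuts the initial sequence into roughly
 * MAXSEQ blocks of about size/MAXSEQ = 4096 elements each; sequences that
 * shrink below QUICKSORT_BLOCK_SIZE are handed to lqsort_kernel instead.
 */
static void optp_example()
{
	size_t size = 1 << 20;                        // 1,048,576 elements
	size_t maxseq = optp(size, 0.00009516, 203);  // evaluates to 256
	printf("MAXSEQ for %zu elements: %zu\n", size, maxseq);
}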
* @file * track_utils.cuh * * @brief tracking utilities function */ #pragma once namespace gunrock { namespace util { #define TO_TRACK false #define NUM_TO_TRACK 1 #define MAX_GPU 0 template <typename VertexT> __device__ __host__ __forceinline__ bool isTracking(VertexT node) { const VertexT node_to_track[] = {11}; if (!TO_TRACK) return false; else { #pragma unroll for (int i = 0; i < NUM_TO_TRACK; i++) if (node == node_to_track[i]) return true; } return false; } template <typename VertexId> static __device__ __host__ __inline__ bool to_track(int gpu_num, VertexId node) { // const VertexId node_to_track[NUM_TO_TRACK > 0 ? NUM_TO_TRACK : 1][3] = {}; /*for BFS, market /data/gunrock_dataset/large/soc-LiveJournal1/soc-LiveJournal1.mtx --src=largestdegree --traversal-mode=1 --device=0,1 --queue-sizing=7.0 --queue-sizing1=8.0 --in-sizing=0.5 --partition-seed=1451953615 --v NUM_TO_TRACK = 38 const VertexId node_to_track[NUM_TO_TRACK][3] = { { 541845, 271043, 2569951}, { 569068, 284715, 2953294}, {4016145, 2008346, 3872477}, { 40641, 20374, 2555548}, { 40885, 20494, 2579834}, { 1077, 518, 2441318}, { 1421, 692, 2432176}, { 1432, 2442039, 733}, { 4494, 2201, 2432178}, { 7327, 2424483, 3718}, { 11142, 2424090, 5558}, { 17218, 2442240, 8597}, { 17649, 8828, 2445489}, { 25287, 2442048, 12597}, { 253814, 2623718, 126782}, {2590170, 2479765, 1294485}, { 19137, 2463137, 9576}, { 23900, 11956, 2510031}, { 24364, 2494127, 12157}, { 40830, 2582274, 20366}, { 260110, 130220, 3107660}, { 501, 240, 2453050}, { 1426, 2494049, 730}, { 1772, 857, 2432012}, { 9983, 4979, 2445486}, { 17204, 8613, 2558446}, { 67433, 2430736, 33588}, { 265677, 2582262, 132629}, { 36852, 2533935, 18350}, { 99110, 49699, 2681560}, { 109806, 2732830, 54796}, { 175832, 2696177, 87747}, { 227015, 113648, 2426409}, { 569018, 2905970, 284333}, { 624385, 2904043, 311822}, {1402946, 2912942, 701003}, {1402948, 3381721, 701005}, {1404916, 3517695, 701958} };*/ // for BFS, market // /data/gunrock_dataset/large/soc-LiveJournal1/soc-LiveJournal1.mtx // --src=largestdegree --traversal-mode=1 --undirected --device=0,1,2,3 // --queue-sizing=8.0 --in-sizing=0.5 --idempotence --v // --partition-seed=1452208768 /*const VertexId node_to_track[][5] = { { 5487, 1370, 1239278, 1212000, 1238478}, { 5503, 1377, 1531518, 1236984, 1238502}, { 5520, 1381, 1309968, 1236988, 1482071}, { 5842, 2313598, 2078214, 1454, 2916472}, { 6110, 2938440, 3162369, 2915808, 1523}, { 22727, 3051939, 3633847, 2969441, 5630}, {228833, 2377430, 2375217, 57405, 3215261}, {228837, 57428, 1536317, 1536368, 1537966}, {228845, 57431, 1536319, 1536371, 1537967}, {228852, 57433, 1536331, 1536372, 1537968} };*/ // for BFS, market // /data/gunrock_dataset/large/soc-LiveJournal1/soc-LiveJournal1.mtx // --src=largestdegree --traversal-mode=1 --undirected --device=0,1 // --queue-sizing=7.0 --queue-sizing1=8.5 --idempotence // --partition-seed=1452615167 /* NUM_TO_TRACK = 56 const VertexId node_to_track[][5] = { {1252566, 626153, 2873663}, {1252567, 3483619, 626413}, {1252568, 626154, 2601173}, {2168129, 3299550, 1084495}, { 417722, 3415075, 208995}, { 673597, 4847572, 336639}, {1533408, 4847572, 767025}, {1533411, 4847572, 767027}, {2527926, 3280482, 1264659}, {2949435, 1474568, 2893498}, { 15791, 2621277, 7862}, { 15792, 7929, 2634600}, { 16818, 2501728, 8366}, { 26081, 13056, 4285221}, { 26775, 2694940, 13370}, { 15789, 7928, 2634593}, {2332103, 1165431, 2634601}, { 3947, 1963, 2444884}, { 6168, 3085, 2521563}, { 4622, 2289, 2511546}, { 4639, 2501727, 2338}, { 4648, 2501743, 2346}, { 42787, 
2501720, 21415}, {1617210, 2691935, 808783}, {2657850, 1328388, 2641350}, {2657855, 1328391, 2641353}, { 26054, 13038, 2593963}, {2682456, 2692087, 1341748}, { 26773, 2694881, 13368}, { 26777, 2694941, 13372}, { 26782, 2694942, 13374}, { 26784, 2694882, 13376}, { 26802, 13415, 2713197}, { 26803, 2694943, 13387}, { 434027, 216933, 2713220}, { 518973, 2446618, 259374}, { 548276, 274268, 2713221}, {1464833, 2611737, 732610}, {2278177, 1138503, 2713225}, {2650609, 1324690, 2713226}, {2683835, 1341400, 2713227}, {2683838, 3696777, 1342435}, {2683840, 3696778, 1342437}, {2683841, 3696779, 1342438}, {2683842, 1341403, 2713230}, {2683846, 3696780, 1342439}, {2683847, 4055014, 1342440}, {2683848, 1341407, 2713234}, {2683849, 3696781, 1342441}, {2683850, 1341408, 2713235}, { 301726, 150596, 2440086}, { 11723, 2428169, 5836}, { 359848, 179920, 2593632}, { 219110, 109174, 2692826}, { 209169, 104204, 3297101}, { 56958, 2552293, 28432} }; const VertexId node_to_track[][5] = { {243640, 121881, 3344179}, {330800, 2922799, 165211}, {497263, 3226907, 248563}, {575687, 287983, 2692910}, {769572, 2875850, 384909}, {860756, 430280, 2846303}, {1229164, 614416, 2667922}, {1229165, 614417, 3289893}, {1229166, 614418, 3591811}, {1229167, 614419, 3428648}, // {860756, 430280, 2846303}, {50632 , 2458866, 25223}, {51419 , 25811, 2443801}, {644000 , 322182, 2461938}, {983065 , 3401569, 491449}, {1098096 , 2859806, 549202}, {1516165 , 2675745, 758377}, // {769572, 2875850, 384909} {769546 , 2863027, 384896} };*/ // for BFS, market // /data/gunrock_dataset/large/soc-LiveJournal1/soc-LiveJournal1.mtx // --src=largestdegree --traversal-mode=1 --undirected --device=1 // --queue-sizing=12.0 --idempotence // const VertexId node_to_track[][MAX_GPU + 1] = { //{571021}, //{1046961}, //{2383788}, //{3489561}, //{3799683}, //{94386}, //{473430}, //{616588}, //{620833}, //{620835}, //{327529}, //{736107}, //{821811}, //{821813}, //{821814}, //{155168}, //{156578}, //{167158}, //{168044}, //{177768}, //{91256}, //{4847570}, //{32456 }, //{182080 }, //{134578 }, //{613300 }, //{604857 } //}; // for BFS, ./bin/test_bfs_7.5_x86_64 market // /data/gunrock_dataset/large/soc-LiveJournal1/soc-LiveJournal1.mtx // --src=largestdegree --traversal-mode=1 --undirected --device=0 // --queue-sizing=12.0 --idempotence const VertexId node_to_track[][MAX_GPU + 1] = { {357989}, {3291894}, {3291895}, {3291896}, {3291897}, {617822}, {3291898}, {1036861}, {1025745}, {3291899} //{109644 }, //{101108 }, //{44056 }, //{106822 }, //{430576 } }; bool retval = false; if (TO_TRACK) { gpu_num = -1; #pragma unroll for (int i = 0; i < NUM_TO_TRACK; i++) // if (gpu_num == gpu_to_track[i] && // node == node_to_track[i]) if (node_to_track[i][gpu_num + 1] == node) retval = true; } return retval; } #define OFFSET_TO_TRACK 5 template <typename SizeT> static __device__ __host__ __inline__ bool offset_to_track(int gpu_num, SizeT offset) { const SizeT offset_to_track[][MAX_GPU + 1] = { {35771012}, //{35771163}, //{35771269}, //{35771272}, //{35771273}, //{35048294}, {35048419}, //{35771053}, //{35048667}, //{35048669}, //{12544676}, //{12544809}, {12544939}, //{12544941}, //{12544942}, //{2659756}, //{2659757}, //{2659765}, {2659766}, //{2659771}, {15186681} //{4847570}, //{32456 }, //{182080 }, //{134578 }, //{613300 }, //{604857 } }; bool retval = false; if (TO_TRACK) { gpu_num = -1; #pragma unroll for (int i = 0; i < OFFSET_TO_TRACK; i++) // if (gpu_num == gpu_to_track[i] && // node == node_to_track[i]) if (offset_to_track[i][gpu_num + 1] == offset) retval = true; } 
return retval; } #define THREAD_TO_TRACK 5 template <typename VertexId> static __device__ __host__ __inline__ bool thread_to_track(int gpu_num, VertexId pred) { const VertexId pred_to_track[][MAX_GPU + 1] = {{613300}, //{613300 }, //{613300 }, //{613300 }, //{613300 }, //{604857 }, {604857}, //{613300 }, //{604857 }, //{604857 }, //{134578 }, //{134578 }, {134578}, //{134578 }, //{134578 }, //{32456 }, //{32456 }, //{32456 }, {32456}, //{32456 }, {182080}}; const int thread_to_track[][MAX_GPU + 1] = {{6}, //{29}, //{7}, //{10}, //{11}, //{24}, {21}, //{47}, //{13}, //{15}, //{72}, //{77}, {79}, //{81}, //{82}, //{68}, //{69}, //{77}, {78}, //{83}, {33}}; bool retval = false; if (TO_TRACK) { gpu_num = -1; #pragma unroll for (int i = 0; i < THREAD_TO_TRACK; i++) // if (gpu_num == gpu_to_track[i] && // node == node_to_track[i]) if (pred_to_track[i][gpu_num + 1] == pred && thread_to_track[i][gpu_num + 1] == threadIdx.x) retval = true; } return retval; } #define PRED_TO_TRACK 5 template <typename VertexId> static __device__ __host__ __inline__ bool pred_to_track(int gpu_num, VertexId node) { const VertexId pred_to_track[][MAX_GPU + 1] = {// {32456 }, //{182080 }, //{134578 }, //{613300 }, //{604857 } {109644}, {101108}, {44056}, {106822}, {430576}}; bool retval = false; if (TO_TRACK) { gpu_num = -1; #pragma unroll for (int i = 0; i < PRED_TO_TRACK; i++) // if (gpu_num == gpu_to_track[i] && // node == node_to_track[i]) if (pred_to_track[i][gpu_num + 1] == node) retval = true; } return retval; } template <typename T> bool is_puer2(T x) { if (x <= 2) return true; if ((x % 2) != 0) return false; return is_puer2(x / 2); } template <typename VertexId, typename SizeT, typename Value> void Print_Vertex(VertexId v, int num_gpus, Value error_threshold, Value* results, Value* references, int* partition_table, VertexId** convertion_tables) { printf("{%lld ", (long long)v); if (num_gpus > 1) { for (int gpu = 0; gpu < num_gpus; gpu++) printf(", %lld", (long long)convertion_tables[gpu][v]); } printf("},\n\t\t"); if (num_gpus > 1) printf("host = %d, ", partition_table[v]); if (fabs(results[v] - references[v]) >= error_threshold) printf("reference = %lld, ", (long long)references[v]); printf("result = %lld, ", (long long)results[v]); printf("\n"); } /*template < typename VertexId, typename SizeT, typename Value, typename T> void Track_Results ( const Csr<VertexId, SizeT, Value> *graph, int num_gpus, T error_threshold, T *results, T *references, int* partition_table, VertexId** convertion_tables) { SizeT nodes = graph->nodes; if (references == NULL) return; if (!TO_TRACK) return; else { VertexId *markers = new VertexId[graph->nodes]; VertexId *track_nodes = new VertexId[NUM_TO_TRACK + 1]; SizeT *incoming_counter = new SizeT[NUM_TO_TRACK]; SizeT counter = 0; VertexId **preds = new VertexId*[NUM_TO_TRACK]; for (VertexId dest=0; dest<nodes; dest++) if (to_track(-1, dest)) { markers[dest] = counter; track_nodes[counter] = dest; incoming_counter[counter] = 0; counter ++; } else markers[dest] = NUM_TO_TRACK; for (VertexId src=0; src<nodes; src++) for (SizeT j = graph->row_offsets[src]; j < graph->row_offsets[src+1]; j++) { VertexId dest = graph -> column_indices[j]; VertexId dest_ = markers[dest]; if (dest_ == NUM_TO_TRACK) continue; if (incoming_counter[dest_] == 0) { preds[dest_] = new VertexId[1]; } else if (is_puer2(incoming_counter[dest_])) { VertexId *temp_array = new VertexId[incoming_counter[dest_] * 2]; memcpy(temp_array, preds[dest_], sizeof(VertexId) * incoming_counter[dest_]); delete[] preds[dest_]; 
preds[dest_] = temp_array; temp_array = NULL; } preds[dest_][incoming_counter[dest_]] = src; incoming_counter[dest_] ++; } for (SizeT i=0; i<NUM_TO_TRACK; i++) { VertexId dest = track_nodes[i]; if (pred_to_track(-1, dest)) continue; printf("Vertex "); Print_Vertex<VertexId, SizeT, T>( dest, num_gpus, error_threshold, results, references, partition_table, convertion_tables); for (SizeT j = 0; j < incoming_counter[i]; j++) { VertexId src = preds[i][j]; //if (references[src] != references[dest] -1) continue; // fabs(results[src] - references[src]) < error_threshold) continue; // bfs printf("\t"); Print_Vertex<VertexId, SizeT, T>( src, num_gpus, error_threshold, results, references, partition_table, convertion_tables); } printf("\n"); } for (VertexId src=0; src< nodes; src++) { if (!pred_to_track(-1, src)) continue; printf("Source "); Print_Vertex<VertexId, SizeT, T>( src, num_gpus, error_threshold, results, references, partition_table, convertion_tables); for (SizeT j = graph->row_offsets[src]; j < graph->row_offsets[src+1]; j++) { VertexId dest = graph -> column_indices[j]; printf("\t"); Print_Vertex<VertexId, SizeT, T>( dest, num_gpus, error_threshold, results, references, partition_table, convertion_tables); } printf("\n"); } } }*/ // Output errors template <typename VertexId, typename SizeT, typename Value> void Output_Errors(const char* file_name, SizeT num_nodes, int num_gpus, Value error_threshold, Value* results, Value* references, int* partition_table, VertexId** convertion_tables) { if (references == NULL) return; std::ofstream fout; printf("\nWriting errors into %s\n", file_name); fout.open(file_name); for (VertexId v = 0; v < num_nodes; v++) { if (fabs(results[v] - references[v]) <= error_threshold) continue; fout << v << "\t" << references[v] << "\t" << results[v]; if (num_gpus > 1) { fout << "\t" << partition_table[v]; for (int gpu = 0; gpu < num_gpus; gpu++) fout << "\t" << convertion_tables[gpu][v]; } fout << std::endl; } fout.close(); } template <typename VertexId, typename SizeT, typename Value> __global__ void Check_Queue(const SizeT num_elements, const int gpu_num, const SizeT num_nodes, const long long iteration, const VertexId* keys, const Value* labels) { const SizeT STRIDE = gridDim.x * blockDim.x; VertexId x = blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements) { VertexId key = keys[x]; if (key >= num_nodes || keys < 0) printf("%d\t %lld\t Check_Queue\t x, key = %d, %d\n", gpu_num, iteration, x, key); else { Value label = labels[key]; if ((label != iteration + 1 && label != iteration) || label < 0) { printf("%d\t %lld\t Check_Queue\t x, key, label = %d, %d, %d\n", gpu_num, iteration, x, key, label); } } x += STRIDE; } } template <typename VertexId, typename SizeT, typename Value> __global__ void Check_Range(const SizeT num_elements, const int gpu_num, const long long iteration, const Value lower_limit, const Value upper_limit, const Value* values) { const SizeT STRIDE = gridDim.x * blockDim.x; VertexId x = blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements) { Value value = values[x]; if (value > upper_limit || value < lower_limit) { printf("%d\t %lld\t Check_Range\t x = %d, %d not in (%d, %d)\n", gpu_num, iteration, x, value, lower_limit, upper_limit); } x += STRIDE; } } template <typename VertexId, typename SizeT> __global__ void Check_Exist(const SizeT num_elements, const int gpu_num, const int check_num, const long long iteration, const VertexId* keys) { const SizeT STRIDE = gridDim.x * blockDim.x; VertexId x = blockIdx.x * blockDim.x + 
threadIdx.x; while (x < num_elements) { VertexId key = keys[x]; if (to_track(gpu_num, key)) { if ((check_num != 3) || (!pred_to_track(gpu_num, key))) printf("%d\t %lld\t Check_Exist\t [%d] presents at %d\n", gpu_num, iteration, key, check_num); } x += STRIDE; } } template <typename VertexId, typename SizeT> __global__ void Check_Exist_(const SizeT* num_elements, const int gpu_num, const int check_num, const long long iteration, const VertexId* keys) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements[0]) { VertexId key = keys[x]; if (to_track(gpu_num, key)) { if ((check_num != 3) || (!pred_to_track(gpu_num, key))) printf("%d\t %lld\t Check_Exist\t [%d] presents at %d\n", gpu_num, iteration, key, check_num); } x += STRIDE; } } template <typename Type> __global__ void Check_Value(const Type* value, const int gpu_num, const int check_num, const long long iteration) { printf("%d\t %lld\t Check_Value\t %d at %d\n", gpu_num, iteration, value[0], check_num); } template <typename VertexId, typename SizeT, typename Value> __global__ void Verify_Value(const int gpu_num, const int check_num, const SizeT num_elements, const long long iteration, const VertexId* keys, const Value* values, const Value value) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements) { VertexId key = keys[x]; if (key != -1) { if (values[key] != value) printf("%d\t %lld\t Verify_Value\t %d\t values[%d] (%d) != %lld\n", gpu_num, iteration, check_num, key, values[key], (long long)value); } x += STRIDE; } } template <typename SizeT, typename Value> __global__ void Verify_Value(const int gpu_num, const int check_num, const SizeT num_elements, const long long iteration, const Value* values, const Value value) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements) { if (values[x] != value) printf("%d\t %lld\t Verify_Value\t %d\t values[%d] (%d) != %lld\n", gpu_num, iteration, check_num, x, values[x], (long long)value); x += STRIDE; } } template <typename VertexId, typename SizeT, typename Value> __global__ void Verify_Value_(const int gpu_num, const int check_num, const SizeT* num_elements, const long long iteration, const VertexId* keys, const Value* values, const Value value) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements[0]) { VertexId key = keys[x]; if (key != -1) { if (values[key] != value) printf("%d\t %lld\t Verify_Value\t %d\t values[%d] (%d) != %lld\n", gpu_num, iteration, check_num, key, values[key], (long long)value); } x += STRIDE; } } template <typename VertexId, typename SizeT> __global__ void Verify_Row_Length(const int gpu_num, const int check_num, const SizeT num_elements, const long long iteration, const VertexId* keys, const SizeT* row_offsets, const SizeT* values) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements) { VertexId key = keys[x]; if (key != -1) { if (values[x] != row_offsets[key + 1] - row_offsets[key]) printf( "%d\t %lld\t Verify_Row_Length\t %d\t keys[%d] (%d)\t values[%d] " "(%d) != %lld\n", gpu_num, iteration, check_num, x, key, x, values[x], (long long)(row_offsets[key + 1] - row_offsets[key])); } x += STRIDE; } } template <typename VertexId, typename SizeT, typename MarkerT> __global__ void 
Verify_Edges(const int gpu_num, const int check_num, const SizeT num_elements, const SizeT num_nodes, const long long iteration, const VertexId* keys, const SizeT* row_offsets, const MarkerT* markers, const MarkerT value) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; while (x < num_elements) { VertexId key = keys[x]; if (key > 0 && key < num_nodes) { for (SizeT edge_id = row_offsets[key]; edge_id < row_offsets[key + 1]; edge_id++) if (markers[edge_id] != value) printf( "%d\t %lld\t Verify_Edges\t %d\t edge[%lld] (key_pos = %lld, key " "= %lld," " neighbor_pos = %lld)\t marker (%lld) != %lld\n", gpu_num, iteration, check_num, (long long)edge_id, (long long)x, (long long)key, (long long)(edge_id - row_offsets[key]), (long long)markers[edge_id], (long long)value); } x += STRIDE; } } /*template <typename VertexId, typename SizeT, typename ProblemData> static __device__ __forceinline__ void Store_d_out( VertexId new_value, VertexId *d_out, int checkpoint_num, SizeT offset1, SizeT offset2, typename ProblemData::DataSlice *data_slice, VertexId queue_index) { SizeT offset = offset1 + offset2; //VertexId old_value = d_out[offset]; //if (!TO_TRACK) util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St( new_value, d_out + offset); //else { // VertexId old_value = atomicCAS(d_out + offset, -2, new_value); // if (old_value != -2)// && util::to_track(data_slice -> gpu_idx, new_value)) // { // printf("%d\t %d\t %d\t Storing conflict: [%d] -> %p + %lld, old_value = [%d], " // "offset1 = %lld, offset2 = %lld, blockIdx.x = %d, threadIdx.x = %d\n", // "org_cp = %d, org_q_idx = %d, org_d_out = %p, org_offset1 = %lld," // "org_offset2 = %lld, org_blockIdx.x = %d, org_threadIdx.x = %d\n", // data_slice -> gpu_idx, queue_index+1, checkpoint_num, new_value, d_out, // (long long)offset, old_value, (long long)offset1, // (long long)offset2, blockIdx.x, threadIdx.x); // data_slice -> org_checkpoint[offset], // data_slice -> org_queue_idx [offset], // data_slice -> org_d_out [offset], // (long long)data_slice -> org_offset1 [offset], // (long long)data_slice -> org_offset2 [offset], // data_slice -> org_block_idx [offset], // data_slice -> org_thread_idx[offset]); // } else { // data_slice -> org_checkpoint[offset] = checkpoint_num; // data_slice -> org_d_out [offset] = d_out ; // data_slice -> org_offset1 [offset] = offset1 ; // data_slice -> org_offset2 [offset] = offset2 ; // data_slice -> org_queue_idx [offset] = queue_index+1 ; // data_slice -> org_block_idx [offset] = blockIdx.x ; // data_slice -> org_thread_idx[offset] = threadIdx.x ; // } // if (util::to_track(data_slice -> gpu_idx, new_value) && // !util::pred_to_track(data_slice -> gpu_idx, new_value)) // { // printf("%d\t %d\t %d\t Storing [%d] -> + %lld\n", // data_slice -> gpu_idx, queue_index+1, checkpoint_num, new_value, // (long long)offset); // } //} }*/ } // namespace util } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
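// A minimal host-side usage sketch appended here for illustration only (it is
// not part of the original Gunrock file): it shows how the Check_Exist kernel
// defined above could be launched after a traversal iteration to report
// whether any tracked vertex appears in a frontier queue. The helper name
// DebugCheckFrontier, the frontier_d / frontier_len / gpu_idx / iter
// parameters and the 256-thread launch geometry are assumptions made for this
// sketch, not Gunrock API.
template <typename VertexId, typename SizeT>
void DebugCheckFrontier(const VertexId* frontier_d, SizeT frontier_len,
                        int gpu_idx, long long iter, cudaStream_t stream) {
  if (frontier_len <= 0) return;
  const int block = 256;  // threads per block; the kernel uses a grid-stride loop
  const int grid = static_cast<int>((frontier_len + block - 1) / block);
  gunrock::util::Check_Exist<VertexId, SizeT>
      <<<grid, block, 0, stream>>>(frontier_len, gpu_idx,
                                   0 /* check_num */, iter, frontier_d);
  cudaStreamSynchronize(stream);  // flush device-side printf output before returning
}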
using namespace std; namespace amgx { namespace idrmsync_solver { // Constructor template< class T_Config> IDRMSYNC_Solver_Base<T_Config>::IDRMSYNC_Solver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), m_buffer_N(0) { std::string solverName, new_scope, tmp_scope; cfg.getParameter<std::string>( "preconditioner", solverName, cfg_scope, new_scope ); s = cfg.AMG_Config::getParameter<int>("subspace_dim_s", cfg_scope); if (solverName.compare("NOSOLVER") == 0) { no_preconditioner = true; m_preconditioner = NULL; } else { no_preconditioner = false; m_preconditioner = SolverFactory<T_Config>::allocate( cfg, cfg_scope, "preconditioner" ); } } template<class T_Config> IDRMSYNC_Solver_Base<T_Config>::~IDRMSYNC_Solver_Base() { if (!no_preconditioner) { delete m_preconditioner; } } template<class T_Config> void IDRMSYNC_Solver_Base<T_Config>::solver_setup(bool reuse_matrix_structure) { AMGX_CPU_PROFILER( "IDRMSYNC_Solver::solver_setup " ); ViewType oldView = this->m_A->currentView(); this->m_A->setViewExterior(); // The number of elements in temporary vectors. this->m_buffer_N = static_cast<int>( this->m_A->get_num_cols() * this->m_A->get_block_dimy() ); const int N = this->m_buffer_N; s = this->s; // Allocate memory needed for iterating. m_z.resize(N); m_Ax.resize(N); m_v.resize(N); c.resize(s); m_f.resize(s); gamma.resize(s); mu.resize(s); m_alpha.resize(s); tempg.resize(N); tempu.resize(N); temp.resize(N); beta_idr.resize(1); t_idr.resize(N); h_chk.resize(N * s); s_chk.resize(s * s); svec_chk.resize(s); G.resize(N * s); G.set_lda(N); G.set_num_rows(N); G.set_num_cols(s); U.resize(N * s); U.set_lda(N); U.set_num_rows(N); U.set_num_cols(s); P.resize(N * s); P.set_num_cols(s); P.set_lda(N); P.set_num_rows(N); M.resize(s * s); M.set_lda(s); M.set_num_rows(s); M.set_num_cols(s); m_Ax.set_block_dimy(this->m_A->get_block_dimy()); m_Ax.set_block_dimx(1); m_Ax.dirtybit = 1; m_Ax.delayed_send = 1; m_Ax.tag = this->tag * 100 + 2; m_z.set_block_dimy(this->m_A->get_block_dimy()); m_z.set_block_dimx(1); m_z.dirtybit = 1; m_z.delayed_send = 1; m_z.tag = this->tag * 100 + 3; m_v.set_block_dimy(this->m_A->get_block_dimy()); m_v.set_block_dimx(1); m_v.dirtybit = 1; m_v.delayed_send = 1; m_v.tag = this->tag * 100 + 4; c.set_block_dimx(1); c.set_block_dimy(1); c.dirtybit = 1; c.delayed_send = 1; c.tag = this->tag * 100 + 5; m_f.set_block_dimx(1); m_f.set_block_dimy(1); m_f.dirtybit = 1; m_f.delayed_send = 1; m_f.tag = this->tag * 100 + 6; gamma.set_block_dimx(1); gamma.set_block_dimy(1); gamma.dirtybit = 1; gamma.delayed_send = 1; gamma.tag = this->tag * 100 + 7; mu.set_block_dimx(1); mu.set_block_dimy(1); mu.dirtybit = 1; mu.delayed_send = 1; mu.tag = this->tag * 100 + 8; m_alpha.set_block_dimx(1); m_alpha.set_block_dimy(1); m_alpha.dirtybit = 1; m_alpha.delayed_send = 1; m_alpha.tag = this->tag * 100 + 9; tempg.set_block_dimx(1); tempg.set_block_dimy(this->m_A->get_block_dimy()); tempg.dirtybit = 1; tempg.delayed_send = 1; tempg.tag = this->tag * 100 + 11; tempu.set_block_dimx(1); tempu.set_block_dimy(this->m_A->get_block_dimy()); tempu.dirtybit = 1; tempu.delayed_send = 1; tempu.tag = this->tag * 100 + 12; temp.set_block_dimx(1); temp.set_block_dimy(this->m_A->get_block_dimy()); temp.dirtybit = 1; temp.delayed_send = 1; temp.tag = this->tag * 100 + 13; beta_idr.set_block_dimx(1); beta_idr.set_block_dimy(1); beta_idr.dirtybit = 1; beta_idr.delayed_send = 1; beta_idr.tag = this->tag * 100 + 15; t_idr.set_block_dimx(1); t_idr.set_block_dimy(this->m_A->get_block_dimy()); 
t_idr.dirtybit = 1; t_idr.delayed_send = 1; t_idr.tag = this->tag * 100 + 16; h_chk.set_block_dimx(1); h_chk.set_block_dimy(this->m_A->get_block_dimy()); h_chk.dirtybit = 1; h_chk.delayed_send = 1; h_chk.tag = this->tag * 100 + 17; s_chk.set_block_dimx(1); s_chk.set_block_dimy(1); s_chk.dirtybit = 1; s_chk.delayed_send = 1; s_chk.tag = this->tag * 100 + 18; svec_chk.set_block_dimx(1); svec_chk.set_block_dimy(1); svec_chk.dirtybit = 1; svec_chk.delayed_send = 1; svec_chk.tag = this->tag * 100 + 19; G.set_block_dimx(1); G.set_block_dimy(this->m_A->get_block_dimy()); G.dirtybit = 1; G.delayed_send = 1; G.tag = this->tag * 100 + 20; U.set_block_dimx(1); U.set_block_dimy(this->m_A->get_block_dimy()); U.dirtybit = 1; U.delayed_send = 1; U.tag = this->tag * 100 + 21; P.set_block_dimx(1); P.set_block_dimy(this->m_A->get_block_dimy()); P.dirtybit = 1; P.delayed_send = 1; P.tag = this->tag * 100 + 22; M.set_block_dimx(1); M.set_block_dimy(1); M.dirtybit = 1; M.delayed_send = 1; M.tag = this->tag * 100 + 23; // Setup the preconditionner if (!no_preconditioner) { m_preconditioner->setup(*this->m_A, reuse_matrix_structure); } this->m_A->setView(oldView); } template<class T_Config> void IDRMSYNC_Solver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { AMGX_CPU_PROFILER( "IDRMSYNC_Solver::solve_init " ); int s; int offset, size, N; Operator<T_Config> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); N = A.get_num_rows(); s = this->s; /// to check on the host a sequential version comment these two lines. this->numprox = 1; this->pid = 0; #ifdef AMGX_WITH_MPI if (A.is_matrix_distributed()) { this->pid = A.getManager()->global_id(); this->numprox = A.getManager()->get_num_partitions(); } #endif // G and U are with zeroes // M is identity fill(h_chk, (ValueTypeB)0, 0, N * s); fill(G, (ValueTypeB)0, 0, N * s); fill(U, (ValueTypeB)0, 0, N * s); fill(P, (ValueTypeB)0, 0, N * s); fill(tempg, (ValueTypeB)0, 0, N); fill(tempu, (ValueTypeB)0, 0, N); fill(temp, (ValueTypeB)0, 0, N); fill(s_chk, (ValueTypeB)0, 0, s * s); fill(svec_chk, (ValueTypeB)0, 0, s); fill(t_idr, (ValueTypeB)0, 0, N); fill(m_f, (ValueTypeB)0, 0, s); fill(m_alpha, (ValueTypeB)0, 0, s); fill(gamma, (ValueTypeB)0, 0, s); fill(mu, (ValueTypeB)0, 0, s); fill(c, (ValueTypeB)0, 0, s); fill(m_v, (ValueTypeB)0, 0, N); setup_arrays(P, M, b, x, h_chk, s, N, this->pid); this->omega = (ValueTypeB) 1; A.setView(oldView); } template<class T_Config> bool IDRMSYNC_Solver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { AMGX_CPU_PROFILER( "IDRMYSNC_Solver::solve_iteration " ); Operator<T_Config> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); bool transposed = false; int offset, s, k, N, size; ValueTypeB alpha_blas(1), malpha_blas(-1), beta_blas(0); ValueTypeB beta, ns, nt, ts, rho, angle(0.7); A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); N = A.get_num_rows(); s = this->s; // f = (r'*P)'; // phi=Q'*r transposed = true; dot_parts_and_scatter(transposed, *this->m_r, P, m_f, svec_chk, N, this->s, this->numprox, this->pid, 0); transposed = false; // solving the small system and making v orth. 
to P for (k = 0; k < s; k++) { // gamma= M(k:s,k:s)\f(k:s); trsv_v2 // similar to IDR begins copy_ext(m_f, gamma, k, 0, s - k ); trsv_extnd(transposed, M, s, gamma, s - k, 1, k + s * k); // v = r - G(:,k:s)*gamma; dense matvec then vector update gemv_extnd(transposed, G, gamma, temp, N, s - k, alpha_blas, beta_blas, 1, 1, N, k * N, 0, 0); axpby(*this->m_r, temp, m_v, alpha_blas, malpha_blas, 0, N); if (no_preconditioner) { ; } else { m_z.delayed_send = 1; m_v.delayed_send = 1; m_preconditioner->solve( m_v, m_z, true ); m_z.delayed_send = 1; m_z.delayed_send = 1; copy(m_z, m_v, 0, N); } // U(:,k) = U(:,k:s)*c + om*v; matvec + axpy gemv_extnd(transposed, U, gamma, U, N, s - k, alpha_blas, beta_blas, 1, 1, N, k * N, 0, k * N); axpy(m_v, U, this->omega, 0, k * N, N); // G(:,k) = A*U(:,k); matvec copy_ext(U, tempu, k * N, 0, N); cudaDeviceSynchronize(); A.apply(tempu, tempg); copy_ext(tempg, G, 0, k * N, N); // Bi-Orthogonalise the new basis vectors: // P'*g_k transposed = true; dot_parts_and_scatter(transposed, G, P, mu, svec_chk, N, this->s, this->numprox, this->pid, k * N); transposed = false; if (k > 0) { copy_ext(mu, m_alpha, 0, 0, k ); trsv_extnd(transposed, M, s, m_alpha, k, 1, 0); gemv_extnd(transposed, G, m_alpha, G, N, k, malpha_blas, alpha_blas, 1, 1, N, 0, 0, k * N); gemv_extnd(transposed, U, m_alpha, U, N, k, malpha_blas, alpha_blas, 1, 1, N, 0, 0, k * N); gemv_extnd(transposed, M, m_alpha, mu, s - k, k, malpha_blas, alpha_blas, 1, 1, s, k, 0, k); } copy_ext(mu, M, k, k * s + k, s - k); divide_for_beta(m_f, M, beta_idr, &beta, k, s); if (beta == (ValueTypeB)0) { FatalError("M(k,k)=0 breakdown condition (beta):IDRMSYNC", AMGX_ERR_INTERNAL); } // r = r - beta*G(:,k); axpy(G, *this->m_r, -beta, k * N, 0, N); // x = x + beta*U(:,k); axpy(U, x, beta, k * N, 0, N); // Do we converge ? this->m_curr_iter = this->m_curr_iter + 1; if ( this->m_monitor_convergence && this->compute_norm_and_converged() ) { A.setView(oldView); return true; } //Early exit: last iteration, no need to prepare the next one. if ( this->is_last_iter() ) { A.setView(oldView); return !this->m_monitor_convergence; } // New f = P'*r (first k components are zero) // if ( k < s ) // f(k+1:s) = f(k+1:s) - beta*M(k+1:s,k); // end if (k < s - 1) { axpy(M, m_f, -beta, k * s + k + 1, k + 1, s - k - 1); } }/// for ends for smaller space //check for convergence once again. If converged just leave the function if ( this->m_monitor_convergence && this->compute_norm_and_converged() ) { A.setView(oldView); return true; } copy( *this->m_r, m_v, 0, N); if (no_preconditioner) { ; } else { m_z.delayed_send = 1; m_v.delayed_send = 1; m_preconditioner->solve( m_v, m_z, true ); m_z.delayed_send = 1; m_v.delayed_send = 1; copy( m_z, m_v, 0, N); } A.apply(m_v, t_idr ); // calculate new omega ns = get_norm(A, *this->m_r, L2); // distributed norm nt = get_norm(A, t_idr, L2); //distributed norm ts = dot(A, t_idr, *this->m_r); // distributed dot. rho = abs(ts / (nt * ns)); this->omega = ts / (nt * nt); if (rho < angle) { this->omega = this->omega * angle / rho; } if (this->omega == (ValueTypeB) 0) { cout << "Error happened in this->omega==0" << endl; exit(1); } // r = r - omega*t; axpy( t_idr, *this->m_r, -(this->omega), 0, N ); axpy( m_v, x, this->omega, 0, N ); // No convergence so far. 
A.setView(oldView); return !this->m_monitor_convergence; } template<class T_Config> void IDRMSYNC_Solver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} template<class T_Config> void IDRMSYNC_Solver_Base<T_Config>::printSolverParameters() const { if (!no_preconditioner) { std::cout << "preconditioner: " << this->m_preconditioner->getName() << " with scope name: " << this->m_preconditioner->getScope() << std::endl; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::dot_ina_loop(const VVector &a, const VVector &b, int offseta, int offsetb, VVector &res, VVector &hres, int offsetres, int size, int k, int s) { int i; for (i = k; i < s; i++) { hres.raw()[i + offsetres] = dotc(a, b, offseta + i * size, offsetb, size); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::dot_ina_loop(const VVector &a, const VVector &b, int offseta, int offsetb, VVector &res, Vector_h &hres, int offsetres, int size, int k, int s) { int i; for (i = k; i < s; i++) { hres.raw()[i + offsetres] = dotc(a, b, offseta + i * size, offsetb, size); } cudaMemcpy((void *) res.raw(), (void *) hres.raw(), (s - k)*sizeof(ValueTypeB), cudaMemcpyHostToDevice); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::divide_for_beta(VVector &nume, VVector &denom, VVector &Result, ValueTypeB *hresult, int k, int s) { ValueTypeB nume_h, denom_h; // cudaMemcpy((void *) &denom_h, (void *) & (denom.raw()[k * s + k]), sizeof(ValueTypeB), cudaMemcpyDeviceToHost); if (denom_h != (ValueTypeB) 0) { cudaMemcpy((void *) &nume_h, (void *) & (nume.raw()[k]), sizeof(ValueTypeB), cudaMemcpyDeviceToHost); *hresult = (ValueTypeB) nume_h / (ValueTypeB)denom_h; } else { *hresult = (ValueTypeB) 0; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::divide_for_beta(VVector &nume, VVector &denom, VVector &Result, ValueTypeB *hresult, int k, int s) { if ((denom.raw()[k * s + k]) != (ValueTypeB) 0) { *hresult = (ValueTypeB) (nume.raw()[k]) / (ValueTypeB)(denom.raw()[k * s + k]); } else { *hresult = (ValueTypeB) 0; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::dot_parts_and_scatter(bool transposed, VVector &a, VVector &b, VVector &result, Vector_h &hresult, int size, int s, int numprox, int pid, int offsetvec) { Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > *Aptr = dynamic_cast<Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > * >(this->m_A); #ifdef AMGX_WITH_MPI Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &A = *Aptr; #endif if ( !Aptr ) //not a matrix! 
{ FatalError("IDRMSync only works with explicit matrices.", AMGX_ERR_INTERNAL); } for (int j = 0; j < s; j++) { hresult.raw()[j] = dotc(b, a, j * size, offsetvec, size); } if (numprox > 1) // more than one processor { //now result vector must be aggregated across all processors #ifdef AMGX_WITH_MPI Vector_h tresult(hresult); A.manager->getComms()->global_reduce_sum(hresult, tresult, A, 0); #endif }// else ends for more than one processor cudaMemcpy((void *)result.raw(), (void *)hresult.raw(), s * sizeof(ValueTypeB), cudaMemcpyHostToDevice); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::dot_parts_and_scatter(bool transposed, VVector &a, VVector &b, VVector &result, VVector &hresult, int size, int s, int numprox, int pid, int offsetvec) { Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > *Aptr = dynamic_cast<Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > * >(this->m_A); #ifdef AMGX_WITH_MPI Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &A = *Aptr; #endif if ( !Aptr ) //not a matrix! { FatalError("IDRMSync only works with explicit matrices.", AMGX_ERR_INTERNAL); } for (int j = 0; j < s; j++) { hresult.raw()[j] = dotc(b, a, j * size, offsetvec, size); } if (numprox > 1) // more than one processor { //now result vector must be aggregated across all processors #ifdef AMGX_WITH_MPI VVector tresult(hresult); A.manager->getComms()->global_reduce_sum(hresult, tresult, A, 0); // tag is not important for allreduce #endif }// else ends for more than one processor } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::gemv_div(bool trans, const VVector &A, const VVector &x, VVector &y, int m, int n, ValueTypeB alpha, ValueTypeB beta, int incx, int incy, int lda, int offsetA, int offsetx, int offsety, VVector &nume, int k, int s, ValueTypeB *ratio) { ValueTypeB numer, denom;//, dotval; gemv_extnd(trans, A, x, y, m, n, alpha, beta, incx, incy, lda, offsetA, offsetx, offsety); cudaDeviceSynchronize(); cudaMemcpy((void *) &numer, (void *) & ((nume.raw())[k]), sizeof(ValueTypeB), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cudaMemcpy((void *) &denom, (void *) & (y.raw())[k + s * k], sizeof(ValueTypeB), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); if (denom != (ValueTypeB) 0) { *ratio = numer / denom; } else { *ratio = (ValueTypeB) 0; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::gemv_div(bool trans, const VVector &A, const VVector &x, VVector &y, int m, int n, ValueTypeB alpha, ValueTypeB beta, int incx, int incy, int lda, int offsetA, int offsetx, int offsety, VVector &nume, int k, int s, ValueTypeB *ratio) { ValueTypeB beta_iter; gemv_extnd(trans, A, x, y, m, n, alpha, beta, incx, incy, lda, offsetA, offsetx, offsety); if (y[k + s * k] != (ValueTypeB)0) { beta_iter = (nume)[k] / (y)[k + s * k]; *ratio = beta_iter; } else { *ratio = (ValueTypeB) 0; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> IDRMSYNC_Solver< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::ValueTypeB IDRMSYNC_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::dotc_div(VVector &a, 
VVector &b, int offseta, int offsetb, int size, VVector &denom, int i, int s, ValueTypeB *ratio) { ValueTypeB dnr; cudaMemcpy((void *) &dnr, (void *) & (denom.raw())[i + s * i], sizeof(ValueTypeB), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); if (dnr != (ValueTypeB) 0) { *ratio = dotc(a, b, offseta, offsetb, size) / dnr; } else { *ratio = (ValueTypeB) 0; } return dnr; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> IDRMSYNC_Solver< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::ValueTypeB IDRMSYNC_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::dotc_div(VVector &a, VVector &b, int offseta, int offsetb, int size, VVector &denom, int i, int s, ValueTypeB *ratio) { ValueTypeB alpha_iter; if (denom[i * s + i] != (ValueTypeB) 0) { alpha_iter = dotc(a, b, offseta, offsetb, size) / denom[i * s + i]; *ratio = alpha_iter; } else { *ratio = (ValueTypeB) 0; } return alpha_iter; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setup_arrays(VVector &P, VVector &M, VVector &b, VVector &x, Vector_h &hbuff, int s, int N, int pid) { int i; for (i = 0; i < s; i++) { (hbuff.raw())[i * s + i] = (ValueTypeB) 1.0; } cudaMemcpy((void *)M.raw(), (void *)hbuff.raw(), s * s * sizeof(ValueTypeB), cudaMemcpyHostToDevice); srand(0); for (i = 0; i < N * s; i++) { (hbuff.raw())[i] = (ValueTypeB) rand() / (ValueTypeB (RAND_MAX)); } cudaMemcpy((void *)P.raw(), (void *)hbuff.raw(), N * s * sizeof(ValueTypeB), cudaMemcpyHostToDevice); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDRMSYNC_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setup_arrays(VVector &P, VVector &M, VVector &b, VVector &x, VVector &hbuff, int s, int N, int pid) { int i; for (i = 0; i < s; i++) { (M.raw())[i * s + i] = (ValueTypeB) 1.0; } srand(0); for (i = 0; i < N * s; i++) { (hbuff.raw())[i] = (ValueTypeB) rand() / (ValueTypeB (RAND_MAX)); } } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class IDRMSYNC_Solver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class IDRMSYNC_Solver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } } // namespace amgx
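// A standalone sketch added for clarity (it is not AMGX code): it restates the
// angle-based omega update performed near the end of solve_iteration above,
// with angle = 0.7. Plain double is assumed here in place of ValueTypeB, and
// the inputs are the already-reduced scalars ts = dot(t, r), nt = ||t||_2,
// ns = ||r||_2 (computed as distributed reductions in the solver itself).
#include <cmath>

static double compute_omega(double ts, double nt, double ns, double angle = 0.7) {
  double rho = std::fabs(ts / (nt * ns));  // |cosine| of the angle between t and r
  double omega = ts / (nt * nt);           // minimal-residual choice of omega
  if (rho < angle) {
    omega *= angle / rho;                  // enlarge omega when t and r are nearly orthogonal
  }
  return omega;                            // caller must still guard against omega == 0
}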
#include <array/NDArray.h> #include <array/NDArrayFactory.h> #include <cuda.h> #include <execution/LaunchContext.h> #include <graph/Context.h> #include <graph/Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <helpers/TAD.h> #include <ops/declarable/CustomOperations.h> #include <ops/specials_cuda.h> #include "testlayers.h" using namespace sd; using namespace sd::graph; class NDArrayCudaBasicsTests : public testing::Test { public: }; ////////////////////////////////////////////////////////////////////////// static cudaError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*, size_t>>& hostData) { if (devicePtrs.size() != hostData.size()) throw std::invalid_argument("prepareDataForCuda: two input sts::vectors should same sizes !"); cudaError_t cudaResult; void* reductionPointer; cudaResult = cudaMalloc(reinterpret_cast<void**>(&reductionPointer), 1024 * 1024); if (cudaResult != 0) return cudaResult; int* allocationPointer; cudaResult = cudaMalloc(reinterpret_cast<void**>(&allocationPointer), 1024 * 1024); if (cudaResult != 0) return cudaResult; lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); cudaStream_t stream = *lc.getCudaStream(); for (int i = 0; i < devicePtrs.size(); ++i) { cudaResult = cudaMalloc(reinterpret_cast<void**>(&devicePtrs[i]), hostData[i].second); if (cudaResult != 0) return cudaResult; cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream); } return cudaResult; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_1) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_2) { auto x = NDArrayFactory::create<int>('c', {5}); auto y = NDArrayFactory::create<int>('c', {5}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_3) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); NDArray::registerSpecialUse({&x}, {&y}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); ASSERT_TRUE(y.isActualOnDeviceSide()); ASSERT_FALSE(y.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_01) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_02) { auto x = NDArrayFactory::create_<int>('c', {5}); auto y = NDArrayFactory::create_<int>('c', {5}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_03) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Neg, *y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // 
ASSERT_TRUE(y->isActualOnHostSide()); // y->syncToHost(); // y->printBuffer("Negatives"); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Cosine_1) { auto x = NDArrayFactory::create_<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<double>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Cosine, *y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // ASSERT_TRUE(y->isActualOnHostSide()); // y->syncToHost(); delete x; delete y; } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', {5}, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', {5}, {2, 4, 6, 8, 10}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); sd::Pointer nativeStream = (sd::Pointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t*>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t*>(&nativeStream); // cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); // cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, // *stream); LaunchContext lc(stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_2) { // allocating host-side arrays NDArray x('c', {5}, {1, 2, 3, 4, 5}); NDArray y('c', {5}, {1, 2, 3, 4, 5}); NDArray z('c', {5}, sd::DataType::DOUBLE); NDArray exp('c', {5}, {2, 4, 6, 8, 10}); sd::Pointer nativeStream = (sd::Pointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t*>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t*>(&nativeStream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); auto res = 
cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_3) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', {5}, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', {5}, {2, 4, 6, 8, 10}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); sd::Pointer nativeStream = (sd::Pointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t*>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t*>(&nativeStream); // cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); // cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, // *stream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); // double* localBuffer = ; z.syncToHost(); cudaMemcpy(z.buffer(), z.specialBuffer(), z.lengthOf() * z.sizeOfT(), cudaMemcpyDeviceToHost); res = cudaStreamSynchronize(*stream); z.tickWriteHost(); ASSERT_EQ(0, res); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_4) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {2, 4, 6, 8, 10}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Add, y, z); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// 
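// A compact sketch inserted for illustration only (not part of the original
// test file): it shows the stream / LaunchContext wiring that the preceding
// TestAdd_* cases perform through a malloc'd sd::Pointer, written with a plain
// cudaStream_t instead. The LaunchContext(cudaStream_t*) constructor is the
// same one used by TestRawBroadcast_2 further below; the helper name and its
// use outside a TEST_F body are assumptions of this sketch.
static void exampleStreamAndContext() {
  cudaStream_t stream;
  if (cudaStreamCreate(&stream) != 0) return;  // bail out if stream creation fails
  {
    LaunchContext lc(&stream);  // context bound to the freshly created stream
    // ... NativeOpExecutioner::exec* calls taking &lc would go here ...
  }
  cudaStreamSynchronize(stream);  // make sure queued work has finished
  cudaStreamDestroy(stream);      // release the stream
}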
TEST_F(NDArrayCudaBasicsTests, TestAdd_5) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); // auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', {5}, {2, 4, 6, 8, 10}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); x += y; // x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); // y.printBuffer("3Y = "); // z.printBuffer("3Result out"); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_6) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); // auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', {5}, {3, 4, 5, 6, 7}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); x += y; // x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_7) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); // auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); // auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', {5}, {3, 4, 5, 6, 7}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); x += 2.; // x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_1) { // allocating host-side arrays auto 
x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {1, 4, 9, 16, 25}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, y, z); // x.printBuffer("3X = "); // y.printBuffer("3Y = "); // z.printBuffer("3Result out"); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_2) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); NDArray z('c', {5}, sd::DataType::DOUBLE); auto exp = NDArrayFactory::create<double>('c', {5}, {1, 4, 9, 16, 25}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, y, z); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_3) { // allocating host-side arrays NDArray x('c', {5}, {1, 2, 3, 4, 5}, sd::DataType::DOUBLE); NDArray y('c', {5}, {1., 2., 3., 4., 5.}, sd::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {1, 4, 9, 16, 25}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, y, z); // x.printBuffer("23X = "); // y.printBuffer("23Y = "); // z.printBuffer("23Result out"); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_4) { // allocating host-side arrays NDArray x('c', {5}, {1, 2, 3, 4, 5}, sd::DataType::DOUBLE); NDArray y('c', {5}, {1., 2., 3., 4., 5.}, 
sd::DataType::DOUBLE); // auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', {5}, {1, 4, 9, 16, 25}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); // x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); // x.printBuffer("23X = "); // y.printBuffer("23Y = "); x *= y; // x.tickWriteDevice(); // x.printBuffer("33Result out"); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestPrimitiveNeg_01) { // allocating host-side arrays auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto exp = NDArrayFactory::create<int>('c', {5}, {-1, -2, -3, -4, -5}); auto stream = x.getContext()->getCudaStream(); // reinterpret_cast<cudaStream_t *>(&nativeStream); NativeOpExecutioner::execTransformSame(x.getContext(), transform::Neg, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, nullptr, nullptr); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); y.tickWriteDevice(); // x.printBuffer("X = "); // y.printBuffer("Y = "); for (int e = 0; e < y.lengthOf(); e++) { ASSERT_NEAR(exp.e<int>(e), y.e<int>(e), 1e-5); } } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveNeg_2) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Neg, y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // ASSERT_TRUE(y->isActualOnHostSide()); // auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); // ASSERT_EQ(0, res); // y.printBuffer("Negatives2"); // delete x; // delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveSqrt_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Sqrt, y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // ASSERT_TRUE(y->isActualOnHostSide()); // auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); // ASSERT_EQ(0, res); ASSERT_TRUE(y.equalsTo(exp)); // y.printBuffer("SQRT output"); // delete x; // delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveAssign_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); // auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); // 
ASSERT_TRUE(x.isActualOnDeviceSide()); // ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Assign, y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // ASSERT_TRUE(y->isActualOnHostSide()); // auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); // ASSERT_EQ(0, res); // printf("Assigned to another array\n"); // y.printBuffer("OUput"); ASSERT_TRUE(y.equalsTo(x)); // y.syncToHost(); // y.printBuffer("IsMax output"); // delete x; // delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // ASSERT_TRUE(y->isActualOnHostSide()); // auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); // ASSERT_EQ(0, res); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); // y.printBuffer("Cosine2"); // delete x; // delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_2) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // ASSERT_TRUE(y->isActualOnHostSide()); // auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); // ASSERT_EQ(0, res); // exp.syncToHost(); // y.printBuffer("PrimitiveCosine2"); // exp.printBuffer("Primitive Cosine exp"); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); // for (int e = 0; e < y.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5); //} ASSERT_TRUE(exp.equalsTo(y)); // delete x; // delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_3) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>({0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, y); // ASSERT_TRUE(x->isActualOnDeviceSide()); // ASSERT_FALSE(x->isActualOnHostSide()); // ASSERT_TRUE(y->isActualOnDeviceSide()); // ASSERT_TRUE(y->isActualOnHostSide()); // auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); // ASSERT_EQ(0, res); // exp.syncToHost(); // y.printBuffer("PrimitiveCosine3"); // exp.printBuffer("Primitive Cosine3 exp"); // y.printShapeInfo("Y shape"); // exp.printShapeInfo("Exp Shape"); ASSERT_TRUE(exp.isSameShape(y)); // // for (int e = 0; e < y.lengthOf(); e++) { // printf("%lf == %lf\n", exp.e<double>(e), y.e<double>(e)); //// ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5); // } ASSERT_TRUE(exp.equalsTo(y)); // delete x; // delete y; } TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_2) { // if 
(!Environment::getInstance().isExperimentalBuild()) // return; NDArray x = NDArrayFactory::create<double>('c', {2, 3, 4}); NDArray y('c', {2, 4}, {10, 20, 30, 40, 50, 60, 70, 80}, sd::DataType::DOUBLE); NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE); // NDArray exp('c', {2,3,4}, // {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., // 103}, sd::DataType::DOUBLE); NDArray exp('c', {2, 3, 4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, sd::DataType::DOUBLE); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0, 2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*, size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t stream; cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(&lc, sd::broadcast::Multiply, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (sd::LongType*)devicePtrs[1], (sd::LongType*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_3) { // if (!Environment::getInstance().isExperimentalBuild()) // return; NDArray x('c', {2, 3, 4}, sd::DataType::DOUBLE); NDArray y('c', {2, 4}, {10, 20, 30, 40, 50, 60, 70, 80}, sd::DataType::DOUBLE); NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::DOUBLE); // NDArray exp('c', {2,3,4}, // {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., // 103}, sd::DataType::DOUBLE); NDArray exp('c', {2, 3, 4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, sd::DataType::DOUBLE); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0, 2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), 
dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*, size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; // cudaStream_t stream; // cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext* pLc = x.getContext(); //(&stream); cudaStream_t* stream = pLc->getCudaStream(); // allocate required amount of global device memory and copy host data to it // cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); for (int i = 0; i < devicePtrs.size(); ++i) { cudaResult = cudaMalloc(reinterpret_cast<void**>(&devicePtrs[i]), hostData[i].second); ASSERT_EQ(0, cudaResult); cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, *stream); } NDArray::registerSpecialUse({&z}, {&x, &y}); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(pLc, sd::broadcast::Multiply, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (sd::LongType*)devicePtrs[1], (sd::LongType*)devicePtrs[2], nullptr, nullptr); // cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); // z.syncToHost(); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); ASSERT_TRUE(exp.equalsTo(z)); // delete cuda stream // cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_1) { // allocating host-side arrays NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE); NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, sd::DataType::DOUBLE); // auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', {2, 3}, {3, 6, 9, 12, 15, 18}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); // x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); x *= y; // x.syncToHost(); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(x)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_01) { // allocating host-side arrays NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE); NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, 
sd::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', {2, 3}); auto exp = NDArrayFactory::create<double>('c', {2, 3}, {3, 6, 9, 12, 15, 18}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); // x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); // x.printBuffer("23X = "); // y.printBuffer("23Y = "); x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z); // *= y; // z.printBuffer("53Result out"); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_02) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 4, 5, 6}); //, sd::DataType::DOUBLE); auto y = NDArrayFactory::create<double>('c', {2, 3}, {3, 3, 3, 3, 3, 3}); //'c', { 3 }, { 2., 3., 4.}, sd::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', {2, 3}); auto exp = NDArrayFactory::create<double>('c', {2, 3}, {3, 6, 9, 12, 15, 18}); // if (x.isActualOnHostSide() && !x.isActualOnDeviceSide()) // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); // x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); // x.printBuffer("23X = "); // y.printBuffer("23Y = "); x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z); // *= y; // z.printBuffer("52Result out"); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_002) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 4, 5, 6}); //, sd::DataType::DOUBLE); auto y = NDArrayFactory::create<double>( 'c', {2, 3}, {2., 3., 3., 3., 3., 3.}); //'c', { 3 }, { 2., 3., 4.}, sd::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', {2, 3}); auto exp = NDArrayFactory::create<double>('c', {2, 3}, {2, 6, 9, 12, 15, 18}); // if (x.isActualOnHostSide() && !x.isActualOnDeviceSide()) // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); // x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); // x.printBuffer("23X = "); // y.printBuffer("23Y = "); 
x.applyPairwiseTransform(pairwise::Multiply, y, z); // *= y; // z.printBuffer("51Result out"); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcastRaw_1) { // if (!Environment::getInstance().isExperimentalBuild()) // return; NDArray x('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT32); NDArray y('c', {3}, {10, 20, 30}, sd::DataType::INT64); NDArray z('c', {2, 3, 4}, {100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100}, sd::DataType::INT32); NDArray exp('c', {2, 3, 4}, {10, 11, 12, 13, 24, 25, 26, 27, 38, 39, 40, 41, 22, 23, 24, 25, 36, 37, 38, 39, 50, 51, 52, 53}, sd::DataType::INT32); // real output [10, 11, 12, 13, 4, 5, 6, 7, 28, 29, 30, 31, 22, 23, 24, 25, 16, 17, 18, 19, 40, 41, 42, 43] x.linspace(0); x.syncToDevice(); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.shapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*, size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(sd::LongType)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(sd::LongType)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext cudaError_t cudaResult; cudaStream_t* stream = x.getContext()->getCudaStream(); LaunchContext* pLc = x.getContext(); // allocate required amount of global device memory and copy host data to it // cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); for (size_t i = 0; i < devicePtrs.size(); ++i) { cudaResult = cudaMalloc(&devicePtrs[i], hostData[i].second); // if(cudaResult != 0) return cudaResult; ASSERT_EQ(cudaResult, 0); cudaMemcpy(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice); } // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(pLc, sd::broadcast::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (sd::LongType*)devicePtrs[1], (sd::LongType*)devicePtrs[2], nullptr, nullptr); cudaResult = cudaStreamSynchronize(*stream); ASSERT_EQ(0, cudaResult); // x.printIndexedBuffer(" X"); // y.printIndexedBuffer("+Y"); // z.printBuffer("ADD broadcasted output"); // verify results // for (int e = 0; e < z.lengthOf(); e++) // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for (int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]); // delete cuda stream // cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply) { // allocating host-side arrays NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE); NDArray y('c', {3}, 
{2., 3., 4.}, sd::DataType::DOUBLE); // auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', {2, 3}, {2, 6, 12, 8, 15, 24}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); // x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); // x.printBuffer("23X = "); // y.printBuffer("23Y = "); x *= y; // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); //} } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_2) { // allocating host-side arrays NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE); NDArray y('c', {3}, {2., 3., 4.}, sd::DataType::DOUBLE); // auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', {2, 3}, {11, 12, 13, 14, 15, 16}); auto expZ = NDArrayFactory::create<double>('c', {2, 3}, {2, 6, 12, 8, 15, 24}); // making raw buffers // sd::Pointer devBufferPtrX, devBufferPtrZ, devShapePtrX; // cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); // ASSERT_EQ(0, res); // res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); // ASSERT_EQ(0, res); // x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); // x.printBuffer("23X = "); // y.printBuffer("23Y = "); // void NDArray::applyTrueBroadcast(sd::BroadcastOpsTuple op, const NDArray* other, NDArray* target, const bool // checkTargetShape, ExtraArguments *extraArgs) x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, exp); // // cudaFree(devBufferPtrX); // cudaFree(devBufferPtrZ); // cudaFree(devShapePtrX); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); //} ASSERT_TRUE(exp.equalsTo(expZ)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestReduceSum_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(15); auto exp = NDArrayFactory::create<double>(15); auto stream = x.getContext()->getCudaStream(); // reinterpret_cast<cudaStream_t *>(&nativeStream); NativeOpExecutioner::execReduceSameScalar(x.getContext(), reduce::Sum, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo()); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); y.syncToHost(); ASSERT_NEAR(y.e<double>(0), 15, 1e-5); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestDup1) { NDArray array('c', {2, 3}, {1, 2, 3, 4, 5, 6}); auto arrC = array.dup('c'); auto arrF = array.dup('f'); // arrC->printBuffer("arrC"); // arrF->printBuffer("arrF"); // arrC->printShapeInfo("C shape"); // arrF->printShapeInfo("F shape"); ASSERT_TRUE(array.equalsTo(arrF)); 
ASSERT_TRUE(array.equalsTo(arrC)); ASSERT_TRUE(arrF.equalsTo(arrC)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_1) { NDArray x('c', {2, 5}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, sd::DataType::DOUBLE); NDArray y('c', {2, 5}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, sd::DataType::DOUBLE); ASSERT_TRUE(x.equalsTo(y)); x.permutei({1, 0}); y.permutei({1, 0}); ASSERT_TRUE(x.equalsTo(y)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_2) { NDArray x('c', {2, 5}, {1, 2, 3, 4, 5, 6, 7, 8, 10, 10}, sd::DataType::DOUBLE); NDArray y('c', {2, 5}, {1, 2, 5, 4, 5, 6, 7, 8, 9, 10}, sd::DataType::DOUBLE); ASSERT_FALSE(x.equalsTo(y)); x.permutei({1, 0}); y.permutei({1, 0}); ASSERT_FALSE(x.equalsTo(y)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_3) { NDArray x('c', {2, 5}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, sd::DataType::DOUBLE); NDArray y('c', {2, 5}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f}, sd::DataType::FLOAT32); ASSERT_FALSE(x.equalsTo(y)); x.permutei({1, 0}); y.permutei({1, 0}); ASSERT_FALSE(x.equalsTo(y)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_1) { NDArray x('c', {2, 3, 4}, {-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, sd::DataType::INT32); NDArray x2('c', {2, 3, 4}, {-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, sd::DataType::INT32); NDArray y('c', {2, 3, 4}, {-2, 3, -4, 5, -2, 3, -4, 5, -2, 3, -4, 5, -2, 3, -4, 5, -2, 3, -4, 5, -2, 3, -4, 5}, sd::DataType::INT32); NDArray k('c', {2, 3}, {-2, 3, -4, 5, -2, 3}, sd::DataType::INT32); NDArray k2('c', {3, 2}, {-2, 3, -4, 5, -2, 3}, sd::DataType::INT32); NDArray exp1('c', {3}, {4.f, 20.f, 36.f}, sd::DataType::FLOAT32); NDArray exp2('c', {2, 3}, {-10.f, -2.f, 6.f, 14.f, 22.f, 30.f}, sd::DataType::FLOAT32); NDArray exp3('c', {4}, {38.f, 41.f, 44.f, 47.f}, sd::DataType::FLOAT32); NDArray exp4('c', {4}, {114.f, 117.f, 120.f, 123.f}, sd::DataType::FLOAT32); NDArray z = x.applyReduce3(sd::reduce3::Dot, y, {0, 2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyReduce3(sd::reduce3::Dot, k, {0, 1}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({0, 2, 1}); y.permutei({0, 2, 1}); z = y.applyReduce3(sd::reduce3::Dot, x, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); x2.permutei({1, 0, 2}); z = x2.applyReduce3(sd::reduce3::Dot, k2, {0, 1}); ASSERT_TRUE(z.equalsTo(&exp4)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_2) { NDArray x('c', {2, 3, 4}, {-10, -9, -8.5, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, sd::DataType::DOUBLE); NDArray x2('c', {2, 3, 4}, {-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, sd::DataType::DOUBLE); NDArray y('c', {2, 3, 4}, {-2, 3, -4, 5, -2, 3, -4, 5, -2, 3, -4, 5, -2.5, 3, -4, 5, -2, 3, -4, 5, -2, 3, -4, 5}, sd::DataType::DOUBLE); NDArray k('c', {2, 3}, {-2, 3, -4, 5.5, -2, 3}, sd::DataType::DOUBLE); NDArray k2('c', {3, 2}, {-2, 3, -4, 5, -2, 3.5}, sd::DataType::DOUBLE); NDArray exp1('c', {3}, {5., 20., 36.}, sd::DataType::DOUBLE); NDArray exp2('c', {2, 3}, {-8., -2., 6., 13., 22., 30.}, sd::DataType::DOUBLE); NDArray exp3('c', {4}, {39., 42.5, 47., 49.5}, sd::DataType::DOUBLE); NDArray exp4('c', {4}, {119., 122.5, 125., 129.5}, 
sd::DataType::DOUBLE); NDArray z = x.applyReduce3(sd::reduce3::Dot, y, {0, 2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyReduce3(sd::reduce3::Dot, k, {0, 1}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({0, 2, 1}); y.permutei({0, 2, 1}); z = y.applyReduce3(sd::reduce3::Dot, x, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); x2.permutei({1, 0, 2}); z = x2.applyReduce3(sd::reduce3::Dot, k2, {0, 1}); ASSERT_TRUE(z.equalsTo(&exp4)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_3) { NDArray x1('c', {2, 2, 2}, {1, 2, 3, 4, 5, 6, 7, 8}, sd::DataType::INT32); NDArray x2('c', {2, 2, 2}, {-1, -2, -3, -4, -5, -6, -7, -8}, sd::DataType::INT32); NDArray x3('c', {3, 2}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE); NDArray x4('c', {3, 2}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{-204}, sd::DataType::FLOAT32); NDArray exp2('c', {}, std::vector<double>{31.5}, sd::DataType::DOUBLE); auto z = x1.applyReduce3(reduce3::Dot, x2); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyReduce3(reduce3::Dot, x4); ASSERT_TRUE(z.equalsTo(&exp2)); x1.permutei({2, 1, 0}); x2.permutei({2, 1, 0}); x3.permutei({1, 0}); x4.permutei({1, 0}); z = x1.applyReduce3(reduce3::Dot, x2); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyReduce3(reduce3::Dot, x4); ASSERT_TRUE(z.equalsTo(&exp2)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyAllReduce3_1) { NDArray x1('c', {2, 3, 2}, { 1, 2, 3, 4, 5, 6, 7, 8, -1, -2, -3, -4, }, sd::DataType::INT32); NDArray x2('c', {2, 2, 2}, {-1, -2, -3, -4, -5, -6, -7, -8}, sd::DataType::INT32); NDArray x3('c', {3, 2}, {1.5, 1.5, 1.5, 1.5, 1.5, 1.5}, sd::DataType::DOUBLE); NDArray x4('c', {3, 2}, {1, 2, 3, 4, 5, 6}, sd::DataType::DOUBLE); NDArray exp1('c', {3, 2}, {-88.f, -124.f, 6.f, -2.f, 22.f, 14.f}, sd::DataType::FLOAT32); NDArray exp2('c', {6, 4}, {-36.f, -44.f, -52.f, -60.f, -42.f, -52.f, -62.f, -72.f, 2.f, 0.f, -2.f, -4.f, 6.f, 4.f, 2.f, 0.f, 10.f, 8.f, 6.f, 4.f, 14.f, 12.f, 10.f, 8.f}, sd::DataType::FLOAT32); NDArray exp3('c', {1, 1}, std::vector<double>{31.5}, sd::DataType::DOUBLE); NDArray exp4('c', {3, 3}, {4.5, 10.5, 16.5, 4.5, 10.5, 16.5, 4.5, 10.5, 16.5}, sd::DataType::DOUBLE); auto z = x1.applyAllReduce3(reduce3::Dot, x2, {0, 2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x1.applyAllReduce3(reduce3::Dot, x2, {0}); ASSERT_TRUE(z.equalsTo(&exp2)); z = x3.applyAllReduce3(reduce3::Dot, x4, {0, 1}); ASSERT_TRUE(z.equalsTo(&exp3)); z = x3.applyAllReduce3(reduce3::Dot, x4, {1}); ASSERT_TRUE(z.equalsTo(&exp4)); x1.permutei({2, 1, 0}); x2.permutei({2, 1, 0}); x3.permutei({1, 0}); x4.permutei({1, 0}); z = x1.applyAllReduce3(reduce3::Dot, x2, {0, 2}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x3.applyAllReduce3(reduce3::Dot, x4, {0}); ASSERT_TRUE(z.equalsTo(&exp4)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test1) { NDArray x('c', {2, 3}, {0, 10, 1, 2, 2.5, -4}, sd::DataType::DOUBLE); NDArray scalar('c', {}, std::vector<double>{100}, sd::DataType::INT64); NDArray vec1('c', {2}, {100, 100}, sd::DataType::INT64); NDArray vec2('c', {3}, {100, 100, 100}, sd::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{1}, sd::DataType::INT64); NDArray exp2('c', {2}, {1, 1}, sd::DataType::INT64); NDArray exp3('c', {3}, {1, 0, 0}, sd::DataType::INT64); NDArray exp4('c', {}, std::vector<double>{2}, sd::DataType::INT64); NDArray exp5('c', {2}, {1, 1}, 
sd::DataType::INT64); NDArray exp6('c', {3}, {1, 0, 0}, sd::DataType::INT64); x.applyIndexReduce(sd::indexreduce::IndexMax, scalar, {0, 1}); ASSERT_TRUE(scalar.equalsTo(&exp1)); x.applyIndexReduce(sd::indexreduce::IndexMax, vec1, {1}); ASSERT_TRUE(vec1.equalsTo(&exp2)); x.applyIndexReduce(sd::indexreduce::IndexMax, vec2, {0}); ASSERT_TRUE(vec2.equalsTo(&exp3)); x.permutei({1, 0}); x.applyIndexReduce(sd::indexreduce::IndexMax, scalar, {0, 1}); ASSERT_TRUE(scalar.equalsTo(&exp4)); x.applyIndexReduce(sd::indexreduce::IndexMax, vec1, {0}); ASSERT_TRUE(vec1.equalsTo(&exp5)); x.applyIndexReduce(sd::indexreduce::IndexMax, vec2, {1}); ASSERT_TRUE(vec2.equalsTo(&exp6)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test2) { NDArray x('c', {2, 3}, {0, 10, 1, 2, 2.5, -4}, sd::DataType::DOUBLE); NDArray exp1('c', {}, std::vector<double>{1}, sd::DataType::INT64); NDArray exp2('c', {2}, {1, 1}, sd::DataType::INT64); NDArray exp3('c', {3}, {1, 0, 0}, sd::DataType::INT64); NDArray exp4('c', {}, std::vector<double>{2}, sd::DataType::INT64); NDArray exp5('c', {2}, {1, 1}, sd::DataType::INT64); NDArray exp6('c', {3}, {1, 0, 0}, sd::DataType::INT64); auto z = x.applyIndexReduce(sd::indexreduce::IndexMax, {0, 1}); ASSERT_TRUE(z.equalsTo(&exp1)); z = x.applyIndexReduce(sd::indexreduce::IndexMax, {1}); ASSERT_TRUE(z.equalsTo(&exp2)); z = x.applyIndexReduce(sd::indexreduce::IndexMax, {0}); ASSERT_TRUE(z.equalsTo(&exp3)); x.permutei({1, 0}); z = x.applyIndexReduce(sd::indexreduce::IndexMax, {0, 1}); ASSERT_TRUE(z.equalsTo(&exp4)); z = x.applyIndexReduce(sd::indexreduce::IndexMax, {0}); ASSERT_TRUE(z.equalsTo(&exp5)); z = x.applyIndexReduce(sd::indexreduce::IndexMax, {1}); ASSERT_TRUE(z.equalsTo(&exp6)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test1) { NDArray x('c', {2, 3, 2}, { 1, 2, 3, 4, 5, 6, 7, 8, -1, -2, -3, -4, }, sd::DataType::INT32); NDArray z1('c', {}, std::vector<double>{100}, sd::DataType::DOUBLE); NDArray z2('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::FLOAT32); NDArray z3('c', {3}, {100, 100, 100}, sd::DataType::DOUBLE); NDArray z4('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32); NDArray z5('c', {2}, {100, 100}, sd::DataType::FLOAT32); NDArray exp1('c', {}, std::vector<double>{2.166667}, sd::DataType::DOUBLE); NDArray exp2('c', {2, 2}, {3.f, 4.f, 1.f, 0.666667f}, sd::DataType::FLOAT32); NDArray exp3('c', {3}, {4.5, 1, 1}, sd::DataType::DOUBLE); NDArray exp4('c', {3, 2}, {4, 5, 1, 1, 1, 1}, sd::DataType::FLOAT32); NDArray exp5('c', {2}, {3.5f, 0.833333f}, sd::DataType::FLOAT32); x.reduceAlongDimension(sd::reduce::Mean, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::Mean, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(sd::reduce::Mean, z3, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1, 0, 2}); // 3x2x2 x.reduceAlongDimension(sd::reduce::Mean, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::Mean, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(sd::reduce::Mean, z5, {0, 2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test2) { NDArray x('c', {2, 3, 2}, { 1, 2, 3, 4, 5, 6, 7, 8, -1, -2, -3, -4, }, sd::DataType::DOUBLE); NDArray exp1('c', {}, 
std::vector<double>{2.166667}, sd::DataType::DOUBLE); NDArray exp2('c', {2, 2}, {3, 4, 1, 0.666667}, sd::DataType::DOUBLE); NDArray exp3('c', {3}, {4.5, 1, 1}, sd::DataType::DOUBLE); NDArray exp4('c', {3, 2}, {4, 5, 1, 1, 1, 1}, sd::DataType::DOUBLE); NDArray exp5('c', {2}, {3.5, 0.833333}, sd::DataType::DOUBLE); NDArray z1 = x.reduceAlongDimension(sd::reduce::Mean, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(sd::reduce::Mean, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(sd::reduce::Mean, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1, 0, 2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(sd::reduce::Mean, {0, 1, 2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(sd::reduce::Mean, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(sd::reduce::Mean, {0, 2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, EqualityTest1) { auto arrayA = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayB = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayC = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayD = NDArrayFactory::create_<float>('f', {2, 4}); auto arrayE = NDArrayFactory::create_<float>('f', {1, 15}); for (int i = 0; i < arrayA->rows(); i++) { for (int k = 0; k < arrayA->columns(); k++) { arrayA->p(i, k, (float)i); } } for (int i = 0; i < arrayB->rows(); i++) { for (int k = 0; k < arrayB->columns(); k++) { arrayB->p(i, k, (float)i); } } for (int i = 0; i < arrayC->rows(); i++) { for (int k = 0; k < arrayC->columns(); k++) { arrayC->p(i, k, (float)i + 1); } } ASSERT_TRUE(arrayA->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayC->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayD->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayE->equalsTo(arrayB, 1e-5)); delete arrayA; delete arrayB; delete arrayC; delete arrayD; delete arrayE; } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test1) { NDArray x('c', {2, 3, 2}, {1.5f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.5f, 8.f, -1.f, -2.f, -3.5f, -4.f}, sd::DataType::FLOAT32); NDArray z1('c', {}, std::vector<double>{100}, sd::DataType::FLOAT32); NDArray z2('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::FLOAT32); NDArray z3('c', {3}, {100, 100, 100}, sd::DataType::FLOAT32); NDArray z4('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::FLOAT32); NDArray z5('c', {2}, {100, 100}, sd::DataType::FLOAT32); NDArray exp1('c', {}, std::vector<double>{26.5f}, sd::DataType::FLOAT32); NDArray exp2('c', {2, 2}, {9.5f, 12.f, 3.f, 2.f}, sd::DataType::FLOAT32); NDArray exp3('c', {3}, {19.f, 4.f, 3.5f}, sd::DataType::FLOAT32); NDArray exp4('c', {3, 2}, {9.f, 10.f, 2.f, 2.f, 1.5f, 2.f}, sd::DataType::FLOAT32); NDArray exp5('c', {2}, {21.5f, 5.f}, sd::DataType::FLOAT32); x.reduceAlongDimension(sd::reduce::Sum, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::Sum, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(sd::reduce::Sum, z3, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1, 0, 2}); // 3x2x2 x.reduceAlongDimension(sd::reduce::Sum, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::Sum, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(sd::reduce::Sum, z5, {0, 2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } 
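// --- illustrative sketch (not part of the original test suite) --------------
// The expected buffers in reduceAlongDimension_same_test1 above can be
// re-derived by brute force on the host. For the c-ordered 2x3x2 input
// {1.5, 2, 3, 4, 5, 6, 7.5, 8, -1, -2, -3.5, -4}, summing over dims {0, 2}
// leaves one value per index of dim 1:
//   j = 0 : 1.5 + 2 + 7.5 + 8 = 19
//   j = 1 : 3 + 4 - 1 - 2     = 4
//   j = 2 : 5 + 6 - 3.5 - 4   = 3.5
// which matches exp3 = {19, 4, 3.5}; the full sum is 26.5 (exp1) and the
// per-(i,k) sums over dim 1 give {9.5, 12, 3, 2} (exp2). A minimal host-side
// check of the {0, 2} case, assuming nothing beyond plain C++:
static inline void sketch_sum_over_dims_0_2(const double in[2][3][2], double out[3]) {
  for (int j = 0; j < 3; ++j) {        // dim 1 survives the reduction
    out[j] = 0.0;
    for (int i = 0; i < 2; ++i)        // dim 0 is reduced
      for (int k = 0; k < 2; ++k)      // dim 2 is reduced
        out[j] += in[i][j][k];
  }
}
// -----------------------------------------------------------------------------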
//////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test2) { NDArray x('c', {2, 3, 2}, { 1.5, 2, 3, 4, 5, 6, 7.5, 8, -1, -2, -3.5, -4, }, sd::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{26}, sd::DataType::INT64); NDArray exp2('c', {2, 2}, {9, 12, 3, 2}, sd::DataType::INT64); NDArray exp3('c', {3}, {18, 4, 4}, sd::DataType::INT64); NDArray exp4('c', {3, 2}, {8, 10, 2, 2, 2, 2}, sd::DataType::INT64); NDArray exp5('c', {2}, {21, 5}, sd::DataType::INT64); NDArray z1 = x.reduceAlongDimension(sd::reduce::Sum, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(sd::reduce::Sum, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(sd::reduce::Sum, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1, 0, 2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(sd::reduce::Sum, {0, 1, 2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(sd::reduce::Sum, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(sd::reduce::Sum, {0, 2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test1) { NDArray x('c', {2, 3, 2}, {0.5, 2, 3, -4, 5, 6, -7.5, 8, -1, -0.5, -3.5, 4}, sd::DataType::DOUBLE); NDArray z1('c', {}, std::vector<double>{true}, sd::DataType::BOOL); NDArray z2('c', {2, 2}, {true, true, true, true}, sd::DataType::BOOL); NDArray z3('c', {3}, {true, true, true}, sd::DataType::BOOL); NDArray z4('c', {3, 2}, {true, true, true, true, true, true}, sd::DataType::BOOL); NDArray z5('c', {2}, {true, true}, sd::DataType::BOOL); NDArray exp1('c', {}, std::vector<double>{true}, sd::DataType::BOOL); NDArray exp2('c', {2, 2}, {true, true, false, true}, sd::DataType::BOOL); NDArray exp3('c', {3}, {true, true, true}, sd::DataType::BOOL); NDArray exp4('c', {3, 2}, {true, true, true, false, true, true}, sd::DataType::BOOL); NDArray exp5('c', {2}, {true, true}, sd::DataType::BOOL); x.reduceAlongDimension(sd::reduce::IsPositive, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::IsPositive, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(sd::reduce::IsPositive, z3, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1, 0, 2}); // 3x2x2 x.reduceAlongDimension(sd::reduce::IsPositive, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::IsPositive, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(sd::reduce::IsPositive, z5, {0, 2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test2) { NDArray x('c', {2, 3, 2}, {0.5, 2, 3, -4, 5, 6, -7.5, 8, -1, -0.5, -3.5, 4}, sd::DataType::INT32); NDArray exp1('c', {}, std::vector<double>{1}, sd::DataType::BOOL); NDArray exp2('c', {2, 2}, {1, 1, 0, 1}, sd::DataType::BOOL); NDArray exp3('c', {3}, {1, 1, 1}, sd::DataType::BOOL); NDArray exp4('c', {3, 2}, {0, 1, 1, 0, 1, 1}, sd::DataType::BOOL); NDArray exp5('c', {2}, {1, 1}, sd::DataType::BOOL); NDArray z1 = x.reduceAlongDimension(sd::reduce::IsPositive, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(sd::reduce::IsPositive, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(sd::reduce::IsPositive, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); 
x.permutei({1, 0, 2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(sd::reduce::IsPositive, {0, 1, 2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(sd::reduce::IsPositive, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(sd::reduce::IsPositive, {0, 2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test1) { NDArray x('c', {2, 3, 2}, {0.5f, 2.f, 3.f, -0.f, 5.f, 6.f, -7.5f, 0.f, -1.f, -0.5f, -3.5f, 4.f}, sd::DataType::FLOAT32); NDArray z1('c', {}, std::vector<double>{100}, sd::DataType::INT64); NDArray z2('c', {2, 2}, {100, 100, 100, 100}, sd::DataType::INT64); NDArray z3('c', {3}, {100, 100, 100}, sd::DataType::INT64); NDArray z4('c', {3, 2}, {100, 100, 100, 100, 100, 100}, sd::DataType::INT64); NDArray z5('c', {2}, {100, 100}, sd::DataType::INT64); NDArray exp1('c', {}, std::vector<double>{2}, sd::DataType::INT64); NDArray exp2('c', {2, 2}, {0, 1, 0, 1}, sd::DataType::INT64); NDArray exp3('c', {3}, {1, 1, 0}, sd::DataType::INT64); NDArray exp4('c', {3, 2}, {0, 1, 0, 1, 0, 0}, sd::DataType::INT64); NDArray exp5('c', {2}, {1, 1}, sd::DataType::INT64); x.reduceAlongDimension(sd::reduce::CountZero, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::CountZero, z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(sd::reduce::CountZero, z3, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1, 0, 2}); // 3x2x2 x.reduceAlongDimension(sd::reduce::CountZero, z1, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(sd::reduce::CountZero, z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(sd::reduce::CountZero, z5, {0, 2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test2) { NDArray x('c', {2, 3, 2}, {0.5, 2, 3, -0, 5, 6, -7.5, 0, -1, -0.5, -3.5, 4}, sd::DataType::INT32); NDArray exp1('c', {}, std::vector<double>{4}, sd::DataType::INT64); NDArray exp2('c', {2, 2}, {1, 1, 0, 2}, sd::DataType::INT64); NDArray exp3('c', {3}, {2, 2, 0}, sd::DataType::INT64); NDArray exp4('c', {3, 2}, {1, 1, 0, 2, 0, 0}, sd::DataType::INT64); NDArray exp5('c', {2}, {2, 2}, sd::DataType::INT64); NDArray z1 = x.reduceAlongDimension(sd::reduce::CountZero, {0, 1, 2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDimension(sd::reduce::CountZero, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDimension(sd::reduce::CountZero, {0, 2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1, 0, 2}); // 3x2x2 NDArray z4 = x.reduceAlongDimension(sd::reduce::CountZero, {0, 1, 2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDimension(sd::reduce::CountZero, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDimension(sd::reduce::CountZero, {0, 2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest1) { auto x = NDArrayFactory::create<float>('c', {5, 5}); auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', { 1, 5, }, {1, 2, 3, 4, 5}, sd::DataType::FLOAT32); NDArray exp('c', {5, 5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, sd::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, *row, z); x += *row; ASSERT_TRUE(x.equalsTo(z)); 
// ASSERT_TRUE(z.equalsTo(&exp)); delete row; } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest2) { auto x = NDArrayFactory::create<float>('c', {5, 5}); // auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', { 1, 5, }, {1, 2, 3, 4, 5}, sd::DataType::FLOAT32); NDArray exp('c', {5, 5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, sd::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, *row, x); ASSERT_TRUE(x.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcast_1) { NDArray exp('c', {2, 3, 2, 2}, {1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3., 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.}, sd::DataType::DOUBLE); auto input = NDArrayFactory::create<double>('c', {2, 3, 2, 2}); auto bias = NDArrayFactory::create<double>('c', {1, 3}); bias.linspace(1); input.applyBroadcast(broadcast::Add, {1}, bias, input); ASSERT_TRUE(exp.equalsTo(&input)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_1) { auto x = NDArrayFactory::create<float>({1, 2, 3, 4, 5, 7, 8, 9}); auto y = NDArrayFactory::create<float>({1, 2, 3, 4, 5, 7, 8, 9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_2) { auto x = NDArrayFactory::create<float16>('c', {9}, {1, 2, 3, 4, 5, 6, 7, 8, 9}); auto y = NDArrayFactory::create<float16>('c', {9}, {1, 2, 3, 4, 5, 6, 7, 8, 9}); ASSERT_TRUE(x.equalsTo(y)); // for (int e = 0; e < x.lengthOf(); e++) // ASSERT_NEAR(x.e<float16>(e), y.e<float16>(e), 1.e-5f); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_3) { auto x = NDArrayFactory::create<bfloat16>({1, 2, 3, 4, 5, 7, 8, 9}); auto y = NDArrayFactory::create<bfloat16>({1, 2, 3, 4, 5, 7, 8, 9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_4) { auto x = NDArrayFactory::create<float>({1, 2, 3, 4, 5, 7, 8, 9}); auto y = NDArrayFactory::create<float>({2, 4, 5, 5, 6, 7, 8, 9}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_5) { auto x = NDArrayFactory::create<float>('c', {3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9}); auto y = NDArrayFactory::create<float>('c', {3, 3}, {2, 4, 5, 5, 6, 7, 8, 9, 10}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_6) { auto x = NDArrayFactory::create<float>('f', {3, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9}); auto y = NDArrayFactory::create<float>('f', {3, 3}, {2, 4, 5, 5, 6, 7, 8, 9, 10}); ASSERT_FALSE(x.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_05) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {1, 8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2 = NDArrayFactory::create<float>(expected.ordering(), expected.getShapeAsVector()); x = 1.; y = 2.; expected = 3.; res2 = 0.f; x.applyTrueBroadcast(BroadcastOpsTuple::Add(), y, res2); // *= y; ASSERT_TRUE(expected.isSameShape(&res2)); ASSERT_TRUE(expected.equalsTo(&res2)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_5) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 1, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; // x.printBuffer("X="); // y.printBuffer("Y="); // 
expected.printBuffer("EXPECTED"); auto result = x + y; // result.printBuffer("1 + 2 ="); // res2.assign(x + y); // x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); // res2.printBuffer("Z="); // x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; // x.printBuffer("OutputX"); // res2.syncToHost(); // res2.printBuffer("OUputZ"); // x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_51) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; // x.printBuffer("X="); // y.printBuffer("Y="); // expected.printBuffer("EXPECTED"); auto result = x + y; // result.printBuffer("1 + 2 ="); // res2.assign(x + y); // x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); // res2.printBuffer("Z="); // x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; // x.printBuffer("OutputX"); // res2.syncToHost(); // res2.printBuffer("OUputZ"); // x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_1) { auto x = NDArrayFactory::create<float>('c', {2, 1, 2}); x = 10.; auto y = x.tile({1, 2, 1}); auto exp = NDArrayFactory::create<float>('c', {2, 2, 2}); exp = 10.; // y.printShapeInfo("Output SHAPE"); // y.printBuffer("Output TILE"); // exp.printBuffer("Expect TILE"); ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_2) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; auto y = x.tile({1, 2, 1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_3) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; x.p(1, 0, 1, 20); x.syncToDevice(); auto y = x.tile({1, 2, 1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; exp.p(1, 0, 1, 20.); exp.p(1, 1, 1, 20.); exp.syncToDevice(); ASSERT_TRUE(exp.equalsTo(y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_2) { double expBuff[] = {2., 3, 3., 4., 4., 5, 5., 6., 6., 7, 7., 8.}; NDArray a('c', {4, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 2, 1, 0, 4, 7}, sd::DataType::FLOAT32); auto x = NDArrayFactory::create<double>('c', {3, 2, 1}); auto y = NDArrayFactory::create<double>('c', {1, 2}); auto expected = NDArrayFactory::create<double>(expBuff, 'c', {3, 2, 2}); x.linspace(1); y.linspace(1); auto result = x + y; ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, assign_2) { NDArray x('c', {4}, {1.5f, 2.5f, 3.5f, 4.5f}, sd::DataType::FLOAT32); NDArray y('c', {4}, sd::DataType::INT32); NDArray expected('c', {4}, {1, 2, 3, 4}, sd::DataType::INT32); y.assign(x); // y.printBuffer("ASSIGN VECTOR"); ASSERT_TRUE(expected.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, subarray_1) { NDArray x('c', {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, sd::DataType::FLOAT32); 
NDArray y('f', {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, sd::DataType::FLOAT32); sd::LongType shapeExpX0[] = {1, 2, 12, 8192, 1, 99}; float buffExpX0[] = {1.f, 13.f}; sd::LongType shapeExpX1[] = {1, 2, 12, 8192, 1, 99}; float buffExpX1[] = {2.f, 14.f}; sd::LongType shapeExpX2[] = {3, 2, 1, 1, 12, 4, 1, 8192, 1, 99}; float buffExpX2[] = {1.f, 13.f}; sd::LongType shapeExpX3[] = {2, 2, 4, 12, 1, 8192, 1, 99}; float buffExpX3[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; sd::LongType shapeExpX4[] = {3, 2, 1, 4, 12, 4, 1, 8192, 1, 99}; float buffExpX4[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; sd::LongType shapeExpX5[] = {2, 2, 3, 12, 4, 8192, 1, 99}; float buffExpX5[] = {4.f, 8.f, 12.f, 16.f, 20.f, 24.f}; sd::LongType shapeExpY0[] = {1, 2, 1, 8192, 1, 99}; float buffExpY0[] = {1.f, 2.f}; sd::LongType shapeExpY1[] = {1, 2, 1, 8192, 1, 99}; float buffExpY1[] = {7.f, 8.f}; sd::LongType shapeExpY2[] = {3, 2, 1, 1, 1, 2, 6, 8192, 1, 102}; float buffExpY2[] = {1.f, 2.f}; sd::LongType shapeExpY3[] = {2, 2, 4, 1, 6, 8192, 1, 99}; float buffExpY3[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; sd::LongType shapeExpY4[] = {3, 2, 1, 4, 1, 2, 6, 8192, 1, 102}; float buffExpY4[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; sd::LongType shapeExpY5[] = {2, 2, 3, 1, 2, 8192, 1, 99}; float buffExpY5[] = {19.f, 21.f, 23.f, 20.f, 22.f, 24.f}; NDArray x0 = x(0, {1, 2}); NDArray xExp(buffExpX0, shapeExpX0); ASSERT_TRUE(xExp.isSameShape(x0)); ASSERT_TRUE(xExp.equalsTo(x0)); // for(int i = 0; i < shape::shapeInfoLength(x0.rankOf()); ++i) // ASSERT_TRUE(x0.shapeInfo()[i] == shapeExpX0[i]); // for(int i = 0; i < x0.lengthOf(); ++i) // ASSERT_TRUE(x0.e<float>(i) == buffExpX0[i]); NDArray x1 = x(1, {1, 2}); NDArray x1Exp(buffExpX1, shapeExpX1); ASSERT_TRUE(x1Exp.isSameShape(x1)); ASSERT_TRUE(x1Exp.equalsTo(x1)); // for(int i = 0; i < shape::shapeInfoLength(x1.rankOf()); ++i) // ASSERT_TRUE(x1.shapeInfo()[i] == shapeExpX1[i]); // for(int i = 0; i < x1.lengthOf(); ++i) // ASSERT_TRUE(x1.e<float>(i) == buffExpX1[i]); NDArray x2 = x(0, {1, 2}, true); NDArray x2Exp(buffExpX2, shapeExpX2); ASSERT_TRUE(x2Exp.isSameShape(x2)); // x2.printBuffer("X2"); // x2Exp.printBuffer("X2 EXPECT"); ASSERT_TRUE(x2Exp.equalsTo(x2)); // for(int i = 0; i < shape::shapeInfoLength(x2.rankOf()); ++i) // ASSERT_TRUE(x2.shapeInfo()[i] == shapeExpX2[i]); // for(int i = 0; i < x2.lengthOf(); ++i) // ASSERT_TRUE(x2.e<float>(i) == buffExpX2[i]); NDArray x3 = x(2, {1}); NDArray x3Exp(buffExpX3, shapeExpX3); ASSERT_TRUE(x3Exp.isSameShape(x3)); ASSERT_TRUE(x3Exp.equalsTo(x3)); // for(int i = 0; i < shape::shapeInfoLength(x3.rankOf()); ++i) // ASSERT_TRUE(x3.shapeInfo()[i] == shapeExpX3[i]); // for(int i = 0; i < x3.lengthOf(); ++i) // ASSERT_TRUE(x3.e<float>(i) == buffExpX3[i]); NDArray x4 = x(2, {1}, true); NDArray x4Exp(buffExpX4, shapeExpX4); ASSERT_TRUE(x4Exp.isSameShape(x4)); ASSERT_TRUE(x4Exp.equalsTo(x4)); // for(int i = 0; i < shape::shapeInfoLength(x4.rankOf()); ++i) // ASSERT_TRUE(x4.shapeInfo()[i] == shapeExpX4[i]); // for(int i = 0; i < x4.lengthOf(); ++i) // ASSERT_TRUE(x4.e<float>(i) == buffExpX4[i]); NDArray x5 = x(3, {2}); NDArray x5Exp(buffExpX5, shapeExpX5); ASSERT_TRUE(x5Exp.isSameShape(x5)); ASSERT_TRUE(x5Exp.equalsTo(x5)); // for(int i = 0; i < shape::shapeInfoLength(x5.rankOf()); ++i) // ASSERT_TRUE(x5.shapeInfo()[i] == shapeExpX5[i]); // for(int i = 0; i < x5.lengthOf(); ++i) // ASSERT_TRUE(x5.e<float>(i) == buffExpX5[i]); // ******************* // 
NDArray y0 = y(0, {1, 2}); NDArray y0Exp(buffExpY0, shapeExpY0); ASSERT_TRUE(y0Exp.isSameShape(y0)); ASSERT_TRUE(y0Exp.equalsTo(y0)); // for(int i = 0; i < shape::shapeInfoLength(y0.rankOf()); ++i) // ASSERT_TRUE(y0.shapeInfo()[i] == shapeExpY0[i]); // for(int i = 0; i < y0.lengthOf(); ++i) // ASSERT_TRUE(y0.e<float>(i) == buffExpY0[i]); NDArray y1 = y(1, {1, 2}); NDArray y1Exp(buffExpY1, shapeExpY1); ASSERT_TRUE(y1Exp.isSameShape(y1)); ASSERT_TRUE(y1Exp.equalsTo(y1)); // for(int i = 0; i < shape::shapeInfoLength(y1.rankOf()); ++i) // ASSERT_TRUE(y1.shapeInfo()[i] == shapeExpY1[i]); // for(int i = 0; i < y1.lengthOf(); ++i) // ASSERT_TRUE(y1.e<float>(i) == buffExpY1[i]); NDArray y2 = y(0, {1, 2}, true); NDArray y2Exp(buffExpY2, shapeExpY2); ASSERT_TRUE(y2Exp.isSameShape(y2)); ASSERT_TRUE(y2Exp.equalsTo(y2)); // for(int i = 0; i < shape::shapeInfoLength(y2.rankOf()); ++i) // ASSERT_TRUE(y2.shapeInfo()[i] == shapeExpY2[i]); // for(int i = 0; i < y2.lengthOf(); ++i) // ASSERT_TRUE(y2.e<float>(i) == buffExpY2[i]); NDArray y3 = y(2, {1}); NDArray y3Exp(buffExpY3, shapeExpY3); ASSERT_TRUE(y3Exp.isSameShape(y3)); ASSERT_TRUE(y3Exp.equalsTo(y3)); // for(int i = 0; i < shape::shapeInfoLength(y3.rankOf()); ++i) // ASSERT_TRUE(y3.shapeInfo()[i] == shapeExpY3[i]); // for(int i = 0; i < y3.lengthOf(); ++i) // ASSERT_TRUE(y3.e<float>(i) == buffExpY3[i]); NDArray y4 = y(2, {1}, true); NDArray y4Exp = NDArrayFactory::create<float>('f', {2, 1, 4}, {5, 6, 11, 12, 17, 18, 23, 24}); ASSERT_TRUE(y4Exp.isSameShape(y4)); ASSERT_TRUE(y4Exp.equalsTo(y4)); // for(int i = 0; i < shape::shapeInfoLength(y4.rankOf()); ++i) // ASSERT_TRUE(y4.shapeInfo()[i] == shapeExpY4[i]); // for(int i = 0; i < y4.lengthOf(); ++i) // ASSERT_TRUE(y4.e<float>(i) == buffExpY4[i]); NDArray y5 = y(3, {2}); NDArray y5Exp(buffExpY5, shapeExpY5); ASSERT_TRUE(y5Exp.isSameShape(y5)); ASSERT_TRUE(y5Exp.equalsTo(y5)); // for(int i = 0; i < shape::shapeInfoLength(y5.rankOf()); ++i) // ASSERT_TRUE(y5.shapeInfo()[i] == shapeExpY5[i]); // for(int i = 0; i < y5.lengthOf(); ++i) // ASSERT_TRUE(y5.e<float>(i) == buffExpY5[i]); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Test_diagonal_1) { auto x = NDArrayFactory::create<float>('c', {2, 3}, {1, 2, 3, 4, 5, 6}); auto exp = NDArrayFactory::create<float>('c', {2, 1}, {1, 5}); auto diag = x.diagonal('c'); // diag.syncToDevice(); for (sd::LongType e = 0; e < exp.lengthOf(); ++e) { printf("VAL[%ld] = %f\n", e, diag.e<float>(e)); //, exp.e<float>(e), 1.e-5); } for (sd::LongType e = 0; e < exp.lengthOf(); ++e) { ASSERT_NEAR(diag.e<float>(e), exp.e<float>(e), 1.e-5); } double eps(1.e-5); NDArray tmp(sd::DataType::FLOAT32, x.getContext()); // scalar = 0 ExtraArguments extras({eps}); NativeOpExecutioner::execReduce3Scalar(diag.getContext(), reduce3::EqualsWithEps, diag.buffer(), diag.shapeInfo(), diag.specialBuffer(), diag.specialShapeInfo(), extras.argumentsAsT(sd::DataType::FLOAT32), exp.buffer(), exp.shapeInfo(), exp.specialBuffer(), exp.specialShapeInfo(), tmp.buffer(), tmp.shapeInfo(), tmp.specialBuffer(), tmp.specialShapeInfo()); cudaStream_t* stream = x.getContext()->getCudaStream(); auto res = cudaStreamSynchronize(*stream); // tmp.printBuffer("Compare result is (expected 0)"); ASSERT_TRUE(exp.isSameShape(diag)); ASSERT_TRUE(exp.equalsTo(diag)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_02) { auto x = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); // x.linspace(1); auto exp = NDArrayFactory::create<float>( 'c', {3, 4, 
5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x->reshapei('c', {3, 4, 5}); x->permutei({0, 1, 2}); x->streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(x)); ASSERT_TRUE(exp.equalsTo(x)); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_0) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>( 'c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_1) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>( 'c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_2) { // auto x = NDArrayFactory::create<float>('c', {1, 60}); auto xx = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); // auto x = *xx; // x.linspace(1); // auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, // {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, // 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, // 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, // 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); // x.permutei({0, 1, 2}); // x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); // ASSERT_TRUE(exp.isSameShape(&x)); // ASSERT_TRUE(exp.equalsTo(&x)); delete xx; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_3) { auto x = NDArrayFactory::create<float>('c', {1, 60}); // x.linspace(1); for (int l = 0; l < x.lengthOf(); l++) x.p(l, float(l + 1.f)); auto exp = NDArrayFactory::create<float>( 'c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 
17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_1) { auto x = NDArrayFactory::empty<float>(); ASSERT_TRUE(x.isActualOnHostSide()); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_2) { auto x = NDArrayFactory::empty_<float>(); ASSERT_TRUE(x->isEmpty()); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_Empty_3) { auto x = NDArrayFactory::empty(sd::DataType::FLOAT32); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_4) { auto x = NDArrayFactory::empty_(sd::DataType::FLOAT32); ASSERT_TRUE(x->isEmpty()); delete x; }
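// --- illustrative sketch (not part of the original test suite) --------------
// Several tests above (TestRawBroadcast_2/3, TestBroadcastRaw_1) follow the
// same pattern: collect (host pointer, byte size) pairs, allocate matching
// device buffers, and copy the host data across before calling
// NativeOpExecutioner::execBroadcast. A helper in the spirit of the
// allocateDeviceMem(...) call used there could look like the sketch below.
// The signature is an assumption; only the CUDA runtime calls mirror what the
// tests already do inline, and it relies solely on headers the test file
// already pulls in (<vector>, <utility>, CUDA runtime).
static inline cudaError_t sketchAllocateDeviceMem(
    cudaStream_t stream, std::vector<void*>& devicePtrs,
    const std::vector<std::pair<void*, size_t>>& hostData) {
  devicePtrs.resize(hostData.size(), nullptr);
  for (size_t i = 0; i < hostData.size(); ++i) {
    // device buffer of the same byte size as the host chunk
    cudaError_t err = cudaMalloc(&devicePtrs[i], hostData[i].second);
    if (err != cudaSuccess) return err;
    // asynchronous H2D copy on the stream the test will launch its kernel on
    err = cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second,
                          cudaMemcpyHostToDevice, stream);
    if (err != cudaSuccess) return err;
  }
  return cudaSuccess;
}
// -----------------------------------------------------------------------------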
#include <thrust/complex.h> /////////////////////////////////////////////////////////////////////////////// // READER // /////////////////////////////////////////////////////////////////////////////// // Byte swap short __device__ short swap_int16( short val ) { return ( val << 8 ) | ( ( val >> 8 ) & 0xFF ); } // Byte swap unsigned short __device__ unsigned short swap_uint16( unsigned short val ) { return ( val << 8 ) | ( val >> 8 ); } // Byte swap int __device__ int swap_int32( int val ) { val = ( ( val << 8 ) & 0xFF00FF00 ) | ( ( val >> 8 ) & 0xFF00FF ); return ( val << 16 ) | ( ( val >> 16 ) & 0xFFFF ); } // Byte swap unsigned int __device__ unsigned int swap_uint32( unsigned int val ) { val = ( ( val << 8 ) & 0xFF00FF00 ) | ( ( val >> 8 ) & 0xFF00FF ); return ( val << 16 ) | ( val >> 16 ); } // Byte swap float __device__ float swap_float( float val ) { float retVal; char *floatToConvert = reinterpret_cast<char *>( &val ); char *returnFloat = reinterpret_cast<char *>( &retVal ); int ds = sizeof( float ); // data size // swap the bytes into a temporary buffer #pragma unroll 4 for ( int i = 0; i < ds; i++ ) { returnFloat[i] = floatToConvert[( ds - 1 ) - i]; } return retVal; } __device__ double swap_double( double val ) { double retVal; char * doubleToConvert = reinterpret_cast<char *>( &val ); char * returnDouble = reinterpret_cast<char *>( &retVal ); int ds = sizeof( double ); // data size // swap the bytes into a temporary buffer #pragma unroll 8 for ( int i = 0; i < ds; i++ ) { returnDouble[i] = doubleToConvert[( ds - 1 ) - i]; } return retVal; } template<typename T> __device__ void _cupy_unpack( const size_t N, const bool little, unsigned char *__restrict__ input, T *__restrict__ output ) { const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( int tid = tx; tid < N; tid += stride ) { if ( little ) { output[tid] = reinterpret_cast<T *>( input )[tid]; } else { T data = reinterpret_cast<T *>( input )[tid]; #if __cplusplus >= 201703L if constexpr ( std::is_same<T, char>::value || std::is_same<T, unsigned char>::value ) { output[tid] = data; } else if constexpr ( std::is_same<T, short>::value ) { output[tid] = swap_int16( data ); } else if constexpr ( std::is_same<T, unsigned short>::value ) { output[tid] = swap_uint16( data ); } else if constexpr ( std::is_same<T, int>::value ) { output[tid] = swap_int32( data ); } else if constexpr ( std::is_same<T, unsigned int>::value ) { output[tid] = swap_uint32( data ); } else if constexpr ( std::is_same<T, float>::value ) { output[tid] = swap_float( data ); } else if constexpr ( std::is_same<T, double>::value ) { output[tid] = swap_double( data ); } else if constexpr ( std::is_same<T, thrust::complex<float>>::value ) { float real = swap_float( data.real( ) ); float imag = swap_float( data.imag( ) ); output[tid] = thrust::complex<float>( real, imag ); } else if constexpr ( std::is_same<T, thrust::complex<double>>::value ) { double real = swap_double( data.real( ) ); double imag = swap_double( data.imag( ) ); output[tid] = thrust::complex<double>( real, imag ); } #else if ( std::is_same<T, char>::value ) { output[tid] = data; } else if ( std::is_same<T, short>::value ) { output[tid] = swap_int16( data ); } else if ( std::is_same<T, unsigned short>::value ) { output[tid] = swap_uint16( data ); } else if ( std::is_same<T, int>::value ) { output[tid] = swap_int32( data ); } else if ( std::is_same<T, unsigned int>::value ) { output[tid] = swap_uint32( data ); } 
else if ( std::is_same<T, float>::value ) { output[tid] = swap_float( data ); } else if ( std::is_same<T, double>::value ) { output[tid] = swap_double( data ); } #endif } } } #if __cplusplus < 201703L template<typename T> __device__ void _cupy_unpack_complex( const size_t N, const bool little, unsigned char *__restrict__ input, T *__restrict__ output ) { const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( int tid = tx; tid < N; tid += stride ) { if ( little ) { output[tid] = reinterpret_cast<T *>( input )[tid]; } else { T data = reinterpret_cast<T *>( input )[tid]; if ( std::is_same<T, thrust::complex<float>>::value ) { float real = swap_float( data.real( ) ); float imag = swap_float( data.imag( ) ); output[tid] = thrust::complex<float>( real, imag ); } else if ( std::is_same<T, thrust::complex<double>>::value ) { double real = swap_double( data.real( ) ); double imag = swap_double( data.imag( ) ); output[tid] = thrust::complex<double>( real, imag ); } } } } #endif extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_int8( const size_t N, const bool little, unsigned char *__restrict__ input, char *__restrict__ output ) { _cupy_unpack<char>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_uint8( const size_t N, const bool little, unsigned char *__restrict__ input, unsigned char *__restrict__ output ) { _cupy_unpack<unsigned char>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_int16( const size_t N, const bool little, unsigned char *__restrict__ input, short *__restrict__ output ) { _cupy_unpack<short>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_uint16( const size_t N, const bool little, unsigned char *__restrict__ input, unsigned short *__restrict__ output ) { _cupy_unpack<unsigned short>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_int32( const size_t N, const bool little, unsigned char *__restrict__ input, int *__restrict__ output ) { _cupy_unpack<int>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_uint32( const size_t N, const bool little, unsigned char *__restrict__ input, unsigned int *__restrict__ output ) { _cupy_unpack<unsigned int>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_float32( const size_t N, const bool little, unsigned char *__restrict__ input, float *__restrict__ output ) { _cupy_unpack<float>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_float64( const size_t N, const bool little, unsigned char *__restrict__ input, double *__restrict__ output ) { _cupy_unpack<double>( N, little, input, output ); } #if __cplusplus >= 201703L extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_complex64( const size_t N, const bool little, unsigned char *__restrict__ input, thrust::complex<float> *__restrict__ output ) { _cupy_unpack<thrust::complex<float>>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_complex128( const size_t N, const bool little, unsigned char *__restrict__ input, thrust::complex<double> *__restrict__ output ) { _cupy_unpack<thrust::complex<double>>( N, little, input, output ); } #else extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_complex64( 
const size_t N, const bool little, unsigned char *__restrict__ input, thrust::complex<float> *__restrict__ output ) { _cupy_unpack_complex<thrust::complex<float>>( N, little, input, output ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_unpack_complex128( const size_t N, const bool little, unsigned char *__restrict__ input, thrust::complex<double> *__restrict__ output ) { _cupy_unpack_complex<thrust::complex<double>>( N, little, input, output ); } #endif
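// ---------------------------------------------------------------------------
// A minimal standalone sanity check (not part of the original source). The
// kernels above are written to be compiled by CuPy/NVRTC and launched from
// Python; the host driver below assumes instead that it is appended to this
// translation unit so the extern "C" kernels are visible. It feeds
// _cupy_unpack_float32 a big-endian byte stream and checks that the values
// come back in native order via swap_float().
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

int main( ) {
    const size_t N = 4;
    float host_vals[N] = { 1.0f, -2.5f, 3.25f, 0.5f };

    // Serialize the native (little-endian) floats as big-endian bytes.
    unsigned char host_bytes[N * sizeof( float )];
    for ( size_t i = 0; i < N; i++ ) {
        const unsigned char *src = reinterpret_cast<const unsigned char *>( &host_vals[i] );
        for ( size_t b = 0; b < sizeof( float ); b++ ) {
            host_bytes[i * sizeof( float ) + b] = src[sizeof( float ) - 1 - b];
        }
    }

    unsigned char *d_in {};
    float *        d_out {};
    cudaMalloc( &d_in, sizeof( host_bytes ) );
    cudaMalloc( &d_out, N * sizeof( float ) );
    cudaMemcpy( d_in, host_bytes, sizeof( host_bytes ), cudaMemcpyHostToDevice );

    // little == false -> each element is byte-swapped on the device.
    _cupy_unpack_float32<<<1, 512>>>( N, false, d_in, d_out );

    float host_out[N];
    cudaMemcpy( host_out, d_out, N * sizeof( float ), cudaMemcpyDeviceToHost );
    for ( size_t i = 0; i < N; i++ ) {
        printf( "expected % .3f  got % .3f\n", host_vals[i], host_out[i] );
    }

    cudaFree( d_in );
    cudaFree( d_out );
    return 0;
}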
#include <stdio.h> #include <stdlib.h> #include <string.h> //#include "KeccakTreeGPU.h" #include "KeccakTree.h" #include "KeccakF.h" __host__ void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } /*GPU constants __constant__ tKeccakLane KeccakF_RoundConstants[22] = { (tKeccakLane)0x00000001 , (tKeccakLane)0x00008082 , (tKeccakLane)0x0000808a , (tKeccakLane)0x80008000 , (tKeccakLane)0x0000808b , (tKeccakLane)0x80000001 , (tKeccakLane)0x80008081 , (tKeccakLane)0x00008009 , (tKeccakLane)0x0000008a , (tKeccakLane)0x00000088 , (tKeccakLane)0x80008009 , (tKeccakLane)0x8000000a , (tKeccakLane)0x8000808b , (tKeccakLane)0x0000008b , (tKeccakLane)0x00008089 , (tKeccakLane)0x00008003 , (tKeccakLane)0x00008002 , (tKeccakLane)0x00000080 , (tKeccakLane)0x0000800a , (tKeccakLane)0x8000000a , (tKeccakLane)0x80008081 , (tKeccakLane)0x00008080 }; */ //host constants tKeccakLane KeccakF_RoundConstants_h[22] = { (tKeccakLane)0x00000001 , (tKeccakLane)0x00008082 , (tKeccakLane)0x0000808a , (tKeccakLane)0x80008000 , (tKeccakLane)0x0000808b , (tKeccakLane)0x80000001 , (tKeccakLane)0x80008081 , (tKeccakLane)0x00008009 , (tKeccakLane)0x0000008a , (tKeccakLane)0x00000088 , (tKeccakLane)0x80008009 , (tKeccakLane)0x8000000a , (tKeccakLane)0x8000808b , (tKeccakLane)0x0000008b , (tKeccakLane)0x00008089 , (tKeccakLane)0x00008003 , (tKeccakLane)0x00008002 , (tKeccakLane)0x00000080 , (tKeccakLane)0x0000800a , (tKeccakLane)0x8000000a , (tKeccakLane)0x80008081 , (tKeccakLane)0x00008080 }; // Device (GPU) Keccak-f function implementation // unrolled __device__ void KeccakFunr( tKeccakLane * state, const tKeccakLane *KeccakF_RoundConstants ) { unsigned int round; //try to avoid to many registers tKeccakLane BC[5]; tKeccakLane temp; for ( round = 0; round < cKeccakNumberOfRounds; ++round ) { { // Theta BC[0] = state[0] ^ state[5] ^ state[10] ^ state[15] ^ state[20]; BC[1] = state[1] ^ state[6] ^ state[11] ^ state[16] ^ state[21]; BC[2] = state[2] ^ state[7] ^ state[12] ^ state[17] ^ state[22]; BC[3] = state[3] ^ state[8] ^ state[13] ^ state[18] ^ state[23]; BC[4] = state[4] ^ state[9] ^ state[14] ^ state[19] ^ state[24]; temp = BC[4] ^ ROL32(BC[1], 1);//x=0 state[0] ^= temp; state[5] ^= temp; state[10] ^= temp; state[15] ^= temp; state[20] ^= temp; temp = BC[0] ^ ROL32(BC[2], 1);//x=1 state[1] ^= temp; state[6] ^= temp; state[11] ^= temp; state[16] ^= temp; state[21] ^= temp; temp = BC[1] ^ ROL32(BC[3], 1);//x=2 state[2] ^= temp; state[7] ^= temp; state[12] ^= temp; state[17] ^= temp; state[22] ^= temp; temp = BC[2] ^ ROL32(BC[4], 1);//x=3 state[3] ^= temp; state[8] ^= temp; state[13] ^= temp; state[18] ^= temp; state[23] ^= temp; temp = BC[3] ^ ROL32(BC[0], 1);//x=4 state[4] ^= temp; state[9] ^= temp; state[14] ^= temp; state[19] ^= temp; state[24] ^= temp; }//end Theta { // Rho Pi temp = state[1]; BC[0] = state[10]; state[10] = ROL32( temp, 1); temp = BC[0];//x=0 BC[0] = state[7]; state[7] = ROL32( temp, 3); temp = BC[0]; BC[0] = state[11]; state[11] = ROL32( temp, 6); temp = BC[0]; BC[0] = state[17]; state[17] = ROL32( temp,10); temp = BC[0]; BC[0] = state[18]; state[18] = ROL32( temp,15); temp = BC[0]; BC[0] = state[3]; state[3] = ROL32( temp,21); temp = BC[0];//x=5 BC[0] = state[5]; state[5] = ROL32( temp,28); temp = BC[0]; BC[0] = state[16]; state[16] = ROL32( temp, 4); temp = BC[0]; BC[0] = state[8]; state[8] = ROL32( temp,13); temp = BC[0]; BC[0] = state[21]; state[21] = 
ROL32( temp,23); temp = BC[0]; BC[0] = state[24]; state[24] = ROL32( temp, 2); temp = BC[0];//x=10 BC[0] = state[4]; state[4] = ROL32( temp,14); temp = BC[0]; BC[0] = state[15]; state[15] = ROL32( temp,27); temp = BC[0]; BC[0] = state[23]; state[23] = ROL32( temp, 9); temp = BC[0]; BC[0] = state[19]; state[19] = ROL32( temp,24); temp = BC[0]; BC[0] = state[13]; state[13] = ROL32( temp, 8); temp = BC[0];//x=15 BC[0] = state[12]; state[12] = ROL32( temp,25); temp = BC[0]; BC[0] = state[2]; state[2] = ROL32( temp,11); temp = BC[0]; BC[0] = state[20]; state[20] = ROL32( temp,30); temp = BC[0]; BC[0] = state[14]; state[14] = ROL32( temp,18); temp = BC[0]; BC[0] = state[22]; state[22] = ROL32( temp, 7); temp = BC[0];//x=20 BC[0] = state[9]; state[9] = ROL32( temp,29); temp = BC[0]; BC[0] = state[6]; state[6] = ROL32( temp,20); temp = BC[0]; BC[0] = state[1]; state[1] = ROL32( temp,12); temp = BC[0];//x=23 }//end Rho Pi { // Chi BC[0] = state[0]; BC[1] = state[1]; BC[2] = state[2]; BC[3] = state[3]; BC[4] = state[4]; state[0] = BC[0] ^((~BC[1]) & BC[2]); state[1] = BC[1] ^((~BC[2]) & BC[3]); state[2] = BC[2] ^((~BC[3]) & BC[4]); state[3] = BC[3] ^((~BC[4]) & BC[0]); state[4] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[5]; BC[1] = state[6]; BC[2] = state[7]; BC[3] = state[8]; BC[4] = state[9]; state[5] = BC[0] ^((~BC[1]) & BC[2]); state[6] = BC[1] ^((~BC[2]) & BC[3]); state[7] = BC[2] ^((~BC[3]) & BC[4]); state[8] = BC[3] ^((~BC[4]) & BC[0]); state[9] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[10]; BC[1] = state[11]; BC[2] = state[12]; BC[3] = state[13]; BC[4] = state[14]; state[10] = BC[0] ^((~BC[1]) & BC[2]); state[11] = BC[1] ^((~BC[2]) & BC[3]); state[12] = BC[2] ^((~BC[3]) & BC[4]); state[13] = BC[3] ^((~BC[4]) & BC[0]); state[14] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[15]; BC[1] = state[16]; BC[2] = state[17]; BC[3] = state[18]; BC[4] = state[19]; state[15] = BC[0] ^((~BC[1]) & BC[2]); state[16] = BC[1] ^((~BC[2]) & BC[3]); state[17] = BC[2] ^((~BC[3]) & BC[4]); state[18] = BC[3] ^((~BC[4]) & BC[0]); state[19] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[20]; BC[1] = state[21]; BC[2] = state[22]; BC[3] = state[23]; BC[4] = state[24]; state[20] = BC[0] ^((~BC[1]) & BC[2]); state[21] = BC[1] ^((~BC[2]) & BC[3]); state[22] = BC[2] ^((~BC[3]) & BC[4]); state[23] = BC[3] ^((~BC[4]) & BC[0]); state[24] = BC[4] ^((~BC[0]) & BC[1]); }//end Chi // Iota state[0] ^= KeccakF_RoundConstants[round]; } } //end unrolled //Host Keccak-f function (pb with using the same constants between host and device) //unrolled __host__ void KeccakFunr_h( tKeccakLane * state ) { unsigned int round; //try to avoid to many registers tKeccakLane BC[5]; tKeccakLane temp; for ( round = 0; round < cKeccakNumberOfRounds; ++round ) { { // Theta BC[0] = state[0] ^ state[5] ^ state[10] ^ state[15] ^ state[20]; BC[1] = state[1] ^ state[6] ^ state[11] ^ state[16] ^ state[21]; BC[2] = state[2] ^ state[7] ^ state[12] ^ state[17] ^ state[22]; BC[3] = state[3] ^ state[8] ^ state[13] ^ state[18] ^ state[23]; BC[4] = state[4] ^ state[9] ^ state[14] ^ state[19] ^ state[24]; temp = BC[4] ^ ROL32(BC[1], 1);//x=0 state[0] ^= temp; state[5] ^= temp; state[10] ^= temp; state[15] ^= temp; state[20] ^= temp; temp = BC[0] ^ ROL32(BC[2], 1);//x=1 state[1] ^= temp; state[6] ^= temp; state[11] ^= temp; state[16] ^= temp; state[21] ^= temp; temp = BC[1] ^ ROL32(BC[3], 1);//x=2 state[2] ^= temp; state[7] ^= temp; state[12] ^= temp; state[17] ^= temp; state[22] ^= temp; temp = BC[2] ^ ROL32(BC[4], 1);//x=3 state[3] ^= temp; state[8] ^= temp; state[13] 
^= temp; state[18] ^= temp; state[23] ^= temp; temp = BC[3] ^ ROL32(BC[0], 1);//x=4 state[4] ^= temp; state[9] ^= temp; state[14] ^= temp; state[19] ^= temp; state[24] ^= temp; }//end Theta { // Rho Pi temp = state[1]; BC[0] = state[10]; state[10] = ROL32( temp, 1); temp = BC[0];//x=0 BC[0] = state[7]; state[7] = ROL32( temp, 3); temp = BC[0]; BC[0] = state[11]; state[11] = ROL32( temp, 6); temp = BC[0]; BC[0] = state[17]; state[17] = ROL32( temp,10); temp = BC[0]; BC[0] = state[18]; state[18] = ROL32( temp,15); temp = BC[0]; BC[0] = state[3]; state[3] = ROL32( temp,21); temp = BC[0];//x=5 BC[0] = state[5]; state[5] = ROL32( temp,28); temp = BC[0]; BC[0] = state[16]; state[16] = ROL32( temp, 4); temp = BC[0]; BC[0] = state[8]; state[8] = ROL32( temp,13); temp = BC[0]; BC[0] = state[21]; state[21] = ROL32( temp,23); temp = BC[0]; BC[0] = state[24]; state[24] = ROL32( temp, 2); temp = BC[0];//x=10 BC[0] = state[4]; state[4] = ROL32( temp,14); temp = BC[0]; BC[0] = state[15]; state[15] = ROL32( temp,27); temp = BC[0]; BC[0] = state[23]; state[23] = ROL32( temp, 9); temp = BC[0]; BC[0] = state[19]; state[19] = ROL32( temp,24); temp = BC[0]; BC[0] = state[13]; state[13] = ROL32( temp, 8); temp = BC[0];//x=15 BC[0] = state[12]; state[12] = ROL32( temp,25); temp = BC[0]; BC[0] = state[2]; state[2] = ROL32( temp,11); temp = BC[0]; BC[0] = state[20]; state[20] = ROL32( temp,30); temp = BC[0]; BC[0] = state[14]; state[14] = ROL32( temp,18); temp = BC[0]; BC[0] = state[22]; state[22] = ROL32( temp, 7); temp = BC[0];//x=20 BC[0] = state[9]; state[9] = ROL32( temp,29); temp = BC[0]; BC[0] = state[6]; state[6] = ROL32( temp,20); temp = BC[0]; BC[0] = state[1]; state[1] = ROL32( temp,12); temp = BC[0];//x=23 }//end Rho Pi { // Chi BC[0] = state[0]; BC[1] = state[1]; BC[2] = state[2]; BC[3] = state[3]; BC[4] = state[4]; state[0] = BC[0] ^((~BC[1]) & BC[2]); state[1] = BC[1] ^((~BC[2]) & BC[3]); state[2] = BC[2] ^((~BC[3]) & BC[4]); state[3] = BC[3] ^((~BC[4]) & BC[0]); state[4] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[5]; BC[1] = state[6]; BC[2] = state[7]; BC[3] = state[8]; BC[4] = state[9]; state[5] = BC[0] ^((~BC[1]) & BC[2]); state[6] = BC[1] ^((~BC[2]) & BC[3]); state[7] = BC[2] ^((~BC[3]) & BC[4]); state[8] = BC[3] ^((~BC[4]) & BC[0]); state[9] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[10]; BC[1] = state[11]; BC[2] = state[12]; BC[3] = state[13]; BC[4] = state[14]; state[10] = BC[0] ^((~BC[1]) & BC[2]); state[11] = BC[1] ^((~BC[2]) & BC[3]); state[12] = BC[2] ^((~BC[3]) & BC[4]); state[13] = BC[3] ^((~BC[4]) & BC[0]); state[14] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[15]; BC[1] = state[16]; BC[2] = state[17]; BC[3] = state[18]; BC[4] = state[19]; state[15] = BC[0] ^((~BC[1]) & BC[2]); state[16] = BC[1] ^((~BC[2]) & BC[3]); state[17] = BC[2] ^((~BC[3]) & BC[4]); state[18] = BC[3] ^((~BC[4]) & BC[0]); state[19] = BC[4] ^((~BC[0]) & BC[1]); BC[0] = state[20]; BC[1] = state[21]; BC[2] = state[22]; BC[3] = state[23]; BC[4] = state[24]; state[20] = BC[0] ^((~BC[1]) & BC[2]); state[21] = BC[1] ^((~BC[2]) & BC[3]); state[22] = BC[2] ^((~BC[3]) & BC[4]); state[23] = BC[3] ^((~BC[4]) & BC[0]); state[24] = BC[4] ^((~BC[0]) & BC[1]); }//end Chi // Iota state[0] ^= KeccakF_RoundConstants_h[round]; } } //end unrolled //Keccak final node hashing results of previous nodes in sequential mode __host__ void Keccak_top_GPU(tKeccakLane * Kstate, tKeccakLane *inBuffer , int block_number) { int ind_word,k; for (k=0;k<block_number;k++) { for (ind_word=0; ind_word<OUTPUT_BLOCK_SIZE_B/4; ind_word++) { Kstate[ind_word] 
^= inBuffer[ind_word + k * OUTPUT_BLOCK_SIZE_B/4]; } KeccakFunr_h(Kstate); } } //************************************************************************ //kernel implementaing hash function, hashing NB_INPUT_BLOCK (of 256 bits) // __global__ void ker_Keccak(tKeccakLane *d_inBuffer, tKeccakLane * d_outBuffer, const tKeccakLane *KeccakF_RoundConstants) { int ind_word,k; tKeccakLane Kstate[25]; //zeroize the state for(ind_word=0; ind_word<25; ind_word++) {Kstate[ind_word]=0; } for (k=0;k<NB_INPUT_BLOCK;k++) { //xor input into state for (ind_word=0; ind_word<(INPUT_BLOCK_SIZE_B/4 ); ind_word++) { Kstate[ind_word] ^= d_inBuffer[threadIdx.x + ind_word * NB_THREADS + k * NB_THREADS * INPUT_BLOCK_SIZE_B/4 + blockIdx.x * NB_THREADS * INPUT_BLOCK_SIZE_B/4 * NB_INPUT_BLOCK ]; } //apply GPU Keccak permutation KeccakFunr(Kstate, KeccakF_RoundConstants); } //output hash in buffer for (ind_word=0; ind_word<OUTPUT_BLOCK_SIZE_B/4; ind_word++) { d_outBuffer[threadIdx.x + ind_word *NB_THREADS + blockIdx.x *NB_THREADS * OUTPUT_BLOCK_SIZE_B/4 ]= Kstate[ind_word]; } } //******************************************************************************** //************************ //First Tree mode //data to be hashed is in h_inBuffer //output chaining values hashes are copied to h_outBuffer //************************ __host__ void KeccakTreeGPU(tKeccakLane * h_inBuffer, tKeccakLane * d_inBuffer, tKeccakLane * h_outBuffer, tKeccakLane * d_outBuffer, tKeccakLane * d_KeccakF_RoundConstants ) { //copy host to device cudaMemcpy(d_inBuffer,h_inBuffer, INPUT_BLOCK_SIZE_B * NB_THREADS * NB_INPUT_BLOCK*NB_THREADS_BLOCKS, cudaMemcpyHostToDevice); checkCUDAError(" Memcpy htd"); //exec kernels ker_Keccak <<< dim3(NB_THREADS_BLOCKS), dim3(NB_THREADS) >>> (d_inBuffer, d_outBuffer, d_KeccakF_RoundConstants); checkCUDAError(" ker_keccak"); //copy back device to host cudaMemcpy(h_outBuffer,d_outBuffer, OUTPUT_BLOCK_SIZE_B * NB_THREADS*NB_THREADS_BLOCKS ,cudaMemcpyDeviceToHost); checkCUDAError(" Memcpy dth"); }
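// ---------------------------------------------------------------------------
// A minimal host-side driver sketch (not in the original file) showing how the
// pieces above compose into the two-level tree hash. It assumes it is appended
// to this translation unit, and that NB_THREADS, NB_THREADS_BLOCKS,
// NB_INPUT_BLOCK, INPUT_BLOCK_SIZE_B and OUTPUT_BLOCK_SIZE_B come from
// KeccakTree.h as the code above implies. Buffer sizes mirror the cudaMemcpy
// calls in KeccakTreeGPU(); the input here is all zeros (a smoke test only).
// ---------------------------------------------------------------------------
__host__ void KeccakTree_example(void)
{
    const size_t in_words  = (INPUT_BLOCK_SIZE_B / 4) * NB_THREADS * NB_INPUT_BLOCK * NB_THREADS_BLOCKS;
    const size_t out_words = (OUTPUT_BLOCK_SIZE_B / 4) * NB_THREADS * NB_THREADS_BLOCKS;

    tKeccakLane *h_in  = (tKeccakLane *)calloc(in_words,  sizeof(tKeccakLane));
    tKeccakLane *h_out = (tKeccakLane *)calloc(out_words, sizeof(tKeccakLane));

    tKeccakLane *d_in, *d_out, *d_constants;
    cudaMalloc((void **)&d_in,  in_words  * sizeof(tKeccakLane));
    cudaMalloc((void **)&d_out, out_words * sizeof(tKeccakLane));
    cudaMalloc((void **)&d_constants, sizeof(KeccakF_RoundConstants_h));
    cudaMemcpy(d_constants, KeccakF_RoundConstants_h, sizeof(KeccakF_RoundConstants_h), cudaMemcpyHostToDevice);
    checkCUDAError("example setup");

    // Level 1: every GPU thread absorbs NB_INPUT_BLOCK input blocks and writes
    // one chaining value into h_out.
    KeccakTreeGPU(h_in, d_in, h_out, d_out, d_constants);

    // Level 2: the final node absorbs all chaining values sequentially on the host.
    tKeccakLane Kstate[25] = { 0 };
    Keccak_top_GPU(Kstate, h_out, NB_THREADS * NB_THREADS_BLOCKS);

    printf("first lane of the root state: %08x\n", (unsigned int)Kstate[0]);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_constants);
    free(h_in); free(h_out);
}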
#include "Common.h" #define INFINITY ((float)(1e+300 * 1e+300)) __device__ __constant__ int screen_width; __device__ __constant__ int screen_pitch; __device__ __constant__ int screen_height; __device__ __constant__ Config config; // Frame Buffers __device__ __constant__ float4 * frame_buffer_albedo; __device__ __constant__ float4 * frame_buffer_direct; __device__ __constant__ float4 * frame_buffer_indirect; #include "Util.h" #include "BSDF.h" #include "Material.h" #include "Sky.h" #include "RayCone.h" // Final Frame Buffer, shared with OpenGL __device__ __constant__ Surface<float4> accumulator; #include "Raytracing/BVH.h" #include "Raytracing/QBVH.h" #include "Raytracing/CWBVH.h" #include "Sampling.h" #include "SVGF/SVGF.h" #include "SVGF/TAA.h" struct Camera { float3 position; float3 bottom_left_corner; float3 x_axis; float3 y_axis; float pixel_spread_angle; float aperture_radius; float focal_distance; } __device__ __constant__ camera; __device__ PixelQuery pixel_query = { INVALID, INVALID, INVALID, INVALID }; extern "C" __global__ void kernel_generate(int sample_index, int pixel_offset, int pixel_count) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= pixel_count) return; int index_offset = index + pixel_offset; int x = index_offset % screen_width; int y = index_offset / screen_width; int pixel_index = x + y * screen_pitch; ASSERT(pixel_index < screen_pitch * screen_height, "Pixel should fit inside the buffer"); float2 rand_filter = random<SampleDimension::FILTER> (pixel_index, 0, sample_index); float2 rand_aperture = random<SampleDimension::APERTURE>(pixel_index, 0, sample_index); float2 jitter; if (config.enable_svgf) { jitter.x = taa_halton_x[sample_index & (TAA_HALTON_NUM_SAMPLES-1)]; jitter.y = taa_halton_y[sample_index & (TAA_HALTON_NUM_SAMPLES-1)]; } else { switch (config.reconstruction_filter) { case ReconstructionFilter::BOX: { jitter = rand_filter; break; } case ReconstructionFilter::TENT: { jitter.x = sample_tent(rand_filter.x); jitter.y = sample_tent(rand_filter.y); break; } case ReconstructionFilter::GAUSSIAN: { float2 gaussians = sample_gaussian(rand_filter.x, rand_filter.y); jitter.x = 0.5f + 0.5f * gaussians.x; jitter.y = 0.5f + 0.5f * gaussians.y; break; } } } float x_jittered = float(x) + jitter.x; float y_jittered = float(y) + jitter.y; float3 focal_point = camera.focal_distance * normalize(camera.bottom_left_corner + x_jittered * camera.x_axis + y_jittered * camera.y_axis); float2 lens_point = camera.aperture_radius * sample_disk(rand_aperture.x, rand_aperture.y); float3 offset = camera.x_axis * lens_point.x + camera.y_axis * lens_point.y; float3 direction = normalize(focal_point - offset); // Create primary Ray that starts at the Camera's position and goes through the current pixel ray_buffer_trace.origin .set(index, camera.position + offset); ray_buffer_trace.direction.set(index, direction); ray_buffer_trace.pixel_index_and_mis_eligable[index] = pixel_index | (false << 31); } extern "C" __global__ void kernel_trace_bvh(int bounce) { bvh_trace(buffer_sizes.trace[bounce], &buffer_sizes.rays_retired[bounce]); } extern "C" __global__ void kernel_trace_qbvh(int bounce) { qbvh_trace(buffer_sizes.trace[bounce], &buffer_sizes.rays_retired[bounce]); } extern "C" __global__ void kernel_trace_cwbvh(int bounce) { cwbvh_trace(buffer_sizes.trace[bounce], &buffer_sizes.rays_retired[bounce]); } extern "C" __global__ void kernel_trace_shadow_bvh(int bounce) { bvh_trace_shadow(buffer_sizes.shadow[bounce], &buffer_sizes.rays_retired_shadow[bounce], bounce); } extern 
"C" __global__ void kernel_trace_shadow_qbvh(int bounce) { qbvh_trace_shadow(buffer_sizes.shadow[bounce], &buffer_sizes.rays_retired_shadow[bounce], bounce); } extern "C" __global__ void kernel_trace_shadow_cwbvh(int bounce) { cwbvh_trace_shadow(buffer_sizes.shadow[bounce], &buffer_sizes.rays_retired_shadow[bounce], bounce); } extern "C" __global__ void kernel_sort(int bounce, int sample_index) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.trace[bounce]) return; float3 ray_direction = ray_buffer_trace.direction.get(index); RayHit hit = ray_buffer_trace.hits.get(index); unsigned pixel_index_and_mis_eligable = ray_buffer_trace.pixel_index_and_mis_eligable[index]; int pixel_index = pixel_index_and_mis_eligable & ~(0b11 << 31); int x = pixel_index % screen_pitch; int y = pixel_index / screen_pitch; bool mis_eligable = pixel_index_and_mis_eligable >> 31; float3 throughput; if (bounce == 0) { throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { throughput = ray_buffer_trace.throughput.get(index); } // If we didn't hit anything, sample the Sky if (hit.triangle_id == INVALID) { float3 illumination = throughput * sample_sky(ray_direction); if (bounce == 0) { if (config.enable_albedo || config.enable_svgf) { frame_buffer_albedo[pixel_index] = make_float4(1.0f); } frame_buffer_direct[pixel_index] = make_float4(illumination); } else if (bounce == 1) { frame_buffer_direct[pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[pixel_index] += make_float4(illumination); } return; } // Get the Material of the Mesh we hit int material_id = mesh_get_material_id(hit.mesh_id); MaterialType material_type = material_get_type(material_id); if (bounce == 0 && pixel_query.pixel_index == pixel_index) { pixel_query.mesh_id = hit.mesh_id; pixel_query.triangle_id = hit.triangle_id; pixel_query.material_id = material_id; } if (material_type == MaterialType::LIGHT) { // Obtain the Light's position and normal TrianglePosNor light = triangle_get_positions_and_normals(hit.triangle_id); float3 light_point; float3 light_normal; triangle_barycentric(light, hit.u, hit.v, light_point, light_normal); float3 light_point_prev = light_point; // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, light_point); matrix3x4_transform_direction(world, light_normal); light_normal = normalize(light_normal); if (bounce == 0 && config.enable_svgf) { Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, light_point_prev); svgf_set_gbuffers(x, y, hit, light_point, light_normal, light_point_prev); } MaterialLight material_light = material_as_light(material_id); bool should_count_light_contribution = config.enable_next_event_estimation ? 
!mis_eligable : true; if (should_count_light_contribution) { float3 illumination = throughput * material_light.emission; if (bounce == 0) { if (config.enable_albedo || config.enable_svgf) { frame_buffer_albedo[pixel_index] = make_float4(1.0f); } frame_buffer_direct[pixel_index] = make_float4(material_light.emission); } else if (bounce == 1) { frame_buffer_direct[pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[pixel_index] += make_float4(illumination); } return; } if (config.enable_multiple_importance_sampling) { float cos_theta_light = fabsf(dot(ray_direction, light_normal)); float distance_to_light_squared = hit.t * hit.t; float brdf_pdf = ray_buffer_trace.last_pdf[index]; float light_power = luminance(material_light.emission.x, material_light.emission.y, material_light.emission.z); float light_pdf = light_power * distance_to_light_squared / (cos_theta_light * lights_total_weight); float mis_weight = power_heuristic(brdf_pdf, light_pdf); float3 illumination = throughput * material_light.emission * mis_weight; assert(bounce != 0); if (bounce == 1) { frame_buffer_direct[pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[pixel_index] += make_float4(illumination); } } return; } // If this is the last bounce and we haven't hit a light, terminate if (bounce == config.num_bounces - 1) return; // Russian Roulette if (config.enable_russian_roulette && bounce > 0) { // Throughput does not include albedo so it doesn't need to be demodulated by SVGF (causing precision issues) // This deteriorates Russian Roulette performance, so albedo is included here float3 throughput_with_albedo = throughput * make_float3(frame_buffer_albedo[pixel_index]); float survival_probability = saturate(vmax_max(throughput_with_albedo.x, throughput_with_albedo.y, throughput_with_albedo.z)); float rand_russian_roulette = random<SampleDimension::RUSSIAN_ROULETTE>(pixel_index, bounce, sample_index).x; if (rand_russian_roulette > survival_probability) { return; } throughput /= survival_probability; } switch (material_type) { case MaterialType::DIFFUSE: { int index_out = atomic_agg_inc(&buffer_sizes.diffuse[bounce]); ray_buffer_shade_diffuse_and_plastic.direction.set(index_out, ray_direction); if (bounce > 0 && config.enable_mipmapping) ray_buffer_shade_diffuse_and_plastic.cone[index_out] = ray_buffer_trace.cone[index]; ray_buffer_shade_diffuse_and_plastic.hits.set(index_out, hit); ray_buffer_shade_diffuse_and_plastic.pixel_index[index_out] = pixel_index; if (bounce > 0) ray_buffer_shade_diffuse_and_plastic.throughput.set(index_out, throughput); break; } case MaterialType::PLASTIC: { // Plastic Material buffer is shared with Diffuse Material buffer but grows in the opposite direction int index_out = (BATCH_SIZE - 1) - atomic_agg_inc(&buffer_sizes.plastic[bounce]); ray_buffer_shade_diffuse_and_plastic.direction.set(index_out, ray_direction); if (bounce > 0 && config.enable_mipmapping) ray_buffer_shade_diffuse_and_plastic.cone[index_out] = ray_buffer_trace.cone[index]; ray_buffer_shade_diffuse_and_plastic.hits.set(index_out, hit); ray_buffer_shade_diffuse_and_plastic.pixel_index[index_out] = pixel_index; if (bounce > 0) ray_buffer_shade_diffuse_and_plastic.throughput.set(index_out, throughput); break; } case MaterialType::DIELECTRIC: { int index_out = atomic_agg_inc(&buffer_sizes.dielectric[bounce]); ray_buffer_shade_dielectric_and_conductor.direction.set(index_out, ray_direction); if (bounce > 0 && config.enable_mipmapping) ray_buffer_shade_dielectric_and_conductor.cone[index_out] 
= ray_buffer_trace.cone[index]; ray_buffer_shade_dielectric_and_conductor.hits.set(index_out, hit); ray_buffer_shade_dielectric_and_conductor.pixel_index[index_out] = pixel_index; if (bounce > 0) ray_buffer_shade_dielectric_and_conductor.throughput.set(index_out, throughput); break; } case MaterialType::CONDUCTOR: { // Conductor Material buffer is shared with Dielectric Material buffer but grows in the opposite direction int index_out = (BATCH_SIZE - 1) - atomic_agg_inc(&buffer_sizes.conductor[bounce]); ray_buffer_shade_dielectric_and_conductor.direction.set(index_out, ray_direction); if (bounce > 0 && config.enable_mipmapping) ray_buffer_shade_dielectric_and_conductor.cone[index_out] = ray_buffer_trace.cone[index]; ray_buffer_shade_dielectric_and_conductor.hits.set(index_out, hit); ray_buffer_shade_dielectric_and_conductor.pixel_index[index_out] = pixel_index; if (bounce > 0) ray_buffer_shade_dielectric_and_conductor.throughput.set(index_out, throughput); break; } } } template<typename BSDF, MaterialBuffer * material_buffer, bool REVERSED> __device__ void shade_material(int bounce, int sample_index, int buffer_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_size) return; // Material Buffers can be shared by 2 different Materials, one growing left to right, one growing right to left // If this Material is right to left, reverse the index into the buffers if constexpr (REVERSED) { index = (BATCH_SIZE - 1) - index; } float3 ray_direction = material_buffer->direction.get(index); RayHit hit = material_buffer->hits .get(index); int pixel_index = material_buffer->pixel_index[index]; float3 throughput; if (bounce == 0) { throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { throughput = material_buffer->throughput.get(index); } // Obtain hit Triangle position, normal, and texture coordinates TrianglePosNorTex hit_triangle = triangle_get_positions_normals_and_tex_coords(hit.triangle_id); float3 hit_point; float3 normal; float2 tex_coord; triangle_barycentric(hit_triangle, hit.u, hit.v, hit_point, normal, tex_coord); float3 hit_point_local = hit_point; // Keep copy of the untransformed hit point in local space // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, hit_point); matrix3x4_transform_direction(world, normal); normal = normalize(normal); // Make sure the normal is always pointing outwards bool entering_material = dot(ray_direction, normal) < 0.0f; if (!entering_material) { normal = -normal; } // Load and propagate Ray Cone float cone_angle; float cone_width; if (config.enable_mipmapping) { if (bounce == 0) { cone_angle = camera.pixel_spread_angle; cone_width = cone_angle * hit.t; } else { float2 cone = material_buffer->cone[index]; cone_angle = cone.x; cone_width = cone.y + cone_angle * hit.t; } } // Calculate texture level of detail float mesh_scale = mesh_get_scale(hit.mesh_id); LOD lod; if constexpr (BSDF::HAS_ALBEDO) { if (config.enable_mipmapping) { float3 geometric_normal = cross(hit_triangle.position_edge_1, hit_triangle.position_edge_2); float triangle_area_inv = 1.0f / length(geometric_normal); geometric_normal *= triangle_area_inv; // Normalize if (bounce == 0) { // First bounce uses anisotrpoic LOD float3 ellipse_axis_1, ellipse_axis_2; ray_cone_get_ellipse_axes(ray_direction, normal, cone_width, ellipse_axis_1, ellipse_axis_2); ray_cone_get_texture_gradients( mesh_scale, geometric_normal, triangle_area_inv, 
hit_triangle.position_0, hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.tex_coord_0, hit_triangle.tex_coord_edge_1, hit_triangle.tex_coord_edge_2, hit_point_local, tex_coord, ellipse_axis_1, ellipse_axis_2, lod.aniso.gradient_1, lod.aniso.gradient_2 ); } else { // Subsequent bounces use isotropic LOD float lod_triangle = sqrtf(triangle_get_lod(mesh_scale, triangle_area_inv, hit_triangle.tex_coord_edge_1, hit_triangle.tex_coord_edge_2)); float lod_ray_cone = ray_cone_get_lod(ray_direction, normal, cone_width); lod.iso.lod = log2f(lod_triangle * lod_ray_cone); } } } // Calulate new Ray Cone angle based on Mesh curvature if (config.enable_mipmapping) { float curvature = triangle_get_curvature( hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.normal_edge_1, hit_triangle.normal_edge_2 ) / mesh_scale; cone_angle -= 2.0f * curvature * fabsf(cone_width) / dot(normal, ray_direction); // Eq. 5 (Akenine-Möller 2021) } // Construct TBN frame float3 tangent, bitangent; orthonormal_basis(normal, tangent, bitangent); float3 omega_i = world_to_local(-ray_direction, tangent, bitangent, normal); // Initialize BSDF int material_id = mesh_get_material_id(hit.mesh_id); BSDF bsdf; bsdf.pixel_index = pixel_index; bsdf.bounce = bounce; bsdf.sample_index = sample_index; bsdf.tangent = tangent; bsdf.bitangent = bitangent; bsdf.normal = normal; bsdf.omega_i = omega_i; bsdf.init(bounce, entering_material, material_id, tex_coord, lod); bsdf.attenuate(bounce, pixel_index, throughput, hit.t); // Emit GBuffers if SVGF is enabled if (bounce == 0 && config.enable_svgf) { float3 hit_point_prev = hit_point_local; Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, hit_point_prev); int x = pixel_index % screen_pitch; int y = pixel_index / screen_pitch; svgf_set_gbuffers(x, y, hit, hit_point, normal, hit_point_prev); } // Next Event Estimation if (config.enable_next_event_estimation && lights_total_weight > 0.0f && bsdf.is_mis_eligable()) { float2 rand_light = random<SampleDimension::NEE_LIGHT> (pixel_index, bounce, sample_index); float2 rand_triangle = random<SampleDimension::NEE_TRIANGLE>(pixel_index, bounce, sample_index); // Pick random Light int light_mesh_id; int light_triangle_id = sample_light(rand_light.x, rand_light.y, light_mesh_id); // Pick random point on the Light float2 light_uv = sample_triangle(rand_triangle.x, rand_triangle.y); // Obtain the Light's position and normal TrianglePosNor light = triangle_get_positions_and_normals(light_triangle_id); float3 light_point; float3 light_normal; triangle_barycentric(light, light_uv.x, light_uv.y, light_point, light_normal); // Transform into world space Matrix3x4 light_world = mesh_get_transform(light_mesh_id); matrix3x4_transform_position (light_world, light_point); matrix3x4_transform_direction(light_world, light_normal); light_normal = normalize(light_normal); float3 to_light = light_point - hit_point; float distance_to_light_squared = dot(to_light, to_light); float distance_to_light = sqrtf(distance_to_light_squared); // Normalize the vector to the light to_light /= distance_to_light; float cos_theta_light = fabsf(dot(to_light, light_normal)); float cos_theta_hit = dot(to_light, normal); int light_material_id = mesh_get_material_id(light_mesh_id); MaterialLight material_light = material_as_light(light_material_id); float3 bsdf_value; float bsdf_pdf; bool valid = bsdf.eval(to_light, cos_theta_hit, bsdf_value, bsdf_pdf); if (valid) { float light_power = 
luminance(material_light.emission.x, material_light.emission.y, material_light.emission.z); float light_pdf = light_power * distance_to_light_squared / (cos_theta_light * lights_total_weight); float mis_weight; if (config.enable_multiple_importance_sampling) { mis_weight = power_heuristic(light_pdf, bsdf_pdf); } else { mis_weight = 1.0f; } float3 illumination = throughput * bsdf_value * material_light.emission * mis_weight / light_pdf; // Emit Shadow Ray int shadow_ray_index = atomic_agg_inc(&buffer_sizes.shadow[bounce]); ray_buffer_shadow.ray_origin .set(shadow_ray_index, ray_origin_epsilon_offset(hit_point, to_light, normal)); ray_buffer_shadow.ray_direction.set(shadow_ray_index, to_light); ray_buffer_shadow.max_distance[shadow_ray_index] = distance_to_light - 2.0f * EPSILON; ray_buffer_shadow.illumination_and_pixel_index[shadow_ray_index] = make_float4( illumination.x, illumination.y, illumination.z, __int_as_float(pixel_index) ); } } // Sample BSDF float3 direction_out; float pdf; bool valid = bsdf.sample(throughput, direction_out, pdf); if (!valid) return; float3 origin_out = ray_origin_epsilon_offset(hit_point, direction_out, normal); // Emit next Ray int index_out = atomic_agg_inc(&buffer_sizes.trace[bounce + 1]); ray_buffer_trace.origin .set(index_out, origin_out); ray_buffer_trace.direction.set(index_out, direction_out); if (config.enable_mipmapping) { ray_buffer_trace.cone[index_out] = make_float2(cone_angle, cone_width); } ray_buffer_trace.pixel_index_and_mis_eligable[index_out] = pixel_index | (bsdf.is_mis_eligable() << 31); ray_buffer_trace.throughput.set(index_out, throughput); ray_buffer_trace.last_pdf[index_out] = pdf; } extern "C" __global__ void kernel_shade_diffuse(int bounce, int sample_index) { shade_material<BSDFDiffuse, &ray_buffer_shade_diffuse_and_plastic, false>(bounce, sample_index, buffer_sizes.diffuse[bounce]); } extern "C" __global__ void kernel_shade_plastic(int bounce, int sample_index) { shade_material<BSDFPlastic, &ray_buffer_shade_diffuse_and_plastic, true>(bounce, sample_index, buffer_sizes.plastic[bounce]); } extern "C" __global__ void kernel_shade_dielectric(int bounce, int sample_index) { shade_material<BSDFDielectric, &ray_buffer_shade_dielectric_and_conductor, false>(bounce, sample_index, buffer_sizes.dielectric[bounce]); } extern "C" __global__ void kernel_shade_conductor(int bounce, int sample_index) { shade_material<BSDFConductor, &ray_buffer_shade_dielectric_and_conductor, true>(bounce, sample_index, buffer_sizes.conductor[bounce]); } extern "C" __global__ void kernel_accumulate(float frames_accumulated) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= screen_width || y >= screen_height) return; int pixel_index = x + y * screen_pitch; float4 direct = frame_buffer_direct [pixel_index]; float4 indirect = frame_buffer_indirect[pixel_index]; float4 colour = direct + indirect; if (config.enable_albedo) { colour *= frame_buffer_albedo[pixel_index]; } if (frames_accumulated > 0.0f) { float4 colour_prev = accumulator.get(x, y); colour = colour_prev + (colour - colour_prev) / frames_accumulated; // Online average } // if (isnan(colour.x + colour.y + colour.z)) colour = make_float4(1,0,1,1); accumulator.set(x, y, colour); }
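// ---------------------------------------------------------------------------
// A self-contained sketch (not part of the original renderer). kernel_accumulate
// above blends each new frame into the accumulator with
//     colour = colour_prev + (colour - colour_prev) / frames_accumulated;
// i.e. an incremental (online) mean. The standalone program below reproduces
// just that update on a plain float buffer so the identity with the ordinary
// arithmetic mean can be checked in isolation. How the host chooses
// frames_accumulated (here: 0 for the first frame, then the 1-based frame
// count) is an assumption, and all names are illustrative.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void accumulate_online_mean(float * accumulator, const float * frame, int n_pixels, float frames_accumulated) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n_pixels) return;

    float colour = frame[i];
    if (frames_accumulated > 0.0f) {
        float colour_prev = accumulator[i];
        colour = colour_prev + (colour - colour_prev) / frames_accumulated; // Online average
    }
    accumulator[i] = colour;
}

int main() {
    const int n_pixels = 1;
    const int n_frames = 4;
    float frames[n_frames] = { 1.0f, 2.0f, 3.0f, 6.0f }; // arithmetic mean = 3.0

    float * d_accumulator; cudaMalloc(&d_accumulator, n_pixels * sizeof(float));
    float * d_frame;       cudaMalloc(&d_frame,       n_pixels * sizeof(float));

    for (int f = 0; f < n_frames; f++) {
        cudaMemcpy(d_frame, &frames[f], n_pixels * sizeof(float), cudaMemcpyHostToDevice);
        float frames_accumulated = (f == 0) ? 0.0f : float(f + 1);
        accumulate_online_mean<<<1, 32>>>(d_accumulator, d_frame, n_pixels, frames_accumulated);
    }

    float mean = 0.0f;
    cudaMemcpy(&mean, d_accumulator, sizeof(float), cudaMemcpyDeviceToHost);
    printf("online mean = %f (expected 3.0)\n", mean);

    cudaFree(d_accumulator);
    cudaFree(d_frame);
    return 0;
}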
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/DeviceGuard.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <vector> #include <iostream> #include <cmath> int const threadsPerBlock = sizeof(unsigned long long) * 8; template <typename scalar_t> struct Point { scalar_t x, y; __device__ Point() : x(0), y(0) {} __device__ Point(scalar_t x, scalar_t y) : x(x), y(y) {} __device__ scalar_t dot(const Point<scalar_t>& vec) const { return this->x * vec.x + this->y * vec.y; } __device__ scalar_t cross(const Point<scalar_t>& vec) const { return this->x * vec.y - vec.x * this->y; } __device__ Point<scalar_t> operator-( const Point<scalar_t>& vec) const { return Point(this->x - vec.x, this->y - vec.y); } __device__ Point<scalar_t> operator-=( const Point<scalar_t>& vec) { this->x -= vec.x; this->y -= vec.y; return *this; } __device__ Point<scalar_t> operator+( const Point<scalar_t>& vec) const { return Point(this->x + vec.x, this->y + vec.y); } __device__ Point<scalar_t> operator+=( const Point<scalar_t>& vec) { this->x += vec.x; this->y += vec.y; return *this; } __device__ bool operator<( const Point<scalar_t>& vec) const { if ((this->x == 0 && this->y == 0) && (vec.x != 0 || vec.y != 0)) return true; return this->cross(vec) > 0; } }; template <typename scalar_t> __device__ Point<scalar_t> operator*(scalar_t a, const Point<scalar_t>& p) { return Point<scalar_t>(a * p.x, a * p.y); } template <typename scalar_t> struct LinSeg { Point<scalar_t> x1, x2; __device__ LinSeg() {} __device__ LinSeg(const Point<scalar_t>& x1, const Point<scalar_t>& x2) : x1(x1), x2(x2) {} __device__ int InterSectWith(const LinSeg<scalar_t>& linseg, Point<scalar_t>* ps) { Point<scalar_t> a1 = this->x1, a2 = this->x2, b1 = linseg.x1, b2 = linseg.x2; /* intersection point A=a2-a1, B=b2-b1, C=a1-b1 [C.x] = [-A.x B.x] * [s] [C.y] [-A.y B.y] [t] */ Point<scalar_t> A = a2 - a1, B = b2 - b1, C = a1 - b1; if (C.x == 0 && C.y == 0) { ps[0] = a1; return 1; } scalar_t D = -A.cross(B); if (D != 0) { // not parallel, may intersect. scalar_t s = C.cross(B) / D; scalar_t t = -A.cross(C) / D; if (0 <= s && s < 1 && 0 <= t && t < 1) { // head vertex does not count. ps[0] = a1 + s * A; return 1; } else { return 0; } } else { // check colinearity: |A*C|=0 if (A.cross(C) != 0) { // not colinear return 0; } else { int p_cnt = 0; // colinear overlap: only tail vertices count. 
scalar_t BdtC = B.dot(C); // (b2-b1)*(a1-b1) scalar_t BdtB = B.dot(B); // (b2-b1)*(b2-b1) scalar_t AdtnC = -A.dot(C); // (a2-a1)*(b1-a1) scalar_t AdtA = A.dot(A); // (a2-a1)*(a2-a1) if (BdtC >= 0 && BdtC < BdtB) // a1 between b2 and b1 ps[p_cnt++] = a1; if (AdtnC >= 0 && AdtnC < AdtA) // b1 between a2 and a1 ps[p_cnt++] = b1; return p_cnt; } } } }; template <typename scalar_t> __device__ void rbbox2points(const scalar_t* const rb, Point<scalar_t>* vs) { scalar_t x = rb[0], y = rb[1], w_2 = rb[2] / 2, h_2 = rb[3] / 2, a = rb[4]; scalar_t cosa = cosf(a), sina = sinf(a); scalar_t wx = cosa * w_2, wy = sina * w_2; scalar_t hx = -sina * h_2, hy = cosa * h_2; vs[0] = Point<scalar_t>(x + wx + hx, y + wy + hy); vs[1] = Point<scalar_t>(x - wx + hx, y - wy + hy); vs[2] = Point<scalar_t>(x - wx - hx, y - wy - hy); vs[3] = Point<scalar_t>(x + wx - hx, y + wy - hy); } template <typename scalar_t> __device__ int vertex_in_rbbox(Point<scalar_t>* v1, Point<scalar_t>* v2, Point<scalar_t>* ps) { Point<scalar_t> center = (scalar_t)0.5 * (v2[0] + v2[2]); Point<scalar_t> w_vec = (scalar_t)0.5 * (v2[1] - v2[0]); Point<scalar_t> h_vec = (scalar_t)0.5 * (v2[2] - v2[1]); scalar_t h_vec_2 = h_vec.dot(h_vec); scalar_t w_vec_2 = w_vec.dot(w_vec); int p_cnt = 0; for (int i = 0; i < 4; i++) { Point<scalar_t> pr = v1[i] - center; if (std::abs(pr.dot(h_vec)) < h_vec_2 && std::abs(pr.dot(w_vec)) < w_vec_2) { ps[p_cnt++] = v1[i]; } } return p_cnt; } template <typename scalar_t> __device__ int rbbox_border_intsec(Point<scalar_t>* v1, Point<scalar_t>* v2, Point<scalar_t>* ps) { LinSeg<scalar_t> rb1[4] = { {v1[0], v1[1]}, {v1[1], v1[2]}, {v1[2], v1[3]}, {v1[3], v1[0]} }; LinSeg<scalar_t> rb2[4] = { {v2[0], v2[1]}, {v2[1], v2[2]}, {v2[2], v2[3]}, {v2[3], v2[0]} }; int p_cnt = 0; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) { p_cnt += rb1[i].InterSectWith(rb2[j], ps + p_cnt); } return p_cnt; } template <typename scalar_t> __device__ scalar_t area(Point<scalar_t> *vs_dirty, int p_cnt_dirty) { const scalar_t numthres = (scalar_t) 1e-2; Point<scalar_t> vs[16]; vs[0] = {0, 0}; int p_cnt = 1; // set vs[0] the reference point for (int i = 1; i < p_cnt_dirty; i++) { bool clean = true; vs_dirty[i] -= vs_dirty[0]; for (int j = 0; j < p_cnt; j++) { Point<scalar_t> diff = vs_dirty[i] - vs[j]; if (std::abs(diff.x) < numthres && std::abs(diff.y) < numthres) { clean = false; break; } } if (clean) { vs[p_cnt++] = vs_dirty[i]; } } // sort for (int i = 1; i < p_cnt; i++) { vs[0] = vs[i]; int j; for (j = i - 1; vs[0] < vs[j]; j--) vs[j + 1] = vs[j]; vs[j + 1] = vs[0]; } // calculate area scalar_t a = 0; vs[0] = {0, 0}; for (int i = 1; i < p_cnt; i++) a += vs[i].cross(vs[(i + 1) % p_cnt]); return a / 2; } template <typename scalar_t> __device__ scalar_t devIoU( const scalar_t* const rb1_p, const scalar_t* const rb2_p) { Point<scalar_t> v1[4], v2[4], u[16]; rbbox2points(rb1_p, v1); rbbox2points(rb2_p, v2); int p_cnt = 0; // add rbbox's vertices inside the other one p_cnt += vertex_in_rbbox(v1, v2, u + p_cnt); p_cnt += vertex_in_rbbox(v2, v1, u + p_cnt); // add rect border line segment intersection points p_cnt += rbbox_border_intsec(v1, v2, u + p_cnt); if (p_cnt >= 3) { scalar_t s1 = rb1_p[2] * rb1_p[3]; scalar_t s2 = rb2_p[2] * rb2_p[3]; scalar_t su = area(u, p_cnt); su = min(su, s1); su = min(su, s2); su = max(su, (scalar_t)0); return su / (s1 + s2 - su); } else { return (scalar_t)0; } } __global__ void nmsr_kernel(const int n_boxes, const float nms_overlap_thresh, const float* dev_boxes, unsigned long long* dev_mask) { const int 
row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; auto block_boxes_p = block_boxes + threadIdx.x * 5; auto dev_boxes_p = dev_boxes + (threadsPerBlock * col_start + threadIdx.x) * 6; if (threadIdx.x < col_size) { block_boxes_p[0] = dev_boxes_p[0]; block_boxes_p[1] = dev_boxes_p[1]; block_boxes_p[2] = dev_boxes_p[2]; block_boxes_p[3] = dev_boxes_p[3]; block_boxes_p[4] = dev_boxes_p[4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float* cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 6 tensor at::Tensor nmsr_cuda(const at::Tensor boxes, float nms_overlap_thresh) { // Ensure CUDA uses the input tensor device. at::DeviceGuard guard(boxes.device()); using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 5); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState* state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nmsr_kernel <<<blocks, threads>>> (boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({ boxes_num }, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
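// ---------------------------------------------------------------------------
// A small sanity-check sketch (not in the original extension), assuming it is
// appended to this .cu file so devIoU<float>() is visible. Boxes are given as
// (cx, cy, w, h, angle); two axis-aligned unit squares whose centres differ by
// (0.5, 0.5) overlap in a 0.5 x 0.5 square, so the expected IoU is
// 0.25 / (1 + 1 - 0.25) = 1/7 ~= 0.1429. Function names here are illustrative.
// ---------------------------------------------------------------------------
__global__ void riou_sanity_kernel(float* iou_out) {
    const float box_a[5] = { 0.0f, 0.0f, 1.0f, 1.0f, 0.0f };
    const float box_b[5] = { 0.5f, 0.5f, 1.0f, 1.0f, 0.0f };
    iou_out[0] = devIoU<float>(box_a, box_b);
}

float riou_sanity_cuda() {
    float* d_iou = NULL;
    float h_iou = 0.0f;
    cudaMalloc((void**)&d_iou, sizeof(float));
    riou_sanity_kernel<<<1, 1>>>(d_iou);
    cudaMemcpy(&h_iou, d_iou, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_iou);
    return h_iou; // expected ~0.1429
}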
#include <torch/types.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <cuda.h> #include <cuda_runtime.h> #include <time.h> #include <pybind11/pybind11.h> #include <pybind11/stl.h> #include <vector> #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) struct SpTrilinear_wc_Params { int in_feature_dim; int in_feature_numentries; int corner_lut_dims[3]; int corner_lut_strides[3]; int in_worldcoord_dims[8]; int in_worldcoord_strides[8]; int in_worldcoord_ndim; int out_feature_dims[8]; int out_feature_strides[8]; bool ign_zero; }; // out_feature.data_ptr<float>(), // in_feature.data_ptr<float>(), corner_lut_t.data_ptr<int32_t>(), in_worldcoord.data_ptr<float>(), p template <int TILE_DIM_X, int TILE_DIM_Y, int DUP_X> __global__ void sp_trilinear_worldcoord_kernel( float* __restrict__ out_feature, const float* __restrict__ in_feature, const int32_t* __restrict__ corner_lut_t, const float* __restrict__ in_worldcoord, SpTrilinear_wc_Params p) { const int GRID_X = gridDim.y; int idx_entry = blockIdx.x * TILE_DIM_Y + threadIdx.y; // Index processing //int index[7]; int t = idx_entry; int idx_in_worldcoord = 0; int idx_out_feature = 0; for (int i=p.in_worldcoord_ndim-2; i>=0; i--) { int idx_t = t % p.in_worldcoord_dims[i]; t = t / p.in_worldcoord_dims[i]; idx_in_worldcoord += p.in_worldcoord_strides[i] * idx_t; idx_out_feature += p.out_feature_strides[i] * idx_t; } if (t > 0) { return; } int stride_in_worldcoord = p.in_worldcoord_strides[p.in_worldcoord_ndim-1]; int stride_out_feature = p.out_feature_strides[p.in_worldcoord_ndim-1]; float world_coords[3]; world_coords[0] = in_worldcoord[idx_in_worldcoord]; world_coords[1] = in_worldcoord[idx_in_worldcoord+stride_in_worldcoord]; world_coords[2] = in_worldcoord[idx_in_worldcoord+stride_in_worldcoord*2]; float local_coords[3]; int vox_coords[3]; local_coords[0] = world_coords[0] - floorf(world_coords[0]); vox_coords[0] = (int)floorf(world_coords[0]); local_coords[1] = world_coords[1] - floorf(world_coords[1]); vox_coords[1] = (int)floorf(world_coords[1]); local_coords[2] = world_coords[2] - floorf(world_coords[2]); vox_coords[2] = (int)floorf(world_coords[2]); float interp_weight[8]; // 0,0,0 interp_weight[0] = (1.0f-local_coords[0])*(1.0f-local_coords[1])*(1.0f-local_coords[2]); // 0,0,1 interp_weight[1] = (1.0f-local_coords[0])*(1.0f-local_coords[1])*(local_coords[2]); // 0,1,0 interp_weight[2] = (1.0f-local_coords[0])*(local_coords[1])*(1.0f-local_coords[2]); // 0,1,1 interp_weight[3] = (1.0f-local_coords[0])*(local_coords[1])*(local_coords[2]); // 1,0,0 interp_weight[4] = (local_coords[0])*(1.0f-local_coords[1])*(1.0f-local_coords[2]); // 1,0,1 interp_weight[5] = (local_coords[0])*(1.0f-local_coords[1])*(local_coords[2]); // 1,1,0 interp_weight[6] = (local_coords[0])*(local_coords[1])*(1.0f-local_coords[2]); // 1,1,1 interp_weight[7] = (local_coords[0])*(local_coords[1])*(local_coords[2]); int indices[8]; // Hard boundary check (zero padding) if (isnan(world_coords[0]) || isnan(world_coords[1]) || isnan(world_coords[2])) { indices[0] = -1;indices[1] = -1;indices[2] = -1;indices[3] = -1; indices[4] = -1;indices[5] = -1;indices[6] = -1;indices[7] = -1; } else { // Clamp to boundaries int vox_coords_1[3]; vox_coords_1[0] = min(max(vox_coords[0]+1, 0), p.corner_lut_dims[0]-1); vox_coords_1[1] = 
min(max(vox_coords[1]+1, 0), p.corner_lut_dims[1]-1); vox_coords_1[2] = min(max(vox_coords[2]+1, 0), p.corner_lut_dims[2]-1); vox_coords[0] = min(max(vox_coords[0], 0), p.corner_lut_dims[0]-1); vox_coords[1] = min(max(vox_coords[1], 0), p.corner_lut_dims[1]-1); vox_coords[2] = min(max(vox_coords[2], 0), p.corner_lut_dims[2]-1); int idx_corner_lut; // 000 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[0] = corner_lut_t[idx_corner_lut]; // 001 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[1] = corner_lut_t[idx_corner_lut]; // 010 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[2] = corner_lut_t[idx_corner_lut]; // 011 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[3] = corner_lut_t[idx_corner_lut]; // 100 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[4] = corner_lut_t[idx_corner_lut]; // 101 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[5] = corner_lut_t[idx_corner_lut]; // 110 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[6] = corner_lut_t[idx_corner_lut]; // 111 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[7] = corner_lut_t[idx_corner_lut]; } if (p.ign_zero) { // Zero indices are to be ignored #pragma unroll for (int i=0; i<8; i++) { indices[i] -= 1; } } //int idx_feat = blockIdx.x * TILE_DIM_X * DUP_X + threadIdx.x; int idx_feat = blockIdx.y * TILE_DIM_X + threadIdx.x; for (int i=0; i<DUP_X; i++) { if (idx_feat >= p.in_feature_dim) { return; } float interp_feat = 0.0f; #pragma unroll for (int j=0; j<8; j++) { if (indices[j] >= 0) { interp_feat = fmaf(in_feature[indices[j]*p.in_feature_dim+idx_feat], interp_weight[j], interp_feat); } } //out_feature[idx_entry*p.in_feature_dim+idx_feat] = interp_feat; out_feature[idx_out_feature+stride_out_feature*idx_feat] = interp_feat; //idx_feat += TILE_DIM_X; idx_feat += TILE_DIM_X * GRID_X; } } //sp_trilinear_worldcoord_backward2feature_kernel<TILE_DIM_X, TILE_DIM_Y, DUP_X><<<dimGrid, dimBlock, 0, stream>>>( // in_feature_grad.data_ptr<float>(), // out_feature_grad.data_ptr<float>(), in_feature.data_ptr<float>(), in_corner_lut.data_ptr<int32_t>(), in_worldcoord.data_ptr<float>(), p // Backward to feature template <int TILE_DIM_X, int TILE_DIM_Y, int DUP_X> __global__ void sp_trilinear_worldcoord_backward2feature_kernel( float* __restrict__ in_feature_grad, const float* __restrict__ out_feature_grad, const int32_t* __restrict__ corner_lut_t, const float* __restrict__ in_worldcoord, SpTrilinear_wc_Params p) { const int GRID_X = gridDim.x; int idx_entry = blockIdx.y * TILE_DIM_Y + threadIdx.y; // Index processing //int index[7]; int t = idx_entry; int idx_in_worldcoord = 0; int idx_out_feature = 0; for (int i=p.in_worldcoord_ndim-2; i>=0; i--) { int idx_t = t % p.in_worldcoord_dims[i]; t = t / p.in_worldcoord_dims[i]; 
//index[i] = idx_t; idx_in_worldcoord += p.in_worldcoord_strides[i] * idx_t; idx_out_feature += p.out_feature_strides[i] * idx_t; } if (t > 0) { return; } int stride_in_worldcoord = p.in_worldcoord_strides[p.in_worldcoord_ndim-1]; int stride_out_feature = p.out_feature_strides[p.in_worldcoord_ndim-1]; float world_coords[3]; world_coords[0] = in_worldcoord[idx_in_worldcoord]; world_coords[1] = in_worldcoord[idx_in_worldcoord+stride_in_worldcoord]; world_coords[2] = in_worldcoord[idx_in_worldcoord+stride_in_worldcoord*2]; float local_coords[3]; int vox_coords[3]; local_coords[0] = world_coords[0] - floorf(world_coords[0]); vox_coords[0] = (int)floorf(world_coords[0]); local_coords[1] = world_coords[1] - floorf(world_coords[1]); vox_coords[1] = (int)floorf(world_coords[1]); local_coords[2] = world_coords[2] - floorf(world_coords[2]); vox_coords[2] = (int)floorf(world_coords[2]); float interp_weight[8]; // 0,0,0 interp_weight[0] = (1.0f-local_coords[0])*(1.0f-local_coords[1])*(1.0f-local_coords[2]); // 0,0,1 interp_weight[1] = (1.0f-local_coords[0])*(1.0f-local_coords[1])*(local_coords[2]); // 0,1,0 interp_weight[2] = (1.0f-local_coords[0])*(local_coords[1])*(1.0f-local_coords[2]); // 0,1,1 interp_weight[3] = (1.0f-local_coords[0])*(local_coords[1])*(local_coords[2]); // 1,0,0 interp_weight[4] = (local_coords[0])*(1.0f-local_coords[1])*(1.0f-local_coords[2]); // 1,0,1 interp_weight[5] = (local_coords[0])*(1.0f-local_coords[1])*(local_coords[2]); // 1,1,0 interp_weight[6] = (local_coords[0])*(local_coords[1])*(1.0f-local_coords[2]); // 1,1,1 interp_weight[7] = (local_coords[0])*(local_coords[1])*(local_coords[2]); int indices[8]; // Hard boundary check (zero padding) if (isnan(world_coords[0]) || isnan(world_coords[1]) || isnan(world_coords[2])) {// || //vox_coords[0] < 0 || vox_coords[0] >= (p.corner_lut_dims[0]-1) || //vox_coords[1] < 0 || vox_coords[1] >= (p.corner_lut_dims[1]-1) || //vox_coords[2] < 0 || vox_coords[2] >= (p.corner_lut_dims[2]-1)) { indices[0] = -1;indices[1] = -1;indices[2] = -1;indices[3] = -1; indices[4] = -1;indices[5] = -1;indices[6] = -1;indices[7] = -1; } else { // Clamp to boundaries int vox_coords_1[3]; vox_coords_1[0] = min(max(vox_coords[0]+1, 0), p.corner_lut_dims[0]-1); vox_coords_1[1] = min(max(vox_coords[1]+1, 0), p.corner_lut_dims[1]-1); vox_coords_1[2] = min(max(vox_coords[2]+1, 0), p.corner_lut_dims[2]-1); vox_coords[0] = min(max(vox_coords[0], 0), p.corner_lut_dims[0]-1); vox_coords[1] = min(max(vox_coords[1], 0), p.corner_lut_dims[1]-1); vox_coords[2] = min(max(vox_coords[2], 0), p.corner_lut_dims[2]-1); int idx_corner_lut; // 000 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[0] = corner_lut_t[idx_corner_lut]; // 001 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[1] = corner_lut_t[idx_corner_lut]; // 010 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[2] = corner_lut_t[idx_corner_lut]; // 011 idx_corner_lut = p.corner_lut_strides[0] * vox_coords[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[3] = corner_lut_t[idx_corner_lut]; // 100 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[4] = 
corner_lut_t[idx_corner_lut]; // 101 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[5] = corner_lut_t[idx_corner_lut]; // 110 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords[2]; indices[6] = corner_lut_t[idx_corner_lut]; // 111 idx_corner_lut = p.corner_lut_strides[0] * vox_coords_1[0] + p.corner_lut_strides[1] * vox_coords_1[1] + p.corner_lut_strides[2] * vox_coords_1[2]; indices[7] = corner_lut_t[idx_corner_lut]; } if (p.ign_zero) { #pragma unroll for (int i=0; i<8; i++) { indices[i] -= 1; } } //int idx_feat = blockIdx.x * TILE_DIM_X * DUP_X + threadIdx.x; int idx_feat = blockIdx.x * TILE_DIM_X + threadIdx.x; for (int i=0; i<DUP_X; i++) { if (idx_feat >= p.in_feature_dim) { return; } float grad = out_feature_grad[idx_out_feature+stride_out_feature*idx_feat]; #pragma unroll for (int j=0; j<8; j++) { if (indices[j] >= 0) { //indices[j]*p.in_feature_dim+idx_feat atomicAdd(&in_feature_grad[indices[j]*p.in_feature_dim+idx_feat], grad * interp_weight[j]); } } //idx_feat += TILE_DIM_X; idx_feat += TILE_DIM_X * GRID_X; } } // in_feature, corner_lut_t, in_world_coord, ign_zero=False // Input: // in_feature: float32 [M C] // in_corner_lut: int32 [X Y Z] // in_worldcoord: float32 [..., 3] // ---Index: int32 [..., 8], containing [0, M]. 0 is ignore label. // ---Coord: float32 [..., 3] // Output: // Interp. Feat: float32 [..., C] // std::vector<torch::Tensor> torch::Tensor sp_trilinear_worldcoord_cuda(const torch::Tensor& in_feature, const torch::Tensor& in_corner_lut, const torch::Tensor& in_worldcoord, bool ign_zero, int channel_pos) { CHECK_CUDA(in_feature); CHECK_CUDA(in_corner_lut); CHECK_CUDA(in_worldcoord); int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); torch::Device device = in_feature.device(); // assert(tensor.sizes() == std::vector<int64_t>{3, 4, 5}); assert(in_feature.dtype() == torch::kFloat32); assert(in_feature.dim() == 2); assert(in_corner_lut.dtype() == torch::kInt32); assert(in_corner_lut.dim() == 3); assert(in_worldcoord.dtype() == torch::kFloat32); assert(in_worldcoord.size(-1) == 3); assert(in_worldcoord.dim() <= 8); CHECK_CONTIGUOUS(in_feature); //CHECK_CONTIGUOUS(in_corner_lut); // Will still run correctly, but performance will suffer. 
//CHECK_CONTIGUOUS(in_worldcoord); //int channel_pos = -1; // -1 for HWC, -3 for CHW if (channel_pos < 0) { channel_pos += in_worldcoord.dim(); } assert(channel_pos >= 0 && channel_pos < in_worldcoord.dim()); SpTrilinear_wc_Params p; p.in_feature_dim = in_feature.size(1); p.in_feature_numentries = in_feature.size(0); p.in_worldcoord_ndim = in_worldcoord.dim(); for (int i=0; i<in_worldcoord.dim(); i++) { p.in_worldcoord_dims[i] = in_worldcoord.size(i); p.in_worldcoord_strides[i] = in_worldcoord.stride(i); } p.ign_zero = ign_zero; p.corner_lut_dims[0] = in_corner_lut.size(0); p.corner_lut_dims[1] = in_corner_lut.size(1); p.corner_lut_dims[2] = in_corner_lut.size(2); p.corner_lut_strides[0] = in_corner_lut.stride(0); p.corner_lut_strides[1] = in_corner_lut.stride(1); p.corner_lut_strides[2] = in_corner_lut.stride(2); int numentries = in_worldcoord.numel() / 3; //printf("FWD numentries: %d\n", numentries); std::vector<int64_t> out_feature_shape; //if (channel_first) { // Channel First format, suitable for 2D convolution // //assert(false); for (int i=0; i<channel_pos; i++) { out_feature_shape.push_back(in_worldcoord.size(i)); } out_feature_shape.push_back(p.in_feature_dim); for (int i=channel_pos; i<in_worldcoord.dim()-1; i++) { out_feature_shape.push_back(in_worldcoord.size(i)); } torch::Tensor out_feature = torch::empty(out_feature_shape, torch::TensorOptions().dtype(torch::kFloat32).device(device)); // The feature is always at the last dimension. Swap it to the last dim. for (int i=channel_pos+1; i<out_feature.dim(); i++) { out_feature.transpose_(i-1, i); } //} else { // Channel Last // for (int i=0; i<in_worldcoord.dim()-1; i++) { // out_feature_shape.push_back(in_worldcoord.size(i)); // } // out_feature_shape.push_back(p.in_feature_dim); // out_feature = torch::empty(out_feature_shape, torch::TensorOptions().dtype(torch::kFloat32).device(device)); //} for (int i=0; i<out_feature.dim(); i++) { p.out_feature_dims[i] = out_feature.size(i); p.out_feature_strides[i] = out_feature.stride(i); } const int TILE_DIM_X = 16; // feature dim const int TILE_DIM_Y = 64; // entry dim const int DUP_X = 4; // To amortize the cost of weight computation //dim3 dimGrid((p.in_feature_dim+(TILE_DIM_X*DUP_X)-1)/(TILE_DIM_X*DUP_X), (numentries+TILE_DIM_Y-1)/TILE_DIM_Y, 1); dim3 dimGrid((numentries+TILE_DIM_Y-1)/TILE_DIM_Y, (p.in_feature_dim+(TILE_DIM_X*DUP_X)-1)/(TILE_DIM_X*DUP_X), 1); dim3 dimBlock(TILE_DIM_X, TILE_DIM_Y, 1); sp_trilinear_worldcoord_kernel<TILE_DIM_X, TILE_DIM_Y, DUP_X><<<dimGrid, dimBlock, 0, stream>>>( out_feature.data_ptr<float>(), in_feature.data_ptr<float>(), in_corner_lut.data_ptr<int32_t>(), in_worldcoord.data_ptr<float>(), p ); THCudaCheck(cudaGetLastError()); return out_feature; } // Backward function for sparse trilinear interpolation // Input: // out_feature_grad: float32 [..., C] // in_feature: float32 [M, C] // in_corner_lut: int32 [X Y Z] // ---in_index: int32 [..., 8], containing [0, M]. 0 is ignore label. 
// in_worldcoord: float32 [..., 3] // ign_zero: bool // need_coord_grad: bool // Output: // in_feature_grad: float32 [M, C] // in_coord_grad: float32 [..., 3] std::vector<torch::Tensor> sp_trilinear_worldcoord_backward_cuda(const torch::Tensor& out_feature_grad , const torch::Tensor& in_feature, const torch::Tensor& in_corner_lut, const torch::Tensor& in_worldcoord, bool ign_zero, bool need_coord_grad) { assert(need_coord_grad == false); CHECK_CUDA(out_feature_grad); CHECK_CUDA(in_feature); CHECK_CUDA(in_corner_lut); CHECK_CUDA(in_worldcoord); int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); torch::Device device = out_feature_grad.device(); //for (int i=0; i<out_feature_grad.dim(); i++) { // printf("[sp_trilinear_backward_cuda] dim, size, stride: %d, %d, %d\n", i, out_feature_grad.size(i), out_feature_grad.stride(i)); //} //CHECK_CONTIGUOUS(out_feature_grad); CHECK_CONTIGUOUS(in_feature); //CHECK_CONTIGUOUS(in_worldcoord); // assert(tensor.sizes() == std::vector<int64_t>{3, 4, 5}); assert(out_feature_grad.dtype() == torch::kFloat32); for (int i=0; i<out_feature_grad.dim()-1; i++) { assert(out_feature_grad.size(i) == in_worldcoord.size(i)); } assert(out_feature_grad.size(-1) == in_feature.size(1)); assert(in_feature.dtype() == torch::kFloat32); assert(in_feature.dim() == 2); assert(in_worldcoord.dtype() == torch::kFloat32); assert(in_worldcoord.size(-1) == 3); SpTrilinear_wc_Params p; p.in_feature_dim = in_feature.size(1); p.in_feature_numentries = in_feature.size(0); p.in_worldcoord_ndim = in_worldcoord.dim(); for (int i=0; i<in_worldcoord.dim(); i++) { p.in_worldcoord_dims[i] = in_worldcoord.size(i); p.in_worldcoord_strides[i] = in_worldcoord.stride(i); } p.ign_zero = ign_zero; p.corner_lut_dims[0] = in_corner_lut.size(0); p.corner_lut_dims[1] = in_corner_lut.size(1); p.corner_lut_dims[2] = in_corner_lut.size(2); p.corner_lut_strides[0] = in_corner_lut.stride(0); p.corner_lut_strides[1] = in_corner_lut.stride(1); p.corner_lut_strides[2] = in_corner_lut.stride(2); for (int i=0; i<out_feature_grad.dim(); i++) { p.out_feature_dims[i] = out_feature_grad.size(i); p.out_feature_strides[i] = out_feature_grad.stride(i); } int numentries = in_worldcoord.numel() / 3; // Create output tensors torch::Tensor in_feature_grad = torch::zeros({p.in_feature_numentries, p.in_feature_dim}, torch::TensorOptions().dtype(torch::kFloat32).device(device)); torch::Tensor in_coord_grad; { const int TILE_DIM_X = 16; // feature dim const int TILE_DIM_Y = 64; // entry dim const int DUP_X = 4; // To amortize the cost of weight computation dim3 dimGrid((p.in_feature_dim+(TILE_DIM_X*DUP_X)-1)/(TILE_DIM_X*DUP_X), (numentries+TILE_DIM_Y-1)/TILE_DIM_Y, 1); dim3 dimBlock(TILE_DIM_X, TILE_DIM_Y, 1); //printf("BW dimGrid: %d, %d, %d \n", dimGrid.x, dimGrid.y, dimGrid.z); sp_trilinear_worldcoord_backward2feature_kernel<TILE_DIM_X, TILE_DIM_Y, DUP_X><<<dimGrid, dimBlock, 0, stream>>>( in_feature_grad.data_ptr<float>(), out_feature_grad.data_ptr<float>(), in_corner_lut.data_ptr<int32_t>(), in_worldcoord.data_ptr<float>(), p ); } THCudaCheck(cudaGetLastError()); return {in_feature_grad}; }
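/* Usage sketch (an assumption, not part of the original source): a minimal
 * pybind11 binding file for the two entry points defined above, as it would
 * appear in a PyTorch C++/CUDA extension. The module and the exported Python
 * names are illustrative; only the two function signatures are taken from the
 * definitions above. */
#include <torch/extension.h>
#include <vector>

// Forward declarations of the CUDA entry points implemented above.
torch::Tensor sp_trilinear_worldcoord_cuda(const torch::Tensor& in_feature,
                                           const torch::Tensor& in_corner_lut,
                                           const torch::Tensor& in_worldcoord,
                                           bool ign_zero, int channel_pos);
std::vector<torch::Tensor> sp_trilinear_worldcoord_backward_cuda(
    const torch::Tensor& out_feature_grad, const torch::Tensor& in_feature,
    const torch::Tensor& in_corner_lut, const torch::Tensor& in_worldcoord,
    bool ign_zero, bool need_coord_grad);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("sp_trilinear_worldcoord_forward", &sp_trilinear_worldcoord_cuda,
        "Sparse trilinear interpolation of features at world coordinates (forward)");
  m.def("sp_trilinear_worldcoord_backward", &sp_trilinear_worldcoord_backward_cuda,
        "Gradient w.r.t. in_feature for sparse trilinear interpolation (backward)");
}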
#include <cuda_runtime.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cmath> #include <GL/glew.h> #include <GL/glut.h> #include <cuda_gl_interop.h> #include "header.h" #include "visualisation.h" #define FOVY 45.0 // bo variables GLuint sphereVerts; GLuint sphereNormals; //Simulation output buffers/textures cudaGraphicsResource_t agent_state1_cgr; GLuint agent_state1_tbo; GLuint agent_state1_displacementTex; // mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -VIEW_DISTANCE; // keyboard controls #if defined(PAUSE_ON_START) bool paused = true; #else bool paused = false; #endif // vertex Shader GLuint vertexShader; GLuint fragmentShader; GLuint shaderProgram; GLuint vs_displacementMap; GLuint vs_mapIndex; //timer cudaEvent_t start, stop; const int display_rate = 50; int frame_count; float frame_time = 0.0; #ifdef SIMULATION_DELAY //delay int delay_count = 0; #endif // prototypes int initGL(); void initShader(); void createVBO( GLuint* vbo, GLuint size); void deleteVBO( GLuint* vbo); void createTBO( cudaGraphicsResource_t* cudaResource, GLuint* tbo, GLuint* tex, GLuint size); void deleteTBO( cudaGraphicsResource_t* cudaResource, GLuint* tbo); void setVertexBufferData(); void reshape(int width, int height); void display(); void keyboard( unsigned char key, int x, int y); void special(int key, int x, int y); void mouse(int button, int state, int x, int y); void motion(int x, int y); void runCuda(); void checkGLError(); /* Error check function for safe CUDA API calling */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /* Error check function for post CUDA Kernel calling */ #define gpuErrchkLaunch() { gpuLaunchAssert(__FILE__, __LINE__); } inline void gpuLaunchAssert(const char *file, int line, bool abort=true) { gpuAssert( cudaPeekAtLastError(), file, line ); #ifdef _DEBUG gpuAssert( cudaDeviceSynchronize(), file, line ); #endif } const char vertexShaderSource[] = { "#extension GL_EXT_gpu_shader4 : enable \n" "uniform samplerBuffer displacementMap; \n" "attribute in float mapIndex; \n" "varying vec3 normal, lightDir; \n" "varying vec4 colour; \n" "void main() \n" "{ \n" " vec4 position = gl_Vertex; \n" " vec4 lookup = texelFetchBuffer(displacementMap, (int)mapIndex); \n" " if (lookup.w == 0) \n" " colour = vec4(1.0, 0.0, 0.0, 0.0); \n" " else if (lookup.w == 1) \n" " colour = vec4(0.0, 0.0, 1.0, 0.0); \n" " else \n" " colour = vec4(0.0, 0.0, 0.0, 0.0); \n" " \n" " lookup.w = 1.0; \n" " position += lookup; \n" " gl_Position = gl_ModelViewProjectionMatrix * position; \n" " \n" " vec3 mvVertex = vec3(gl_ModelViewMatrix * position); \n" " lightDir = vec3(gl_LightSource[0].position.xyz - mvVertex); \n" " normal = gl_NormalMatrix * gl_Normal; \n" "} \n" }; const char fragmentShaderSource[] = { "varying vec3 normal, lightDir; \n" "varying vec4 colour; \n" "void main (void) \n" "{ \n" " // Defining The Material Colors \n" " vec4 AmbientColor = vec4(0.25, 0.0, 0.0, 1.0); \n" " vec4 DiffuseColor = colour; \n" " \n" " // Scaling The Input Vector To Length 1 \n" " vec3 n_normal = normalize(normal); \n" " vec3 n_lightDir = normalize(lightDir); \n" " \n" " // Calculating The Diffuse Term And Clamping It To [0;1] \n" " float DiffuseTerm = clamp(dot(n_normal, n_lightDir), 0.0, 
1.0);\n" " \n" " // Calculating The Final Color \n" " gl_FragColor = AmbientColor + DiffuseColor * DiffuseTerm; \n" " \n" "} \n" }; //GPU Kernels __global__ void output_agent_agent_to_VBO(xmachine_memory_agent_list* agents, glm::vec4* vbo, glm::vec3 centralise){ //global thread index int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; vbo[index].x = 0.0; vbo[index].y = 0.0; vbo[index].z = 0.0; vbo[index].x = agents->x[index] - centralise.x; vbo[index].y = agents->y[index] - centralise.y; vbo[index].z = agents->z[index] - centralise.z; vbo[index].w = agents->strategy[index]; } void initVisualisation() { // Create GL context int argc = 1; char glutString[] = "GLUT application"; char *argv[] = {glutString, NULL}; //char *argv[] = {"GLUT application", NULL}; glutInit( &argc, argv); glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize( WINDOW_WIDTH, WINDOW_HEIGHT); glutCreateWindow( "FLAME GPU Visualiser"); // initialize GL if( !initGL()) { return; } initShader(); // register callbacks glutReshapeFunc( reshape); glutDisplayFunc( display); glutKeyboardFunc( keyboard); glutSpecialFunc( special); glutMouseFunc( mouse); glutMotionFunc( motion); // create VBO's createVBO( &sphereVerts, SPHERE_SLICES* (SPHERE_STACKS+1) * sizeof(glm::vec3)); createVBO( &sphereNormals, SPHERE_SLICES* (SPHERE_STACKS+1) * sizeof (glm::vec3)); setVertexBufferData(); // create TBO createTBO(&agent_state1_cgr, &agent_state1_tbo, &agent_state1_displacementTex, xmachine_memory_agent_MAX * sizeof( glm::vec4)); //set shader uniforms glUseProgram(shaderProgram); //create a events for timer cudaEventCreate(&start); cudaEventCreate(&stop); } void runVisualisation(){ // start rendering mainloop glutMainLoop(); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda() { if(!paused){ #ifdef SIMULATION_DELAY delay_count++; if (delay_count == SIMULATION_DELAY){ delay_count = 0; singleIteration(); } #else singleIteration(); #endif } //kernals sizes int threads_per_tile = 256; int tile_size; dim3 grid; dim3 threads; glm::vec3 centralise; //pointer glm::vec4 *dptr; if (get_agent_agent_state1_count() > 0) { // map OpenGL buffer object for writing from CUDA size_t accessibleBufferSize = 0; gpuErrchk(cudaGraphicsMapResources(1, &agent_state1_cgr)); gpuErrchk(cudaGraphicsResourceGetMappedPointer( (void**)&dptr, &accessibleBufferSize, agent_state1_cgr)); //cuda block size tile_size = (int) ceil((float)get_agent_agent_state1_count()/threads_per_tile); grid = dim3(tile_size, 1, 1); threads = dim3(threads_per_tile, 1, 1); //continuous variables centralise = getMaximumBounds() + getMinimumBounds(); centralise /= 2; output_agent_agent_to_VBO<<< grid, threads>>>(get_device_agent_state1_agents(), dptr, centralise); gpuErrchkLaunch(); // unmap buffer object gpuErrchk(cudaGraphicsUnmapResources(1, &agent_state1_cgr)); } } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// int initGL() { // initialize necessary OpenGL extensions glewInit(); if (! 
glewIsSupported( "GL_VERSION_2_0 " "GL_ARB_pixel_buffer_object")) { fprintf( stderr, "ERROR: Support for necessary OpenGL extensions missing.\n"); fflush( stderr); return 1; } // default initialization glClearColor( 1.0, 1.0, 1.0, 1.0); glEnable( GL_DEPTH_TEST); reshape(WINDOW_WIDTH, WINDOW_HEIGHT); checkGLError(); //lighting glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); return 1; } //////////////////////////////////////////////////////////////////////////////// //! Initialize GLSL Vertex Shader //////////////////////////////////////////////////////////////////////////////// void initShader() { const char* v = vertexShaderSource; const char* f = fragmentShaderSource; //vertex shader vertexShader = glCreateShader(GL_VERTEX_SHADER); glShaderSource(vertexShader, 1, &v, 0); glCompileShader(vertexShader); //fragment shader fragmentShader = glCreateShader(GL_FRAGMENT_SHADER); glShaderSource(fragmentShader, 1, &f, 0); glCompileShader(fragmentShader); //program shaderProgram = glCreateProgram(); glAttachShader(shaderProgram, vertexShader); glAttachShader(shaderProgram, fragmentShader); glLinkProgram(shaderProgram); // check for errors GLint status; glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &status); if (status == GL_FALSE){ printf("ERROR: Shader Compilation Error\n"); char data[262144]; int len; glGetShaderInfoLog(vertexShader, 262144, &len, data); printf("%s", data); } glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &status); if (status == GL_FALSE){ printf("ERROR: Shader Compilation Error\n"); char data[262144]; int len; glGetShaderInfoLog(fragmentShader, 262144, &len, data); printf("%s", data); } glGetProgramiv(shaderProgram, GL_LINK_STATUS, &status); if (status == GL_FALSE){ printf("ERROR: Shader Program Link Error\n"); } // get shader variables vs_displacementMap = glGetUniformLocation(shaderProgram, "displacementMap"); vs_mapIndex = glGetAttribLocation(shaderProgram, "mapIndex"); } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint* vbo, GLuint size) { // create buffer object glGenBuffers( 1, vbo); glBindBuffer( GL_ARRAY_BUFFER, *vbo); // initialize buffer object glBufferData( GL_ARRAY_BUFFER, size, 0, GL_STATIC_DRAW); glBindBuffer( GL_ARRAY_BUFFER, 0); checkGLError(); } //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO( GLuint* vbo) { glBindBuffer( 1, *vbo); glDeleteBuffers( 1, vbo); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! Create TBO //////////////////////////////////////////////////////////////////////////////// void createTBO(cudaGraphicsResource_t* cudaResource, GLuint* tbo, GLuint* tex, GLuint size) { // create buffer object glGenBuffers( 1, tbo); glBindBuffer( GL_TEXTURE_BUFFER_EXT, *tbo); // initialize buffer object glBufferData( GL_TEXTURE_BUFFER_EXT, size, 0, GL_DYNAMIC_DRAW); //tex glGenTextures(1, tex); glBindTexture(GL_TEXTURE_BUFFER_EXT, *tex); glTexBufferEXT(GL_TEXTURE_BUFFER_EXT, GL_RGBA32F_ARB, *tbo); glBindBuffer(GL_TEXTURE_BUFFER_EXT, 0); // register buffer object with CUDA gpuErrchk(cudaGraphicsGLRegisterBuffer(cudaResource, *tbo, cudaGraphicsMapFlagsWriteDiscard)); checkGLError(); } //////////////////////////////////////////////////////////////////////////////// //! 
Delete TBO //////////////////////////////////////////////////////////////////////////////// void deleteTBO(cudaGraphicsResource_t* cudaResource, GLuint* tbo) { gpuErrchk(cudaGraphicsUnregisterResource(*cudaResource)); *cudaResource = 0; glBindBuffer( 1, *tbo); glDeleteBuffers( 1, tbo); *tbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! Set Sphere Vertex Data //////////////////////////////////////////////////////////////////////////////// static void setSphereVertex(glm::vec3* data, int slice, int stack) { float PI = 3.14159265358; double sl = 2*PI*slice/SPHERE_SLICES; double st = 2*PI*stack/SPHERE_STACKS; data->x = cos(st)*sin(sl) * SPHERE_RADIUS; data->y = sin(st)*sin(sl) * SPHERE_RADIUS; data->z = cos(sl) * SPHERE_RADIUS; } //////////////////////////////////////////////////////////////////////////////// //! Set Sphere Normal Data //////////////////////////////////////////////////////////////////////////////// static void setSphereNormal(glm::vec3* data, int slice, int stack) { float PI = 3.14159265358; double sl = 2*PI*slice/SPHERE_SLICES; double st = 2*PI*stack/SPHERE_STACKS; data->x = cos(st)*sin(sl); data->y = sin(st)*sin(sl); data->z = cos(sl); } //////////////////////////////////////////////////////////////////////////////// //! Set Vertex Buffer Data //////////////////////////////////////////////////////////////////////////////// void setVertexBufferData() { int slice, stack; int i; // upload vertex points data glBindBuffer(GL_ARRAY_BUFFER, sphereVerts); glm::vec3* verts =( glm::vec3*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY); i = 0; for (slice=0; slice<SPHERE_SLICES/2; slice++) { for (stack=0; stack<=SPHERE_STACKS; stack++) { setSphereVertex(&verts[i++], slice, stack); setSphereVertex(&verts[i++], slice+1, stack); } } glUnmapBuffer(GL_ARRAY_BUFFER); // upload vertex normal data glBindBuffer(GL_ARRAY_BUFFER, sphereNormals); glm::vec3* normals =( glm::vec3*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY); i = 0; for (slice=0; slice<SPHERE_SLICES/2; slice++) { for (stack=0; stack<=SPHERE_STACKS; stack++) { setSphereNormal(&normals[i++], slice, stack); setSphereNormal(&normals[i++], slice+1, stack); } } glUnmapBuffer(GL_ARRAY_BUFFER); } //////////////////////////////////////////////////////////////////////////////// //! Reshape callback //////////////////////////////////////////////////////////////////////////////// void reshape(int width, int height){ // viewport glViewport( 0, 0, width, height); // projection glMatrixMode( GL_PROJECTION); glLoadIdentity(); gluPerspective(FOVY, (GLfloat)width / (GLfloat) height, NEAR_CLIP, FAR_CLIP); checkGLError(); } //////////////////////////////////////////////////////////////////////////////// //! 
Display callback //////////////////////////////////////////////////////////////////////////////// void display() { float millis; //CUDA start Timing cudaEventRecord(start); // run CUDA kernel to generate vertex positions runCuda(); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); //zoom glTranslatef(0.0, 0.0, translate_z); //move glRotatef(rotate_x, 1.0, 0.0, 0.0); glRotatef(rotate_y, 0.0, 0.0, 1.0); //Set light position glLightfv(GL_LIGHT0, GL_POSITION, LIGHT_POSITION); //Draw agent Agents in state1 state glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_BUFFER_EXT, agent_state1_displacementTex); //loop for (int i=0; i< get_agent_agent_state1_count(); i++){ glVertexAttrib1f(vs_mapIndex, (float)i); //draw using vertex and attribute data on the gpu (fast) glEnableClientState(GL_VERTEX_ARRAY); glEnableClientState(GL_NORMAL_ARRAY); glBindBuffer(GL_ARRAY_BUFFER, sphereVerts); glVertexPointer(3, GL_FLOAT, 0, 0); glBindBuffer(GL_ARRAY_BUFFER, sphereNormals); glNormalPointer(GL_FLOAT, 0, 0); glDrawArrays(GL_TRIANGLE_STRIP, 0, SPHERE_SLICES * (SPHERE_STACKS+1)); glDisableClientState(GL_NORMAL_ARRAY); glDisableClientState(GL_VERTEX_ARRAY); } //CUDA stop timing cudaEventRecord(stop); glFlush(); cudaEventSynchronize(stop); cudaEventElapsedTime(&millis, start, stop); frame_time += millis; if(frame_count == display_rate){ char title [100]; sprintf(title, "Execution & Rendering Total: %f (FPS), %f milliseconds per frame", display_rate/(frame_time/1000.0f), frame_time/display_rate); glutSetWindowTitle(title); //reset frame_count = 0; frame_time = 0.0; }else{ frame_count++; } glutSwapBuffers(); glutPostRedisplay(); } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard( unsigned char key, int /*x*/, int /*y*/) { switch( key) { // Space == 32 case(32): paused = !paused; break; // Esc == 27 case(27) : deleteVBO( &sphereVerts); deleteVBO( &sphereNormals); deleteTBO( &agent_state1_cgr, &agent_state1_tbo); cudaEventDestroy(start); cudaEventDestroy(stop); exit(EXIT_SUCCESS); } } void special(int key, int x, int y){ switch (key) { case(GLUT_KEY_RIGHT) : singleIteration(); fflush(stdout); break; } } //////////////////////////////////////////////////////////////////////////////// //! Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void motion(int x, int y) { float dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (mouse_buttons & 1) { rotate_x += dy * 0.2; rotate_y += dx * 0.2; } else if (mouse_buttons & 4) { translate_z += dy * VIEW_DISTANCE * 0.001; } mouse_old_x = x; mouse_old_y = y; } void checkGLError(){ int Error; if((Error = glGetError()) != GL_NO_ERROR) { const char* Message = (const char*)gluErrorString(Error); fprintf(stderr, "OpenGL Error : %s\n", Message); } }
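/* Illustrative sketch (not part of the visualiser above): the per-frame
 * CUDA/OpenGL interop pattern that runCuda() follows -- map the registered
 * buffer, obtain a device pointer, launch a kernel that writes into it, then
 * unmap so OpenGL can draw from it. The kernel fill_vbo, the element count n
 * and the helper name update_buffer are assumptions made for this example;
 * the resource is assumed to have been registered beforehand, as done with
 * cudaGraphicsGLRegisterBuffer() in createTBO() above. */
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

__global__ void fill_vbo(float4* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = make_float4(0.0f, 0.0f, 0.0f, 1.0f);  // placeholder positions
}

void update_buffer(cudaGraphicsResource_t resource, int n) {
    float4* dptr = 0;
    size_t bytes = 0;
    cudaGraphicsMapResources(1, &resource);   // OpenGL must not use the buffer while it is mapped
    cudaGraphicsResourceGetMappedPointer((void**)&dptr, &bytes, resource);
    fill_vbo<<<(n + 255) / 256, 256>>>(dptr, n);
    cudaGraphicsUnmapResources(1, &resource); // hand the buffer back to OpenGL before drawing
}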
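#ifdef CUDA // restores the opening conditional that the #elif defined(FW_OPENCL) / #endif below pair with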
#include "libhmsbeagle/GPU/GPUImplDefs.h" #include <stdlib.h> #include <string.h> #include <stdio.h> extern "C" { #elif defined(FW_OPENCL) #ifdef DOUBLE_PRECISION #pragma OPENCL EXTENSION cl_khr_fp64: enable #endif #define __umul24(x, y) (x * y) #endif //FW_OPENCL #if (!defined DOUBLE_PRECISION && defined FP_FAST_FMAF) || (defined DOUBLE_PRECISION && defined FP_FAST_FMA) #define FMA(x, y, z) (z = fma(x, y, z)) #else //FP_FAST_FMA #define FMA(x, y, z) (z += x * y) #endif //FP_FAST_FMA #if (defined CUDA) && (defined DOUBLE_PRECISION) && (__CUDA_ARCH__ < 600) __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif /////////////////////////////////////////////////////////////////////////////// KW_GLOBAL_KERNEL void kernelReorderPatterns( KW_GLOBAL_VAR REAL* dPartials, KW_GLOBAL_VAR int* dStates, KW_GLOBAL_VAR int* dStatesSort, const KW_GLOBAL_VAR int* KW_RESTRICT dTipOffsets, const KW_GLOBAL_VAR int* KW_RESTRICT dTipTypes, const KW_GLOBAL_VAR int* KW_RESTRICT dPatternsNewOrder, const KW_GLOBAL_VAR REAL* KW_RESTRICT dPatternWeights, KW_GLOBAL_VAR REAL* KW_RESTRICT dPatternWeightsSort, int patternCount, int paddedPatternCount) { #ifdef FW_OPENCL_CPU int state = 0; int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * KW_LOCAL_SIZE_0; #else int state = KW_LOCAL_ID_0; int pattern = KW_LOCAL_ID_1 + KW_GROUP_ID_0 * KW_LOCAL_SIZE_1; #endif int stateCount = PADDED_STATE_COUNT; int category = KW_GROUP_ID_1; int tip = KW_GROUP_ID_2; int tipCount = KW_NUM_GROUPS_2; if (pattern < patternCount) { int patternSorted = dPatternsNewOrder[pattern]; if (dTipTypes[tip] == 0) { int categoryOffset = category * stateCount * paddedPatternCount; int sortIndex = categoryOffset + patternSorted * stateCount; int originIndex = categoryOffset + pattern * stateCount; const KW_GLOBAL_VAR REAL* KW_RESTRICT partialOriginal = dPartials + dTipOffsets[tip]; KW_GLOBAL_VAR REAL* KW_RESTRICT partialSorted = dPartials + dTipOffsets[tip+tipCount]; #ifdef FW_OPENCL_CPU for (int i=0; i < stateCount; i++) { partialSorted[sortIndex+i] = partialOriginal[originIndex+i]; } #else sortIndex += state; originIndex += state; partialSorted[sortIndex] = partialOriginal[originIndex]; #endif } else if (state == 0) { const KW_GLOBAL_VAR int* KW_RESTRICT stateOriginal = dStates + dTipOffsets[tip]; KW_GLOBAL_VAR int* KW_RESTRICT stateSorted = dStatesSort + dTipOffsets[tip+tipCount]; stateSorted[patternSorted] = stateOriginal[pattern]; } if (state == 0 && category == 0 && tip == 0) { dPatternWeightsSort[patternSorted] = dPatternWeights[pattern]; } } } KW_GLOBAL_KERNEL void kernelMatrixMulADBMulti(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* offsets, KW_GLOBAL_VAR REAL* Alist, KW_GLOBAL_VAR REAL* Dlist, KW_GLOBAL_VAR REAL* Blist, KW_GLOBAL_VAR REAL* distanceQueue, int length, int wB, int totalMatrix) { int wMatrix = KW_GROUP_ID_0 % totalMatrix; int offIndex = wMatrix * 3; // Block index int bx = KW_GROUP_ID_0 / totalMatrix; int by = KW_GROUP_ID_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; int BLOCKS = KW_NUM_GROUPS_1; KW_GLOBAL_VAR REAL* C = dMatrices + offsets[offIndex]; KW_GLOBAL_VAR REAL* B = Blist + offsets[offIndex + 1]; // 
dEvec KW_GLOBAL_VAR REAL* A = Alist + offsets[offIndex + 1]; // dIevc KW_GLOBAL_VAR REAL* D = Dlist + offsets[offIndex + 2]; // dEigenValues REAL distance = distanceQueue[wMatrix]; const int EDGE = PADDED_STATE_COUNT - (BLOCKS - 1) * MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of A int aStep = MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of B int bStep = MULTIPLY_BLOCK_SIZE * PADDED_STATE_COUNT; // Csub is used to store the element of the block sub-matrix // that is computed by the thread REAL Csub = 0; int a = PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE * by; int b = MULTIPLY_BLOCK_SIZE * bx; int d = 0; //MULTIPLY_BLOCK_SIZE * bx; KW_LOCAL_MEM REAL As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Bs[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Ds[MULTIPLY_BLOCK_SIZE]; for (int i = 0; i < BLOCKS - 1; i++) { if (ty == 0) Ds[tx] = exp(D[d + tx] * distance); As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; KW_LOCAL_FENCE; for (int k = 0; k < MULTIPLY_BLOCK_SIZE; ++k) Csub += As[ty][k] * Ds[k] * Bs[k][tx]; KW_LOCAL_FENCE; a += aStep; b += bStep; d += MULTIPLY_BLOCK_SIZE; } // Last block is too long if (tx < EDGE && ty < EDGE) { if (ty == 0) Ds[tx] = exp(D[d + tx] * distance); As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; } else { if (ty == 0) Ds[tx] = 0; As[ty][tx] = 0; Bs[ty][tx] = 0; } KW_LOCAL_FENCE; for (int k = 0; k < EDGE; k++) Csub += As[ty][k] * Ds[k] * Bs[k][tx]; KW_LOCAL_FENCE; // Write the block sub-matrix to device memory; // each thread writes one element if ((tx < EDGE || bx < BLOCKS - 1) && (ty < EDGE || by < BLOCKS - 1)) { // It's OK to write if (Csub < 0) C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = 0; else C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = Csub; } } KW_GLOBAL_KERNEL void kernelMatrixMulADB(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* listC, KW_GLOBAL_VAR REAL* A, KW_GLOBAL_VAR REAL* D, KW_GLOBAL_VAR REAL* B, KW_GLOBAL_VAR REAL* distanceQueue, int length, int wB, int totalMatrix) { int wMatrix = KW_GROUP_ID_0 % totalMatrix; // Block index int bx = KW_GROUP_ID_0 / totalMatrix; int by = KW_GROUP_ID_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; int BLOCKS = KW_NUM_GROUPS_1; #ifdef CUDA KW_LOCAL_MEM REAL* C; KW_LOCAL_MEM REAL distance; if (tx == 0 && ty == 0) { C = dMatrices + listC[wMatrix]; // Non-coalescent read distance = distanceQueue[wMatrix]; // Non-coalescent read } #elif defined(FW_OPENCL) KW_GLOBAL_VAR REAL* C; REAL distance; C = dMatrices + listC[wMatrix]; distance = distanceQueue[wMatrix]; #endif KW_LOCAL_FENCE; const int EDGE = PADDED_STATE_COUNT - (BLOCKS - 1) * MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of A int aStep = MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of B int bStep = MULTIPLY_BLOCK_SIZE * PADDED_STATE_COUNT; // Csub is used to store the element of the block sub-matrix // that is computed by the thread REAL Csub = 0; int a = PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE * by; int b = MULTIPLY_BLOCK_SIZE * bx; int d = 0; //MULTIPLY_BLOCK_SIZE * bx; KW_LOCAL_MEM REAL As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Bs[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Ds[MULTIPLY_BLOCK_SIZE]; for (int i = 0; i < 
BLOCKS - 1; i++) { if (ty == 0) Ds[tx] = exp(D[d + tx] * distance); As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; KW_LOCAL_FENCE; for (int k = 0; k < MULTIPLY_BLOCK_SIZE; ++k) Csub += As[ty][k] * Ds[k] * Bs[k][tx]; KW_LOCAL_FENCE; a += aStep; b += bStep; d += MULTIPLY_BLOCK_SIZE; } // Last block is too long if (tx < EDGE && ty < EDGE) { if (ty == 0) Ds[tx] = exp(D[d + tx] * distance); As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; } else { if (ty == 0) Ds[tx] = 0; As[ty][tx] = 0; Bs[ty][tx] = 0; } KW_LOCAL_FENCE; for (int k = 0; k < EDGE; k++) Csub += As[ty][k] * Ds[k] * Bs[k][tx]; KW_LOCAL_FENCE; // Write the block sub-matrix to device memory; // each thread writes one element if ((tx < EDGE || bx < BLOCKS - 1) && (ty < EDGE || by < BLOCKS - 1)) { // It's OK to write if (Csub < 0) C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = 0; else C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = Csub; } } KW_GLOBAL_KERNEL void kernelMatrixMulADBFirstDeriv(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* listC, KW_GLOBAL_VAR REAL* A, KW_GLOBAL_VAR REAL* D, KW_GLOBAL_VAR REAL* B, KW_GLOBAL_VAR REAL* distanceQueue, int length, int wB, int totalMatrix) { int wMatrix = KW_GROUP_ID_0 % totalMatrix; // Block index int bx = KW_GROUP_ID_0 / totalMatrix; int by = KW_GROUP_ID_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; int BLOCKS = KW_NUM_GROUPS_1; #ifdef CUDA KW_LOCAL_MEM REAL* C; KW_LOCAL_MEM REAL* CFirstDeriv; KW_LOCAL_MEM REAL distanceLength; KW_LOCAL_MEM REAL distanceRate; if (tx == 0 && ty == 0) { C = dMatrices + listC[wMatrix]; CFirstDeriv = dMatrices + listC[wMatrix + totalMatrix]; distanceLength = distanceQueue[wMatrix]; // Non-coalescent read distanceRate = distanceQueue[wMatrix + totalMatrix]; // Non-coalescent read } #elif defined(FW_OPENCL) KW_GLOBAL_VAR REAL* C; KW_GLOBAL_VAR REAL* CFirstDeriv; REAL distanceLength; REAL distanceRate; C = dMatrices + listC[wMatrix]; CFirstDeriv = dMatrices + listC[wMatrix + totalMatrix]; distanceLength = distanceQueue[wMatrix]; distanceRate = distanceQueue[wMatrix + totalMatrix]; #endif KW_LOCAL_FENCE; const int EDGE = PADDED_STATE_COUNT - (BLOCKS - 1) * MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of A int aStep = MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of B int bStep = MULTIPLY_BLOCK_SIZE * PADDED_STATE_COUNT; // Csub is used to store the element of the block sub-matrix // that is computed by the thread REAL Csub = 0; REAL CFirstDerivSub = 0; int a = PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE * by; int b = MULTIPLY_BLOCK_SIZE * bx; int d = 0; //MULTIPLY_BLOCK_SIZE * bx; KW_LOCAL_MEM REAL As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Bs[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Ds[MULTIPLY_BLOCK_SIZE][2]; for (int i = 0; i < BLOCKS - 1; i++) { if (ty == 0) { REAL scaledEigenTmp = D[d + tx] * distanceRate; Ds[tx][0] = exp(scaledEigenTmp * distanceLength); Ds[tx][1] = scaledEigenTmp * Ds[tx][0]; } As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; KW_LOCAL_FENCE; for (int k = 0; k < MULTIPLY_BLOCK_SIZE; ++k) { Csub += As[ty][k] * Ds[k][0] * Bs[k][tx]; CFirstDerivSub += As[ty][k] * Ds[k][1] * Bs[k][tx]; } KW_LOCAL_FENCE; a += aStep; b += bStep; d += 
MULTIPLY_BLOCK_SIZE; } // Last block is too long if (tx < EDGE && ty < EDGE) { if (ty == 0) { REAL scaledEigenTmp = D[d + tx] * distanceRate; Ds[tx][0] = exp(scaledEigenTmp * distanceLength); Ds[tx][1] = scaledEigenTmp * Ds[tx][0]; } As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; } else { if (ty == 0) { Ds[tx][0] = 0; Ds[tx][1] = 0; } As[ty][tx] = 0; Bs[ty][tx] = 0; } KW_LOCAL_FENCE; for (int k = 0; k < EDGE; k++) { Csub += As[ty][k] * Ds[k][0] * Bs[k][tx]; CFirstDerivSub += As[ty][k] * Ds[k][1] * Bs[k][tx]; } KW_LOCAL_FENCE; // Write the block sub-matrix to device memory; // each thread writes one element if ((tx < EDGE || bx < BLOCKS - 1) && (ty < EDGE || by < BLOCKS - 1)) { // It's OK to write if (Csub < 0) C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = 0; else C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = Csub; CFirstDeriv[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = CFirstDerivSub; } } KW_GLOBAL_KERNEL void kernelMatrixMulADBSecondDeriv(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* listC, KW_GLOBAL_VAR REAL* A, KW_GLOBAL_VAR REAL* D, KW_GLOBAL_VAR REAL* B, KW_GLOBAL_VAR REAL* distanceQueue, int length, int wB, int totalMatrix) { int wMatrix = KW_GROUP_ID_0 % totalMatrix; // Block index int bx = KW_GROUP_ID_0 / totalMatrix; int by = KW_GROUP_ID_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; int BLOCKS = KW_NUM_GROUPS_1; #ifdef CUDA KW_LOCAL_MEM REAL* C; KW_LOCAL_MEM REAL* CFirstDeriv; KW_LOCAL_MEM REAL* CSecondDeriv; KW_LOCAL_MEM REAL distanceLength; KW_LOCAL_MEM REAL distanceRate; if (tx == 0 && ty == 0) { C = dMatrices + listC[wMatrix]; CFirstDeriv = dMatrices + listC[wMatrix + totalMatrix]; CSecondDeriv = dMatrices + listC[wMatrix + totalMatrix * 2]; distanceLength = distanceQueue[wMatrix]; // Non-coalescent read distanceRate = distanceQueue[wMatrix + totalMatrix]; // Non-coalescent read } #elif defined(FW_OPENCL) KW_GLOBAL_VAR REAL* C; KW_GLOBAL_VAR REAL* CFirstDeriv; KW_GLOBAL_VAR REAL* CSecondDeriv; REAL distanceLength; REAL distanceRate; C = dMatrices + listC[wMatrix]; CFirstDeriv = dMatrices + listC[wMatrix + totalMatrix]; CSecondDeriv = dMatrices + listC[wMatrix + totalMatrix * 2]; distanceLength = distanceQueue[wMatrix]; distanceRate = distanceQueue[wMatrix + totalMatrix]; #endif KW_LOCAL_FENCE; const int EDGE = PADDED_STATE_COUNT - (BLOCKS - 1) * MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of A int aStep = MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of B int bStep = MULTIPLY_BLOCK_SIZE * PADDED_STATE_COUNT; // Csub is used to store the element of the block sub-matrix // that is computed by the thread REAL Csub = 0; REAL CFirstDerivSub = 0; REAL CSecondDerivSub = 0; int a = PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE * by; int b = MULTIPLY_BLOCK_SIZE * bx; int d = 0; //MULTIPLY_BLOCK_SIZE * bx; KW_LOCAL_MEM REAL As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Bs[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Ds[MULTIPLY_BLOCK_SIZE][3]; for (int i = 0; i < BLOCKS - 1; i++) { if (ty == 0) { REAL scaledEigenTmp = D[d + tx] * distanceRate; Ds[tx][0] = exp(scaledEigenTmp * distanceLength); Ds[tx][1] = scaledEigenTmp * Ds[tx][0]; Ds[tx][2] = scaledEigenTmp * Ds[tx][1]; } As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; 
Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; KW_LOCAL_FENCE; for (int k = 0; k < MULTIPLY_BLOCK_SIZE; ++k) { Csub += As[ty][k] * Ds[k][0] * Bs[k][tx]; CFirstDerivSub += As[ty][k] * Ds[k][1] * Bs[k][tx]; CSecondDerivSub += As[ty][k] * Ds[k][2] * Bs[k][tx]; } KW_LOCAL_FENCE; a += aStep; b += bStep; d += MULTIPLY_BLOCK_SIZE; } // Last block is too long if (tx < EDGE && ty < EDGE) { if (ty == 0) { REAL scaledEigenTmp = D[d + tx] * distanceRate; Ds[tx][0] = exp(scaledEigenTmp * distanceLength); Ds[tx][1] = scaledEigenTmp * Ds[tx][0]; Ds[tx][2] = scaledEigenTmp * Ds[tx][1]; } As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; } else { if (ty == 0) { Ds[tx][0] = 0; Ds[tx][1] = 0; Ds[tx][2] = 0; } As[ty][tx] = 0; Bs[ty][tx] = 0; } KW_LOCAL_FENCE; for (int k = 0; k < EDGE; k++) { Csub += As[ty][k] * Ds[k][0] * Bs[k][tx]; CFirstDerivSub += As[ty][k] * Ds[k][1] * Bs[k][tx]; CSecondDerivSub += As[ty][k] * Ds[k][2] * Bs[k][tx]; } KW_LOCAL_FENCE; // Write the block sub-matrix to device memory; // each thread writes one element if ((tx < EDGE || bx < BLOCKS - 1) && (ty < EDGE || by < BLOCKS - 1)) { // It's OK to write if (Csub < 0) C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = 0; else C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = Csub; CFirstDeriv[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = CFirstDerivSub; CSecondDeriv[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = CSecondDerivSub; } } KW_GLOBAL_KERNEL void kernelMatrixConvolution(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* list, int totalMatrixCount ) { int wMatrix = KW_GROUP_ID_0 % totalMatrixCount; // Block index int bx = KW_GROUP_ID_0 / totalMatrixCount; int by = KW_GROUP_ID_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; int BLOCKS = KW_NUM_GROUPS_1; #ifdef CUDA KW_LOCAL_MEM REAL* A; KW_LOCAL_MEM REAL* B; KW_LOCAL_MEM REAL* C; if (tx == 0 && ty == 0) { A = dMatrices + list[wMatrix]; // Non-coalescent read B = dMatrices + list[wMatrix + totalMatrixCount]; // Non-coalescent read C = dMatrices + list[wMatrix + totalMatrixCount*2]; // Non-coalescent read } #elif defined(FW_OPENCL) KW_GLOBAL_VAR REAL* A; KW_GLOBAL_VAR REAL* B; KW_GLOBAL_VAR REAL* C; A = dMatrices + list[wMatrix]; B = dMatrices + list[wMatrix + totalMatrixCount]; C = dMatrices + list[wMatrix + totalMatrixCount*2]; #endif KW_LOCAL_FENCE; const int EDGE = PADDED_STATE_COUNT - (BLOCKS - 1) * MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of A int aStep = MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of B int bStep = MULTIPLY_BLOCK_SIZE * PADDED_STATE_COUNT; // Csub is used to store the element of the block sub-matrix // that is computed by the thread REAL Csub = 0; int a = PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE * by; int b = MULTIPLY_BLOCK_SIZE * bx; KW_LOCAL_MEM REAL As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Bs[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; for (int i = 0; i < BLOCKS - 1; i++) { As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; KW_LOCAL_FENCE; for (int k = 0; k < MULTIPLY_BLOCK_SIZE; ++k) Csub += As[ty][k] * Bs[k][tx]; KW_LOCAL_FENCE; a += aStep; b += bStep; }//END: BLOCKS loop // Last block is too long if (tx < EDGE && ty 
< EDGE) { #ifndef KERNEL_PRINT_ENABLED KW_LOCAL_FENCE; #endif As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; Bs[ty][tx] = B[b + PADDED_STATE_COUNT * ty + tx]; } else { As[ty][tx] = 0; Bs[ty][tx] = 0; }//END: EDGE check KW_LOCAL_FENCE; for (int k = 0; k < EDGE; k++) { Csub += As[ty][k] * Bs[k][tx]; } KW_LOCAL_FENCE; // Write the block sub-matrix to device memory; // each thread writes one element if ((tx < EDGE || bx < BLOCKS - 1) && (ty < EDGE || by < BLOCKS - 1)) { // It's OK to write if (Csub < 0) { C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = 0; } else { C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = Csub; }//END: Csub check }//END: EDGE check }//END: kernelMatrixConvolution KW_GLOBAL_KERNEL void kernelMatrixTranspose(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* list, int totalMatrixCount) { int wMatrix = KW_GROUP_ID_0 % totalMatrixCount; // Block index int bx = KW_GROUP_ID_0 / totalMatrixCount; int by = KW_GROUP_ID_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; #ifdef CUDA KW_LOCAL_MEM REAL* A; KW_LOCAL_MEM REAL* C; if (tx == 0 && ty == 0) { A = dMatrices + list[wMatrix]; // Non-coalescent read C = dMatrices + list[wMatrix + totalMatrixCount]; // Non-coalescent read } #elif defined(FW_OPENCL) KW_GLOBAL_VAR REAL* A; KW_GLOBAL_VAR REAL* C; A = dMatrices + list[wMatrix]; C = dMatrices + list[wMatrix + totalMatrixCount]; #endif KW_LOCAL_FENCE; const int rowOffset = MULTIPLY_BLOCK_SIZE * bx; const int colOffset = MULTIPLY_BLOCK_SIZE * by; const int row = rowOffset + tx; const int col = colOffset + ty; KW_LOCAL_MEM REAL As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; if (row < PADDED_STATE_COUNT && col < PADDED_STATE_COUNT) { As[ty][tx] = A[PADDED_STATE_COUNT * colOffset + rowOffset + PADDED_STATE_COUNT * ty + tx]; } KW_LOCAL_FENCE; if (row < PADDED_STATE_COUNT && col < PADDED_STATE_COUNT) { C[PADDED_STATE_COUNT * rowOffset + colOffset + PADDED_STATE_COUNT * ty + tx] = As[tx][ty]; } } KW_GLOBAL_KERNEL void kernelMatrixMulADBComplexMulti(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* offsets, KW_GLOBAL_VAR REAL* Alist, KW_GLOBAL_VAR REAL* Dlist, KW_GLOBAL_VAR REAL* Blist, KW_GLOBAL_VAR REAL* distanceQueue, int length, int wB, int totalMatrix) { #if !(defined(FW_OPENCL_APPLEAMDGPU) && defined(DOUBLE_PRECISION)) // TODO: fix this issue int wMatrix = KW_GROUP_ID_0 % totalMatrix; int offIndex = wMatrix * 3; // Block index int bx = KW_GROUP_ID_0 / totalMatrix; int by = KW_GROUP_ID_1; int BLOCKS = KW_NUM_GROUPS_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; KW_GLOBAL_VAR REAL* C = dMatrices + offsets[offIndex]; KW_GLOBAL_VAR REAL* B = Blist + offsets[offIndex + 1]; // dEvec KW_GLOBAL_VAR REAL* A = Alist + offsets[offIndex + 1]; // dIevc KW_GLOBAL_VAR REAL* D = Dlist + offsets[offIndex + 2]; // dEigenValues REAL distance = distanceQueue[wMatrix]; const int EDGE = PADDED_STATE_COUNT - (BLOCKS - 1) * MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of A int aStep = MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of B int bStep = MULTIPLY_BLOCK_SIZE * PADDED_STATE_COUNT; // Csub is used to store the element of the block sub-matrix // that is computed by the thread REAL Csub = 0; int a = PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE * by; int b = MULTIPLY_BLOCK_SIZE * bx; int d = 0; //MULTIPLY_BLOCK_SIZE * bx; KW_LOCAL_MEM REAL 
As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Bs[MULTIPLY_BLOCK_SIZE + 2][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Cs[MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Ds[MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Es[MULTIPLY_BLOCK_SIZE + 2]; #ifdef CUDA REAL* B0 = &Bs[1][0]; REAL* Bm1 = &Bs[0][0]; REAL* Bp1 = &Bs[2][0]; REAL* E0 = &Es[1]; #elif defined(FW_OPENCL) KW_LOCAL_MEM REAL* B0 = &Bs[1][0]; KW_LOCAL_MEM REAL* Bm1 = &Bs[0][0]; KW_LOCAL_MEM REAL* Bp1 = &Bs[2][0]; KW_LOCAL_MEM REAL* E0 = &Es[1]; #endif // Zero first row of Bs and Es if (ty == 0) { Bs[0][tx] = 0; if (tx == 0) { Es[0] = 0; } } while (d + MULTIPLY_BLOCK_SIZE < PADDED_STATE_COUNT) { // READ_SCHUR_VALUES(); if (ty == 0) { Ds[tx] = exp(D[d + tx] * distance); Cs[tx] = D[d + PADDED_STATE_COUNT + tx] * distance; if (Cs[tx]) { REAL expat = Ds[tx]; REAL cosbt = cos(Cs[tx]); #ifdef FW_OPENCL_AMDGPU Cs[tx] = -expat * sin(Cs[tx] + 0.0); #else Cs[tx] = -expat * sin(Cs[tx]); #endif Ds[tx] *= cosbt; } } // Block read A and B sub-matrices As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; B0[ty * MULTIPLY_BLOCK_SIZE + tx] = B[b + PADDED_STATE_COUNT * ty + tx]; // Read extra row of B for Bp1 if (ty == 0) { B0[MULTIPLY_BLOCK_SIZE * MULTIPLY_BLOCK_SIZE + tx] = B[b + PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE + tx]; } // All necessary values loaded KW_LOCAL_FENCE; // POPULATE_SCHUR_BAND(MULTIPLY_BLOCK_SIZE); if (ty == 0 && tx == 0) { for(int k=0; k<MULTIPLY_BLOCK_SIZE; k++) { if (Cs[k] && !Es[k]) { E0[k] = Cs[k]; } else { E0[k] = 0; } } } KW_LOCAL_FENCE; // DO_MULTIPLICATION(MULTIPLY_BLOCK_SIZE); for (int k = 0; k < MULTIPLY_BLOCK_SIZE; k++) { Csub += As[ty][k] * ( Ds[k] * B0 [k * MULTIPLY_BLOCK_SIZE + tx] + E0[k] * Bp1[k * MULTIPLY_BLOCK_SIZE + tx] - Es[k] * Bm1[k * MULTIPLY_BLOCK_SIZE + tx] ); } // Move last entries in B0 and E0 to first entries in Bs and Es if (ty == 0) { Bm1[tx] = Bm1[MULTIPLY_BLOCK_SIZE*MULTIPLY_BLOCK_SIZE + tx]; if (tx == 0) { Es[0] = Es[MULTIPLY_BLOCK_SIZE]; } } KW_LOCAL_FENCE; // Increment sub-matrices a += aStep; b += bStep; d += MULTIPLY_BLOCK_SIZE; } if (tx < EDGE && ty < EDGE) { // Last block is too long // READ_SCHUR_VALUES(); if (ty == 0) { Ds[tx] = exp(D[d + tx] * distance); Cs[tx] = D[d + PADDED_STATE_COUNT + tx] * distance; if (Cs[tx]) { REAL expat = Ds[tx]; REAL cosbt = cos(Cs[tx]); #ifdef FW_OPENCL_AMDGPU Cs[tx] = -expat * sin(Cs[tx] + 0.0); #else Cs[tx] = -expat * sin(Cs[tx]); #endif Ds[tx] *= cosbt; } } As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; B0[ty * MULTIPLY_BLOCK_SIZE + tx] = B[b + PADDED_STATE_COUNT * ty + tx]; } else { if (ty == 0) { Ds[tx] = 0; Cs[tx] = 0; } As[ty][tx] = 0; B0[ty * MULTIPLY_BLOCK_SIZE + tx] = 0; } // Zero last row of Bs and Es (only for unrolled iteration at end) if (ty == 0) { Bs[MULTIPLY_BLOCK_SIZE+1][tx] = 0; } // All necessary values loaded KW_LOCAL_FENCE; // POPULATE_SCHUR_BAND(EDGE); if (ty == 0 && tx == 0) { for(int k=0; k<EDGE; k++) { if (Cs[k] && !Es[k]) { E0[k] = Cs[k]; } else { E0[k] = 0; } } } KW_LOCAL_FENCE; // Do matrix multiplication // DO_MULTIPLICATION(EDGE); for (int k = 0; k < EDGE; k++) { Csub += As[ty][k] * ( Ds[k] * B0 [k * MULTIPLY_BLOCK_SIZE + tx] + E0[k] * Bp1[k * MULTIPLY_BLOCK_SIZE + tx] - Es[k] * Bm1[k * MULTIPLY_BLOCK_SIZE + tx] ); } KW_LOCAL_FENCE; // Write the block sub-matrix to device memory; // each thread writes one element if (Csub < 0) Csub = 0; if ((tx < EDGE || bx < BLOCKS - 1) && (ty < EDGE || by < BLOCKS - 1)) { // It's OK to write C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty 
+ tx] = Csub; } #endif } KW_GLOBAL_KERNEL void kernelMatrixMulADBComplex(KW_GLOBAL_VAR REAL* dMatrices, KW_GLOBAL_VAR unsigned int* listC, KW_GLOBAL_VAR REAL* A, KW_GLOBAL_VAR REAL* D, KW_GLOBAL_VAR REAL* B, KW_GLOBAL_VAR REAL* distanceQueue, int length, int wB, int totalMatrix) { #if !(defined(FW_OPENCL_APPLEAMDGPU) && defined(DOUBLE_PRECISION)) // TODO: fix this issue int wMatrix = KW_GROUP_ID_0 % totalMatrix; // Block index int bx = KW_GROUP_ID_0 / totalMatrix; int by = KW_GROUP_ID_1; int BLOCKS = KW_NUM_GROUPS_1; // Thread index int tx = KW_LOCAL_ID_0; int ty = KW_LOCAL_ID_1; #ifdef CUDA KW_LOCAL_MEM REAL* C; KW_LOCAL_MEM REAL distance; if (tx == 0 && ty == 0) { C = dMatrices + listC[wMatrix]; distance = distanceQueue[wMatrix]; // Non-coalescent read } #elif defined(FW_OPENCL) KW_GLOBAL_VAR REAL* C; REAL distance; C = dMatrices + listC[wMatrix]; distance = distanceQueue[wMatrix]; #endif KW_LOCAL_FENCE; const int EDGE = PADDED_STATE_COUNT - (BLOCKS - 1) * MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of A int aStep = MULTIPLY_BLOCK_SIZE; // Step size used to iterate through the sub-matrices of B int bStep = MULTIPLY_BLOCK_SIZE * PADDED_STATE_COUNT; // Csub is used to store the element of the block sub-matrix // that is computed by the thread REAL Csub = 0; int a = PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE * by; int b = MULTIPLY_BLOCK_SIZE * bx; int d = 0; //MULTIPLY_BLOCK_SIZE * bx; KW_LOCAL_MEM REAL As[MULTIPLY_BLOCK_SIZE][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Bs[MULTIPLY_BLOCK_SIZE + 2][MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Cs[MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Ds[MULTIPLY_BLOCK_SIZE]; KW_LOCAL_MEM REAL Es[MULTIPLY_BLOCK_SIZE + 2]; #ifdef CUDA REAL* B0 = &Bs[1][0]; REAL* Bm1 = &Bs[0][0]; REAL* Bp1 = &Bs[2][0]; REAL* E0 = &Es[1]; #elif defined(FW_OPENCL) KW_LOCAL_MEM REAL* B0 = &Bs[1][0]; KW_LOCAL_MEM REAL* Bm1 = &Bs[0][0]; KW_LOCAL_MEM REAL* Bp1 = &Bs[2][0]; KW_LOCAL_MEM REAL* E0 = &Es[1]; #endif // Zero first row of Bs and Es if (ty == 0) { Bs[0][tx] = 0; if (tx == 0) { Es[0] = 0; } } while (d + MULTIPLY_BLOCK_SIZE < PADDED_STATE_COUNT) { // READ_SCHUR_VALUES(); if (ty == 0) { Ds[tx] = exp(D[d + tx] * distance); Cs[tx] = D[d + PADDED_STATE_COUNT + tx] * distance; if (Cs[tx]) { REAL expat = Ds[tx]; REAL cosbt = cos(Cs[tx]); #ifdef FW_OPENCL_AMDGPU Cs[tx] = -expat * sin(Cs[tx] + 0.0); #else Cs[tx] = -expat * sin(Cs[tx]); #endif Ds[tx] *= cosbt; } } // Block read A and B sub-matrices As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; B0[ty * MULTIPLY_BLOCK_SIZE + tx] = B[b + PADDED_STATE_COUNT * ty + tx]; // Read extra row of B for Bp1 if (ty == 0) { B0[MULTIPLY_BLOCK_SIZE * MULTIPLY_BLOCK_SIZE + tx] = B[b + PADDED_STATE_COUNT * MULTIPLY_BLOCK_SIZE + tx]; } // All necessary values loaded KW_LOCAL_FENCE; // POPULATE_SCHUR_BAND(MULTIPLY_BLOCK_SIZE); if (ty == 0 && tx == 0) { for(int k=0; k<MULTIPLY_BLOCK_SIZE; k++) { if (Cs[k] && !Es[k]) { E0[k] = Cs[k]; } else { E0[k] = 0; } } } KW_LOCAL_FENCE; // DO_MULTIPLICATION(MULTIPLY_BLOCK_SIZE); for (int k = 0; k < MULTIPLY_BLOCK_SIZE; k++) { Csub += As[ty][k] * ( Ds[k] * B0 [k * MULTIPLY_BLOCK_SIZE + tx] + E0[k] * Bp1[k * MULTIPLY_BLOCK_SIZE + tx] - Es[k] * Bm1[k * MULTIPLY_BLOCK_SIZE + tx] ); } // Move last entries in B0 and E0 to first entries in Bs and Es if (ty == 0) { Bm1[tx] = Bm1[MULTIPLY_BLOCK_SIZE*MULTIPLY_BLOCK_SIZE + tx]; if (tx == 0) { Es[0] = Es[MULTIPLY_BLOCK_SIZE]; } } KW_LOCAL_FENCE; // Increment sub-matrices a += aStep; b += bStep; d += MULTIPLY_BLOCK_SIZE; } if (tx < EDGE && ty < EDGE) { // 
Last block is too long // READ_SCHUR_VALUES(); if (ty == 0) { Ds[tx] = exp(D[d + tx] * distance); Cs[tx] = D[d + PADDED_STATE_COUNT + tx] * distance; if (Cs[tx]) { REAL expat = Ds[tx]; REAL cosbt = cos(Cs[tx]); #ifdef FW_OPENCL_AMDGPU Cs[tx] = -expat * sin(Cs[tx] + 0.0); #else Cs[tx] = -expat * sin(Cs[tx]); #endif Ds[tx] *= cosbt; } } As[ty][tx] = A[a + PADDED_STATE_COUNT * ty + tx]; B0[ty * MULTIPLY_BLOCK_SIZE + tx] = B[b + PADDED_STATE_COUNT * ty + tx]; } else { if (ty == 0) { Ds[tx] = 0; Cs[tx] = 0; } As[ty][tx] = 0; B0[ty * MULTIPLY_BLOCK_SIZE + tx] = 0; } // Zero last row of Bs and Es (only for unrolled iteration at end) if (ty == 0) { Bs[MULTIPLY_BLOCK_SIZE+1][tx] = 0; } // All necessary values loaded KW_LOCAL_FENCE; // POPULATE_SCHUR_BAND(EDGE); if (ty == 0 && tx == 0) { for(int k=0; k<EDGE; k++) { if (Cs[k] && !Es[k]) { E0[k] = Cs[k]; } else { E0[k] = 0; } } } KW_LOCAL_FENCE; // Do matrix multiplication // DO_MULTIPLICATION(EDGE); for (int k = 0; k < EDGE; k++) { Csub += As[ty][k] * ( Ds[k] * B0 [k * MULTIPLY_BLOCK_SIZE + tx] + E0[k] * Bp1[k * MULTIPLY_BLOCK_SIZE + tx] - Es[k] * Bm1[k * MULTIPLY_BLOCK_SIZE + tx] ); } KW_LOCAL_FENCE; // Write the block sub-matrix to device memory; // each thread writes one element if (Csub < 0) Csub = 0; if ((tx < EDGE || bx < BLOCKS - 1) && (ty < EDGE || by < BLOCKS - 1)) { // It's OK to write C[PADDED_STATE_COUNT* MULTIPLY_BLOCK_SIZE * by + MULTIPLY_BLOCK_SIZE * bx + PADDED_STATE_COUNT * ty + tx] = Csub; } #endif } KW_GLOBAL_KERNEL void kernelSumSites1(KW_GLOBAL_VAR REAL* dArray, KW_GLOBAL_VAR REAL* dSum, KW_GLOBAL_VAR REAL* dPatternWeights, int patternCount) { #ifdef FW_OPENCL_CPU REAL sum = 0; int pattern = KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; int maxPattern = (KW_GROUP_ID_0 + 1) * SUM_SITES_BLOCK_SIZE; if (maxPattern > patternCount) maxPattern = patternCount; while (pattern < maxPattern) { FMA(dArray[pattern], dPatternWeights[pattern], sum); pattern++; } dSum[KW_GROUP_ID_0] = sum; #else KW_LOCAL_MEM REAL sum[SUM_SITES_BLOCK_SIZE]; int tx = KW_LOCAL_ID_0; int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; if (pattern < patternCount) sum[tx] = dArray[pattern] * dPatternWeights[pattern]; else sum[tx] = 0.0; KW_LOCAL_FENCE; for (unsigned int s = SUM_SITES_BLOCK_SIZE / 2; s > 0; s >>= 1) { if (tx < s) sum[tx] += sum[tx + s]; KW_LOCAL_FENCE; } if (tx == 0) dSum[KW_GROUP_ID_0] = sum[0]; #endif } KW_GLOBAL_KERNEL void kernelSumSites1Partition(KW_GLOBAL_VAR REAL* dArray, KW_GLOBAL_VAR REAL* dSum, KW_GLOBAL_VAR REAL* dPatternWeights, int startPattern, int endPattern) { #ifdef FW_OPENCL_CPU REAL sum = 0; int pattern = startPattern + KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; int maxPattern = startPattern + (KW_GROUP_ID_0 + 1) * SUM_SITES_BLOCK_SIZE; if (maxPattern > endPattern) maxPattern = endPattern; while (pattern < maxPattern) { FMA(dArray[pattern], dPatternWeights[pattern], sum); pattern++; } dSum[KW_GROUP_ID_0] = sum; #else KW_LOCAL_MEM REAL sum[SUM_SITES_BLOCK_SIZE]; int tx = KW_LOCAL_ID_0; int pattern = startPattern + KW_LOCAL_ID_0 + KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; if (pattern < endPattern) sum[tx] = dArray[pattern] * dPatternWeights[pattern]; else sum[tx] = 0.0; KW_LOCAL_FENCE; for (unsigned int s = SUM_SITES_BLOCK_SIZE / 2; s > 0; s >>= 1) { if (tx < s) sum[tx] += sum[tx + s]; KW_LOCAL_FENCE; } if (tx == 0) dSum[KW_GROUP_ID_0] = sum[0]; #endif } // KW_GLOBAL_KERNEL void kernelSumSites1Partition(KW_GLOBAL_VAR REAL* dArray, // KW_GLOBAL_VAR REAL* dSum, // KW_GLOBAL_VAR REAL* dPatternWeights, // KW_GLOBAL_VAR unsigned int* 
dPtrOffsets) { // int opIndexPtr = KW_GROUP_ID_0 * 2; // int startPattern = dPtrOffsets[opIndexPtr ]; // int endPattern = dPtrOffsets[opIndexPtr + 1]; // #ifdef FW_OPENCL_CPU // REAL sum = 0; // int pattern = startPattern + KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; // while (pattern < endPattern) { // FMA(dArray[pattern], dPatternWeights[pattern], sum); // pattern++; // } // dSum[KW_GROUP_ID_0] = sum; // #else // KW_LOCAL_MEM REAL sum[SUM_SITES_BLOCK_SIZE]; // int tx = KW_LOCAL_ID_0; // int pattern = startPattern + KW_LOCAL_ID_0 + KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; // if (pattern < endPattern) // sum[tx] = dArray[pattern] * dPatternWeights[pattern]; // else // sum[tx] = 0.0; // KW_LOCAL_FENCE; // for (unsigned int s = SUM_SITES_BLOCK_SIZE / 2; s > 0; s >>= 1) { // if (tx < s) // sum[tx] += sum[tx + s]; // KW_LOCAL_FENCE; // } // if (tx == 0) // dSum[KW_GROUP_ID_0] = sum[0]; // #endif // } KW_GLOBAL_KERNEL void kernelSumSites2(KW_GLOBAL_VAR REAL* dArray1, KW_GLOBAL_VAR REAL* dSum1, KW_GLOBAL_VAR REAL* dArray2, KW_GLOBAL_VAR REAL* dSum2, KW_GLOBAL_VAR REAL* dPatternWeights, int patternCount) { #ifdef FW_OPENCL_CPU REAL sum1 = 0, sum2 = 0; int pattern = KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; int maxPattern = (KW_GROUP_ID_0 + 1) * SUM_SITES_BLOCK_SIZE; if (maxPattern > patternCount) maxPattern = patternCount; while (pattern < maxPattern) { FMA(dArray1[pattern], dPatternWeights[pattern], sum1); FMA(dArray2[pattern], dPatternWeights[pattern], sum2); pattern++; } dSum1[KW_GROUP_ID_0] = sum1; dSum2[KW_GROUP_ID_0] = sum2; #else KW_LOCAL_MEM REAL sum1[SUM_SITES_BLOCK_SIZE]; KW_LOCAL_MEM REAL sum2[SUM_SITES_BLOCK_SIZE]; int tx = KW_LOCAL_ID_0; int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; if (pattern < patternCount) { REAL pWeight = dPatternWeights[pattern]; sum1[tx] = dArray1[pattern] * pWeight; sum2[tx] = dArray2[pattern] * pWeight; } else { sum1[tx] = 0.0; sum2[tx] = 0.0; } KW_LOCAL_FENCE; for (unsigned int s = SUM_SITES_BLOCK_SIZE / 2; s > 0; s >>= 1) { if (tx < s) { sum1[tx] += sum1[tx + s]; sum2[tx] += sum2[tx + s]; } KW_LOCAL_FENCE; } if (tx == 0) { dSum1[KW_GROUP_ID_0] = sum1[0]; dSum2[KW_GROUP_ID_0] = sum2[0]; } #endif } KW_GLOBAL_KERNEL void kernelSumSites3(KW_GLOBAL_VAR REAL* dArray1, KW_GLOBAL_VAR REAL* dSum1, KW_GLOBAL_VAR REAL* dArray2, KW_GLOBAL_VAR REAL* dSum2, KW_GLOBAL_VAR REAL* dArray3, KW_GLOBAL_VAR REAL* dSum3, KW_GLOBAL_VAR REAL* dPatternWeights, int patternCount) { #ifdef FW_OPENCL_CPU REAL sum1 = 0, sum2 = 0, sum3 = 0; int pattern = KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; int maxPattern = (KW_GROUP_ID_0 + 1) * SUM_SITES_BLOCK_SIZE; if (maxPattern > patternCount) maxPattern = patternCount; while (pattern < maxPattern) { FMA(dArray1[pattern], dPatternWeights[pattern], sum1); FMA(dArray2[pattern], dPatternWeights[pattern], sum2); FMA(dArray3[pattern], dPatternWeights[pattern], sum3); pattern++; } dSum1[KW_GROUP_ID_0] = sum1; dSum2[KW_GROUP_ID_0] = sum2; dSum3[KW_GROUP_ID_0] = sum3; #else KW_LOCAL_MEM REAL sum1[SUM_SITES_BLOCK_SIZE]; KW_LOCAL_MEM REAL sum2[SUM_SITES_BLOCK_SIZE]; KW_LOCAL_MEM REAL sum3[SUM_SITES_BLOCK_SIZE]; int tx = KW_LOCAL_ID_0; int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * SUM_SITES_BLOCK_SIZE; if (pattern < patternCount) { REAL pWeight = dPatternWeights[pattern]; sum1[tx] = dArray1[pattern] * pWeight; sum2[tx] = dArray2[pattern] * pWeight; sum3[tx] = dArray3[pattern] * pWeight; } else { sum1[tx] = 0.0; sum2[tx] = 0.0; sum3[tx] = 0.0; } KW_LOCAL_FENCE; for (unsigned int s = SUM_SITES_BLOCK_SIZE / 2; s > 0; s >>= 1) { if (tx < s) { sum1[tx] += 
sum1[tx + s]; sum2[tx] += sum2[tx + s]; sum3[tx] += sum3[tx + s]; } KW_LOCAL_FENCE; } if (tx == 0) { dSum1[KW_GROUP_ID_0] = sum1[0]; dSum2[KW_GROUP_ID_0] = sum2[0]; dSum3[KW_GROUP_ID_0] = sum3[0]; } #endif } KW_GLOBAL_KERNEL void kernelAccumulateFactors(KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int patternCount) { int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { // if (KW_LOCAL_ID_0 == 0) // TODO Why does this not work??? nodeScales = dScalingFactors + dNodePtrQueue[n]; // KW_LOCAL_FENCE; #ifdef KERNEL_PRINT_ENABLED if (pattern == 1) printf("added %1.2e\n", nodeScales[pattern]); #endif REAL factor = nodeScales[pattern]; if (factor != 1.0) { total += log(factor); } } #ifdef FW_OPENCL_CPU // CPU/MIC implementation rootScaling[pattern] += total; #else // GPU implementation if (pattern < patternCount) rootScaling[pattern] += total; #endif // FW_OPENCL_CPU } KW_GLOBAL_KERNEL void kernelAccumulateFactorsByPartition(KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int startPattern, int endPattern) { int pattern = startPattern + KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { nodeScales = dScalingFactors + dNodePtrQueue[n]; REAL factor = nodeScales[pattern]; if (factor != 1.0) { total += log(factor); } } if (pattern < endPattern) { rootScaling[pattern] += total; } } KW_GLOBAL_KERNEL void kernelAccumulateFactorsScalersLog(KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int patternCount) { int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { // if (KW_LOCAL_ID_0 == 0) // TODO Why does this not work??? nodeScales = dScalingFactors + dNodePtrQueue[n]; // KW_LOCAL_FENCE; #ifdef KERNEL_PRINT_ENABLED if (pattern == 1) printf("added %1.2e\n", nodeScales[pattern]); #endif total += nodeScales[pattern]; } #ifdef FW_OPENCL_CPU // CPU/MIC implementation rootScaling[pattern] += total; #else // GPU implementation if (pattern < patternCount) rootScaling[pattern] += total; #endif // FW_OPENCL_CPU } KW_GLOBAL_KERNEL void kernelAccumulateFactorsScalersLogByPartition( KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int startPattern, int endPattern) { int pattern = startPattern + KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { nodeScales = dScalingFactors + dNodePtrQueue[n]; total += nodeScales[pattern]; } if (pattern < endPattern) { rootScaling[pattern] += total; } } KW_GLOBAL_KERNEL void kernelRemoveFactors(KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int patternCount) { int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { // if (KW_LOCAL_ID_0 == 0) // TODO Why does this not work??? 
nodeScales = dScalingFactors + dNodePtrQueue[n]; // KW_LOCAL_FENCE; #ifdef KERNEL_PRINT_ENABLED if (pattern == 1) printf("added %1.2e\n", nodeScales[pattern]); #endif REAL factor = nodeScales[pattern]; if (factor != 1.0) { total += log(factor); } } #ifdef FW_OPENCL_CPU // CPU/MIC implementation rootScaling[pattern] -= total; #else // GPU implementation if (pattern < patternCount) rootScaling[pattern] -= total; #endif // FW_OPENCL_CPU } KW_GLOBAL_KERNEL void kernelRemoveFactorsByPartition(KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int startPattern, int endPattern) { int pattern = startPattern + KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { nodeScales = dScalingFactors + dNodePtrQueue[n]; REAL factor = nodeScales[pattern]; if (factor != 1.0) { total += log(factor); } } if (pattern < endPattern) { rootScaling[pattern] -= total; } } KW_GLOBAL_KERNEL void kernelRemoveFactorsScalersLog(KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int patternCount) { int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { // if (KW_LOCAL_ID_0 == 0) // TODO Why does this not work??? nodeScales = dScalingFactors + dNodePtrQueue[n]; // KW_LOCAL_FENCE; #ifdef KERNEL_PRINT_ENABLED if (pattern == 1) printf("added %1.2e\n", nodeScales[pattern]); #endif total += nodeScales[pattern]; } #ifdef FW_OPENCL_CPU // CPU/MIC implementation rootScaling[pattern] -= total; #else // GPU implementation if (pattern < patternCount) rootScaling[pattern] -= total; #endif // FW_OPENCL_CPU } KW_GLOBAL_KERNEL void kernelRemoveFactorsScalersLogByPartition(KW_GLOBAL_VAR REAL* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR REAL* rootScaling, int nodeCount, int startPattern, int endPattern) { int pattern = startPattern + KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; REAL total = 0; KW_GLOBAL_VAR REAL* nodeScales; int n; for(n = 0; n < nodeCount; n++) { nodeScales = dScalingFactors + dNodePtrQueue[n]; total += nodeScales[pattern]; } if (pattern < endPattern) rootScaling[pattern] -= total; } KW_GLOBAL_KERNEL void kernelResetFactorsByPartition(KW_GLOBAL_VAR REAL* dScalingFactors, int startPattern, int endPattern) { int pattern = startPattern + KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; if (pattern < endPattern) { dScalingFactors[pattern] = 0.0; } } KW_GLOBAL_KERNEL void kernelPartialsDynamicScalingSlow(KW_GLOBAL_VAR REAL* allPartials, KW_GLOBAL_VAR REAL* scalingFactors, int matrixCount) { int state = KW_LOCAL_ID_0; int pattern = KW_GROUP_ID_0; int patternCount = KW_NUM_GROUPS_0; KW_LOCAL_MEM REAL partials[PADDED_STATE_COUNT]; KW_LOCAL_MEM REAL max; if (state == 0) max = 0.0; int m; for(m = 0; m < matrixCount; m++) { partials[state] = allPartials[m * patternCount * PADDED_STATE_COUNT + pattern * PADDED_STATE_COUNT + state]; KW_LOCAL_FENCE; #ifdef IS_POWER_OF_TWO // parallelized reduction *** only works for powers-of-2 **** for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) { if (state < i) { #else for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) { if (state < i && state + i < PADDED_STATE_COUNT ) { #endif // IS_POWER_OF_TWO REAL compare1 = partials[state]; REAL compare2 = partials[state + i]; if(compare2 > compare1) partials[state] = compare2; } 
KW_LOCAL_FENCE; } if(state == 0) { if( partials[0] > max) max = partials[0]; } } if(state == 0) { if (max == 0) max = 1.0; scalingFactors[pattern] = max; } KW_LOCAL_FENCE; for(m = 0; m < matrixCount; m++) allPartials[m * patternCount * PADDED_STATE_COUNT + pattern * PADDED_STATE_COUNT + state] /= max; } KW_GLOBAL_KERNEL void kernelPartialsDynamicScalingSlowScalersLog(KW_GLOBAL_VAR REAL* allPartials, KW_GLOBAL_VAR REAL* scalingFactors, int matrixCount) { int state = KW_LOCAL_ID_0; int pattern = KW_GROUP_ID_0; int patternCount = KW_NUM_GROUPS_0; KW_LOCAL_MEM REAL partials[PADDED_STATE_COUNT]; KW_LOCAL_MEM REAL max; if (state == 0) max = 0.0; int m; for(m = 0; m < matrixCount; m++) { partials[state] = allPartials[m * patternCount * PADDED_STATE_COUNT + pattern * PADDED_STATE_COUNT + state]; KW_LOCAL_FENCE; #ifdef IS_POWER_OF_TWO // parallelized reduction *** only works for powers-of-2 **** for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) { if (state < i) { #else for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) { if (state < i && state + i < PADDED_STATE_COUNT ) { #endif // IS_POWER_OF_TWO REAL compare1 = partials[state]; REAL compare2 = partials[state + i]; if(compare2 > compare1) partials[state] = compare2; } KW_LOCAL_FENCE; } if(state == 0) { if( partials[0] > max) max = partials[0]; } } if(state == 0) { if (max == 0) { max = 1.0; scalingFactors[pattern] = 0.0; } else { scalingFactors[pattern] = log(max); } } KW_LOCAL_FENCE; for(m = 0; m < matrixCount; m++) allPartials[m * patternCount * PADDED_STATE_COUNT + pattern * PADDED_STATE_COUNT + state] /= max; } KW_GLOBAL_KERNEL void kernelMultipleNodeSiteReduction(KW_GLOBAL_VAR REAL* dOut, KW_GLOBAL_VAR REAL* dIn, KW_GLOBAL_VAR REAL* dPatternWeights, int outOffset, int patternCount) { #ifdef FW_OPENCL_CPU // TODO #else KW_LOCAL_MEM REAL reduce[MULTI_NODE_SUM_BLOCK_SIZE]; int tx = KW_LOCAL_ID_0; int node = KW_GROUP_ID_0; int offset = patternCount * node; int pattern = tx; REAL sum = 0; while (pattern < patternCount) { FMA(dIn[offset + pattern], dPatternWeights[pattern], sum); pattern += MULTI_NODE_SUM_BLOCK_SIZE; } reduce[tx] = sum; KW_LOCAL_FENCE; for (unsigned int s = MULTI_NODE_SUM_BLOCK_SIZE / 2; s > 0; s >>= 1) { if (tx < s) { reduce[tx] += reduce[tx + s]; } KW_LOCAL_FENCE; } if (tx == 0) { dOut[outOffset + node] = reduce[0]; } #endif } KW_GLOBAL_KERNEL void kernelMultipleNodeSiteSquaredReduction(KW_GLOBAL_VAR REAL* dOut, KW_GLOBAL_VAR REAL* dIn, KW_GLOBAL_VAR REAL* dPatternWeights, int outOffset, int patternCount) { #ifdef FW_OPENCL_CPU // TODO #else KW_LOCAL_MEM REAL reduce[MULTI_NODE_SUM_BLOCK_SIZE]; int tx = KW_LOCAL_ID_0; int node = KW_GROUP_ID_0; int offset = patternCount * node; int pattern = tx; REAL sum = 0; while (pattern < patternCount) { REAL value = dIn[offset + pattern]; FMA(value * value, dPatternWeights[pattern], sum); pattern += MULTI_NODE_SUM_BLOCK_SIZE; } reduce[tx] = sum; KW_LOCAL_FENCE; for (unsigned int s = MULTI_NODE_SUM_BLOCK_SIZE / 2; s > 0; s >>= 1) { if (tx < s) { reduce[tx] += reduce[tx + s]; } KW_LOCAL_FENCE; } if (tx == 0) { dOut[outOffset + node] = reduce[0]; } #endif } //////////////////////////////////////////////////////////////////////////////////////////////// // scaling experiments kernels KW_GLOBAL_KERNEL void kernelAccumulateFactorsAutoScaling(KW_GLOBAL_VAR signed char* dScalingFactors, KW_GLOBAL_VAR unsigned int* dNodePtrQueue, KW_GLOBAL_VAR int* rootScaling, int nodeCount, int patternCount, int scaleBufferSize) { int pattern = KW_LOCAL_ID_0 + KW_GROUP_ID_0 * PATTERN_BLOCK_SIZE; int index = 
pattern + KW_GROUP_ID_1 * patternCount; int total = 0; KW_GLOBAL_VAR signed char* nodeScales; int n; for(n = 0; n < nodeCount; n++) { // int sIndex = dNodePtrQueue[n]; nodeScales = dScalingFactors + dNodePtrQueue[n] * scaleBufferSize; total += nodeScales[index]; } if (pattern < patternCount) rootScaling[index] = total; } #ifdef CUDA } // extern "C" #endif //CUDA
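// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original kernel file): one
// possible way to drive kernelSumSites2 when the KW_* portability macros are
// mapped to CUDA (KW_GROUP_ID_0 -> blockIdx.x, KW_LOCAL_ID_0 -> threadIdx.x,
// KW_LOCAL_FENCE -> __syncthreads()). REAL, SUM_SITES_BLOCK_SIZE and the
// kernel come from the code above; the helper name sumSitesTwoArrays and the
// guard macro are made up for this sketch and kept out of the normal build.
#ifdef ILLUSTRATIVE_SUM_SITES_HOST_SKETCH
#include <cuda_runtime.h>
#include <vector>

static void sumSitesTwoArrays(REAL* dArray1, REAL* dArray2,
                              REAL* dPatternWeights, int patternCount,
                              REAL* outSum1, REAL* outSum2) {
    // One work-group per SUM_SITES_BLOCK_SIZE patterns; each group emits one
    // partial sum per array into dSum1/dSum2.
    const int numBlocks =
        (patternCount + SUM_SITES_BLOCK_SIZE - 1) / SUM_SITES_BLOCK_SIZE;
    REAL *dSum1 = 0, *dSum2 = 0;
    cudaMalloc((void**)&dSum1, numBlocks * sizeof(REAL));
    cudaMalloc((void**)&dSum2, numBlocks * sizeof(REAL));

    kernelSumSites2<<<numBlocks, SUM_SITES_BLOCK_SIZE>>>(
        dArray1, dSum1, dArray2, dSum2, dPatternWeights, patternCount);

    // Finish the reduction over the per-block partial sums on the host.
    std::vector<REAL> h1(numBlocks), h2(numBlocks);
    cudaMemcpy(h1.data(), dSum1, numBlocks * sizeof(REAL), cudaMemcpyDeviceToHost);
    cudaMemcpy(h2.data(), dSum2, numBlocks * sizeof(REAL), cudaMemcpyDeviceToHost);
    *outSum1 = 0;
    *outSum2 = 0;
    for (int i = 0; i < numBlocks; ++i) {
        *outSum1 += h1[i];
        *outSum2 += h2[i];
    }
    cudaFree(dSum1);
    cudaFree(dSum2);
}
#endif // ILLUSTRATIVE_SUM_SITES_HOST_SKETCH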
__device__ static void geodesic(double* Variables, double* VariablesIn, double *y, double *dydx) { double r = y[0]; double theta = y[1]; double pr = y[4]; double ptheta = y[5]; double r2 = r * r; double twor = 2.0 * r; double sintheta = sin(theta); double costheta = cos(theta); double cos2 = costheta * costheta; double sin2 = sintheta * sintheta; double sigma = r2 + a2 * cos2; double delta = r2 - twor + a2; double sd = sigma * delta; double siginv = 1.0 / sigma; double bot = 1.0 / sd; //avoid problems near the axis if (sintheta < 1e-8) { sintheta = 1e-8; sin2 = 1e-16; } dydx[0] = -pr * delta * siginv; dydx[1] = -ptheta * siginv; dydx[2] = -(twor * A + (sigma - twor) * L / sin2) * bot; dydx[3] = -(1.0 + (twor * (r2 + a2) - twor * A * L) * bot); dydx[4] = -(((r - 1.0) * (-kappa) + twor * (r2 + a2) - 2.0 * A * L) * bot - 2.0 * pr * pr * (r - 1.0) * siginv); dydx[5] = -sintheta * costheta*(L * L / (sin2 * sin2) - a2) * siginv; } __device__ static void rkstep(double* Variables, double* VariablesIn,double *y, double *dydx, double h, double *yout, double *yerr) { int i; double ak[N]; double ytemp1[N], ytemp2[N], ytemp3[N], ytemp4[N], ytemp5[N]; double hdydx; double yi, yt; for (i = 0; i < N; i++) { hdydx = h * dydx[i]; yi = y[i]; ytemp1[i] = yi + 0.2 * hdydx; ytemp2[i] = yi + (3.0/40.0) * hdydx; ytemp3[i] = yi + 0.3 * hdydx; ytemp4[i] = yi -(11.0/54.0) * hdydx; ytemp5[i] = yi + (1631.0/55296.0) * hdydx; yout[i] = yi + (37.0/378.0) * hdydx; yerr[i] = ((37.0/378.0)-(2825.0/27648.0)) * hdydx; } geodesic(Variables, VariablesIn, ytemp1, ak); for (i = 0; i < N; i++) { yt = h * ak[i]; ytemp2[i] += (9.0/40.0) * yt; ytemp3[i] -= 0.9 * yt; ytemp4[i] += 2.5 * yt; ytemp5[i] += (175.0/512.0) * yt; } geodesic(Variables, VariablesIn, ytemp2, ak); for (i = 0; i < N; i++) { yt = h * ak[i]; ytemp3[i] += 1.2 * yt; ytemp4[i] -= (70.0/27.0) * yt; ytemp5[i] += (575.0/13824.0) * yt; yout[i] += (250.0/621.0) * yt; yerr[i] += ((250.0/621.0)-(18575.0/48384.0)) * yt; } geodesic(Variables, VariablesIn, ytemp3, ak); for (i = 0; i < N; i++) { yt = h * ak[i]; ytemp4[i] += (35.0/27.0) * yt; ytemp5[i] += (44275.0/110592.0) * yt; yout[i] += (125.0/594.0) * yt; yerr[i] += ((125.0/594.0)-(13525.0/55296.0)) * yt; } geodesic(Variables, VariablesIn, ytemp4, ak); for (i = 0; i < N; i++) { yt = h * ak[i]; ytemp5[i] += (253.0/4096.0) * yt; yerr[i] -= (277.0/14336.0) * yt; } geodesic(Variables, VariablesIn, ytemp5, ak); for (i = 0; i < N; i++) { yt = h * ak[i]; yout[i] += (512.0/1771.0) * yt; yerr[i] += ((512.0/1771.0)-0.25) * yt; } } __device__ static double rk5(double* Variables, double* VariablesIn, double *y, double *dydx, double htry, double escal, double *yscal, double *hdid) { int i; double hnext; double errmax, h = htry, htemp; double yerr[N], ytemp[N]; while (1) { // find adaptive step size rkstep(Variables, VariablesIn, y, dydx, h, ytemp, yerr); errmax = 0.0; for (i = 0; i < N; i++) { double temp = fabs(yerr[i]/yscal[i]); if (temp > errmax) errmax = temp; } errmax *= escal; if (errmax <= 1.0) break; htemp = 0.9 * h / sqrt(sqrt(errmax)); h *= 0.1; if (h >= 0.0) { if (htemp > h) h = htemp; } else { if (htemp < h) h = htemp; } } if (errmax > 1.89e-4) { hnext = 0.9 * h * pow(errmax, -0.2); } else { hnext = 5.0 * h; } *hdid = h; memcpy(y, ytemp, N * sizeof(double)); return hnext; } __device__ static void initial(double* Variables, double* VariablesIn, double *y0, double *ydot0) { double alpha = grid_x; double beta = grid_y; //see Eq[18-19], with phi_obs=0, x= alpha, y=beta, z=0 double x = sqrt(r0*r0+a2)*sin(theta0)-beta*cos(theta0); 
double y = alpha; double z = r0*cos(theta0)+beta*sin(theta0); double w = x*x+y*y+z*z-a2; //see Eq[20-22] y0[0] = sqrt((w+sqrt(w*w+(2.*A*z)*(2.*A*z)))/2.); y0[1] = acos(z/y0[0]); y0[2] = atan2(y,x); y0[3] = 0; double r = y0[0]; double theta = y0[1]; double phi=y0[2]; double sigma = r*r+(A*cos(theta))*(A*cos(theta)); double u = sqrt(a2+r*r); double v = -sin(theta0)*cos(phi); double zdot = -1.; //see Eq[24-26] double rdot0 = zdot*(-u*u*cos(theta0)*cos(theta)+r*u*v*sin(theta))/sigma; double thetadot0 = zdot*(cos(theta0)*r*sin(theta)+u*v*cos(theta))/sigma; double phidot0 = zdot*sin(theta0)*sin(phi)/(u*sin(theta)); ydot0[0] = rdot0; ydot0[1] = thetadot0; ydot0[2] = phidot0; double sintheta=sin(theta); double sin2 = sintheta*sintheta; double r2 = r * r; double delta = r2 - 2.0 * r + a2; double s1 = sigma - 2.0 * r; y0[4] = rdot0*sigma/delta; y0[5] = thetadot0*sigma; // Eq.7 double energy2 = s1*(rdot0*rdot0/delta+thetadot0*thetadot0) + delta*sin2*phidot0*phidot0; double energy = sqrt(energy2); // rescaled by energy y0[4] = y0[4]/energy; y0[5] = y0[5]/energy; // Eq.8 L = ((sigma*delta*phidot0-2.0*A*r*energy)*sin2/s1)/energy; // Eq below Eq.15 kappa = y0[5]*y0[5]+a2*sin2+L*L/sin2; } __device__ static float ISCO(double* VariablesIn) { double z1 = 1 + pow(1 - A * A, 1 / 3.0) * pow(1 + A, 1 / 3.0) + pow(1 - A, 1 / 3.0); double z2 = sqrt(3 * A * A + z1 * z1); return 3. + z2 - sqrt((3 - z1) * (3 + z1 + 2 * z2)); } // // functions for radiative transfer // #define Te_min 0.1 #define Te_max 100. #define Te_grids 50. static __device__ __constant__ double K2_tab[] = { -10.747001, //Te=0.1 -9.362569, -8.141373, -7.061568, -6.104060, -5.252153, -4.491244, -3.808555, -3.192909, -2.634534, -2.124893, -1.656543, -1.223007, -0.818668, -0.438676, -0.078863, +0.264332, +0.593930, +0.912476, +1.222098, +1.524560, +1.821311, +2.113537, +2.402193, +2.688050, +2.971721, +3.253692, +3.534347, +3.813984, +4.092839, +4.371092, +4.648884, +4.926323, +5.203493, +5.480457, +5.757264, +6.033952, +6.310550, +6.587078, +6.863554, +7.139990, +7.416395, +7.692778, +7.969143, +8.245495, +8.521837, +8.798171, +9.074500, +9.350824, +9.627144 //Te=87.09 }; __device__ static double K2_find(double Te) { double d = Te_grids*(log(Te / Te_min)/ log(Te_max / Te_min)); int i = floor(d); return (1 - (double)(d-i)) * K2_tab[i] + (double)(d-i) * K2_tab[i+1]; } __device__ static double K2(double Te) { double tab_K2; //avoid the boundary effect when T~T_max if (Te>85.){ tab_K2=2.*Te*Te; return tab_K2; } if (Te<Te_min){ //set a dummy value return exp(-11.); } tab_K2= K2_find(Te); return exp(tab_K2); } __device__ static double Jansky_Correction(double* VariablesIn,double ima_width) { double distance=C_sgrA_d*C_pc; double theta=atan(ima_width*C_sgrA_mbh*C_rgeo/distance); double pix_str=theta/(SIZE/2.)*theta/(SIZE/2.); //radian^2 (steradians) per pixel return pix_str/C_Jansky; } __device__ static double Luminosity_Correction(double* VariablesIn,double ima_width) { double distance=C_sgrA_d*C_pc; double theta=atan(ima_width*C_sgrA_mbh*C_rgeo/distance); double pix_str=theta/(SIZE/2.)*theta/(SIZE/2.); //radian^2 (steradians) per pixel return pix_str*distance*distance*4.*PI*freq_obs; } __device__ double task1fun_GetZ(double* Variables, double* VariablesIn, double *y) { double r1 = y[0]; double E_local = -(r1 * r1 + A * sqrt(r1)) / (r1 * sqrt(r1 * r1 - 3. * r1 + 2. * A * sqrt(r1))) + L / sqrt(r1) / sqrt(r1 * r1 - 3. * r1 +2. 
* A * sqrt(r1)); double E_inf = -1.0; return E_local / E_inf; } __global__ void task1(double*__restrict ResultsPixel, double*__restrict VariablesIn, int GridIdxX, int GridIdxY) { // to check whether the photon is inside image plane if(X1 >= SIZE || Y1 >= SIZE) return; double Variables[VarNUM]; r0 = 1000.0; theta0 = (PI/180.0) * INCLINATION; a2 = A * A; Rhor = 1.0 + sqrt(1.0 - a2) + 1e-5; Rmstable = ISCO(VariablesIn); double htry = 0.5, escal = 1e14, hdid = 0.0, hnext = 0.0; double y[N], dydx[N], yscal[N], ylaststep[N]; double Rdisk = 50.; double ima_width = 55.; double s1 = ima_width; double s2 = 2.*ima_width/((int)SIZE+1.); grid_x = -s1 + s2*(X1+1.); grid_y = -s1 + s2*(Y1+1.); initial(Variables, VariablesIn, y, dydx); ResultsPixel(0) = grid_x; ResultsPixel(1) = grid_y; ResultsPixel(2) = 0; while (1) { for(int i = 0; i < N; i++) ylaststep[i] = y[i]; geodesic(Variables, VariablesIn, y, dydx); for (int i = 0; i < N; i++) yscal[i] = fabs(y[i]) + fabs(dydx[i] * htry) + 1.0e-3; //fifth-order Runge-Kutta method hnext = rk5(Variables, VariablesIn, y, dydx, htry, escal, yscal, &hdid); // hit the disk, compute redshift if( y[0] < Rdisk && y[0] > Rmstable && (ylaststep[1] - PI/2.) * (y[1] - PI/2.) < 0. ) { ResultsPixel(2) = 1./task1fun_GetZ(Variables, VariablesIn, y); break; } // Inside the event horizon radius or escape to infinity if ((y[0] > r0) && (dydx[0]>0)) break; if (y[0] < Rhor) break; htry = hnext; } } __device__ double task2fun_GetZ(double* Variables, double* VariablesIn, double *y) { double ut,uphi,ur,E_local; double E_inf= -1.0; double r=y[0]; double theta=y[1]; double pr=y[4]; double r2 = r*r; double twor = 2.0*r; double sintheta, costheta; sintheta=sin(theta); costheta=cos(theta); double cos2 = costheta*costheta; double sin2 = sintheta*sintheta; double sigma = r2+a2*cos2; double delta = r2-twor+a2; double ssig=(r2+a2)*(r2+a2)-a2*delta*sin2; //======define (covariant) metric component double gtt=-(1.-2.*r/sigma); double gtph=-2.*A*r*sin2/sigma; double grr=sigma/delta; double gphph=ssig*sin2/sigma; //==========Keplerian flow: outside ISCO double ut_k =(r*r+A*sqrt(r))/(r*sqrt(r*r-3.*r+2.*A*sqrt(r))); double ur_k =0.; double uphi_k =1./(sqrt(r)*sqrt(r*r-3.*r+2.*A*sqrt(r))); //==========Keplerian flow: inside ISCO if( r<Rmstable) { double delta = r*r-2.*r+a2; double lambda=(Rmstable*Rmstable-2.*A*sqrt(Rmstable)+a2)/(sqrt(Rmstable*Rmstable*Rmstable)-2.*sqrt(Rmstable)+A); double gamma=sqrt(1-2./3./Rmstable); double h=(2.*r-A*lambda)/delta; ut_k=gamma*(1.+2/r*(1.+h)); ur_k=-sqrt(2./3./Rmstable)*sqrt(pow((Rmstable/r-1.),3.)); uphi_k=gamma/r/r*(lambda+A*h); } //normalize the four-velocity ut = ut_k; uphi = uphi_k; ur = ur_k; double omega = uphi/ut; double k0 = -(gtt + omega*omega*gphph + 2.*omega*gtph); ut = sqrt(((1. 
+ grr*ur*ur) / k0)); uphi = omega*ut; // compute redshift E_local=-ut+L*uphi+pr*ur; return E_local/E_inf; } __global__ void task2(double*__restrict ResultsPixel, double*__restrict VariablesIn, int GridIdxX, int GridIdxY) { if(X1 >= SIZE || Y1 >= SIZE) return; double Variables[VarNUM]; r0 = 1000.0; theta0 = (PI/180.0) * INCLINATION; a2 = A * A; Rhor = 1.0 + sqrt(1.0 - a2) + 1e-5; Rmstable = ISCO(VariablesIn); double htry = 0.5, escal = 1e14, hdid = 0.0, hnext = 0.0; double y[N], dydx[N], yscal[N]; double Rdisk = 500.; double ima_width = 10.; double s1 = ima_width; double s2 = 2.*ima_width/((int)SIZE+1.); double Jy_corr=Jansky_Correction(VariablesIn,ima_width); double L_corr=Luminosity_Correction(VariablesIn,ima_width); grid_x = -s1 + s2*(X1+1.); grid_y = -s1 + s2*(Y1+1.); initial(Variables, VariablesIn, y, dydx); ResultsPixel(0) = grid_x; ResultsPixel(1) = grid_y; ResultsPixel(2) = 0; double ds=0.; double dtau=0.; double dI=0.; while (1) { geodesic(Variables, VariablesIn, y, dydx); for (int i = 0; i < N; i++) yscal[i] = fabs(y[i]) + fabs(dydx[i] * htry) + 1.0e-3; hnext = rk5(Variables, VariablesIn, y, dydx, htry, escal, yscal, &hdid); if ((y[0] > r0) && (dydx[0]>0)){ ResultsPixel(2) = dI*freq_obs*freq_obs*freq_obs*L_corr; break; } if (y[0] < Rhor){ ResultsPixel(2) = dI*freq_obs*freq_obs*freq_obs*L_corr; break; } double r=y[0]; double theta=y[1]; if(y[0]<Rdisk){ double zzz = task2fun_GetZ(Variables, VariablesIn, y); //zzz=E_em/E_obs double freq_local = freq_obs*zzz; // thermal synchrotron double nth0=3e7; double zc=r*cos(theta); double rc=r*sin(theta); double nth=nth0*exp(-zc*zc/2./rc/rc)*pow(r,-1.1); double Te=1.7e11*pow(r,-0.84); double b=sqrt(8.*PI*0.1*nth*C_mp*C_c*C_c/6./r); double vb=C_e*b/2./PI/C_me/C_c; double theta_E= C_kB*Te/C_me/C_c/C_c; double v=freq_local; double x=2.*v/3./vb/theta_E/theta_E; double K_value=K2(theta_E); double comp1=4.*PI*nth*C_e*C_e*v/sqrt(3.)/K_value/C_c; double comp2=4.0505/pow(x,(1./6.))*(1.+0.4/pow(x,0.25)+0.5316/sqrt(x))*exp(-1.8899*pow(x,1./3.)); double j_nu=comp1*comp2; double B_nu=2.0*v*v*v*C_h/C_c/C_c/(exp(C_h*v/C_kB/Te)-1.0); // integrate intensity along ray ds = htry; dtau = dtau + ds*C_sgrA_mbh*C_rgeo*j_nu/B_nu*zzz; dI = dI + ds*C_sgrA_mbh*C_rgeo*j_nu/freq_local/freq_local/freq_local*exp(-dtau)*zzz; } htry = hnext; } }
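// ---------------------------------------------------------------------------
// Illustrative restatement (not part of the original source) of the adaptive
// step-size control used by rk5 above. The embedded Cash-Karp error estimate
// yerr[] is divided by yscal[] and multiplied by escal (escal = 1e14 in task1
// and task2, i.e. a relative tolerance of roughly 1e-14); a step is accepted
// when that scaled error is <= 1. The two helpers below mirror the reject and
// accept branches of rk5; their names and the guard macro are made up here.
#ifdef ILLUSTRATIVE_STEP_CONTROL_SKETCH
#include <math.h>

__host__ __device__ static double shrinkStepAfterReject(double h, double errmax)
{
    // rk5 retries with h_new = 0.9 * h * errmax^(-1/4), but never shrinks the
    // step by more than a factor of 10 in one retry.
    double htemp = 0.9 * h / sqrt(sqrt(errmax));
    double hfloor = 0.1 * h;
    if (h >= 0.0)
        return (htemp > hfloor) ? htemp : hfloor;
    else
        return (htemp < hfloor) ? htemp : hfloor;
}

__host__ __device__ static double growStepAfterAccept(double h, double errmax)
{
    // After an accepted step rk5 proposes hnext = 0.9 * h * errmax^(-1/5),
    // capped at 5*h when the error was very small.
    return (errmax > 1.89e-4) ? 0.9 * h * pow(errmax, -0.2) : 5.0 * h;
}
#endif // ILLUSTRATIVE_STEP_CONTROL_SKETCH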
#include <cuda_runtime_api.h> #include <stdio.h> #include "cuda_dxt.h" typedef unsigned int u32; typedef u32 UINT; struct vec3; struct uvec2; struct uvec3 { u32 r, g, b; __device__ uvec3(u32 r, u32 g, u32 b) : r(r), g(g), b(b) {} __device__ uvec3(u32 n = 0) : r(n), g(n), b(n) {} __device__ uvec3(const vec3 & v); }; struct vec2 { float r, g; __device__ vec2(float x, float y) : r(x), g(y) {} __device__ vec2(float n = 0.0f) : r(n), g(n) {} __device__ vec2(const vec3 & v); __device__ vec2(const uvec2 & v); __device__ vec2 operator+(const vec2 & o) const { return vec2(r + o.r, g + o.g); } __device__ vec2 operator*(const vec2 & v) const { return vec2(r * v.r, g * v.g); } __device__ vec2 operator*(const float n) const { return *this * vec2(n); } __device__ vec2 operator-(const vec2 & o) const { return *this + (o * -1.0f); } __device__ vec2 operator/(const float n) const { return *this * (1.0f / n); } }; struct uvec2 { u32 r, g; __device__ uvec2(u32 x, u32 y) : r(x), g(y) {} __device__ uvec2(u32 n = 0) : r(n), g(n) {} __device__ uvec2(const vec2 & v); }; struct vec3 : public vec2 { float b; __device__ vec3(float x, float y, float z) : vec2(x, y), b(z) {} __device__ vec3(float n = 0.0f) : vec2(n), b(n) {} __device__ vec3(const vec2 & v) : vec2(v), b(0.0f) {} __device__ vec3(const uvec3 & v) : vec2(v.r, v.g), b(v.b) {} __device__ vec3 operator+(const vec3 & o) const { return vec3(r + o.r, g + o.g, b + o.b); } __device__ vec3 operator*(const vec3 & v) const { return vec3(r * v.r, g * v.g, b * v.b); } __device__ vec3 operator*(const float n) const { return *this * vec3(n); } __device__ vec3 operator-(const vec3 & o) const { return *this + (o * -1.0f); } __device__ vec3 operator/(const float n) const { return *this * (1.0f / n); } // __device__ vec2 & yz() { // return *this; // } __device__ vec2 gb() const { return vec2(g, b); } }; __device__ uvec3::uvec3(const vec3 & v) : r(v.r), g(v.g), b(v.b) {} __device__ uvec2::uvec2(const vec2 & v) : r(v.r), g(v.g) {} __device__ vec2::vec2(const vec3 & v) : r(v.r), g(v.g) {} __device__ vec2::vec2(const uvec2 & v) : r(v.r), g(v.g) {} __device__ static vec3 min(const vec3 & a, const vec3 & b) { return vec3(min(a.r, b.r), min(a.g, b.g), min(a.b, b.b)); } __device__ static vec3 max(const vec3 & a, const vec3 & b) { return vec3(max(a.r, b.r), max(a.g, b.g), max(a.b, b.b)); } __device__ static vec2 min(const vec2 & a, const vec2 & b) { return vec2(min(a.r, b.r), min(a.g, b.g)); } __device__ static vec2 max(const vec2 & a, const vec2 & b) { return vec2(max(a.r, b.r), max(a.g, b.g)); } __device__ static float dot(const vec3 & a, const vec3 & b) { return a.r * b.r + a.g * b.g + a.b * b.b; } __device__ static vec2 clamp(const vec2 & v, float min_val, float max_val) { return min(vec2(max_val), max(vec2(min_val), v)); } __device__ static float clamp(const float & v, float min_val, float max_val) { return min(max_val, max(min_val, v)); } __device__ static vec2 abs(const vec2 & v) { return vec2(fabsf(v.r), fabsf(v.g)); } __device__ static vec3 round(const vec3 & v) { return vec3(roundf(v.r), roundf(v.g), roundf(v.b)); } __device__ static vec3 lerp(const vec3 & a, const vec3 & b, const float q) { return a * (1.0f - q) + b * q; } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// 
/////////////////////////////////////////////////////////////////////////////// __device__ static const float offset = 128.0 / 255.0; __device__ static vec3 ConvertRGBToYCoCg(vec3 color) { float Y = (color.r + 2.0 * color.g + color.b) * 0.25; float Co = ( ( 2.0 * color.r - 2.0 * color.b ) * 0.25 + offset ); float Cg = ( ( -color.r + 2.0 * color.g - color.b) * 0.25 + offset ); return vec3(Y, Co, Cg); } // __device__ static float colorDistance(vec3 c0, vec3 c1) // { // return dot(c0-c1, c0-c1); // } __device__ static float colorDistance(vec2 c0, vec2 c1) { return dot(c0-c1, c0-c1); } __device__ static void FindMinMaxColorsBox(vec3 block[16], vec3 & mincol, vec3 & maxcol) { mincol = block[0]; maxcol = block[0]; for ( int i = 1; i < 16; i++ ) { mincol = min(mincol, block[i]); maxcol = max(maxcol, block[i]); } } // __device__ static void InsetBBox(vec3 & mincol, vec3 & maxcol) // { // vec3 inset = (maxcol - mincol) / 16.0 - (8.0 / 255.0) / 16.0; // mincol = clamp(mincol + inset, 0.0, 1.0); // maxcol = clamp(maxcol - inset, 0.0, 1.0); // } __device__ static void InsetYBBox(float & mincol, float & maxcol) { float inset = (maxcol - mincol) / 32.0 - (16.0 / 255.0) / 32.0; mincol = clamp(mincol + inset, 0.0, 1.0); maxcol = clamp(maxcol - inset, 0.0, 1.0); } __device__ static void InsetCoCgBBox(vec2 & mincol, vec2 & maxcol) { vec2 inset = (maxcol - mincol) / 16.0 - (8.0 / 255.0) / 16.0; mincol = clamp(mincol + inset, 0.0, 1.0); maxcol = clamp(maxcol - inset, 0.0, 1.0); } // __device__ static void SelectDiagonal(vec3 block[16], vec3 & mincol, vec3 & maxcol) // { // vec3 center = (mincol + maxcol) * 0.5; // // vec2 cov = vec2(0, 0); // for (int i = 0; i < 16; i++) { // vec3 t = block[i] - center; // cov.r += t.r * t.b; // cov.g += t.g * t.b; // } // // if (cov.r < 0.0) { // float temp = maxcol.r; // maxcol.r = mincol.r; // mincol.r = temp; // } // if (cov.g < 0.0) { // float temp = maxcol.g; // maxcol.g = mincol.g; // mincol.g = temp; // } // } // __device__ static vec3 RoundAndExpand(vec3 v, UINT & w) // { // uvec3 c = uvec3(round(v * vec3(31, 63, 31))); // w = (c.r << 11u) | (c.g << 5u) | c.b; // // c.r = (c.r << 3u) | (c.r >> 2u); // c.b = (c.b << 3u) | (c.b >> 2u); // c.g = (c.g << 2u) | (c.g >> 4u); // // return vec3(c) * (1.0 / 255.0); // } // __device__ static UINT EmitEndPointsDXT1(vec3 & mincol, vec3 & maxcol) // { // uvec2 outp; // maxcol = RoundAndExpand(maxcol, outp.r); // mincol = RoundAndExpand(mincol, outp.g); // // // We have to do this in case we select an alternate diagonal. 
// if (outp.r < outp.g) { // vec3 tmp = mincol; // mincol = maxcol; // maxcol = tmp; // return outp.g | (outp.r << 16u); // } // // return outp.r | (outp.g << 16u); // } __device__ static UINT ScaleYCoCg(vec2 minColor, vec2 maxColor) { vec2 m0 = abs(minColor - offset); vec2 m1 = abs(maxColor - offset); float m = max(max(m0.r, m0.g), max(m1.r, m1.g)); const float s0 = 64.0 / 255.0; const float s1 = 32.0 / 255.0; UINT scale = 1u; if ( m < s0 ) scale = 2u; if ( m < s1 ) scale = 4u; return scale; } __device__ static bool SelectYCoCgDiagonal(const vec3 block[16], vec2 minColor, vec2 maxColor) { vec2 mid = (maxColor + minColor) * 0.5; float cov = 0.0; for ( int i = 0; i < 16; i++ ) { vec2 t = block[i].gb() - mid; cov += t.r * t.g; } return cov < 0.0; } __device__ static UINT EmitEndPointsYCoCgDXT5(float & mincol_r, float & mincol_g, float & maxcol_r, float & maxcol_g, UINT scale) { vec2 mincol = vec2(mincol_r, mincol_g); vec2 maxcol = vec2(maxcol_r, maxcol_g); maxcol = (maxcol - offset) * float(scale) + float(offset); mincol = (mincol - offset) * float(scale) + float(offset); InsetCoCgBBox(mincol, maxcol); maxcol = round(maxcol * vec2(31, 63)); mincol = round(mincol * vec2(31, 63)); uvec2 imaxcol = uvec2(maxcol); uvec2 imincol = uvec2(mincol); uvec2 outp; outp.r = (imaxcol.r << 11u) | (imaxcol.g << 5u) | (scale - UINT(1)); outp.g = (imincol.r << 11u) | (imincol.g << 5u) | (scale - UINT(1)); imaxcol.r = (imaxcol.r << 3u) | (imaxcol.r >> 2u); imaxcol.g = (imaxcol.g << 2u) | (imaxcol.g >> 4u); imincol.r = (imincol.r << 3u) | (imincol.r >> 2u); imincol.g = (imincol.g << 2u) | (imincol.g >> 4u); maxcol = vec2(imaxcol) * (1.0 / 255.0); mincol = vec2(imincol) * (1.0 / 255.0); // Undo rescale. maxcol = (maxcol - offset) / float(scale) + float(offset); mincol = (mincol - offset) / float(scale) + float(offset); // distribute back mincol_r = mincol.r; mincol_g = mincol.g; maxcol_r = maxcol.r; maxcol_g = maxcol.g; return outp.r | (outp.g << 16u); } __device__ static UINT EmitIndicesYCoCgDXT5(vec3 block[16], vec2 mincol, vec2 maxcol) { // Compute palette vec2 c[4]; c[0] = maxcol; c[1] = mincol; c[2] = lerp(c[0], c[1], 1.0/3.0); c[3] = lerp(c[0], c[1], 2.0/3.0); // Compute indices UINT indices = 0u; for ( int i = 0; i < 16; i++ ) { // find index of closest color float4 dist; dist.x = colorDistance(block[i].gb(), c[0]); dist.y = colorDistance(block[i].gb(), c[1]); dist.z = colorDistance(block[i].gb(), c[2]); dist.w = colorDistance(block[i].gb(), c[3]); uint4 b; b.x = dist.x > dist.w ? 1u : 0u; b.y = dist.y > dist.z ? 1u : 0u; b.z = dist.x > dist.z ? 1u : 0u; b.w = dist.y > dist.w ? 1u : 0u; UINT b4 = dist.z > dist.w ? 1u : 0u; UINT index = (b.x & b4) | (((b.y & b.z) | (b.x & b.w)) << 1u); indices |= index << (UINT(i) * 2u); } // Output indices return indices; } __device__ static UINT EmitAlphaEndPointsYCoCgDXT5(float mincol, float maxcol) { uvec2 tmp = uvec2(roundf(mincol * 255.0), roundf(maxcol * 255.0)); UINT c0 = tmp.r; UINT c1 = tmp.g; return (c0 << 8u) | c1; } // Version shown in the YCoCg-DXT article.
__device__ static uvec2 EmitAlphaIndicesYCoCgDXT5(vec3 block[16], float minAlpha, float maxAlpha) { const float ALPHA_RANGE = 7.0; float mid = (maxAlpha - minAlpha) / (2.0 * ALPHA_RANGE); float ab1 = minAlpha + mid; float ab2 = (6.0 * maxAlpha + 1.0 * minAlpha) * (1.0 / ALPHA_RANGE) + mid; float ab3 = (5.0 * maxAlpha + 2.0 * minAlpha) * (1.0 / ALPHA_RANGE) + mid; float ab4 = (4.0 * maxAlpha + 3.0 * minAlpha) * (1.0 / ALPHA_RANGE) + mid; float ab5 = (3.0 * maxAlpha + 4.0 * minAlpha) * (1.0 / ALPHA_RANGE) + mid; float ab6 = (2.0 * maxAlpha + 5.0 * minAlpha) * (1.0 / ALPHA_RANGE) + mid; float ab7 = (1.0 * maxAlpha + 6.0 * minAlpha) * (1.0 / ALPHA_RANGE) + mid; uvec2 indices = uvec2(0, 0); UINT index = 1u; for ( int i = 0; i < 6; i++ ) { float a = block[i].r; index = 1u; index += (a <= ab1) ? 1u : 0u; index += (a <= ab2) ? 1u : 0u; index += (a <= ab3) ? 1u : 0u; index += (a <= ab4) ? 1u : 0u; index += (a <= ab5) ? 1u : 0u; index += (a <= ab6) ? 1u : 0u; index += (a <= ab7) ? 1u : 0u; index &= 7u; index ^= (2u > index) ? 1u : 0u; indices.r |= index << (3u * UINT(i) + 16u); } indices.g = index >> 1u; for ( int i = 6; i < 16; i++ ) { float a = block[i].r; index = 1u; index += (a <= ab1) ? 1u : 0u; index += (a <= ab2) ? 1u : 0u; index += (a <= ab3) ? 1u : 0u; index += (a <= ab4) ? 1u : 0u; index += (a <= ab5) ? 1u : 0u; index += (a <= ab6) ? 1u : 0u; index += (a <= ab7) ? 1u : 0u; index &= 7u; index ^= (2u > index) ? 1u : 0u; indices.g |= index << (3u * UINT(i) - 16u); } return indices; } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// /// Encodes color palette endpoint into 565 code and adjusts input values. __device__ static u32 encode_endpoint(float & r, float & g, float & b) { // clamp to range [0,1] and use full output range for each component r = rintf(__saturatef(r) * 31.0f); g = rintf(__saturatef(g) * 63.0f); // 6 bits for green sample b = rintf(__saturatef(b) * 31.0f); // compose output 16bit code representing the endpoint color const u32 code = ((u32)r << 11) + ((u32)g << 5) + (u32)b; // convert all 3 endpoint component samples back to unit range r *= 0.0322580645161f; // divide by 31 g *= 0.015873015873f; // divide by 63 b *= 0.0322580645161f; // divide by 31 // return output 16bit code for the endpoint return code; } /// Transform YUV to RGB. __device__ static void yuv_to_rgb(float & r, float & g, float & b) { const float y = 1.1643f * (r - 0.0625f); // TODO: convert to FFMA const float u = g - 0.5f; const float v = b - 0.5f; r = y + 1.7926f * v; g = y - 0.2132f * u - 0.5328f * v; b = y + 2.1124f * u; } /// Swaps two referenced values. template <typename T> __device__ static void swap(T & a, T & b) { const T temp = a; a = b; b = temp; } /// Encodes and saves the block. 
template <int DXT_TYPE> __device__ void dxt_encode(void * out, const int block_idx, float r[16], float g[16], float b[16]); /// Encodes the block into DXT6 format (DXT5-YcOcG) and saves it into output /// buffer template <> __device__ void dxt_encode<6>(void * out, const int block_idx, float r[16], float g[16], float b[16]) { // Read block of data vec3 block[16]; for(int i = 0; i < 16; i++) { block[i] = ConvertRGBToYCoCg(vec3(r[i], g[i], b[i])); } // Find min and max colors vec3 mincol, maxcol; FindMinMaxColorsBox(block, mincol, maxcol); if(SelectYCoCgDiagonal(block, mincol.gb(), maxcol.gb())) { float tmp = maxcol.b; maxcol.b = mincol.b; mincol.b = tmp; } u32 scale = ScaleYCoCg(mincol.gb(), maxcol.gb()); // printf("Scale: %u.\n", scale); // Output CoCg in DXT1 block. uint4 outp; outp.z = EmitEndPointsYCoCgDXT5(mincol.g, mincol.b, maxcol.g, maxcol.b, scale); outp.w = EmitIndicesYCoCgDXT5(block, mincol.gb(), maxcol.gb()); InsetYBBox(mincol.r, maxcol.r); // Output Y in DXT5 alpha block. outp.x = EmitAlphaEndPointsYCoCgDXT5(mincol.r, maxcol.r); uvec2 indices = EmitAlphaIndicesYCoCgDXT5(block, mincol.r, maxcol.r); outp.x |= indices.r; outp.y = indices.g; ((uint4*)out)[block_idx] = outp; } /// Encodes the block into DXT1 format and saves it into output buffer template <> __device__ void dxt_encode<1>(void * out, const int block_idx, float r[16], float g[16], float b[16]) { // find min and max sample values for each component float mincol_r = r[0]; float mincol_g = g[0]; float mincol_b = b[0]; float maxcol_r = r[0]; float maxcol_g = g[0]; float maxcol_b = b[0]; for(int i = 1; i < 16; i++) { mincol_r = min(mincol_r, r[i]); mincol_g = min(mincol_g, g[i]); mincol_b = min(mincol_b, b[i]); maxcol_r = max(maxcol_r, r[i]); maxcol_g = max(maxcol_g, g[i]); maxcol_b = max(maxcol_b, b[i]); } // inset the bounding box const float inset_r = (maxcol_r - mincol_r) * 0.0625f; const float inset_g = (maxcol_g - mincol_g) * 0.0625f; const float inset_b = (maxcol_b - mincol_b) * 0.0625f; mincol_r += inset_r; mincol_g += inset_g; mincol_b += inset_b; maxcol_r -= inset_r; maxcol_g -= inset_g; maxcol_b -= inset_b; // select diagonal const float center_r = (mincol_r + maxcol_r) * 0.5f; const float center_g = (mincol_g + maxcol_g) * 0.5f; const float center_b = (mincol_b + maxcol_b) * 0.5f; float cov_x = 0.0f; float cov_y = 0.0f; for(int i = 0; i < 16; i++) { const float dir_r = r[i] - center_r; const float dir_g = g[i] - center_g; const float dir_b = b[i] - center_b; cov_x += dir_r * dir_b; cov_y += dir_g * dir_b; } if(cov_x < 0.0f) { swap(maxcol_r, mincol_r); } if(cov_y < 0.0f) { swap(maxcol_g, mincol_g); } // encode both endpoints into 565 color format const u32 max_code = encode_endpoint(maxcol_r, maxcol_g, maxcol_b); const u32 min_code = encode_endpoint(mincol_r, mincol_g, mincol_b); // swap palette end colors if 'max' code is less than 'min' color code // (Palette color #3 would otherwise be interpreted as 'transparent'.) const bool swap_end_colors = max_code < min_code; // encode the palette into 32 bits (Only 2 end colors are stored.) const u32 palette_code = swap_end_colors ? min_code + (max_code << 16): max_code + (min_code << 16); // pack palette color indices (if both endpoint colors are not equal) u32 indices = 0; if(max_code != min_code) { // project each color to line maxcol-mincol, represent it as // "mincol + t * (maxcol - mincol)" and then use 't' to find closest // palette color index. 
const float dir_r = mincol_r - maxcol_r; const float dir_g = mincol_g - maxcol_g; const float dir_b = mincol_b - maxcol_b; const float dir_sqr_len = dir_r * dir_r + dir_g * dir_g + dir_b * dir_b; const float dir_inv_sqr_len = __fdividef(1.0f, dir_sqr_len); const float t_r = dir_r * dir_inv_sqr_len; const float t_g = dir_g * dir_inv_sqr_len; const float t_b = dir_b * dir_inv_sqr_len; const float t_bias = t_r * maxcol_r + t_g * maxcol_g + t_b * maxcol_b; // for each pixel color: for(int i = 0; i < 16; i++) { // get 't' for the color const float col_t = r[i] * t_r + g[i] * t_g + b[i] * t_b - t_bias; // scale the range of the 't' to [0..3] and convert to integer // to get the index of palette color const u32 col_idx = (u32)(3.0f * __saturatef(col_t) + 0.5f); // pack the color palette index with others indices += col_idx << (i * 2); } } // possibly invert indices if end colors must be swapped if(swap_end_colors) { indices = ~indices; } // substitute all packed indices (each index is packed into two bits) // 00 -> 00, 01 -> 10, 10 -> 11 and 11 -> 01 const u32 lsbs = indices & 0x55555555; const u32 msbs = indices & 0xaaaaaaaa; indices = msbs ^ (2 * lsbs + (msbs >> 1)); // compose and save output ((uint2*)out)[block_idx] = make_uint2(palette_code, indices); } /// DXT compression - each thread compresses one 4x4 DXT block. /// Alpha-color palette mode is not used (always emmits 4color palette code). template <bool YUV_TO_RGB, bool VERTICAL_MIRRORING, int DXT_TYPE> __global__ static void dxt_kernel(const void * src, void * out, int size_x, int size_y) { // coordinates of this thread's 4x4 block const int block_idx_x = threadIdx.x + blockIdx.x * blockDim.x; const int block_idx_y = threadIdx.y + blockIdx.y * blockDim.y; // coordinates of block's top-left pixel const int block_x = block_idx_x * 4; const int block_y = block_idx_y * 4; // raster order index of the block const int block_idx = block_idx_x + (size_x >> 2) * block_idx_y; // skip if out of bounds if(block_y >= size_y || block_x >= size_x) { return; } // samples of 16 pixels float r[16]; float g[16]; float b[16]; // load RGB samples for all 16 input pixels const int src_stride = (size_x >> 2) * 3; for(int y = 0; y < 4; y++) { // offset of loaded pixels in the buffer const int load_offset = y * 4; // pointer to source of this input row int row_idx = block_y + y; if(VERTICAL_MIRRORING) { row_idx = size_y - 1 - row_idx; } const uchar4 * const row_src = (uchar4*)src + src_stride * row_idx + block_idx_x * 3; // load all 4 3component pixels of the row const uchar4 p0 = row_src[0]; const uchar4 p1 = row_src[1]; const uchar4 p2 = row_src[2]; // pixel #0 r[load_offset + 0] = p0.x * 0.00392156862745f; g[load_offset + 0] = p0.y * 0.00392156862745f; b[load_offset + 0] = p0.z * 0.00392156862745f; // pixel #1 r[load_offset + 1] = p0.w * 0.00392156862745f; g[load_offset + 1] = p1.x * 0.00392156862745f; b[load_offset + 1] = p1.y * 0.00392156862745f; // pixel #2 r[load_offset + 2] = p1.z * 0.00392156862745f; g[load_offset + 2] = p1.w * 0.00392156862745f; b[load_offset + 2] = p2.x * 0.00392156862745f; // pixel #3 r[load_offset + 3] = p2.y * 0.00392156862745f; g[load_offset + 3] = p2.z * 0.00392156862745f; b[load_offset + 3] = p2.w * 0.00392156862745f; } // transform colors from YUV to RGB if required if(YUV_TO_RGB) { for(int i = 0; i < 16; i++) { yuv_to_rgb(r[i], g[i], b[i]); } } // Select the right DXT type transform dxt_encode<DXT_TYPE>(out, block_idx, r, g, b); } __global__ static void yuv422_to_yuv444_kernel(const void * src, void * out, int pix_count) { // 
coordinates of this thread const int block_idx_x = threadIdx.x + blockIdx.x * blockDim.x; // skip if out of bounds if(block_idx_x >= pix_count / 2) { return; } uchar4 *this_src = ((uchar4 *) src) + block_idx_x * 2; uchar4 *this_out = ((uchar4 *) out) + block_idx_x * 3; uchar4 pix12 = this_src[0]; uchar4 pix34 = this_src[1]; uchar4 out_pix[3]; out_pix[0].x = pix12.y; out_pix[0].y = pix12.x; out_pix[0].z = pix12.z; out_pix[0].w = pix12.w; out_pix[1].x = pix12.x; out_pix[1].y = pix12.z; out_pix[1].z = pix34.y; out_pix[1].w = pix34.x; out_pix[2].x = pix34.z; out_pix[2].y = pix34.w; out_pix[2].z = pix34.x; out_pix[2].w = pix34.z; this_out[0] = out_pix[0]; this_out[1] = out_pix[1]; this_out[2] = out_pix[2]; } /// Compute grid size and launch DXT kernel. template <bool YUV_TO_RGB, int DXT_TYPE> static int dxt_launch(const void * src, void * out, int sx, int sy, cudaStream_t str) { // vertical mirroring? bool mirrored = false; if(sy < 0) { mirrored = true; sy = -sy; } // check image size and alignment if((sx & 3) || (sy & 3) || (15 & (size_t)src) || (7 & (size_t)out)) { return -1; } // grid and threadblock sizes const dim3 tsiz(16, 16); const dim3 gsiz((sx + tsiz.x - 1) / tsiz.x, (sy + tsiz.y - 1) / tsiz.y); // launch kernel, sync and check the result if(mirrored) { dxt_kernel<YUV_TO_RGB, true, DXT_TYPE><<<gsiz, tsiz, 0, str>>>(src, out, sx, sy); } else { dxt_kernel<YUV_TO_RGB, false, DXT_TYPE><<<gsiz, tsiz, 0, str>>>(src, out, sx, sy); } return cudaSuccess != cudaStreamSynchronize(str) ? -3 : 0; } CUDA_DLL_API int cuda_yuv422_to_yuv444(const void * src, void * out, int pix_count, cuda_wrapper_stream_t str) { // grid and threadblock sizes const dim3 tsiz(64, 1); int thread_count = pix_count / 4; // we process block of 4 pixels const dim3 gsiz((thread_count + tsiz.x - 1) / tsiz.x, 1); yuv422_to_yuv444_kernel<<<gsiz, tsiz, 0, (cudaStream_t) str>>>(src, out, pix_count); return cudaSuccess != cudaStreamSynchronize((cudaStream_t) str) ? -3 : 0; } /// CUDA DXT1 compression (only RGB without alpha). /// @param src Pointer to top-left source pixel in device-memory buffer. /// 8bit RGB samples are expected (no alpha and no padding). /// (Pointer must be aligned to multiples of 16 bytes.) /// @param out Pointer to output buffer in device memory. /// (Must be aligned to multiples of 8 bytes.) /// @param size_x Width of the input image (must be divisible by 4). /// @param size_y Height of the input image (must be divisible by 4). /// @param stream CUDA stream to run in, or 0 for default stream. /// @return 0 if OK, nonzero if failed. CUDA_DLL_API int cuda_rgb_to_dxt1(const void * src, void * out, int size_x, int size_y, cuda_wrapper_stream_t stream) { return dxt_launch<false, 1>(src, out, size_x, size_y, (cudaStream_t) stream); } /// CUDA DXT1 compression (only RGB without alpha). /// Converts input from YUV to RGB color space. /// @param src Pointer to top-left source pixel in device-memory buffer. /// 8bit RGB samples are expected (no alpha and no padding). /// (Pointer must be aligned to multiples of 16 bytes.) /// @param out Pointer to output buffer in device memory. /// (Must be aligned to multiples of 8 bytes.) /// @param size_x Width of the input image (must be divisible by 4). /// @param size_y Height of the input image (must be divisible by 4). /// @param stream CUDA stream to run in, or 0 for default stream. /// @return 0 if OK, nonzero if failed. 
CUDA_DLL_API int cuda_yuv_to_dxt1(const void * src, void * out, int size_x, int size_y, cuda_wrapper_stream_t stream) { return dxt_launch<true, 1>(src, out, size_x, size_y, (cudaStream_t) stream); } /// CUDA DXT6 (DXT5-YcOcG) compression (only RGB without alpha). /// @param src Pointer to top-left source pixel in device-memory buffer. /// 8bit RGB samples are expected (no alpha and no padding). /// (Pointer must be aligned to multiples of 16 bytes.) /// @param out Pointer to output buffer in device memory. /// (Must be aligned to multiples of 8 bytes.) /// @param size_x Width of the input image (must be divisible by 4). /// @param size_y Height of the input image (must be divisible by 4). /// (Input is read bottom up if negative) /// @param stream CUDA stream to run in, or 0 for default stream. /// @return 0 if OK, nonzero if failed. CUDA_DLL_API int cuda_rgb_to_dxt6(const void * src, void * out, int size_x, int size_y, cuda_wrapper_stream_t stream) { return dxt_launch<false, 6>(src, out, size_x, size_y, (cudaStream_t) stream); } CUDA_DLL_API int cuda_yuv_to_dxt6(const void * src, void * out, int size_x, int size_y, cuda_wrapper_stream_t stream) { return dxt_launch<true, 6>(src, out, size_x, size_y, (cudaStream_t) stream); }
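// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source): one way a
// caller might drive cuda_rgb_to_dxt1 for a packed 8-bit RGB image whose
// dimensions are multiples of 4. DXT1 stores each 4x4 block in 8 bytes, so
// the output buffer needs size_x * size_y / 2 bytes. The helper name, the
// guard macro and the error handling are made up; passing 0 as the stream
// follows the "0 for default stream" note in the comments above and assumes
// cuda_wrapper_stream_t accepts a literal 0, as documented.
#ifdef ILLUSTRATIVE_DXT1_USAGE_SKETCH
static int compressRgbToDxt1(const unsigned char * host_rgb, void * host_out,
                             int size_x, int size_y)
{
    const size_t in_bytes  = (size_t)size_x * size_y * 3; // packed RGB, no padding
    const size_t out_bytes = (size_t)size_x * size_y / 2; // 8 bytes per 4x4 block
    void *d_in = 0, *d_out = 0;

    // cudaMalloc returns allocations aligned well beyond the documented
    // 16-byte (input) and 8-byte (output) requirements.
    if (cudaMalloc(&d_in, in_bytes) != cudaSuccess) return -1;
    if (cudaMalloc(&d_out, out_bytes) != cudaSuccess) { cudaFree(d_in); return -1; }

    cudaMemcpy(d_in, host_rgb, in_bytes, cudaMemcpyHostToDevice);
    const int ret = cuda_rgb_to_dxt1(d_in, d_out, size_x, size_y, 0);
    if (ret == 0) {
        cudaMemcpy(host_out, d_out, out_bytes, cudaMemcpyDeviceToHost);
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return ret;
}
#endif // ILLUSTRATIVE_DXT1_USAGE_SKETCH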
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } //__device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { // bitmap |= (1 << pos); //} // Returns the root index of the UFTree __device__ unsigned Find(const int *s_buf, unsigned n) { while (s_buf[n] != n) { n = s_buf[n]; } return n; } // Merges the UFTrees of a and b, linking one root to the other __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void InitLabeling(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { labels[labels_index] = labels_index; } } __global__ void Merge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { #define CONDITION_B col>0 && row>1 && img.data[img_index - 2 * img.step - 1] #define CONDITION_C row>1 && img.data[img_index - 2 * img.step] #define CONDITION_D col+1<img.cols && row>1 && img.data[img_index - 2 * img.step + 1] #define CONDITION_E col+2<img.cols && row>1 && img.data[img_index - 2 * img.step + 2] #define CONDITION_G col>1 && row>0 && img.data[img_index - img.step - 2] #define CONDITION_H col>0 && row>0 && img.data[img_index - img.step - 1] #define CONDITION_I row>0 && img.data[img_index - img.step] #define CONDITION_J col+1<img.step && row>0 && img.data[img_index - img.step + 1] #define CONDITION_K col+2<img.step && row>0 && img.data[img_index - img.step + 2] #define CONDITION_M col>1 && img.data[img_index - 2] #define CONDITION_N col>0 && img.data[img_index - 1] #define CONDITION_O img.data[img_index] #define CONDITION_P col+1<img.step && img.data[img_index + 1] #define CONDITION_R col>0 && row+1<img.rows && img.data[img_index + img.step - 1] #define CONDITION_S row+1<img.rows && img.data[img_index + img.step] #define CONDITION_T col+1<img.cols && row+1<img.rows && img.data[img_index + img.step + 1] // Action 1: No action #define ACTION_1 // // Action 2: New label (the block has foreground pixels and is not connected to anything else) #define ACTION_2 //Action P: Merge with block P #define ACTION_3 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); // Action Q: Merge with block Q #define ACTION_4 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); // Action R: Merge with block R #define ACTION_5 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); // Action S: Merge with block S #define ACTION_6 Union(labels.data, labels_index, labels_index - 2); // Action 
7: Merge labels of block P and Q #define ACTION_7 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); \ Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); //Action 8: Merge labels of block P and R #define ACTION_8 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); \ Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); // Action 9 Merge labels of block P and S #define ACTION_9 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); \ Union(labels.data, labels_index, labels_index - 2); // Action 10 Merge labels of block Q and R #define ACTION_10 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); \ Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); // Action 11: Merge labels of block Q and S #define ACTION_11 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); \ Union(labels.data, labels_index, labels_index - 2); // Action 12: Merge labels of block R and S #define ACTION_12 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); \ Union(labels.data, labels_index, labels_index - 2); // Action 13: not used #define ACTION_13 // Action 14: Merge labels of block P, Q and S #define ACTION_14 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); \ Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); \ Union(labels.data, labels_index, labels_index - 2); //Action 15: Merge labels of block P, R and S #define ACTION_15 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2); \ Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); \ Union(labels.data, labels_index, labels_index - 2); //Action 16: labels of block Q, R and S #define ACTION_16 Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size)); \ Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2); \ Union(labels.data, labels_index, labels_index - 2); #include "labeling_bolelli_2018_drag.inc.h" #undef ACTION_0 #undef ACTION_2 #undef ACTION_P #undef ACTION_Q #undef ACTION_R #undef ACTION_S #undef ACTION_7 #undef ACTION_8 #undef ACTION_9 #undef ACTION_10 #undef ACTION_11 #undef ACTION_12 #undef ACTION_13 #undef ACTION_14 #undef ACTION_15 #undef ACTION_16 #undef CONDITION_B #undef CONDITION_C #undef CONDITION_D #undef CONDITION_E #undef CONDITION_G #undef CONDITION_H #undef CONDITION_I #undef CONDITION_J #undef CONDITION_K #undef CONDITION_M #undef CONDITION_N #undef CONDITION_O #undef CONDITION_P #undef CONDITION_R #undef CONDITION_S #undef CONDITION_T } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { labels[labels_index] = Find(labels.data, labels_index); } } __global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2; unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned img_index = row * (img.step / 
img.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned int label = labels[labels_index] + 1; if (img.data[img_index]) labels[labels_index] = label; else { labels[labels_index] = 0; } if (col + 1 < labels.cols) { if (img.data[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (row + 1 < labels.rows) { if (img.data[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (row + 1 < labels.rows) { if (img.data[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class C_DRAG : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; public: C_DRAG() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); // d_img_labels_.download(img_labels_); cudaDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.size(), CV_32SC1); } void Dealloc() { } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); Compression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + 
dealloc_timing); } }; REGISTER_LABELING(C_DRAG);
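// Illustrative sketch, not part of the original file: how the 2x2-block launch
// geometry used by InitLabeling/Merge/Compression/FinalLabeling above can be
// derived. BLOCK_ROWS and BLOCK_COLS are assumed to be the tile constants
// defined earlier in this translation unit; each thread owns one 2x2 pixel
// block, which is why rows and cols are first halved (rounding up) before the
// usual ceiling division by the CUDA block shape. The helper name is made up
// for this example only.
static inline dim3 DragGridSize(int rows, int cols) {
    const unsigned block_rows = (rows + 1) / 2;   // number of 2x2 blocks along y
    const unsigned block_cols = (cols + 1) / 2;   // number of 2x2 blocks along x
    return dim3((block_cols + BLOCK_COLS - 1) / BLOCK_COLS,
                (block_rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
}
// Usage would mirror PerformLabeling()/AllScans():
//   grid_size_ = DragGridSize(d_img_.rows, d_img_.cols);
//   block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);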
#include "cuda_error.h" #include "cuda_runtime.h" #include "Utils.h" #include "ResourcePool.h" #include "StreamManager.h" #include "SignalProcessingFitterQueue.h" //job types #include "SingleFitStream.h" #include "MultiFitStream.h" //#include "GenerateBeadTraceStream.h" #define MAX_EXECUTION_ERRORS 10 #define NUM_ERRORS_TOGGLE_VERBOSE 5 using namespace std; bool cudaSimpleStreamExecutionUnit::_verbose = false; int cudaSimpleStreamExecutionUnit::_seuCnt = 0; bool cudaSimpleStreamManager::_verbose =false; int cudaSimpleStreamManager::_maxNumStreams = MAX_ALLOWED_NUM_STREAMS; ////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////// // SIMPLE STREAM EXECUTION UNIT cudaSimpleStreamExecutionUnit::cudaSimpleStreamExecutionUnit( streamResources * resources, WorkerInfoQueueItem item ) { _state = Init; _seuNum = _seuCnt++; _computeVersion = 35; //default set to latest setName("StreamExecutionUnit"); _item = item; _resource = resources; if(_resource == NULL) throw cudaStreamCreationError(__FILE__,__LINE__); _stream = _resource->getStream(); } cudaSimpleStreamExecutionUnit::~cudaSimpleStreamExecutionUnit() { if(_verbose) cout << getLogHeader() << " Completed, releasing Stream Resources" << endl; _resource->release(); } void cudaSimpleStreamExecutionUnit::setName(std::string name) { _name = name; } ////////////////////////////// // TASK EXECUTION FUNCTIONS bool cudaSimpleStreamExecutionUnit::execute() { switch(_state) { case Init: if(!InitJob()){ _state = Exit; if(_verbose) cout << getLogHeader() << " No Valid Job -> Exit" << endl; break; } _state = Working; if(_verbose) cout << getLogHeader() << " Init -> Working" << endl; break; case Working: // starting work state case ContinueWork: //continue work state after non async code ExecuteJob(); _state = Waiting; if(_verbose) cout << getLogHeader() << " -> Waiting" << endl; break; case Waiting: if(checkComplete()){ // check if completed if( handleResults() > 0 ){ _state = ContinueWork; if(_verbose) cout << getLogHeader() << " -> ContinueWork" << endl; }else{ //stopTimer(); _state = Exit ; if(_verbose) cout << getLogHeader() << " -> Exit" << endl; } } break; case Exit: default: return false; // return false if all is done! 
} // return true if there is still work to be done return true; } void * cudaSimpleStreamExecutionUnit::getJobData() { return (void*)_item.private_data; } WorkerInfoQueueItem cudaSimpleStreamExecutionUnit::getItem() { return _item; } bool cudaSimpleStreamExecutionUnit::checkComplete() { cudaError_t ret; ret = cudaStreamQuery(_stream); if( ret == cudaErrorNotReady ) return false; if( ret == cudaSuccess) return true; ret = cudaGetLastError(); throw cudaExecutionException(ret, __FILE__,__LINE__); // return false; } void cudaSimpleStreamExecutionUnit::setVerbose(bool v) { _verbose = v; } bool cudaSimpleStreamExecutionUnit::Verbose() { return _verbose; } string cudaSimpleStreamExecutionUnit::getName() { return _name; } string cudaSimpleStreamExecutionUnit::getLogHeader() { ostringstream headerinfo; headerinfo << "CUDA " << _resource->getDevId() << ": SEU " << getSeuNum() << ": " << getName() << " SR " << getStreamId()<< ":"; return headerinfo.str(); } int cudaSimpleStreamExecutionUnit::getSeuNum() { return _seuNum; } int cudaSimpleStreamExecutionUnit::getStreamId() { return _resource->getStreamId(); } // need to be overloaded to return true if initiated correctly bool cudaSimpleStreamExecutionUnit::InitJob() { return false; } //Factory Method to produce specialized SEUs cudaSimpleStreamExecutionUnit * cudaSimpleStreamExecutionUnit::makeExecutionUnit(streamResources * resources, WorkerInfoQueueItem item) { cudaSimpleStreamExecutionUnit * tmpSeu = NULL; ostringstream headerinfo; headerinfo << "CUDA " << resources->getDevId() << ": SEU Factory SR "<< resources->getStreamId() << ":"; int *type = (int*)item.private_data; switch(*type){ /* case GENERATE_BEAD_TRACES: if(_verbose) cout << headerinfo.str()<< " creating GenerateAllBeadTraces " << endl; tmpSeu = new GenerateBeadTraceStream( resources, item); break;*/ case INITIAL_FLOW_BLOCK_ALLBEAD_FIT: if(_verbose) cout << headerinfo.str()<< " creating MultiFit " << endl; tmpSeu = new SimpleMultiFitStream( resources, item); break; case SINGLE_FLOW_FIT: if(_verbose) cout << headerinfo.str()<< " creating SingleFit " << endl; tmpSeu = new SimpleSingleFitStream( resources, item); break; default: if(_verbose) cout << headerinfo.str()<< " received unknown item" << endl; } //set the compute version according to the device the streamManager is initiated for //tmpSeu->setCompute(_computeVersion); return tmpSeu; } void cudaSimpleStreamExecutionUnit::setCompute(int compute) { _computeVersion = compute; } int cudaSimpleStreamExecutionUnit::getCompute() { return _computeVersion; } /* // have to find better way to propagate since _myJob no longer part of base class // to keep the stream execution unit generic int cudaSimpleStreamExecutionUnit::getNumFrames() { int n=0; //if(_myJob.ValidJob()) n = _myJob.getNumFrames(); return n; } int cudaSimpleStreamExecutionUnit::getNumBeads() { int n=0; //if(_myJob.ValidJob()) n = _myJob.getNumBeads(); return n; } */ /////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////// // SIMPLE STREAM MANAGER cudaSimpleStreamManager::cudaSimpleStreamManager( WorkerInfoQueue * inQ, WorkerInfoQueue * fallbackQ ) { cudaGetDevice( &_devId ); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, _devId); _computeVersion = 10*deviceProp.major + deviceProp.minor; cout << getLogHeader() << " init with up to "<< getNumMaxStreams() <<" Stream Execution Units" << endl; _inQ = inQ; _fallBackQ = fallbackQ; _resourcePool = NULL; 
_item.finished = false; _item.private_data = NULL; _GPUerror = false; //_maxNumStreams = numStreams; // now static _tasks = 0; _sumBeads=0; _maxBeads=0; _sumFrames=0; _maxFrames=0; _executionErrorCount=0; _resourceAllocDone = false; //not yet allocated, flag added to allow for allocation when first job arrives allocateResources(); } cudaSimpleStreamManager::~cudaSimpleStreamManager() { ostringstream outputStr; outputStr << getLogHeader() << " handled " << _tasks <<" tasks."<< endl; // outputStr << getLogHeader() << " Beads max: "<< _maxBeads <<" avg: " << _sumBeads/_tasks << " Frames max: " << _maxFrames << " avg: " << _sumFrames/_tasks << endl; for ( map< string, TimeKeeper>::iterator iter = _timer.begin(); iter != _timer.end(); ++iter ) outputStr << getLogHeader() << " " << iter->first << " finished: " << iter->second.getJobCnt() << " in " << iter->second.getTime() << " time/job: " << iter->second.getAvgTime() << " (exceptions: " << iter->second.getErrorCnt()<< ")" << endl; freeResources(); cout << outputStr.str(); } void cudaSimpleStreamManager::allocateResources() { //size_t maxHostSize = getMaxHostSize(); //allocate a lot of frames to handle exponential tail fit //size_t maxDeviceSize = getMaxDeviceSize(MAX_PREALLOC_COMPRESSED_FRAMES_GPU); if(_resourcePool !=NULL) delete _resourcePool; _resourcePool = NULL; try{ _resourcePool = new cudaResourcePool(_maxNumStreams); // throws cudaException int n = getNumStreams(); if (n > 0){ cout << getLogHeader() <<" successfully acquired resources for " << n << " Stream Execution Units" <<endl; } _GPUerror = false; } catch(exception &e){ cout << e.what() << endl; cout << getLogHeader() << " No StreamResources could be acquired! retry pending. jobs will he handled by CPU for now!" << endl; _GPUerror = true; _resourcePool = NULL; } _resourceAllocDone = true; } void cudaSimpleStreamManager::freeResources() { if(_resourcePool != NULL) delete _resourcePool; _resourcePool = NULL; if(isFinishItem() && _inQ !=NULL){ cout << getLogHeader() << " signaling Queue that all jobs and cleanup completed" << endl; try{ _inQ->DecrementDone(); } catch(...){ cout << getLogHeader() << " signal to Queue caused exception, Queue seems to be destroyed already!" << endl; } } } int cudaSimpleStreamManager::getNumStreams() { if(_resourcePool != NULL) return _resourcePool->getNumStreams(); return 0; } int cudaSimpleStreamManager::availableResources() { // calculate free SEUs #allocated stream resources - #active SEUS return getNumStreams() - _activeSEU.size(); } /* // No Longer needed size_t cudaSimpleStreamManager::getMaxHostSize(int flow_block_size) { size_t ret = 0; ret = std::max( SimpleSingleFitStream::getMaxHostMem( flow_block_size ), ret ); ret = std::max( SimpleMultiFitStream ::getMaxHostMem( flow_block_size ), ret ); return ret; } size_t cudaSimpleStreamManager::getMaxDeviceSize(int maxFrames, int maxBeads, int flow_block_size) { size_t ret = 0; ret = std::max( SimpleSingleFitStream::getMaxDeviceMem(flow_block_size, maxFrames, maxBeads), ret ); ret = std::max( SimpleMultiFitStream ::getMaxDeviceMem(flow_block_size, maxFrames, maxBeads), ret ); return ret; } */ void cudaSimpleStreamManager::moveToCPU() { //get jobs and hand them over to the CPU Q after GPU error was encountered getJob(); if(checkItem()){ if(_verbose) cout << getLogHeader()<< " job received, try to reallocate resources!" 
<< endl; //try to allocate and recover before handing job to CPU if(_executionErrorCount < MAX_EXECUTION_ERRORS){ if(getNumStreams() == 0 ){ allocateResources(); } if( getNumStreams() > 0){ cout << getLogHeader()<< " managed to acquire streamResources, switching execution back to GPU!" << endl; addSEU(); _GPUerror = false; return; } } if(_verbose) cout << getLogHeader() << " handing job on to CPU queue" << endl; _fallBackQ->PutItem(_item); // if no matching SEU put to CpuQ _inQ->DecrementDone(); // signale to Q that a job is completed } } void cudaSimpleStreamManager::getJob() { if(!isFinishItem()){ if(_activeSEU.empty()){ if(_verbose) cout << getLogHeader()<< " blocking Job request" << endl; _item = _inQ->GetItem(); //if no active SEUs block on Q if(!_resourceAllocDone) allocateResources(); // do allocation when first job received } else{ _item = _inQ->TryGetItem(); } if(isFinishItem()){ cout << getLogHeader()<< " received finish job" << endl; } } } // Depending on the type of job that was received from the inQ // creates the according SEU type or hands it back to the CPU // new GPU Jobs have to be added to this switch/case statement void cudaSimpleStreamManager::addSEU() { //create a SEU if item is not the finish item if(checkItem()){ if(_verbose) cout << getLogHeader()<< " job received, checking type and create SEU" << endl; cudaSimpleStreamExecutionUnit * tmpSeu = NULL; try{ tmpSeu = cudaSimpleStreamExecutionUnit::makeExecutionUnit(_resourcePool->getResource(), _item); if (tmpSeu == NULL){ if(_verbose) cout << getLogHeader()<< " received unknown item" << endl; _fallBackQ->PutItem(_item); // if no matching SEU put to Cpu _inQ->DecrementDone(); // Signal to Q that a job is completed }else{ _timer[tmpSeu->getName()].start(); _activeSEU.push_back(tmpSeu); //set the compute version according to the device the streamManager is initiated for tmpSeu->setCompute(_computeVersion); } } catch(cudaException &e){ if(getNumStreams() == 0){ cout << " *** ERROR DURING STREAM UNIT CREATION, handing job back to CPU" << endl; _fallBackQ->PutItem(_item); _inQ->DecrementDone(); // Signal to Q that a job is completed _GPUerror = true; }else{ cout << " *** ERROR DURING STREAM UNIT CREATION, retry on GPU" << endl; _inQ->PutItem(_item); // _inQ->DecrementDone(); // Signal to Q that a job is completed } } } } //handles the execution of all SEU in the StreamManager //cleans up the SEUs that completed their jobs void cudaSimpleStreamManager::executeSEU() { bool workDone = false; bool errorOccured = false; for(int i=(_activeSEU.size()-1); i >= 0 ; i--){ // iterate backwards for easy delete if(_activeSEU[i] != NULL){ // Safety, should not be possible to be NULL try{ workDone = !(_activeSEU[i]->execute()); } catch(cudaAllocationError &e){ //if execution exception, get item and hand it back to CPU //cout << e.what() << endl; if(_verbose) e.Print(); _item = _activeSEU[i]->getItem(); if(getNumStreams() == 0){ cout << getLogHeader() << "*** CUDA RESOURCE POOL EMPTY , handing incomplete Job back to CPU for retry" << endl; _fallBackQ->PutItem(_item); // if no matching SEU put to CpuQ _GPUerror = true; }else{ cout << getLogHeader() << "*** CUDA STREAM RESOURCE COULD NOT BE ALLOCATED, " << getNumStreams() << " StreamResources still avaiable, retry pending" << endl; _inQ->PutItem(_item); // if no matching SEU put to CpuQ } workDone = true ; // Make work as done so SEU gets cleaned up errorOccured = true; } // end catch block catch(cudaException &e){ //if execution exception, get item and hand it back to CPU e.Print(); _item = 
_activeSEU[i]->getItem(); cout << getLogHeader() << "*** ERROR DURING STREAM EXECUTION, handing incomplete Job back to CPU for retry" << endl; _fallBackQ->PutItem(_item); // if no matching SEU put to CpuQ workDone = true; // mark work as done so SEU gets cleaned up errorOccured = true; _executionErrorCount++; if(e.getCudaError() == cudaErrorLaunchFailure) { cout << getLogHeader() << "encountered Kernel Launch Failure. Stop retrying, set GPU error state" << endl; cout << getNumStreams() << " StreamResources available" << endl; _activeSEU[i]->printStatus(); _executionErrorCount = MAX_EXECUTION_ERRORS + 1; _GPUerror = true; }else{ if(_executionErrorCount == NUM_ERRORS_TOGGLE_VERBOSE){ cout << getLogHeader() << "encountered " << NUM_ERRORS_TOGGLE_VERBOSE << " errors, turning on verbose mode for debugging" << endl; setVerbose(true); cudaSimpleStreamExecutionUnit::setVerbose(true); } if(_executionErrorCount >= MAX_EXECUTION_ERRORS){ cout << getLogHeader() << "encountered " << MAX_EXECUTION_ERRORS << " errors. Stop retrying, set GPU error state" << endl; setVerbose(false); cudaSimpleStreamExecutionUnit::setVerbose(false); _GPUerror = true; } } } // end catch block // clean up if work for this SEU is done if(workDone){ if(!errorOccured){ _timer[_activeSEU[i]->getName()].stop(); // recordBeads(_activeSEU[i]->getNumBeads()); // recordFrames(_activeSEU[i]->getNumFrames()); _tasks++; }else{ _timer[_activeSEU[i]->getName()].stopAfterError(); } delete _activeSEU[i]; // destroy SEU object _activeSEU.erase(_activeSEU.begin()+i); //delete SEU from active list _inQ->DecrementDone(); // Signal to Q that a job is completed } } } } //perform actual work, polls jobs from inQ and executes them until finish-job received bool cudaSimpleStreamManager::DoWork() { if(_inQ == NULL){ cout << getLogHeader() << " No valid queue handle provided!" << endl; return false; } bool notDone = true; while(notDone){ if(_GPUerror ){ moveToCPU(); }else{ if(availableResources() > 0 ) { // if resources available get job getJob(); // get a Job from Q, block on Q if no job not already working addSEU(); // try to add whatever getJob acquired } else if ( getNumStreams() == 0 ) { // Something's wrong, and the streams went away. cout << getLogHeader() << " all the streams went away. Falling back to CPU." 
<< endl; _GPUerror = true; } } // drive the state machine of the state execution units // and clean up when a SEU is don executeSEU(); // as long as no finish job received and there are still active SEUs // in the list we are not done yet notDone = (_activeSEU.empty() && isFinishItem())?(false):(true); } return false; } //bookkeeping /* void cudaSimpleStreamManager::recordBeads(int n) { _sumBeads+=n; _maxBeads = (_maxBeads>n)?(_maxBeads):(n); } void cudaSimpleStreamManager::recordFrames(int n) { _sumFrames+=n; _maxFrames = (_maxFrames>n)?(_maxFrames):(n); } */ void cudaSimpleStreamManager::setNumMaxStreams(int numMaxStreams) { if(numMaxStreams <= MAX_ALLOWED_NUM_STREAMS ){ _maxNumStreams = numMaxStreams; }else{ cout << "CUDA: tried to set number of streams to " << numMaxStreams << ", correcting to allowed maximum of " << MAX_ALLOWED_NUM_STREAMS << " streams " << endl; _maxNumStreams = MAX_ALLOWED_NUM_STREAMS; } } int cudaSimpleStreamManager::getNumMaxStreams() { return _maxNumStreams; } void cudaSimpleStreamManager::setVerbose(bool v) { _verbose = v; } string cudaSimpleStreamManager::getLogHeader() { ostringstream headerinfo; headerinfo << "CUDA " << _devId << ": StreamManager:"; return headerinfo.str(); } bool cudaSimpleStreamManager::checkItem() { if(!_item.finished && _item.private_data != NULL ) return true; return false; } bool cudaSimpleStreamManager::isFinishItem() { return _item.finished; } ///////////////////////////////// TimeKeeper::TimeKeeper() { _timesum = 0; _activeCnt = 0; _jobCnt = 0; _errCnt = 0; } void TimeKeeper::start(){ if( _activeCnt == 0) _T.restart(); _activeCnt++; _jobCnt++; } void TimeKeeper::stop(){ _activeCnt--; if(_activeCnt == 0){ _timesum += _T.elapsed(); } } void TimeKeeper::stopAfterError(){ stop(); _jobCnt--; _errCnt++; } double TimeKeeper::getTime() { return _timesum; } double TimeKeeper::getAvgTime() { return (_jobCnt > 0)?(_timesum/_jobCnt):(0); } int TimeKeeper::getJobCnt() { return _jobCnt; } int TimeKeeper::getErrorCnt() { return _errCnt; }
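// Illustrative sketch, not part of the original file: minimal use of TimeKeeper,
// mirroring how cudaSimpleStreamManager brackets each SEU with _timer[name].start()
// and stop()/stopAfterError(). The keeper only restarts its internal timer when no
// job of that type is active, so overlapping jobs are measured as one busy interval
// and failed jobs are kept out of the average. The function name is made up for
// this example only.
static void TimeKeeperUsageSketch()
{
    TimeKeeper tk;
    tk.start();                   // first active job restarts the internal timer
    tk.start();                   // overlapping job of the same type: no restart
    tk.stop();                    // one job still active, nothing accumulated yet
    tk.stop();                    // last job done: elapsed time added to the sum
    cout << "jobs: " << tk.getJobCnt()
         << " avg: " << tk.getAvgTime() << endl;   // _timesum / _jobCnt
}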
namespace fastertransformer{ template <typename T> __inline__ __device__ T gelu(T x) { float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x)))); return x * cdf; } template <> __inline__ __device__ half2 gelu(half2 val) { half2 val_pow3 = __hmul2(val, __hmul2(val, val)); float2 tmp_pow = __half22float2(val_pow3); float2 tmp = __half22float2(val); tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x)))); tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y)))); return __hmul2(val, __float22half2_rn(tmp)); } template <typename T> __inline__ __device__ T warpReduceSum(T val) { for(int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(FINAL_MASK, val, mask, 32); return val; } template <typename T> __inline__ __device__ T blockReduceSum(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; int wid = threadIdx.x >> 5; val = warpReduceSum<T>(val); if(lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f; val = warpReduceSum(val); return val; } template <typename T> __inline__ __device__ T warpReduceMax(T val) { for(int mask = 16; mask > 0; mask >>= 1) val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32)); return val; } /* Calculate the maximum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceMax(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; // in-warp idx int wid = threadIdx.x >> 5; // warp idx val = warpReduceMax(val); // get maxx in each warp if(lane == 0) // record in-warp maxx by warp Idx shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f; val = warpReduceMax<T>(val); return val; } //transpose matrix & transform COL32 to col-major //input matrix is (m n) COL32 //output matrix is (n m) col-major //grid((n+31)/32, (m+31)/32) //block(32, 32) template<typename T> __global__ void transposeMatrix_COL32ToColMajor_kernel(T*dst, const T* src, const int m, const int n) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; bool check = ((x < n) && (y < m)); // COL32_col = x >> 5 ; COL32_row = (y << 5) + (x & 31); // COL32_idx = (COL32_col << 5) * m + COL32_row = (x & 0xffffffe0)*m + (y << 5) + (x & 31) if (check) dst[y*n+x] = __ldg(src+((x & 0xffffffe0)*m + (y << 5) + (x & 31))); } //transpose matrix & transform COL32 to col-major //input matrix is (m n) COL32 //output matrix is (n m) col-major //grid((n+31)/32, (m+31)/32) //block(16, 32) template<> __global__ void transposeMatrix_COL32ToColMajor_kernel(half2*dst, const half2* src, const int m, const int n) { int x = (blockIdx.x*blockDim.x + threadIdx.x) << 1; int y = blockIdx.y*blockDim.y + threadIdx.y; bool check = ((x < n) && (y < m)); // COL32_col = x >> 5 ; COL32_row = (y << 5) + (x & 31); // COL32_idx = (COL32_col << 5) * m + COL32_row = (x & 0xffffffe0)*m + (y << 5) + (x & 31) if (check) dst[(y*n+x) >> 1] = __ldg(src+(((x & 0xffffffe0)*m + (y << 5) + (x & 31)) >> 1)); } //transpose matrix & transform COL32 to col-major //input matrix is (m n) COL32 //output matrix is (n m) col-major template <typename T> void transposeMatrix_COL32ToColMajor_kernelLauncher(T* dst, const T* src, const int m, const int n, cudaStream_t stream) { assert(n%32 == 0); if (sizeof(T) == sizeof(float)) transposeMatrix_COL32ToColMajor_kernel<T><<<dim3((n+31)/32, (m+31)/32), dim3(32, 32), 0, stream>>>(dst, src, m, n); else if (sizeof(T) == 
sizeof(half)) transposeMatrix_COL32ToColMajor_kernel<<<dim3((n+31)/32, (m+31)/32), dim3(16, 32), 0, stream>>>((half2*)dst, (const half2*)src, m, n); } template void transposeMatrix_COL32ToColMajor_kernelLauncher<float>(float* dst, const float* src, const int m, const int n, cudaStream_t stream); template void transposeMatrix_COL32ToColMajor_kernelLauncher<half>(half *dst, const half* src, const int m, const int n, cudaStream_t stream); template void transposeMatrix_COL32ToColMajor_kernelLauncher<int8_t>(int8_t* dst, const int8_t* src, const int m, const int n, cudaStream_t stream); //transpose matrix & transfrom col-major to COL32 & quantize //input matrix is (m, n) col-major //output matrix is (n, m) COL32, using char4 to write out //m should be a mutiple of 32 //grid((m+31)/32, (n+31)/32) //block(8, 32) template<typename T> __global__ void transposeMatrix_colMajorToCOL32_quantize_kernel(char4*dst, const T* src, const int m, const int n, const float* scale_ptr) { const float scale = __ldg(scale_ptr); int x = (blockIdx.x*blockDim.x + threadIdx.x) << 2; int y = blockIdx.y*blockDim.y + threadIdx.y; bool check = ((x < m) && (y < n)); if (check) { char4 tmp4; tmp4.x = float_to_int8_rn(static_cast<float>(__ldg(src+y*m+x))*scale); tmp4.y = float_to_int8_rn(static_cast<float>(__ldg(src+y*m+x+1))*scale); tmp4.z = float_to_int8_rn(static_cast<float>(__ldg(src+y*m+x+2))*scale); tmp4.w = float_to_int8_rn(static_cast<float>(__ldg(src+y*m+x+3))*scale); // COL32_col = x >> 5 ; COL32_row = (y << 5) + (x & 31); // COL32_idx = (COL32_col << 5) * n + COL32_row = (x & 0xffffffe0)*n + (y << 5) + (x & 31) dst[((x & 0xffffffe0) * n + (y << 5) + (x & 31)) >> 2] = tmp4; } } //transpose matrix & transfrom col-major to COL32 & quantize //input matrix is (m, n) col-major //output matrix is (n, m) COL32, using char4 to write out //m should be a mutiple of 32 //grid((m+31)/32, (n+31)/32) //block(8, 32) template <typename T> void transposeMatrix_colMajorToCOL32_quantize_kernelLauncher(int8_t* dst, const T* src, const int m, const int n, const float* scale_ptr, cudaStream_t stream) { assert(m%32 == 0); transposeMatrix_colMajorToCOL32_quantize_kernel<T><<<dim3((m+31)/32, (n+31)/32), dim3(8, 32), 0, stream>>>((char4 *)dst, src, m, n, scale_ptr); } template void transposeMatrix_colMajorToCOL32_quantize_kernelLauncher<float>(int8_t* dst, const float* src, const int m, const int n, const float* scale_ptr, cudaStream_t stream); template void transposeMatrix_colMajorToCOL32_quantize_kernelLauncher<half>(int8_t *dst, const half* src, const int m, const int n, const float* scale_ptr, cudaStream_t stream); //transpose matrix & transfrom col-major to COL32 //input matrix is (m, n) col-major //output matrix is (n, m) COL32 //m should be a mutiple of 32 //grid((m+31)/32, (n+31)/32) //block(32, 32) template<typename T> __global__ void transposeMatrix_colMajorToCOL32_kernel(T*dst, const T* src, const int m, const int n) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; bool check = ((x < m) && (y < n)); if (check) { // COL32_col = x >> 5 ; COL32_row = (y << 5) + (x & 31); // COL32_idx = (COL32_col << 5) * n + COL32_row = (x & 0xffffffe0)*n + (y << 5) + (x & 31) dst[(x & 0xffffffe0) * n + (y << 5) + (x & 31)] = __ldg(src+y*m+x); } } //transpose matrix & transfrom col-major to COL32 //input matrix is (m, n) col-major //output matrix is (n, m) COL32 //m should be a mutiple of 32 //grid((m+31)/32, (n+31)/32) //block(16, 32) template<> __global__ void 
transposeMatrix_colMajorToCOL32_kernel(half2*dst, const half2* src, const int m, const int n) { int x = (blockIdx.x*blockDim.x + threadIdx.x) << 1; int y = blockIdx.y*blockDim.y + threadIdx.y; bool check = ((x < m) && (y < n)); if (check) { // COL32_col = x >> 5 ; COL32_row = (y << 5) + (x & 31); // COL32_idx = (COL32_col << 5) * n + COL32_row = (x & 0xffffffe0)*n + (y << 5) + (x & 31) dst[((x & 0xffffffe0) * n + (y << 5) + (x & 31)) >> 1] = __ldg(src+((y*m+x) >> 1)); } } //transpose matrix & transfrom col-major to COL32 //input matrix is (m, n) col-major //output matrix is (n, m) COL32, using char4 to write out //m should be a mutiple of 32 //grid((m+31)/32, (n+31)/32) //block(8, 32) template <typename T> void transposeMatrix_colMajorToCOL32_kernelLauncher(T* dst, const T* src, const int m, const int n, cudaStream_t stream) { assert(m%32 == 0); if (sizeof(T) == sizeof(float)) transposeMatrix_colMajorToCOL32_kernel<T><<<dim3((m+31)/32, (n+31)/32), dim3(32, 32), 0, stream>>>(dst, src, m, n); else if (sizeof(T) == sizeof(half)) transposeMatrix_colMajorToCOL32_kernel<<<dim3((m+31)/32, (n+31)/32), dim3(16, 32), 0, stream>>>((half2*)dst, (const half2*)src, m, n); } template void transposeMatrix_colMajorToCOL32_kernelLauncher<float>(float* dst, const float* src, const int m, const int n, cudaStream_t stream); template void transposeMatrix_colMajorToCOL32_kernelLauncher<half>(half *dst, const half* src, const int m, const int n, cudaStream_t stream); //transfrom row-major to COL32 //input matrix is (m, n) row-major //output matrix is (m, n) COL32 //n should be a mutiple of 32 //grid((n+31)/32, (m+31)/32) //block(8, 32) __global__ void rowMajorToCOL32_kernel(char4*dst, const char4* src, const int m, const int n) { int n_id = (blockIdx.x*blockDim.x + threadIdx.x) << 2; int m_id = blockIdx.y*blockDim.y + threadIdx.y; bool check = ((m_id < m) && (n_id < n)); if (check) { // COL32_col = n_id >> 5 ; COL32_row = (m_id << 5) + (n_id & 31); // COL32_idx = (COL32_col << 5) * m + COL32_row = (n_id & 0xffffffe0)*m + (m_id << 5) + (n_id & 31) dst[((n_id & 0xffffffe0)*m + (m_id << 5) + (n_id & 31)) >> 2] = __ldg(src+((m_id*n+n_id) >> 2)); } } //transfrom row-major to COL32 //input matrix is (m, n) row-major //output matrix is (m, n) COL32 //n should be a mutiple of 32 //grid((n+31)/32, (m+31)/32) //block(8, 32) void rowMajorToCOL32_kernelLauncher(int8_t* dst, const int8_t* src, const int m, const int n, cudaStream_t stream) { assert(n%32 == 0); rowMajorToCOL32_kernel<<<dim3((n+31)/32, (m+31)/32), dim3(8, 32), 0, stream>>>((char4*)dst, (const char4*)src, m, n); } //add bias to matrix of m * n, CUBLASLT_ORDER_COL32 //grid, thread = (m), (n/4) //using char4 as output //for per-channel-quantization weight __global__ void add_bias_act_COL32_int32I_int8O(int8_t *out, const int32_t* input, const float* bias, const int m, const int n, const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr) { const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr); const float out_scale = __ldg(out_scale_ptr); int col_start = threadIdx.x << 2; char4 *outTmpPtr = (char4 *)out; char4 tmp; int outIdx = ((col_start & 0xffffffe0) * m + (blockIdx.x << 5) + (col_start&31)) >> 2; float val; const int4 input4 = __ldg(((const int4*)input)+outIdx); const float4 weight4 = __ldg(((const float4*)weight_amax)+threadIdx.x); const float4 bias4 = __ldg(((const float4*)bias)+threadIdx.x); val = static_cast<float>(input4.x)*weight4.x*input_deQFactor_div127 + bias4.x; val = gelu(val); tmp.x = 
float_to_int8_rn(val*out_scale); val = static_cast<float>(input4.y)*weight4.y*input_deQFactor_div127 + bias4.y; val = gelu(val); tmp.y = float_to_int8_rn(val*out_scale); col_start = col_start + 1; val = static_cast<float>(input4.z)*weight4.z*input_deQFactor_div127 + bias4.z; val = gelu(val); tmp.z = float_to_int8_rn(val*out_scale); col_start = col_start + 1; val = static_cast<float>(input4.w)*weight4.w*input_deQFactor_div127 + bias4.w; val = gelu(val); tmp.w = float_to_int8_rn(val*out_scale); outTmpPtr[outIdx] = tmp; } __global__ void add_bias_act_COL32_int32I_int8O(char4 *out, const int4* input, const half2* bias, const int m, const int n, const float4* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr) { const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr); const float out_scale = __ldg(out_scale_ptr); int col_start = threadIdx.x << 2; int threadIdx2 = threadIdx.x << 1; char4 tmp; int outIdx = ((col_start & 0xffffffe0) * m + (blockIdx.x << 5) + (col_start&31)) >> 2; float val; const int4 input4 = __ldg(input+outIdx); const float4 weight4 = __ldg(weight_amax+threadIdx.x); const half2 biasTmp = __ldg(bias+threadIdx2); const half2 biasTmp2 = __ldg(bias+threadIdx2+1); val = static_cast<float>(input4.x)*weight4.x*input_deQFactor_div127 + static_cast<float>(biasTmp.x); val = gelu(val); tmp.x = float_to_int8_rn(out_scale * val); val = static_cast<float>(input4.y)*weight4.y*input_deQFactor_div127 + static_cast<float>(biasTmp.y); val = gelu(val); tmp.y = float_to_int8_rn(out_scale * val); val = static_cast<float>(input4.z)*weight4.z*input_deQFactor_div127 + static_cast<float>(biasTmp2.x); val = gelu(val); tmp.z = float_to_int8_rn(out_scale * val); val = static_cast<float>(input4.w)*weight4.w*input_deQFactor_div127 + static_cast<float>(biasTmp2.y); val = gelu(val); tmp.w = float_to_int8_rn(out_scale * val); out[outIdx] = tmp; } template <typename T> void add_bias_act_COL32_int32I_int8O_kernelLauncher(int8_t *out, const int32_t* input, const T* bias, const int m, const int n, cudaStream_t stream, const float* weight_amax, const float* input_deQFactor_div127_ptr, const float* out_scale_ptr){ dim3 grid(m); dim3 block(n/4); assert(block.x <= 1024); if (sizeof(T) == sizeof(half)) add_bias_act_COL32_int32I_int8O<<<grid, block, 0, stream>>>((char4*)out, (const int4*)input, (const half2*)bias, m, n, (const float4*)weight_amax, input_deQFactor_div127_ptr, out_scale_ptr); else add_bias_act_COL32_int32I_int8O<<<grid, block, 0, stream>>>(out, input, (const float*)bias, m, n, weight_amax, input_deQFactor_div127_ptr, out_scale_ptr); } template void add_bias_act_COL32_int32I_int8O_kernelLauncher<float>(int8_t *out, const int32_t* input, const float* bias, const int m, const int n, cudaStream_t stream, const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr); template void add_bias_act_COL32_int32I_int8O_kernelLauncher<half>(int8_t *out, const int32_t* input, const half* bias, const int m, const int n, cudaStream_t stream, const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr); //add bias to matrix of m * n, CUBLASLT_ORDER_COL32 //grid, thread = (m), (n/4) //using char4 //for per-tensor-quantization weight template <typename T> __global__ void add_bias_act_COL32_int8IO(int8_t *out, const int8_t* input, const T* bias, const int m, const int n, const float *input_deQFactor_ptr, const float *out_scale_ptr) { const float input_deQFactor = __ldg(input_deQFactor_ptr); const float out_scale = 
__ldg(out_scale_ptr); int col_start = threadIdx.x << 2; char4 *outTmpPtr = (char4 *)out; char4 *inputTmpPtr = (char4*)input; char4 tmp; int outIdx = ((col_start & 0xffffffe0) * m + (blockIdx.x << 5) + (col_start&31)) >> 2; float val; tmp = __ldg(inputTmpPtr+outIdx); val = static_cast<float>(tmp.x)*input_deQFactor + static_cast<float>(__ldg(bias+col_start)); val = gelu(val); tmp.x = float_to_int8_rn(val*out_scale); col_start = col_start + 1; val = static_cast<float>(tmp.y)*input_deQFactor + static_cast<float>(__ldg(bias+col_start)); val = gelu(val); tmp.y = float_to_int8_rn(val*out_scale); col_start = col_start + 1; val = static_cast<float>(tmp.z)*input_deQFactor + static_cast<float>(__ldg(bias+col_start)); val = gelu(val); tmp.z = float_to_int8_rn(val*out_scale); col_start = col_start + 1; val = static_cast<float>(tmp.w)*input_deQFactor + static_cast<float>(__ldg(bias+col_start)); val = gelu(val); tmp.w = float_to_int8_rn(val*out_scale); outTmpPtr[outIdx] = tmp; } template <typename T> void add_bias_act_COL32_int8IO_kernelLauncher(int8_t *out, const int8_t* input, const T* bias, const int m, const int n, cudaStream_t stream, const float* input_deQFactor_ptr, const float* out_scale_ptr){ dim3 grid(m); dim3 block(n/4); assert(block.x <= 1024); add_bias_act_COL32_int8IO<<<grid, block, 0, stream>>>(out, input, bias, m, n, input_deQFactor_ptr, out_scale_ptr); } template void add_bias_act_COL32_int8IO_kernelLauncher<float>(int8_t *out, const int8_t* input, const float* bias, const int m, const int n, cudaStream_t stream, const float *input_deQFactor_ptr, const float *out_scale_ptr); template void add_bias_act_COL32_int8IO_kernelLauncher<half>(int8_t *out, const int8_t* input, const half* bias, const int m, const int n, cudaStream_t stream, const float *input_deQFactor_ptr, const float *out_scale_ptr); //input1/input2/out matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n) //(grid, block) must be (m, n) template <typename T> __global__ void add_bias_input_layernorm_COL32_int8I_DataTypeO(T* output, const int8_t* input1, const int8_t* input2, const T* bias, const T* gamma, const T* beta, int m, int n, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr) { const float input1_deQFactor = __ldg(input1_deQFactor_ptr); const float input2_deQFactor = __ldg(input2_deQFactor_ptr); int col_start = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out; int idx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31)); local_out = static_cast<float>(__ldg(input2+idx))*input2_deQFactor + static_cast<float>(__ldg(input1+idx))*input1_deQFactor + static_cast<float>(__ldg(bias+col_start)); mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean * __fdividef(1.0f, n); __syncthreads(); local_out = local_out - s_mean; variance = blockReduceSum<float>(local_out * local_out); if(threadIdx.x == 0){ s_variance = variance * __fdividef(1.0f, n) + 1e-6f; s_variance = rsqrtf(s_variance); } __syncthreads(); local_out = (local_out * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start)); output[idx] = local_out; } //input1/input2/out matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n) //(grid, block) must be (m, n/2) template <> __global__ void add_bias_input_layernorm_COL32_int8I_DataTypeO(half2* output, const int8_t* input1, const int8_t* input2, const half2* bias, const half2* gamma, const half2* beta, int m, int n, const float *input1_deQFactor_ptr, 
const float *input2_deQFactor_ptr) { const float input1_deQFactor = __ldg(input1_deQFactor_ptr); const float input2_deQFactor = __ldg(input2_deQFactor_ptr); int col_start = threadIdx.x << 1; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out; int idx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31)) >> 1; const char2 * input1_ptr2 = (const char2*)input1; const char2 * input2_ptr2 = (const char2*)input2; char2 input_tmp1 = __ldg(input1_ptr2 + idx); char2 input_tmp2 = __ldg(input2_ptr2 + idx); half2 bias_tmp = __ldg(bias+threadIdx.x); local_out.x = static_cast<float>(input_tmp1.x)*input1_deQFactor + static_cast<float>(input_tmp2.x)*input2_deQFactor + static_cast<float>(bias_tmp.x); local_out.y = static_cast<float>(input_tmp1.y)*input1_deQFactor + static_cast<float>(input_tmp2.y)*input2_deQFactor + static_cast<float>(bias_tmp.y); mean = blockReduceSum<float>(local_out.x + local_out.y); if(threadIdx.x == 0) s_mean = mean * __fdividef(1.0f, n); __syncthreads(); local_out.x = local_out.x - s_mean; local_out.y = local_out.y - s_mean; variance = blockReduceSum<float>(local_out.x * local_out.x + local_out.y * local_out.y); if(threadIdx.x == 0){ s_variance = variance * __fdividef(1.0f, n) + 1e-6f; s_variance = rsqrtf(s_variance); } __syncthreads(); half2 gamma_tmp = __ldg(gamma+threadIdx.x); half2 beta_tmp = __ldg(beta+threadIdx.x); local_out.x = (local_out.x * s_variance) * static_cast<float>(gamma_tmp.x) + static_cast<float>(beta_tmp.x); local_out.y = (local_out.y * s_variance) * static_cast<float>(gamma_tmp.y) + static_cast<float>(beta_tmp.y); bias_tmp.x = half(local_out.x); bias_tmp.y = half(local_out.y); output[idx] = bias_tmp; } template<typename T> void add_bias_input_layernorm_COL32_int8I_DataTypeO_kernelLauncher(T *output, const int8_t *input1, const int8_t *input2, const T *bias, const T *gamma, const T *beta, int m, int n, cudaStream_t stream, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr) { dim3 grid(m); dim3 block(n); if (sizeof(T) == sizeof(half)){ assert(n/2 <= 1024 && n%2 == 0); block.x = n/2; add_bias_input_layernorm_COL32_int8I_DataTypeO<<<grid, block, 0, stream>>>((half2*)output, input1, input2, (const half2*)bias, (const half2*)gamma, (const half2*)beta, m, n, input1_deQFactor_ptr, input2_deQFactor_ptr); } else{ assert(n <= 1024); add_bias_input_layernorm_COL32_int8I_DataTypeO<T><<<grid, block, 0, stream>>>(output, input1, input2, bias, gamma, beta, m, n, input1_deQFactor_ptr, input2_deQFactor_ptr); } } template void add_bias_input_layernorm_COL32_int8I_DataTypeO_kernelLauncher<float>(float* output, const int8_t* input1, const int8_t* input2, const float* bias, const float* gamma, const float* beta, int m, int n, cudaStream_t stream, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr); template void add_bias_input_layernorm_COL32_int8I_DataTypeO_kernelLauncher<half>(half* output, const int8_t* input1, const int8_t* input2, const half* bias, const half* gamma, const half* beta, int m, int n, cudaStream_t stream, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr); //input1/input2/out matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n) //(grid, block) must be (m, n/4) //using char4 template <typename T> __global__ void add_bias_input_layernorm_COL32_int8IO(int8_t* output, const int8_t* input1, const int8_t* input2, const T* bias, const T* gamma, const T* beta, int m, int n, const float *input1_deQFactor_ptr, const float 
*input2_deQFactor_ptr, const float *output_scale_ptr) { const float input1_deQFactor = __ldg(input1_deQFactor_ptr); const float input2_deQFactor = __ldg(input2_deQFactor_ptr); const float output_scale = __ldg(output_scale_ptr); int col_start = threadIdx.x << 2; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out[4]; int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31)) >> 2; char4 *outTmpPtr = (char4*)output; char4 *input1TmpPtr = (char4*)input1; char4 *input2TmpPtr = (char4*)input2; char4 input1Tmp = __ldg(input1TmpPtr+outIdx); char4 input2Tmp = __ldg(input2TmpPtr+outIdx); int col_start_tmp = col_start; local_out[0] = static_cast<float>(input2Tmp.x)*input2_deQFactor + static_cast<float>(input1Tmp.x)*input1_deQFactor + static_cast<float>(__ldg(bias+col_start_tmp)); col_start_tmp = col_start_tmp + 1; local_out[1] = static_cast<float>(input2Tmp.y)*input2_deQFactor + static_cast<float>(input1Tmp.y)*input1_deQFactor + static_cast<float>(__ldg(bias+col_start_tmp)); col_start_tmp = col_start_tmp + 1; local_out[2] = static_cast<float>(input2Tmp.z)*input2_deQFactor + static_cast<float>(input1Tmp.z)*input1_deQFactor + static_cast<float>(__ldg(bias+col_start_tmp)); col_start_tmp = col_start_tmp + 1; local_out[3] = static_cast<float>(input2Tmp.w)*input2_deQFactor + static_cast<float>(input1Tmp.w)*input1_deQFactor + static_cast<float>(__ldg(bias+col_start_tmp)); mean = blockReduceSum<float>(local_out[0] + local_out[1] + local_out[2] + local_out[3]); if(threadIdx.x == 0) s_mean = mean * __fdividef(1.0f, n); __syncthreads(); local_out[0] = local_out[0] - s_mean; local_out[1] = local_out[1] - s_mean; local_out[2] = local_out[2] - s_mean; local_out[3] = local_out[3] - s_mean; variance = blockReduceSum<float>(local_out[0] * local_out[0] + local_out[1] * local_out[1] + local_out[2] * local_out[2] + local_out[3] * local_out[3] ); if(threadIdx.x == 0){ s_variance = variance * __fdividef(1.0f, n) + 1e-6f; s_variance = rsqrtf(s_variance); } __syncthreads(); local_out[0] = (local_out[0] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start)); input2Tmp.x = float_to_int8_rn(local_out[0] * output_scale); col_start = col_start+1; local_out[1] = (local_out[1] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start)); input2Tmp.y = float_to_int8_rn(local_out[1] * output_scale); col_start = col_start+1; local_out[2] = (local_out[2] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start)); input2Tmp.z = float_to_int8_rn(local_out[2] * output_scale); col_start = col_start+1; local_out[3] = (local_out[3] * s_variance) * static_cast<float>(__ldg(gamma+col_start)) + static_cast<float>(__ldg(beta+col_start)); input2Tmp.w = float_to_int8_rn(local_out[3] * output_scale); outTmpPtr[outIdx] = input2Tmp; } template <> __global__ void add_bias_input_layernorm_COL32_int8IO(int8_t* output, const int8_t* input1, const int8_t* input2, const half2* bias, const half2* gamma, const half2* beta, int m, int n, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr) { const float input1_deQFactor = __ldg(input1_deQFactor_ptr); const float input2_deQFactor = __ldg(input2_deQFactor_ptr); const float output_scale = __ldg(output_scale_ptr); int col_start = threadIdx.x << 2; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float 
local_out[4]; int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31)) >> 2; char4 *outTmpPtr = (char4*)output; char4 *input1TmpPtr = (char4*)input1; char4 *input2TmpPtr = (char4*)input2; char4 input1Tmp = __ldg(input1TmpPtr + outIdx); char4 input2Tmp = __ldg(input2TmpPtr + outIdx); int col_start_tmp = col_start; half2 biasTmp = __ldg(bias + (col_start_tmp >> 1)); local_out[0] = static_cast<float>(input2Tmp.x)*input2_deQFactor + static_cast<float>(input1Tmp.x)*input1_deQFactor + static_cast<float>(biasTmp.x); col_start_tmp = col_start_tmp + 1; local_out[1] = static_cast<float>(input2Tmp.y)*input2_deQFactor + static_cast<float>(input1Tmp.y)*input1_deQFactor + static_cast<float>(biasTmp.y); col_start_tmp = col_start_tmp + 1; biasTmp = __ldg(bias + (col_start_tmp >> 1)); local_out[2] = static_cast<float>(input2Tmp.z)*input2_deQFactor + static_cast<float>(input1Tmp.z)*input1_deQFactor + static_cast<float>(biasTmp.x); col_start_tmp = col_start_tmp + 1; local_out[3] = static_cast<float>(input2Tmp.w)*input2_deQFactor + static_cast<float>(input1Tmp.w)*input1_deQFactor + static_cast<float>(biasTmp.y); mean = blockReduceSum<float>(local_out[0] + local_out[1] + local_out[2] + local_out[3]); if(threadIdx.x == 0) s_mean = mean * __fdividef(1.0f, n); __syncthreads(); local_out[0] = local_out[0] - s_mean; local_out[1] = local_out[1] - s_mean; local_out[2] = local_out[2] - s_mean; local_out[3] = local_out[3] - s_mean; variance = blockReduceSum<float>(local_out[0] * local_out[0] + local_out[1] * local_out[1] + local_out[2] * local_out[2] + local_out[3] * local_out[3] ); if(threadIdx.x == 0){ s_variance = variance * __fdividef(1.0f, n) + 1e-6f; s_variance = rsqrtf(s_variance); } __syncthreads(); col_start_tmp = col_start >> 1; biasTmp = __ldg(gamma+col_start_tmp); half2 betaTmp = __ldg(beta+col_start_tmp); local_out[0] = (local_out[0] * s_variance) * static_cast<float>(biasTmp.x) + static_cast<float>(betaTmp.x); input2Tmp.x = float_to_int8_rn(local_out[0] * output_scale); col_start = col_start+1; local_out[1] = (local_out[1] * s_variance) * static_cast<float>(biasTmp.y) + static_cast<float>(betaTmp.y); input2Tmp.y = float_to_int8_rn(local_out[1] * output_scale); col_start = col_start+1; col_start_tmp = col_start >> 1; biasTmp = __ldg(gamma+col_start_tmp); betaTmp = __ldg(beta+col_start_tmp); local_out[2] = (local_out[2] * s_variance) * static_cast<float>(biasTmp.x) + static_cast<float>(betaTmp.x); input2Tmp.z = float_to_int8_rn(local_out[2] * output_scale); col_start = col_start+1; local_out[3] = (local_out[3] * s_variance) * static_cast<float>(biasTmp.y) + static_cast<float>(betaTmp.y); input2Tmp.w = float_to_int8_rn(local_out[3] * output_scale); outTmpPtr[outIdx] = input2Tmp; } template<typename T> void add_bias_input_layernorm_COL32_int8IO_kernelLauncher(int8_t* output, const int8_t* input1, const int8_t* input2, const T* bias, const T* gamma, const T* beta, int m, int n, cudaStream_t stream, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr) { dim3 grid(m); dim3 block(n/4); assert(n <= 1024); if (sizeof(T) == sizeof(half)){ add_bias_input_layernorm_COL32_int8IO<<<grid, block, 0, stream>>>(output, input1, input2, (const half2*)bias, (const half2*)gamma, (const half2*)beta, m, n, input1_deQFactor_ptr, input2_deQFactor_ptr, output_scale_ptr); } else{ add_bias_input_layernorm_COL32_int8IO<T><<<grid, block, 0, stream>>>(output, input1, input2, bias, gamma, beta, m, n, input1_deQFactor_ptr, input2_deQFactor_ptr, output_scale_ptr); } } 
template void add_bias_input_layernorm_COL32_int8IO_kernelLauncher<float>(int8_t* output, const int8_t* input1, const int8_t* input2, const float* bias, const float* gamma, const float* beta, int m, int n, cudaStream_t stream, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr); template void add_bias_input_layernorm_COL32_int8IO_kernelLauncher<half>(int8_t* output, const int8_t* input1, const int8_t* input2, const half* bias, const half* gamma, const half* beta, int m, int n, cudaStream_t stream, const float *input1_deQFactor_ptr, const float *input2_deQFactor_ptr, const float *output_scale_ptr); //input1/input2/output matrix with layout of cublasLt CUBLASLT_ORDER_COL32 (m*n) //(grid, block) must be (m, n) //for per_channel_quantization for weight __global__ void add_bias_input_layernorm_COL32_int32I_DataTypeO(float* output, const int32_t* input1, const float* input2, const float* bias, const float* gamma, const float* beta, int m, int n, const float* weight_amax, const float *input1_amax_ptr) { const float input1_amax = __ldg(input1_amax_ptr); int col_start = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out; int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31)); float tmp = static_cast<float>(__ldg(input1 + outIdx)) * __ldg(weight_amax + col_start) * input1_amax * 0.000062f; //(1/127/127); float inputTmp = __ldg(input2 + outIdx); local_out = tmp + inputTmp + __ldg(bias + col_start); mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = __fdividef(mean, n); __syncthreads(); local_out = local_out - s_mean; variance = blockReduceSum<float>(local_out * local_out); if(threadIdx.x == 0){ s_variance = __fdividef(variance, n) + 1e-6f; s_variance = rsqrtf(s_variance); } __syncthreads(); local_out = (local_out * s_variance) * __ldg(gamma + col_start) + __ldg(beta + col_start); output[outIdx] = local_out; } __global__ void add_bias_input_layernorm_COL32_int32I_DataTypeO(half2* output, const int2* input1, const half2* input2, const half2* bias, const half2* gamma, const half2* beta, int m, int n, const float2* weight_amax, const float *input1_amax_ptr) { int col_start = threadIdx.x << 1; const float input1_amax = __ldg(input1_amax_ptr); __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out; int outIdx = ((col_start & 0xffffffe0)*m+(blockIdx.x << 5) + (col_start&31)) >> 1; const int2 input1Tmp = __ldg(input1 + outIdx); const float2 weightTmp = __ldg(weight_amax + threadIdx.x); float2 addTmp2; addTmp2.x = static_cast<float>(input1Tmp.x) * weightTmp.x * input1_amax * 0.000062f; //(1/127/127); addTmp2.y = static_cast<float>(input1Tmp.y) * weightTmp.y * input1_amax * 0.000062f; //(1/127/127); const half2 inputTmp = __ldg(input2 + outIdx); const half2 biasTmp = __ldg(bias + threadIdx.x); local_out = __half22float2(__hadd2(inputTmp, biasTmp)); local_out.x = local_out.x + addTmp2.x; local_out.y = local_out.y + addTmp2.y; mean = blockReduceSum<float>(local_out.x + local_out.y); if(threadIdx.x == 0) s_mean = __fdividef(mean, n); __syncthreads(); local_out.x = local_out.x - s_mean; local_out.y = local_out.y - s_mean; variance = blockReduceSum<float>(local_out.x*local_out.x + local_out.y*local_out.y); if(threadIdx.x == 0){ s_variance = __fdividef(variance, n) + 1e-6f; s_variance = rsqrtf(s_variance); } __syncthreads(); float2 outputTmp; const half2 gammaTmp = __ldg(gamma + threadIdx.x); 
const half2 betaTmp = __ldg(beta + threadIdx.x); outputTmp.x = (local_out.x * s_variance) * static_cast<float>(gammaTmp.x) + static_cast<float>(betaTmp.x); outputTmp.y = (local_out.y * s_variance) * static_cast<float>(gammaTmp.y) + static_cast<float>(betaTmp.y); output[outIdx] = __float22half2_rn(outputTmp); } template <typename T> void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher(T* output, const int32_t* input1, const T* input2, const T* bias, const T* gamma, const T* beta, int m, int n, cudaStream_t stream, const float* weight_amax, const float* input1_amax_ptr){ dim3 grid(m); dim3 block(n); if (sizeof(T) == sizeof(half)){ block.x /= 2; assert(block.x <= 1024); add_bias_input_layernorm_COL32_int32I_DataTypeO<<<grid, block, 0, stream>>>((half2 *)output, (const int2*)input1, (const half2 *)input2, (const half2 *)bias, (const half2 *)gamma, (const half2 *)beta, m, n, (const float2*)weight_amax, input1_amax_ptr); } else{ assert(block.x <= 1024); add_bias_input_layernorm_COL32_int32I_DataTypeO<<<grid, block, 0, stream>>>((float *)output, input1, (const float*)input2, (const float*)bias, (const float*)gamma, (const float*)beta, m, n, weight_amax, input1_amax_ptr); } } template void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher<float>(float* output, const int32_t* input1, const float* input2, const float* bias, const float* gamma, const float* beta, int m, int n, cudaStream_t stream, const float* weight_amax, const float *input1_amax_ptr); template void add_bias_input_layernorm_COL32_int32I_DataTypeO_kernelLauncher<half>(half* output, const int32_t* input1, const half* input2, const half* bias, const half* gamma, const half* beta, int m, int n, cudaStream_t stream, const float* weight_amax, const float *input1_amax_ptr); //src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32 //dst is of m = batch_size*seq_len, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32 //grid(seq_len, batch_size) //block(size_per_head/4, head_num) //assume size_per_head is multiples of 32 __global__ void transpose_COL32_kernel(char4* dst, const int4* src, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor, const float* out_scale_ptr, const int batch_size_x_seq_len, const int seq_len_x_size_per_head) { const float scale = __ldg(v_buf_addBias_deQFactor) * __ldg(qk_afterSM_deQFactor) * __ldg(out_scale_ptr); int threadIdx4 = threadIdx.x << 2; int batch_id = blockIdx.y; int seq_id = blockIdx.x; int head_id = threadIdx.y; //get the (row, col) output layout of m*k //m = batch_size*seq_len //k = head_num*size_per_head int mk_row = batch_id*seq_len + seq_id; int mk_col = head_id*size_per_head + threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m = 32*batch_size*seq_len int COL32_row = (mk_row << 5) + (mk_col&31); //int COL32_col = mk_col >> 5; int outIdx = ((mk_col & 0xffffffe0)*batch_size_x_seq_len + COL32_row) >> 2; //get the (row, col) input layout of m'*k' //m' = seq_len //k' = size_per_head mk_row = seq_id; mk_col = threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len COL32_row = (mk_row << 5) + (mk_col&31); //COL32_col = mk_col >> 5; int inIdx = ((batch_id*head_num + head_id)*seq_len_x_size_per_head + (mk_col & 0xffffffe0)*seq_len + COL32_row) >> 2; char4 tmp; int4 srcTmp4 = __ldg(src + inIdx); tmp.x = float_to_int8_rn(srcTmp4.x*scale); tmp.y = 
float_to_int8_rn(srcTmp4.y*scale); tmp.z = float_to_int8_rn(srcTmp4.z*scale); tmp.w = float_to_int8_rn(srcTmp4.w*scale); dst[outIdx] = tmp; } void transpose_COL32_kernelLauncher(int8_t* dst, const int* src, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor, const float* out_scale_ptr, cudaStream_t stream){ assert(size_per_head%32==0); transpose_COL32_kernel<<<dim3(seq_len, batch_size), dim3(size_per_head/4, head_num), 0, stream>>>((char4*)dst, (const int4*)src, batch_size, seq_len, head_num, size_per_head, v_buf_addBias_deQFactor, qk_afterSM_deQFactor, out_scale_ptr, batch_size*seq_len, seq_len*size_per_head); } //src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32 //dst is of m = batch_size*seq_len, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32 //grid(seq_len, batch_size) //block(size_per_head/4, head_num) //assume size_per_head is multiples of 32 __global__ void transpose_COL32_kernel(int8_t* dst, const int8_t* src, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *bmm2_deQFactor, const float* out_scale_ptr, const int batch_size_x_seq_len, const int seq_len_x_size_per_head) { int threadIdx4 = threadIdx.x << 2; int batch_id = blockIdx.y; int seq_id = blockIdx.x; int head_id = threadIdx.y; //get the (row, col) output layout of m*k //m = batch_size*seq_len //k = head_num*size_per_head int mk_row = batch_id*seq_len + seq_id; int mk_col = head_id*size_per_head + threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m = 32*batch_size*seq_len int COL32_row = (mk_row << 5) + (mk_col&31); int COL32_col = mk_col >> 5; int outIdx = ((COL32_col << 5)*batch_size_x_seq_len + COL32_row) >> 2; //get the (row, col) input layout of m'*k' //m' = seq_len //k' = size_per_head mk_row = seq_id; mk_col = threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len COL32_row = (mk_row << 5) + (mk_col&31); COL32_col = mk_col >> 5; int inIdx = ((batch_id*head_num + head_id)*seq_len_x_size_per_head + (COL32_col << 5 )*seq_len + COL32_row) >> 2; const char4* src_ptr4 = (const char4*)src; char4 *dst_ptr4 = (char4 *)dst; dst_ptr4[outIdx] = __ldg(src_ptr4 + inIdx); } void transpose_COL32_kernelLauncher(int8_t* dst, const int8_t* src, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *bmm2_deQFactor, const float* out_scale_ptr, cudaStream_t stream){ assert(size_per_head%32==0); transpose_COL32_kernel<<<dim3(seq_len, batch_size), dim3(size_per_head/4, head_num), 0, stream>>>(dst, src, batch_size, seq_len, head_num, size_per_head, bmm2_deQFactor, out_scale_ptr, batch_size*seq_len, seq_len*size_per_head); } //src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32 //dst is of m = valid_word_num, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32 //grid(seq_len, batch_size) //block(size_per_head/4, head_num) //assume size_per_head is multiples of 32 __global__ void transpose_COL32_rebuild_padding_kernel(int8_t* dst, const int32_t* src, const int* sequence_id_map, const int valid_word_num, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor, const float* out_scale_ptr, const int seq_len_x_size_per_head) { const float scale = 
__ldg(v_buf_addBias_deQFactor) * __ldg(qk_afterSM_deQFactor) * __ldg(out_scale_ptr); int threadIdx4 = threadIdx.x << 2; int batch_id = blockIdx.y; int seq_id = blockIdx.x; int head_id = threadIdx.y; //get the (row, col) output layout of m*k //m = valid_word_num //k = head_num*size_per_head int mk_row = __ldg(sequence_id_map + batch_id*seq_len + seq_id); if (mk_row >= 0){ int mk_col = head_id*size_per_head + threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m = 32*valid_word_num int COL32_row = (mk_row << 5) + (mk_col&31); int COL32_col = mk_col >> 5; int outIdx = ((COL32_col << 5)*valid_word_num + COL32_row) >> 2; //get the (row, col) input layout of m'*k' //m' = seq_len //k' = size_per_head mk_row = seq_id; mk_col = threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len COL32_row = (mk_row << 5) + (mk_col&31); COL32_col = mk_col >> 5; int inIdx = (batch_id*head_num + head_id)*seq_len_x_size_per_head + (COL32_col << 5 )*seq_len + COL32_row; char4 tmp; tmp.x = float_to_int8_rn(__ldg(src+inIdx)*scale); tmp.y = float_to_int8_rn(__ldg(src+inIdx+1)*scale); tmp.z = float_to_int8_rn(__ldg(src+inIdx+2)*scale); tmp.w = float_to_int8_rn(__ldg(src+inIdx+3)*scale); char4 *dst_ptr4 = (char4 *)dst; dst_ptr4[outIdx] = tmp; } } void transpose_COL32_rebuild_padding_kernelLauncher(int8_t* dst, const int* src, const int* sequence_id_map, const int valid_word_num, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *v_buf_addBias_deQFactor, const float* qk_afterSM_deQFactor, const float* out_scale_ptr, cudaStream_t stream){ assert(size_per_head%32==0); transpose_COL32_rebuild_padding_kernel<<<dim3(seq_len, batch_size), dim3(size_per_head/4, head_num), 0, stream>>>(dst, src, sequence_id_map, valid_word_num, batch_size, seq_len, head_num, size_per_head, v_buf_addBias_deQFactor, qk_afterSM_deQFactor, out_scale_ptr, seq_len*size_per_head); } //src is the result of batch MM, whose size is batch_size*head_num*(seq_len, size_per_head), CUBLASLT_ORDER_COL32 //dst is of m = valid_word_num, k(n) = head_num*size_per_head, CUBLASLT_ORDER_COL32 //grid(seq_len, batch_size) //block(size_per_head/4, head_num) //assume size_per_head is multiples of 32 __global__ void transpose_COL32_rebuild_padding_kernel(int8_t* dst, const int8_t* src, const int* sequence_id_map, const int valid_word_num, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *bmm2_deQFactor, const float* out_scale_ptr, const int seq_len_x_size_per_head) { int threadIdx4 = threadIdx.x << 2; int batch_id = blockIdx.y; int seq_id = blockIdx.x; int head_id = threadIdx.y; //get the (row, col) output layout of m*k //m = valid_word_num //k = head_num*size_per_head int mk_row = __ldg(sequence_id_map + batch_id*seq_len + seq_id); if (mk_row >= 0){ int mk_col = head_id*size_per_head + threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m = 32*valid_word_num int COL32_row = (mk_row << 5) + (mk_col&31); int COL32_col = mk_col >> 5; int outIdx = ((COL32_col << 5)*valid_word_num + COL32_row) >> 2; //get the (row, col) input layout of m'*k' //m' = seq_len //k' = size_per_head mk_row = seq_id; mk_col = threadIdx4; //get the (row, col) layout of COL32; leading dimension = 32*m' = 32*seq_len COL32_row = (mk_row << 5) + (mk_col&31); COL32_col = mk_col >> 5; int inIdx = ((batch_id*head_num + head_id)*seq_len_x_size_per_head + (COL32_col << 5 )*seq_len + COL32_row) >> 2; const char4* src_ptr4 = (const char4*)src; 
char4 *dst_ptr4 = (char4 *)dst; dst_ptr4[outIdx] = __ldg(src_ptr4 + inIdx); } } void transpose_COL32_rebuild_padding_kernelLauncher(int8_t* dst, const int8_t* src, const int* sequence_id_map, const int valid_word_num, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const float *bmm2_deQFactor, const float* out_scale_ptr, cudaStream_t stream){ assert(size_per_head%32==0); transpose_COL32_rebuild_padding_kernel<<<dim3(seq_len, batch_size), dim3(size_per_head/4, head_num), 0, stream>>>(dst, src, sequence_id_map, valid_word_num, batch_size, seq_len, head_num, size_per_head, bmm2_deQFactor, out_scale_ptr, seq_len*size_per_head); } __global__ void quantized_kernel(char4 *dst, const float4* src, const int size_div_4, const float* scale_ptr) { int tid = (blockIdx.x*blockDim.x + threadIdx.x); if (tid < size_div_4){ const float scale = __ldg(scale_ptr); char4 tmp; const float4 floatTmp = __ldg(src + tid); tmp.x = float_to_int8_rn(floatTmp.x*scale); tmp.y = float_to_int8_rn(floatTmp.y*scale); tmp.z = float_to_int8_rn(floatTmp.z*scale); tmp.w = float_to_int8_rn(floatTmp.w*scale); dst[tid] = tmp; } } __global__ void quantized_kernel(char4 *dst, const half2* src, const int size_div_4, const float* scale_ptr) { int tid = (blockIdx.x*blockDim.x + threadIdx.x); if (tid < size_div_4){ const float scale = __ldg(scale_ptr); char4 tmp; int src_id = tid << 1; const half2 half2Tmp = __ldg(src + src_id); tmp.x = float_to_int8_rn(static_cast<float>(half2Tmp.x)*scale); tmp.y = float_to_int8_rn(static_cast<float>(half2Tmp.y)*scale); const half2 half2Tmp2 = __ldg(src + src_id + 1); tmp.z = float_to_int8_rn(static_cast<float>(half2Tmp2.x)*scale); tmp.w = float_to_int8_rn(static_cast<float>(half2Tmp2.y)*scale); dst[tid] = tmp; } } template <typename T> void quantized_kernelLauncher(int8_t* dst, const T * src, const int size, const float* scale_ptr, cudaStream_t stream) { assert(size % 4 == 0); dim3 grid((size+255)/256); dim3 block(64); if (sizeof(T) == sizeof(float)) quantized_kernel<<<grid, block, 0, stream>>>((char4*)dst, (const float4*)src, size/4, scale_ptr); else if (sizeof(T) == sizeof(half)) quantized_kernel<<<grid, block, 0, stream>>>((char4*)dst, (const half2*)src, size/4, scale_ptr); } template void quantized_kernelLauncher<float>(int8_t* dst, const float * src, const int size, const float* scale_ptr, cudaStream_t stream); template void quantized_kernelLauncher<half>(int8_t* dst, const half * src, const int size, const float* scale_ptr, cudaStream_t stream); template <typename T> __global__ void dequantized_kernel(T *dst, const int8_t* src, const int size, const float *scale_ptr) { int tid = blockIdx.x*blockDim.x + threadIdx.x; if (tid < size){ float tmp = float(src[tid]); dst[tid] = T(float(tmp) * __ldg(scale_ptr)); } } template <typename T> void dequantized_kernelLauncher(T* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream) { dim3 grid((size+255)/256); dim3 block(256); dequantized_kernel<T><<<grid, block, 0, stream>>>(dst, src, size, scale_ptr); } template void dequantized_kernelLauncher<float>(float* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream); template void dequantized_kernelLauncher<half>(half* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream); template void dequantized_kernelLauncher<int32_t>(int32_t* dst, const int8_t * src, const int size, const float *scale_ptr, cudaStream_t stream); //layout should be COL32 template<typename T> __global__ void 
rebuild_sequence_length_padding_COL32(const T* src, T* tgt, const int* mask_offset, const int m, const int n, const int tgt_m) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int tgt_seq_id = bid + __ldg(mask_offset + bid); const int src_seq_id = bid; const int src_col32_lda = m << 5; const int tgt_col32_lda = tgt_m << 5; const int src_row_tmp = src_seq_id << 5; const int tgt_row_tmp = tgt_seq_id << 5; for(int i = tid; i < n; i += blockDim.x) { int col = i >> 5; int src_row = src_row_tmp + (i & 31); int tgt_row = tgt_row_tmp + (i & 31); tgt[col*tgt_col32_lda + tgt_row] = __ldg(src + col*src_col32_lda + src_row); } } //for half input //layout should be COL32 template<> __global__ void rebuild_sequence_length_padding_COL32(const half2* src, half2* tgt, const int* mask_offset, const int m, const int n, const int tgt_m) { const int tid2 = threadIdx.x << 1; const int bid = blockIdx.x; const int tgt_seq_id = bid + __ldg(mask_offset + bid); const int src_seq_id = bid; const int src_col32_lda = m << 5; const int tgt_col32_lda = tgt_m << 5; const int src_row_tmp = src_seq_id << 5; const int tgt_row_tmp = tgt_seq_id << 5; for(int i = tid2; i < n; i += 2*blockDim.x) { int col = i >> 5; int src_row = src_row_tmp + (i & 31); int tgt_row = tgt_row_tmp + (i & 31); tgt[(col*tgt_col32_lda + tgt_row) >> 1] = __ldg(src + ((col*src_col32_lda + src_row) >> 1)); } } //for int8 input //layout should be COL32 template<> __global__ void rebuild_sequence_length_padding_COL32(const char4* src, char4* tgt, const int* mask_offset, const int m, const int n, const int tgt_m) { const int tid4 = threadIdx.x << 2; const int bid = blockIdx.x; const int tgt_seq_id = bid + __ldg(mask_offset + bid); const int src_seq_id = bid; const int src_col32_lda = m << 5; const int tgt_col32_lda = tgt_m << 5; const int src_row_tmp = src_seq_id << 5; const int tgt_row_tmp = tgt_seq_id << 5; for(int i = tid4; i < n; i += 4*blockDim.x) { int col = i >> 5; int src_row = src_row_tmp + (i & 31); int tgt_row = tgt_row_tmp + (i & 31); tgt[(col*tgt_col32_lda + tgt_row) >> 2] = __ldg(src + ((col*src_col32_lda + src_row) >> 2)); } } template<typename T> void rebuild_sequence_length_padding_COL32_kernelLauncher(const T* src, T* tgt, const int* mask_offset, const int m, const int n, const int tgt_m, cudaStream_t stream) { // src: [valid_word_num, hidden_dim] // tgt: [batch_size*max_seq_len, hidden_dim] dim3 block(256); if (sizeof(T) == sizeof(half)) { if (n/2 < 256) block.x = n/2; rebuild_sequence_length_padding_COL32<<<m, block, 0, stream>>>((const half2*)src, (half2*)tgt, mask_offset, m, n, tgt_m); } else if (sizeof(T) == sizeof(int8_t)) { if (n/4 < 256) block.x = n/4; rebuild_sequence_length_padding_COL32<<<m, block, 0, stream>>>((const char4*)src, (char4*)tgt, mask_offset, m, n, tgt_m); } else rebuild_sequence_length_padding_COL32<<<m, block, 0, stream>>>(src, tgt, mask_offset, m, n, tgt_m); } template void rebuild_sequence_length_padding_COL32_kernelLauncher(const int8_t* src, int8_t* tgt, const int* mask_offset, const int m, const int n, const int tgt_m, cudaStream_t stream); template void rebuild_sequence_length_padding_COL32_kernelLauncher(const half* src, half* tgt, const int* mask_offset, const int m, const int n, const int tgt_m, cudaStream_t stream); template void rebuild_sequence_length_padding_COL32_kernelLauncher(const float* src, float* tgt, const int* mask_offset, const int m, const int n, const int tgt_m, cudaStream_t stream); }//namespace
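// --- Illustrative sketch, not part of the original file ---
// A standalone helper spelling out the CUBLASLT_ORDER_COL32 index arithmetic the kernels above
// rely on. For an (r, c) element of an m-row matrix they compute
//   COL32_row = (r << 5) + (c & 31), COL32_col = c >> 5, offset = (COL32_col << 5) * m + COL32_row,
// i.e. columns are grouped into tiles of 32, the 32 columns of one row are contiguous inside a
// tile, and tiles follow each other with leading dimension 32 * m. The name col32_offset is an
// assumption made for this sketch only; the kernels above additionally shift the offset by
// >> 2 (char4) or >> 1 (half2) when addressing vectorized loads/stores.
static inline __host__ __device__ int col32_offset(int r, int c, int m /* rows of the matrix */)
{
  int tile = c >> 5;          // which 32-column tile
  int col_in_tile = c & 31;   // column inside the tile
  return (tile << 5) * m + (r << 5) + col_in_tile;  // element offset, not byte offset
}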
namespace paddle { namespace lite { namespace kernels { namespace cuda { using Tensor = lite::Tensor; using DDim = lite::DDim; #define MAX_VAL(a, b) (((a) > (b)) ? (a) : (b)) #define MIN_VAL(a, b) (((a) < (b)) ? (a) : (b)) __global__ void max_pool_kernel(const float* input, float* output, const int spatial_in, const int spatial_out, const int in_h, const int in_w, const int out_h, const int out_w, const int pad_h, const int pad_w, const int win_h, const int win_w, const int stride_h, const int stride_w, const int total_threads) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < total_threads) { const int nc_id = gid / spatial_out; const int w_id = gid % spatial_out % out_w; const int h_id = gid % spatial_out / out_w; const int w_s = w_id * stride_w - pad_w; const int iw_s = MAX_VAL(w_s, 0); const int iw_e = MIN_VAL(w_s + win_w, in_w); const int w_loop = iw_e - iw_s; const int h_s = h_id * stride_h - pad_h; const int ih_s = MAX_VAL(h_s, 0); const int ih_e = MIN_VAL(h_s + win_h, in_h); const int h_loop = ih_e - ih_s; const float* in_p = input + nc_id * spatial_in + ih_s * in_w + iw_s; float max_val = -FLT_MAX; for (int i = 0; i < h_loop; ++i) { for (int j = 0; j < w_loop; ++j) { max_val = MAX_VAL(max_val, *(in_p + j)); } in_p += in_w; } max_val = max_val == -FLT_MAX ? 0.f : max_val; output[nc_id * spatial_out + h_id * out_w + w_id] = max_val; } } __global__ void adaptive_max_pool_kernel(const float* input, float* output, const int spatial_in, const int spatial_out, const int in_h, const int in_w, const int out_h, const int out_w, const int pad_h, const int pad_w, const int win_h, const int win_w, const int stride_h, const int stride_w, const int total_threads) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < total_threads) { const int nc_id = gid / spatial_out; const int w_id = gid % spatial_out % out_w; const int h_id = gid % spatial_out / out_w; const int iw_s = floor(static_cast<double>(w_id * in_w) / out_w); const int iw_e = ceil(static_cast<double>((w_id + 1) * in_w) / out_w); const int w_loop = iw_e - iw_s; const int ih_s = floor(static_cast<double>(h_id * in_h) / out_h); const int ih_e = ceil(static_cast<double>((h_id + 1) * in_h) / out_h); const int h_loop = ih_e - ih_s; const float* in_p = input + nc_id * spatial_in + ih_s * in_w + iw_s; float max_val = -FLT_MAX; for (int i = 0; i < h_loop; ++i) { for (int j = 0; j < w_loop; ++j) { max_val = MAX_VAL(max_val, *(in_p + j)); } in_p += in_w; } output[nc_id * spatial_out + h_id * out_w + w_id] = max_val; } } __global__ void avg_pool_kernel(const float* input, float* output, const int spatial_in, const int spatial_out, const int in_h, const int in_w, const int out_h, const int out_w, const int pad_h, const int pad_w, const int win_h, const int win_w, const int stride_h, const int stride_w, bool exclusive, const int total_threads) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < total_threads) { const int nc_id = gid / spatial_out; const int w_id = gid % spatial_out % out_w; const int h_id = gid % spatial_out / out_w; const int w_s = w_id * stride_w - pad_w; const int iw_s = MAX_VAL(w_s, 0); const int iw_e = MIN_VAL(w_s + win_w, in_w); const int w_loop = iw_e - iw_s; const int h_s = h_id * stride_h - pad_h; const int ih_s = MAX_VAL(h_s, 0); const int ih_e = MIN_VAL(h_s + win_h, in_h); const int h_loop = ih_e - ih_s; const float* in_p = input + nc_id * spatial_in + ih_s * in_w + iw_s; float sum_val = 0.f; for (int i = 0; i < h_loop; ++i) { for (int j = 0; j < w_loop; ++j) { sum_val += 
*(in_p + j); } in_p += in_w; } int pool_size = exclusive ? h_loop * w_loop : win_w * win_h; pool_size = pool_size == 0 ? 1 : pool_size; output[nc_id * spatial_out + h_id * out_w + w_id] = sum_val / pool_size; } } __global__ void adaptive_avg_pool_kernel(const float* input, float* output, const int spatial_in, const int spatial_out, const int in_h, const int in_w, const int out_h, const int out_w, const int pad_h, const int pad_w, const int win_h, const int win_w, const int stride_h, const int stride_w, const int total_threads) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < total_threads) { const int nc_id = gid / spatial_out; const int w_id = gid % spatial_out % out_w; const int h_id = gid % spatial_out / out_w; const int iw_s = floor(static_cast<double>(w_id * in_w) / out_w); const int iw_e = ceil(static_cast<double>((w_id + 1) * in_w) / out_w); const int w_loop = iw_e - iw_s; const int ih_s = floor(static_cast<double>(h_id * in_h) / out_h); const int ih_e = ceil(static_cast<double>((h_id + 1) * in_h) / out_h); const int h_loop = ih_e - ih_s; const float* in_p = input + nc_id * spatial_in + ih_s * in_w + iw_s; float sum_val = 0.f; for (int i = 0; i < h_loop; ++i) { for (int j = 0; j < w_loop; ++j) { sum_val += *(in_p + j); } in_p += in_w; } int pool_size = h_loop * w_loop; pool_size = pool_size == 0 ? 1 : pool_size; output[nc_id * spatial_out + h_id * out_w + w_id] = sum_val / pool_size; } } __global__ void global_max_pool_kernel(const float* input, float* output, const int in_h, const int in_w, const int total_threads) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < total_threads) { const int spatial_in = in_h * in_w; const float* in_p = input + gid * spatial_in; int i = 0; float max_val = -0.f; // unroll 8 for (; i < spatial_in - 7; i += 8) { max_val = MAX_VAL(max_val, *(in_p + 0)); max_val = MAX_VAL(max_val, *(in_p + 1)); max_val = MAX_VAL(max_val, *(in_p + 2)); max_val = MAX_VAL(max_val, *(in_p + 3)); max_val = MAX_VAL(max_val, *(in_p + 4)); max_val = MAX_VAL(max_val, *(in_p + 5)); max_val = MAX_VAL(max_val, *(in_p + 6)); max_val = MAX_VAL(max_val, *(in_p + 7)); in_p += 8; } for (; i < spatial_in; i++) { max_val = MAX_VAL(max_val, *in_p); in_p++; } output[gid] = max_val; } } __global__ void global_avg_pool_kernel(const float* input, float* output, const int in_h, const int in_w, const int total_threads) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < total_threads) { const int spatial_in = in_h * in_w; const float* in_p = input + gid * spatial_in; int i = 0; float sum_val = 0.f; // unroll 8 for (; i < spatial_in - 7; i += 8) { sum_val += *in_p++; sum_val += *in_p++; sum_val += *in_p++; sum_val += *in_p++; sum_val += *in_p++; sum_val += *in_p++; sum_val += *in_p++; sum_val += *in_p++; } for (; i < spatial_in; i++) { sum_val += *in_p++; } output[gid] = sum_val / spatial_in; } } void PoolCompute::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); bool exclusive = param.exclusive; bool adaptive = param.adaptive; auto x_dims = param.x->dims(); auto out_dims = param.output->dims(); auto paddings = *param.paddings; const int in_h = x_dims[2]; const int in_w = x_dims[3]; const int out_h = out_dims[2]; const int out_w = out_dims[3]; const int spatial_in = in_h * in_w; const int spatial_out = out_h * out_w; const int win_h = param.ksize[0]; const int win_w = param.ksize[1]; const int stride_h = param.strides[0]; const int stride_w = param.strides[1]; const 
int pad_h = paddings[0]; const int pad_w = paddings[2]; const int total_threads = out_dims.production(); const int threads = 512; const int blocks = (total_threads + threads - 1) / threads; auto input_data = param.x->data<float>(); auto output_data = param.output->mutable_data<float>(TARGET(kCUDA)); if (param.global_pooling) { if (param.pooling_type == "max") { global_max_pool_kernel<<<blocks, threads, 0, stream>>>( input_data, output_data, in_h, in_w, total_threads); } else { global_avg_pool_kernel<<<blocks, threads, 0, stream>>>( input_data, output_data, in_h, in_w, total_threads); } } else { if (!adaptive) { if (param.pooling_type == "max") { max_pool_kernel<<<blocks, threads, 0, stream>>>(input_data, output_data, spatial_in, spatial_out, in_h, in_w, out_h, out_w, pad_h, pad_w, win_h, win_w, stride_h, stride_w, total_threads); } else { avg_pool_kernel<<<blocks, threads, 0, stream>>>(input_data, output_data, spatial_in, spatial_out, in_h, in_w, out_h, out_w, pad_h, pad_w, win_h, win_w, stride_h, stride_w, exclusive, total_threads); } } else { if (param.pooling_type == "max") { adaptive_max_pool_kernel<<<blocks, threads, 0, stream>>>(input_data, output_data, spatial_in, spatial_out, in_h, in_w, out_h, out_w, pad_h, pad_w, win_h, win_w, stride_h, stride_w, total_threads); } else { adaptive_avg_pool_kernel<<<blocks, threads, 0, stream>>>(input_data, output_data, spatial_in, spatial_out, in_h, in_w, out_h, out_w, pad_h, pad_w, win_h, win_w, stride_h, stride_w, total_threads); } } } cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) LOG(FATAL) << cudaGetErrorString(error); } inline int PoolOutputSize( int input_size, int filter_size, int padding, int stride, bool ceil_mode) { int output_size; if (!ceil_mode) { output_size = (input_size - filter_size + 2 * padding) / stride + 1; } else { output_size = (input_size - filter_size + 2 * padding + stride - 1) / stride + 1; } return output_size; } void PoolComputeNHWC::PrepareForRun() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); pool_impl_.reset(new lite::cuda::math::CudnnPool2DNHWC<PRECISION(kFloat)>); pool_impl_->init(param, &ctx); } void PoolComputeNHWC::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); const auto x_dims = param.x->dims(); std::vector<int>& ksize = param.ksize; if (param.global_pooling) { ksize.resize(static_cast<size_t>(x_dims.size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) { (*param.paddings)[i] = 0; ksize[i] = static_cast<int>(x_dims[i + 1]); } } std::vector<int64_t> output_shape({x_dims[0]}); if (param.adaptive) { output_shape.insert( output_shape.end(), param.ksize.begin(), param.ksize.end()); } else { for (size_t i = 0; i < param.ksize.size(); ++i) { output_shape.push_back(PoolOutputSize(x_dims[i + 1], param.ksize[i], (*param.paddings)[i], param.strides[i], param.ceil_mode)); } } output_shape.push_back(x_dims[3]); param.output->Resize(lite::DDim(output_shape)); pool_impl_->run(param); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) LOG(FATAL) << cudaGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL( pool2d, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::PoolCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .Finalize(); 
REGISTER_LITE_KERNEL(pool2d, kCUDA, kFloat, kNHWC, paddle::lite::kernels::cuda::PoolComputeNHWC, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNHWC))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNHWC))}) .Finalize();
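// --- Illustrative sketch, not part of the original file ---
// A tiny usage example for the PoolOutputSize() helper defined above, spelling out the
// floor vs. ceil_mode arithmetic. The sample sizes and the function name
// pool_output_size_example are assumptions made for this sketch only.
#include <cassert>
static void pool_output_size_example() {
  // input_size = 7, filter_size = 2, padding = 0, stride = 2
  // floor mode: (7 - 2 + 2*0) / 2 + 1 = 3
  assert(paddle::lite::kernels::cuda::PoolOutputSize(7, 2, 0, 2, /*ceil_mode=*/false) == 3);
  // ceil mode:  (7 - 2 + 2*0 + 2 - 1) / 2 + 1 = 4
  assert(paddle::lite::kernels::cuda::PoolOutputSize(7, 2, 0, 2, /*ceil_mode=*/true) == 4);
}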
namespace dgl { using runtime::NDArray; using namespace runtime::cuda; namespace aten { namespace impl { ///////////////////////////// BinaryElewise ///////////////////////////// template <typename IdType, typename Op> __global__ void _BinaryElewiseKernel( const IdType* lhs, const IdType* rhs, IdType* out, int64_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = Op::Call(lhs[tx], rhs[tx]); tx += stride_x; } } template <DLDeviceType XPU, typename IdType, typename Op> IdArray BinaryElewise(IdArray lhs, IdArray rhs) { const int64_t len = lhs->shape[0]; IdArray ret = NewIdArray(lhs->shape[0], lhs->ctx, lhs->dtype.bits); const IdType* lhs_data = static_cast<IdType*>(lhs->data); const IdType* rhs_data = static_cast<IdType*>(rhs->data); IdType* ret_data = static_cast<IdType*>(ret->data); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = cuda::FindNumThreads(len); int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL((_BinaryElewiseKernel<IdType, Op>), nb, nt, 0, thr_entry->stream, lhs_data, rhs_data, ret_data, len); return ret; } template IdArray BinaryElewise<kDLGPU, int32_t, arith::Add>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Sub>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Mul>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Div>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Mod>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::GT>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::LT>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::GE>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::LE>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::EQ>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::NE>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Add>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Sub>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Mul>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Div>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Mod>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::GT>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::LT>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::GE>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::LE>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::EQ>(IdArray lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::NE>(IdArray lhs, IdArray rhs); template <typename IdType, typename Op> __global__ void _BinaryElewiseKernel( const IdType* lhs, IdType rhs, IdType* out, int64_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = Op::Call(lhs[tx], rhs); tx += stride_x; } } template <DLDeviceType XPU, typename IdType, typename Op> IdArray BinaryElewise(IdArray lhs, IdType rhs) { const int64_t len = lhs->shape[0]; IdArray ret = NewIdArray(lhs->shape[0], lhs->ctx, lhs->dtype.bits); const 
IdType* lhs_data = static_cast<IdType*>(lhs->data); IdType* ret_data = static_cast<IdType*>(ret->data); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = cuda::FindNumThreads(len); int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL((_BinaryElewiseKernel<IdType, Op>), nb, nt, 0, thr_entry->stream, lhs_data, rhs, ret_data, len); return ret; } template IdArray BinaryElewise<kDLGPU, int32_t, arith::Add>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Sub>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Mul>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Div>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Mod>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::GT>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::LT>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::GE>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::LE>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::EQ>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::NE>(IdArray lhs, int32_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Add>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Sub>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Mul>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Div>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Mod>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::GT>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::LT>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::GE>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::LE>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::EQ>(IdArray lhs, int64_t rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::NE>(IdArray lhs, int64_t rhs); template <typename IdType, typename Op> __global__ void _BinaryElewiseKernel( IdType lhs, const IdType* rhs, IdType* out, int64_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = Op::Call(lhs, rhs[tx]); tx += stride_x; } } template <DLDeviceType XPU, typename IdType, typename Op> IdArray BinaryElewise(IdType lhs, IdArray rhs) { const int64_t len = rhs->shape[0]; IdArray ret = NewIdArray(rhs->shape[0], rhs->ctx, rhs->dtype.bits); const IdType* rhs_data = static_cast<IdType*>(rhs->data); IdType* ret_data = static_cast<IdType*>(ret->data); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = cuda::FindNumThreads(len); int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL((_BinaryElewiseKernel<IdType, Op>), nb, nt, 0, thr_entry->stream, lhs, rhs_data, ret_data, len); return ret; } template IdArray BinaryElewise<kDLGPU, int32_t, arith::Add>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Sub>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Mul>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Div>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::Mod>(int32_t 
lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::GT>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::LT>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::GE>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::LE>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::EQ>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int32_t, arith::NE>(int32_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Add>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Sub>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Mul>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Div>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::Mod>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::GT>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::LT>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::GE>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::LE>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::EQ>(int64_t lhs, IdArray rhs); template IdArray BinaryElewise<kDLGPU, int64_t, arith::NE>(int64_t lhs, IdArray rhs); template <typename IdType, typename Op> __global__ void _UnaryElewiseKernel( const IdType* lhs, IdType* out, int64_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = Op::Call(lhs[tx]); tx += stride_x; } } template <DLDeviceType XPU, typename IdType, typename Op> IdArray UnaryElewise(IdArray lhs) { const int64_t len = lhs->shape[0]; IdArray ret = NewIdArray(lhs->shape[0], lhs->ctx, lhs->dtype.bits); const IdType* lhs_data = static_cast<IdType*>(lhs->data); IdType* ret_data = static_cast<IdType*>(ret->data); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = cuda::FindNumThreads(len); int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL((_UnaryElewiseKernel<IdType, Op>), nb, nt, 0, thr_entry->stream, lhs_data, ret_data, len); return ret; } template IdArray UnaryElewise<kDLGPU, int32_t, arith::Neg>(IdArray lhs); template IdArray UnaryElewise<kDLGPU, int64_t, arith::Neg>(IdArray lhs); ///////////////////////////// Full ///////////////////////////// template <typename DType> __global__ void _FullKernel( DType* out, int64_t length, DType val) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = val; tx += stride_x; } } template <DLDeviceType XPU, typename DType> NDArray Full(DType val, int64_t length, DLContext ctx) { NDArray ret = NDArray::Empty({length}, DLDataTypeTraits<DType>::dtype, ctx); DType* ret_data = static_cast<DType*>(ret->data); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = cuda::FindNumThreads(length); int nb = (length + nt - 1) / nt; CUDA_KERNEL_CALL((_FullKernel<DType>), nb, nt, 0, thr_entry->stream, ret_data, length, val); return ret; } template IdArray Full<kDLGPU, int32_t>(int32_t val, int64_t length, DLContext ctx); template IdArray Full<kDLGPU, int64_t>(int64_t val, int64_t length, DLContext ctx); template IdArray Full<kDLGPU, float>(float val, int64_t length, DLContext ctx); template IdArray Full<kDLGPU, double>(double val, int64_t 
length, DLContext ctx); ///////////////////////////// Range ///////////////////////////// template <typename IdType> __global__ void _RangeKernel(IdType* out, IdType low, IdType length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = low + tx; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> IdArray Range(IdType low, IdType high, DLContext ctx) { CHECK(high >= low) << "high must be bigger than low"; const IdType length = high - low; IdArray ret = NewIdArray(length, ctx, sizeof(IdType) * 8); if (length == 0) return ret; IdType* ret_data = static_cast<IdType*>(ret->data); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = cuda::FindNumThreads(length); int nb = (length + nt - 1) / nt; CUDA_KERNEL_CALL((_RangeKernel<IdType>), nb, nt, 0, thr_entry->stream, ret_data, low, length); return ret; } template IdArray Range<kDLGPU, int32_t>(int32_t, int32_t, DLContext); template IdArray Range<kDLGPU, int64_t>(int64_t, int64_t, DLContext); ///////////////////////////// Relabel_ ////////////////////////////// template <typename IdType> __global__ void _RelabelKernel( IdType* out, int64_t length, DeviceOrderedHashTable<IdType> table) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = table.Search(out[tx])->local; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> IdArray Relabel_(const std::vector<IdArray>& arrays) { IdArray all_nodes = Concat(arrays); const int64_t total_length = all_nodes->shape[0]; if (total_length == 0) { return all_nodes; } const auto& ctx = arrays[0]->ctx; auto device = runtime::DeviceAPI::Get(ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // build node maps and get the induced nodes OrderedHashTable<IdType> node_map(total_length, ctx, thr_entry->stream); int64_t num_induced = 0; int64_t * num_induced_device = static_cast<int64_t*>( device->AllocWorkspace(ctx, sizeof(int64_t))); IdArray induced_nodes = NewIdArray(total_length, ctx, sizeof(IdType)*8); CUDA_CALL(cudaMemsetAsync( num_induced_device, 0, sizeof(*num_induced_device), thr_entry->stream)); node_map.FillWithDuplicates( all_nodes.Ptr<IdType>(), all_nodes->shape[0], induced_nodes.Ptr<IdType>(), num_induced_device, thr_entry->stream); device->CopyDataFromTo( num_induced_device, 0, &num_induced, 0, sizeof(num_induced), ctx, DGLContext{kDLCPU, 0}, DGLType{kDLInt, 64, 1}, thr_entry->stream); device->StreamSync(ctx, thr_entry->stream); device->FreeWorkspace(ctx, num_induced_device); // resize the induced nodes induced_nodes->shape[0] = num_induced; // relabel const int nt = 128; for (IdArray arr : arrays) { const int64_t length = arr->shape[0]; int nb = (length + nt - 1) / nt; CUDA_KERNEL_CALL((_RelabelKernel<IdType>), nb, nt, 0, thr_entry->stream, arr.Ptr<IdType>(), length, node_map.DeviceHandle()); } return induced_nodes; } template IdArray Relabel_<kDLGPU, int32_t>(const std::vector<IdArray>& arrays); template IdArray Relabel_<kDLGPU, int64_t>(const std::vector<IdArray>& arrays); ///////////////////////////// AsNumBits ///////////////////////////// template <typename InType, typename OutType> __global__ void _CastKernel(const InType* in, OutType* out, size_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[tx] = in[tx]; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> IdArray AsNumBits(IdArray arr, uint8_t bits) { const 
std::vector<int64_t> shape(arr->shape, arr->shape + arr->ndim); IdArray ret = IdArray::Empty(shape, DLDataType{kDLInt, bits, 1}, arr->ctx); const int64_t length = ret.NumElements(); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = cuda::FindNumThreads(length); int nb = (length + nt - 1) / nt; if (bits == 32) { CUDA_KERNEL_CALL((_CastKernel<IdType, int32_t>), nb, nt, 0, thr_entry->stream, static_cast<IdType*>(arr->data), static_cast<int32_t*>(ret->data), length); } else { CUDA_KERNEL_CALL((_CastKernel<IdType, int64_t>), nb, nt, 0, thr_entry->stream, static_cast<IdType*>(arr->data), static_cast<int64_t*>(ret->data), length); } return ret; } template IdArray AsNumBits<kDLGPU, int32_t>(IdArray arr, uint8_t bits); template IdArray AsNumBits<kDLGPU, int64_t>(IdArray arr, uint8_t bits); } // namespace impl } // namespace aten } // namespace dgl
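// --- Illustrative sketch, not part of the original file ---
// The file above repeats one launch idiom: nt = cuda::FindNumThreads(len), nb rounded up to
// whole blocks, then a grid-stride kernel invoked through CUDA_KERNEL_CALL. The minimal version
// below uses a fixed block size of 256 in place of FindNumThreads() and a plain <<<>>> launch in
// place of the macro; the kernel and launcher names are assumptions made for this sketch only.
template <typename T>
__global__ void _FillExampleKernel(T* out, int64_t length, T val) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;   // first element owned by this thread
  int stride_x = gridDim.x * blockDim.x;            // total number of threads in the grid
  while (tx < length) {                             // grid-stride loop, same shape as the kernels above
    out[tx] = val;
    tx += stride_x;
  }
}

template <typename T>
void FillExample(T* d_out, int64_t length, T val, cudaStream_t stream) {
  if (length == 0) return;                          // avoid a zero-sized grid launch
  const int nt = 256;                               // threads per block
  const int nb = static_cast<int>((length + nt - 1) / nt);  // blocks, rounded up
  _FillExampleKernel<T><<<nb, nt, 0, stream>>>(d_out, length, val);
}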
#include <assert.h> #include <stdio.h> #include <stdint.h> #include <sys/socket.h> // AF_LOCAL #include <poll.h> // POLL #include <sys/param.h> #define O_NONBLOCK 00004 #define O_GPUNET_BOUNCE 04000 #define EWOULDBLOCK 11 #ifndef UINT32_MAX #define UINT32_MAX 0xffffffffU #endif #define SOC_NONBLOCK(soc_e) ((soc_e)->e_flags & O_NONBLOCK) __device__ int gsocket( int domain, int type, int protocol){ int entry=g_soctable->findNewEntry(); return entry; } __device__ inline void free_socket(int soc){ GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE)); g_soctable->free_socket(soc); } // MUST BE CALLED FROM A SINGLE THREAD!! __device__ int ll_bind_connect(const char* name, int namelen, req_type_t req_type) { // all GPU failures should happen before getting to the CPU GPU_ASSERT(name); __shared__ int ret_val; __shared__ int newsocket; __shared__ gpu_socket *sock; newsocket=gsocket(0,0,0); assert(newsocket != E_SOCTABLE_FULL); int entry; cpu_ipc_entry* e; GET_QUEUE_SLOT(entry, e); e->sock_domain=AF_LOCAL; e->sock_proto = 0; e->req_type = req_type; e->cpu_sock = -1; memcpy_thread((char*)e->addr.local, name, namelen + 1); ret_val = fire_and_wait(e); g_ipc_manager->freeEntry(entry); sock = &g_soctable->_sockets[newsocket]; sock->cpu_soc = ret_val; memcpy_thread((char*)sock->addr, (char*)name, namelen+1); __threadfence(); // propagate to everybody if (ret_val<0) { g_soctable->free_socket(newsocket); newsocket = ret_val; } return newsocket; } __device__ int ll_bind_connect_in(const struct sockaddr_in *addr, int sock_domain, int sock_proto, req_type_t req_type) { // all GPU failures should happen before getting to the CPU GPU_ASSERT(addr); __shared__ int ret_val; __shared__ int newsocket; __shared__ gpu_socket *sock; newsocket=gsocket(0,0,0); assert(newsocket != E_SOCTABLE_FULL); int entry; cpu_ipc_entry* e; GET_QUEUE_SLOT(entry,e); e->sock_domain= sock_domain; e->sock_proto = sock_proto; e->req_type=req_type; e->cpu_sock=-1; memcpy_thread((char*)&e->addr.in, (char*)addr, sizeof(*addr)); ret_val=fire_and_wait(e); g_ipc_manager->freeEntry(entry); if (ret_val >= 0) { sock = &g_soctable->_sockets[newsocket]; sock->cpu_soc=ret_val; if ((req_type == CONNECT_IPC_REQ) || req_type == CONNECT_IPC_REQ_BOUNCE) { sock->sbuf = (g_ringbuf::ringbuf_t)readNoCache(&e->sbuf_addr); sock->rbuf = (uint8_t*)readNoCache(&e->rbuf_addr); sock->rbuf_offset = (uint64_t*)readNoCache(&e->dev_rbuf_offset); sock->rbuf_bytes_avail = (uint64_t*)readNoCache(&e->dev_rbuf_bytes_avail); sock->rbuf_size = readNoCache(&e->rbuf_size); sock->rbuf_bytes_avail_cache = 0; } memcpy_thread((char*)g_soctable->_sockets[newsocket].addr, (char*)addr, sizeof(*addr)); __threadfence(); // propagate to everybody } else { g_soctable->free_socket(newsocket); newsocket=ret_val; } return newsocket; } __device__ int single_thread_gbind(const char* name, int namelen) { return ll_bind_connect(name,namelen,BIND_IPC_REQ); } __device__ int single_thread_gbind_in(const sockaddr_in* addr) { return ll_bind_connect_in(addr, AF_INET, IPPROTO_IBP, BIND_IPC_REQ); } __device__ int single_thread_gbind_bounce_in(const sockaddr_in* addr) { return ll_bind_connect_in(addr, AF_INET, IPPROTO_IBP, BIND_IPC_REQ_BOUNCE); } __device__ int gconnect(const char* name, int namelen) { __shared__ int retval; BEGIN_SINGLE_THREAD_PART { retval=ll_bind_connect(name,namelen,CONNECT_IPC_REQ); } END_SINGLE_THREAD_PART; return retval; } __device__ int gconnect_in(const struct sockaddr_in* addr) { __shared__ int retval; BEGIN_SINGLE_THREAD_PART { retval = ll_bind_connect_in(addr, AF_INET, IPPROTO_IBP, 
CONNECT_IPC_REQ); } END_SINGLE_THREAD_PART; return retval; } __device__ int gconnect_bounce_in(const struct sockaddr_in* addr) { __shared__ int retval; BEGIN_SINGLE_THREAD_PART { retval = ll_bind_connect_in(addr, AF_INET, IPPROTO_IBP, CONNECT_IPC_REQ_BOUNCE); } END_SINGLE_THREAD_PART; return retval; } __device__ int gconnect_ib(const struct sockaddr_in* addr) { return gconnect_in(addr); } __device__ int gaccept(int soc) { GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE)); __shared__ int newsocket; __shared__ gpu_socket *serv_soc_e, *soc_e; __shared__ int ret_val; BEGIN_SINGLE_THREAD_PART { serv_soc_e=&g_soctable->_sockets[soc]; // init the new client socket newsocket=gsocket(0,0,0); soc_e=&g_soctable->_sockets[newsocket]; int entry; cpu_ipc_entry* e; GET_QUEUE_SLOT(entry,e); e->cpu_sock=serv_soc_e->cpu_soc; // server socket e->req_type=ACCEPT_IPC_REQ; ret_val=fire_and_wait(e); soc_e->cpu_soc=ret_val; soc_e->sbuf = (g_ringbuf::ringbuf_t)readNoCache(&e->sbuf_addr); soc_e->rbuf = (uint8_t*)readNoCache(&e->rbuf_addr); soc_e->rbuf_offset = (uint64_t*)readNoCache(&e->dev_rbuf_offset); soc_e->rbuf_bytes_avail = (uint64_t*)readNoCache(&e->dev_rbuf_bytes_avail); soc_e->rbuf_size = readNoCache(&e->rbuf_size); soc_e->rbuf_bytes_avail_cache = 0; __threadfence(); g_ipc_manager->freeEntry(entry); if (ret_val < 0) { GPU_ASSERT(ret_val>=0); g_soctable->free_socket(newsocket); } } END_SINGLE_THREAD_PART; if (ret_val<0) return ret_val; return newsocket; } __device__ int gaccept_nb(int soc) { GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE)); __shared__ int newsocket; __shared__ gpu_socket *serv_soc_e, *soc_e; __shared__ int ret_val; BEGIN_SINGLE_THREAD_PART { serv_soc_e=&g_soctable->_sockets[soc]; // init the new client socket newsocket=gsocket(0,0,0); soc_e=&g_soctable->_sockets[newsocket]; int entry; cpu_ipc_entry* e; GET_QUEUE_SLOT(entry,e); e->cpu_sock=serv_soc_e->cpu_soc; // server socket e->req_type = ACCEPT_NB_IPC_REQ; ret_val=fire_and_wait(e); g_ipc_manager->freeEntry(entry); soc_e->cpu_soc=ret_val; soc_e->sbuf = (g_ringbuf::ringbuf_t)readNoCache(&e->sbuf_addr); soc_e->rbuf = (uint8_t*)readNoCache(&e->rbuf_addr); soc_e->rbuf_offset = (uint64_t*)readNoCache(&e->dev_rbuf_offset); soc_e->rbuf_bytes_avail = (uint64_t*)readNoCache(&e->dev_rbuf_bytes_avail); soc_e->rbuf_size = readNoCache(&e->rbuf_size); __threadfence(); } END_SINGLE_THREAD_PART; if (ret_val<0) { g_soctable->free_socket(newsocket); GPU_ASSERT(ret_val>=0); return ret_val; } return newsocket; } __device__ int ll_shutdown_close(int soc, req_type_t req, int how) { GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE)); __shared__ gpu_socket* soc_e; __shared__ int ret_val; int entry; cpu_ipc_entry* e; soc_e=&g_soctable->_sockets[soc]; // make sure that send ringbuf is all consumed. // otherwise, close message may be sent before send requests, though the user requested send first. 
if (soc_e->sbuf) while (g_ringbuf::ringbuf_bytes_used(soc_e->sbuf) != 0); GET_QUEUE_SLOT(entry,e); assert(e->status!=CPU_IPC_PENDING); e->cpu_sock=soc_e->cpu_soc; // closing e->shutdown_flags = how; e->req_type = req; ret_val=fire_and_wait(e); g_ipc_manager->freeEntry(entry); if (req == CLOSE_IPC_REQ) g_soctable->free_socket(soc); return ret_val; } __device__ int single_thread_gshutdown(int soc, int how) { return ll_shutdown_close(soc,SHUTDOWN_IPC_REQ,how); } __device__ int single_thread_gclose(int soc) { return ll_shutdown_close(soc,CLOSE_IPC_REQ,SHUT_RDWR); } #ifdef GPUNET_PROFILE #define def_timer(n) __shared__ long long int _t[n]; #define set_timer(n) _t[(n)] = clock64(); #else #define def_timer(n) #define set_timer(n) #endif #ifdef DEFAULT_TO_UNIX_SOCKET __device__ int gsend(int soc, uchar* to_send, int size) { GPU_ASSERT(soc>=0 && soc< SOC_TABLE_SIZE); GPU_ASSERT(to_send&&size); __shared__ gpu_socket* soc_e; __shared__ int ret_val, ipc_slot,total_sent; __shared__ cpu_ipc_entry* e; BEGIN_SINGLE_THREAD GET_QUEUE_SLOT(ipc_slot,e); assert(e->status!=CPU_IPC_PENDING); END_SINGLE_THREAD soc_e=&g_soctable->_sockets[soc]; // copy to the socket buffer total_sent=0; while(total_sent!=size){ int single_send=min(size-total_sent,CPU_IO_BUF_SIZE); copy_block((uchar*)g_buffer_space+soc_e->send_buffer_offset,(uchar*)to_send+total_sent,single_send); __threadfence(); // make sure everything reached the main memory BEGIN_SINGLE_THREAD e->cpu_sock=soc_e->cpu_soc; e->req_type=SEND_IPC_REQ; e->data_size=single_send; e->req_buffer_offset=soc_e->send_buffer_offset; ret_val=fire_and_wait(e); if (ret_val>=0) total_sent+=ret_val; END_SINGLE_THREAD if (ret_val<0) { total_sent=ret_val; break;} if (ret_val==0) break; } BEGIN_SINGLE_THREAD g_ipc_manager->freeEntry(ipc_slot); END_SINGLE_THREAD return total_sent; } #else #ifdef GPUNET_PROFILE_SEND __device__ unsigned int __cnt = 0; #endif __device__ int gsend(int soc, uchar* to_send, int size) { GPU_ASSERT((soc >= 0) && (soc < SOC_TABLE_SIZE)); GPU_ASSERT(to_send && size); __shared__ size_t send_size; __shared__ gpu_socket* soc_e; #ifdef GPUNET_PROFILE_SEND def_timer(4); #endif BEGIN_SINGLE_THREAD_PART { #ifdef GPUNET_PROFILE_SEND _t[0] = _t[1] = _t[2] = _t[3] = 0; #endif soc_e=&g_soctable->_sockets[soc]; // checking recv buffer is necessary, so calling to CPU is // inevitable. #ifdef GPUNET_PROFILE_SEND set_timer(0); #endif if (size != 0) { do { send_size = g_ringbuf::ringbuf_bytes_free(soc_e->sbuf); } while(!SOC_NONBLOCK(soc_e) && send_size == 0); } send_size = MIN(send_size, size); #ifdef GPUNET_PROFILE_SEND set_timer(1); #endif } END_SINGLE_THREAD_PART; if (send_size == 0) return -EWOULDBLOCK; // send_size is the expected size to be sent. though the actual send // size may be increased after CQ handling, we can safely ignore the increase unless send_size is 0. 
if (send_size != 0) { #ifdef GPUNET_PROFILE_SEND g_ringbuf::ringbuf_memcpy_into(soc_e->sbuf, to_send, send_size, &send_size, 0); if (FIRST_THREAD_IN_BLOCK()) { set_timer(2); g_ringbuf::ringbuf_produce(soc_e->sbuf, send_size); } #else g_ringbuf::ringbuf_memcpy_into(soc_e->sbuf, to_send, send_size, &send_size, 1); #endif } #ifdef GPUNET_PROFILE_SEND if (FIRST_THREAD_IN_BLOCK()) { if (((_t[0] % 101) == 0)) { set_timer(3); #define t_diff(n) (_t[(n)] - _t[(n-1)]) printf("1: %ld\t2: %ld\t3: %ld\tsend: %d, offset: %lu, free: %lu\n", t_diff(1), t_diff(2), t_diff(3), send_size, g_ringbuf::ringbuf_head_offset(soc_e->sbuf), g_ringbuf::ringbuf_bytes_free(soc_e->sbuf) ); } } #endif return send_size; } #endif #ifdef DEFAULT_TO_UNIX_SOCKET __device__ int grecv(int soc, uchar* to_recv, int size) { GPU_ASSERT(soc>=0 && soc< SOC_TABLE_SIZE); GPU_ASSERT(to_recv&&size); __shared__ gpu_socket* soc_e; __shared__ int ret_val, ipc_slot,total_recv; __shared__ cpu_ipc_entry* e; BEGIN_SINGLE_THREAD GET_QUEUE_SLOT(ipc_slot,e); assert(e->status!=CPU_IPC_PENDING); END_SINGLE_THREAD soc_e=&g_soctable->_sockets[soc]; // copy to the socket buffer total_recv=0; while(total_recv<size) { int single_recv=min(size-total_recv,CPU_IO_BUF_SIZE); BEGIN_SINGLE_THREAD e->cpu_sock=soc_e->cpu_soc; e->req_type=RECV_IPC_REQ; e->data_size=single_recv; e->req_buffer_offset = soc_e->recv_buffer_offset; ret_val=fire_and_wait(e); if (ret_val>=0) {total_recv+=ret_val;} END_SINGLE_THREAD if (ret_val<0) { total_recv=ret_val; break;} if (ret_val==0) break; copy_block((uchar*)to_recv+total_recv-ret_val, (uchar*)g_buffer_space+soc_e->recv_buffer_offset, ret_val); } BEGIN_SINGLE_THREAD g_ipc_manager->freeEntry(ipc_slot); END_SINGLE_THREAD return total_recv; } __device__ int grecv_nb(int soc, void* to_recv, int size) { GPU_ASSERT(soc>=0 && soc< SOC_TABLE_SIZE); GPU_ASSERT(to_recv&&size); __shared__ gpu_socket* soc_e; __shared__ int ret_val, ipc_slot,total_recv; __shared__ cpu_ipc_entry* e; BEGIN_SINGLE_THREAD GET_QUEUE_SLOT(ipc_slot,e); assert(e->status!=CPU_IPC_PENDING); END_SINGLE_THREAD soc_e = &g_soctable->_sockets[soc]; // copy to the socket buffer total_recv = 0; while (total_recv < size) { int single_recv = min(size-total_recv, CPU_IO_BUF_SIZE); BEGIN_SINGLE_THREAD e->cpu_sock = soc_e->cpu_soc; e->req_type = RECV_NB_IPC_REQ; e->data_size = single_recv; e->req_buffer_offset = soc_e->recv_buffer_offset; ret_val = fire_and_wait(e); if (ret_val >= 0) total_recv += ret_val; if (total_recv == 0) total_recv = ret_val; END_SINGLE_THREAD if (ret_val <= 0) break; copy_block((uchar*)to_recv + total_recv - ret_val, (uchar*)g_buffer_space+soc_e->recv_buffer_offset, ret_val); } BEGIN_SINGLE_THREAD g_ipc_manager->freeEntry(ipc_slot); END_SINGLE_THREAD return total_recv; } #else __device__ int grecv(int soc, uchar* to_recv, int size) { GPU_ASSERT(soc>=0 && soc< SOC_TABLE_SIZE); GPU_ASSERT(to_recv&&size); __shared__ gpu_socket* soc_e; __shared__ int ret_val, end_size, rbuf_size; __shared__ uint64_t rbuf_offset, rbuf_bytes_avail; #ifdef GPUNET_PROFILE_RECV def_timer(6); #endif BEGIN_SINGLE_THREAD_PART { #ifdef GPUNET_PROFILE_RECV _t[0] = _t[1] = _t[2] = _t[3] = _t[4] = _t[5] = 0; set_timer(0); #endif soc_e = &g_soctable->_sockets[soc]; ret_val = 0; rbuf_size = soc_e->rbuf_size; rbuf_bytes_avail = soc_e->rbuf_bytes_avail_cache; do { rbuf_offset = *(soc_e->rbuf_offset); if (rbuf_offset > rbuf_bytes_avail) { ret_val = (rbuf_offset - rbuf_bytes_avail); } else if (rbuf_offset < rbuf_bytes_avail) { // rbuf_offset is overflown ret_val = (UINT32_MAX - 
rbuf_bytes_avail) + rbuf_offset + 1; } } while(!SOC_NONBLOCK(soc_e) && ret_val == 0); #ifdef GPUNET_PROFILE_RECV set_timer(1); #endif end_size = rbuf_size - (rbuf_bytes_avail % rbuf_size); ret_val = MIN(ret_val, size); #ifdef GPUNET_PROFILE_RECV set_timer(2); #endif } END_SINGLE_THREAD_PART; if (ret_val == 0) { return -EWOULDBLOCK; } if (ret_val > end_size) { // rbuf_size should be the power of 2. copy_block_src_volatile(to_recv, &soc_e->rbuf[rbuf_bytes_avail % rbuf_size], end_size); copy_block_src_volatile(to_recv + end_size, soc_e->rbuf, ret_val - end_size); } else { copy_block_src_volatile(to_recv, &soc_e->rbuf[rbuf_bytes_avail % rbuf_size], ret_val); } BEGIN_SINGLE_THREAD_PART { #ifdef GPUNET_PROFILE_RECV set_timer(3); #endif // indefinitely increases, and even wraps around. soc_e->rbuf_bytes_avail_cache += ret_val; *soc_e->rbuf_bytes_avail = soc_e->rbuf_bytes_avail_cache; #ifdef GPUNET_PROFILE_RECV set_timer(4); #endif #ifdef GPUNET_PROFILE_RECV set_timer(5); #define t_diff(n) (_t[(n)] - _t[(n-1)]) printf("1: %ld\t2: %ld\t3: %ld\t4: %ld\t5: %ld\ttotal: %ld\n", t_diff(1), t_diff(2), t_diff(3), t_diff(4), t_diff(5), _t[5] - _t[0]); #endif } END_SINGLE_THREAD_PART; return ret_val; } #endif __device__ int gpoll(struct gpollfd* fds, size_t nfds, int nclock_timeout) { __shared__ long long clock_start; __shared__ size_t nth; __shared__ bool is_timeout; __shared__ int may_return; BEGIN_SINGLE_THREAD_PART { clock_start = clock64(); is_timeout = false; nth = blockDim.x * blockDim.y * blockDim.z; may_return = 0; } END_SINGLE_THREAD_PART; do { for(int i = TID; i < nfds; i+= nth) { struct gpollfd pfd = fds[i]; pfd.revents = 0; pfd.rbytes = 0; pfd.wbytes = 0; int soc = pfd.fd; if (soc < 0 || soc >= SOC_TABLE_SIZE) { pfd.revents |= POLLNVAL; } else { gpu_socket *soc_e = &g_soctable->_sockets[soc]; int buf_size = soc_e->rbuf_size; uint64_t buf_avlb = soc_e->rbuf_bytes_avail_cache; uint64_t buf_offset = *(soc_e->rbuf_offset); int op_size = 0; if (buf_offset > buf_avlb) { op_size = (buf_offset - buf_avlb); } else if (buf_offset < buf_avlb) { op_size = (UINT32_MAX - buf_avlb) + buf_offset + 1; } if (op_size > 0) { pfd.revents |= POLLIN; pfd.rbytes = op_size; may_return = 1; } op_size = g_ringbuf::ringbuf_bytes_free(soc_e->sbuf); if (op_size > 0) { pfd.revents |= POLLOUT; pfd.wbytes = op_size; may_return = 1; } } pfd.revents &= pfd.events | POLLERR | POLLNVAL | POLLHUP; fds[i] = pfd; } BEGIN_SINGLE_THREAD_PART { if (nclock_timeout > 0 && clock64() - clock_start > nclock_timeout) is_timeout = true; } END_SINGLE_THREAD_PART; } while (may_return == 0 && !is_timeout); return may_return; } __device__ void gsetsock_block(int socket, int blocking) { __shared__ gpu_socket* soc_e; BEGIN_SINGLE_THREAD_PART { soc_e = &g_soctable->_sockets[socket]; if (!blocking) soc_e->e_flags |= O_NONBLOCK; } END_SINGLE_THREAD_PART; } __device__ void gsetsock_bounce(int socket, int bounce) { __shared__ gpu_socket* soc_e; BEGIN_SINGLE_THREAD_PART { soc_e = &g_soctable->_sockets[socket]; if (bounce) soc_e->e_flags |= O_GPUNET_BOUNCE; } END_SINGLE_THREAD_PART; } __device__ int ggetsock_block(int socket) { __shared__ gpu_socket* soc_e; __shared__ int ret; BEGIN_SINGLE_THREAD_PART { soc_e = &g_soctable->_sockets[socket]; ret = (soc_e->e_flags & O_NONBLOCK); } END_SINGLE_THREAD_PART; return (!ret); } __device__ void ggettimeofday(struct gtimeval *tv) { __shared__ int ipc_slot; __shared__ cpu_ipc_entry* e; BEGIN_SINGLE_THREAD_PART { GET_QUEUE_SLOT(ipc_slot,e); e->req_type = LOG_TIMESTAMP_REQ; fire_and_wait(e); tv->tv_sec = e->tv_sec; 
tv->tv_usec = e->tv_usec; g_ipc_manager->freeEntry(ipc_slot); } END_SINGLE_THREAD_PART; } __device__ void gtimersub(struct gtimeval *a, struct gtimeval *b, struct gtimeval *c) { c->tv_sec = 0; c->tv_usec = a->tv_usec - b->tv_usec; if (c->tv_usec < 0) { c->tv_usec += 1000000; c->tv_sec = -1; } c->tv_sec += (a->tv_sec - b->tv_sec); } __device__ void gputs_single(const char* str, int len) { __shared__ int ipc_slot; __shared__ cpu_ipc_entry* e; GET_QUEUE_SLOT(ipc_slot,e); // use local for copying string e->sock_domain = blockIdx.z * (blockDim.x * blockDim.y) + blockIdx.y * blockDim.x + blockIdx.x; e->req_type = PUTS_REQ; int len_copy = (len < GPU_LOCAL_SOC_MAX_PATH) ? len : GPU_LOCAL_SOC_MAX_PATH; strncpy_thread(e->addr.local, str, len_copy); e->addr.local[len_copy-1] = '\0'; fire_and_wait(e); g_ipc_manager->freeEntry(ipc_slot); } __device__ void gputs_single(const char* str, int len, unsigned int* threads) { gputs_single(str, len); for (int i = 0; i < blockDim.x; i++) { if (!(threads[i >> 5] & (1 << (i & 31)))) { gprintf4_single("thread %d missing\n", i, 0, 0, 0); } } } __device__ int ui2a(unsigned int num, unsigned int base,char * bf) { int n=0; unsigned int d=1; while (num/d >= base) d*=base; while (d!=0) { int dgt = num / d; num%= d; d/=base; if (n || dgt>0 || d==0) { *bf++ = dgt+(dgt<10 ? '0' : 'a'-10); ++n; } } *bf=0; return n; } __device__ int i2a (int num, char * bf) { if (num<0) { num=-num; *bf++ = '-'; } return ui2a(num,10,bf); } // printf for one integer __device__ void gprintf4_single(const char* str, int arg1, int arg2, int arg3, int arg4) { __shared__ int ipc_slot; __shared__ cpu_ipc_entry* e; char* buf, ch; __shared__ char bf[12]; assert(threadIdx.x == 0); GET_QUEUE_SLOT(ipc_slot,e); // use local for copying string e->sock_domain = blockIdx.z * (blockDim.x * blockDim.y) + blockIdx.y * blockDim.x + blockIdx.x; e->req_type = PUTS_REQ; buf = (char*)e->addr.local; int cnt = 0, len_copy; while ((ch=*(str++)) && (buf < (e->addr.local + GPU_LOCAL_SOC_MAX_PATH - 1))) { if (ch!='%') { *buf = ch; buf++; } else { ch=*(str++); if (ch == 'd'){ switch(cnt) { case 0: len_copy = i2a(arg1, bf); break; case 1: len_copy = i2a(arg2, bf); break; case 2: len_copy = i2a(arg3, bf); break; case 3: len_copy = i2a(arg4, bf); break; default: len_copy = i2a(arg4, bf); break; } strncpy_thread(buf, bf, len_copy); buf += len_copy; } else if (ch == 'x') { switch(cnt) { case 0: len_copy = ui2a(arg1, 16, bf); break; case 1: len_copy = ui2a(arg2, 16, bf); break; case 2: len_copy = ui2a(arg3, 16, bf); break; case 3: len_copy = ui2a(arg4, 16, bf); break; default: len_copy = ui2a(arg4, 16, bf); break; } strncpy_thread(buf, bf, len_copy); buf += len_copy; } if (ch != '%') cnt++; } } *buf = '\0'; fire_and_wait(e); g_ipc_manager->freeEntry(ipc_slot); } __device__ void gputs(const char* str, int len) { __shared__ unsigned int threads[32]; atomicOr(&threads[threadIdx.x >> 5], (1 << (threadIdx.x & 31))); BEGIN_SINGLE_THREAD_PART { gprintf4_single(str, 0, 0, 0, 0); } END_SINGLE_THREAD_PART; } static inline __device__ int __glaunch(req_type_t req, const char* func_name, int grid_dim[3], int block_dim[3], const void* ka) { __shared__ int ret; BEGIN_SINGLE_THREAD_PART { int entry; __shared__ cpu_ipc_entry *e; GET_QUEUE_SLOT(entry, e); int c = strncpy_thread(e->func_name, func_name, GPU_LOCAL_FNAME_MAX_SIZE); memcpy_thread(e->grid_dim, grid_dim, sizeof(int)*3); memcpy_thread(e->block_dim, block_dim, sizeof(int)*3); e->argument = (volatile void*)ka; e->req_type = req; ret = fire_and_wait(e); g_ipc_manager->freeEntry(entry); 
__threadfence(); } END_SINGLE_THREAD_PART; return ret; } __device__ int gsynclaunch(const char* func_name, int grid_dim[3], int block_dim[3], const void* ka) { return __glaunch(KERNEL_SYNC_EXECUTE_REQ, func_name, grid_dim, block_dim, ka); } __device__ int gasynclaunch(const char* func_name, int grid_dim[3], int block_dim[3], const void* ka) { return __glaunch(KERNEL_ASYNC_INVOKE_REQ, func_name, grid_dim, block_dim, ka); } __device__ int gaio_accept_th(cpu_ipc_entry* ipcent, int sock, void* buf, int size) { gpu_socket *serv_soc_e; serv_soc_e = &g_soctable->_sockets[sock]; ipcent->cpu_sock = serv_soc_e->cpu_soc; ipcent->req_type = ACCEPT_IPC_REQ; fire_async(ipcent); return 0; } __device__ int gaio_accept_bh(cpu_ipc_entry* e, int ret_val, int sock, void* buf, int size) { __shared__ int newsocket; BEGIN_SINGLE_THREAD gpu_socket *soc_e; newsocket = gsocket(0, 0, 0); soc_e = &g_soctable->_sockets[newsocket]; soc_e->cpu_soc = ret_val; #ifndef DEFAULT_TO_UNIX_SOCKET soc_e->sbuf = (g_ringbuf::ringbuf_t)readNoCache(&e->sbuf_addr); soc_e->rbuf = (uint8_t*)readNoCache(&e->rbuf_addr); soc_e->rbuf_offset = (uint64_t*)readNoCache(&e->dev_rbuf_offset); soc_e->rbuf_bytes_avail = (uint64_t*)readNoCache(&e->dev_rbuf_bytes_avail); soc_e->rbuf_size = readNoCache(&e->rbuf_size); #endif END_SINGLE_THREAD return newsocket; } __device__ int gaio_recv_th(cpu_ipc_entry* ipcent, int sock, void* buf, int size) { int single_recv = min(size, CPU_IO_BUF_SIZE); gpu_socket* soc_e = &g_soctable->_sockets[sock]; ipcent->cpu_sock = soc_e->cpu_soc; ipcent->req_type = RECV_IPC_REQ; ipcent->data_size = single_recv; #ifdef DEFAULT_TO_UNIX_SOCKET ipcent->req_buffer_offset = soc_e->recv_buffer_offset; #endif fire_async(ipcent); return 0; } __device__ int gaio_recv_bh(int , int sock, void* buf, int size) { #ifdef DEFAULT_TO_UNIX_SOCKET gpu_socket *soc_e = &g_soctable->_sockets[sock]; copy_block((uchar*)buf, (uchar*)g_buffer_space+soc_e->recv_buffer_offset, size); #endif return size; } __device__ int gaio(int sock, GAIOP op, void* buf, int size) { GPU_ASSERT((sock >= 0) && (sock < SOC_TABLE_SIZE)); __shared__ int slot; BEGIN_SINGLE_THREAD cpu_ipc_entry *ent; GET_QUEUE_SLOT(slot, ent); assert(ent->status!=CPU_IPC_PENDING); if (op == GAIO_ACCEPT) gaio_accept_th(ent, sock, buf, size); else if (op == GAIO_RECEIVE) gaio_recv_th(ent, sock, buf, size); END_SINGLE_THREAD return slot; } __device__ int gaio_poll(int slot, int sock, GAIOP op, void* buf, int size) { GPU_ASSERT((slot >= 0) && (slot < TASK_ARRAY_SIZE)); __shared__ int ipc_ret; __shared__ cpu_ipc_entry* e; int func_ret; BEGIN_SINGLE_THREAD e = POKE_QUEUE_SLOT(slot); if (peek_the_hole(e)) { ipc_ret = wait_for_ipc(e); } else { ipc_ret = -EWOULDBLOCK; } END_SINGLE_THREAD if (ipc_ret < 0) return ipc_ret; if (op == GAIO_ACCEPT) func_ret = gaio_accept_bh(e, ipc_ret, sock, buf, size); else func_ret = gaio_recv_bh(0, sock, buf, ipc_ret); BEGIN_SINGLE_THREAD g_ipc_manager->freeEntry(slot); END_SINGLE_THREAD return func_ret; }
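// --- Illustrative sketch, not part of the original file ---
// grecv() and gpoll() above derive the number of readable bytes from two monotonically
// increasing counters (the producer's rbuf_offset and the consumer's rbuf_bytes_avail) that
// wrap around at UINT32_MAX, so the subtraction has to be taken modulo 2^32. The helper name
// gpunet_readable_bytes is an assumption made for this sketch only.
static inline __host__ __device__ uint64_t gpunet_readable_bytes(uint64_t rbuf_offset,
                                                                 uint64_t rbuf_bytes_avail) {
  if (rbuf_offset >= rbuf_bytes_avail)
    return rbuf_offset - rbuf_bytes_avail;                    // no wrap between the two counters
  // the producer counter wrapped past UINT32_MAX while the consumer counter has not (yet)
  return (UINT32_MAX - rbuf_bytes_avail) + rbuf_offset + 1;
}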
#include <cuda_runtime.h> #include <cuda_fp16.h> #include "cutensor.h" #define HANDLE_ERROR(x) { const auto err = x;\ if (err == CUTENSOR_STATUS_NOT_SUPPORTED) { return false; }\ if (err != CUTENSOR_STATUS_SUCCESS) {printf("Error: %s in line %d\n", cutensorGetErrorString(err), __LINE__); return false; } } #define HANDLE_CUDA_ERROR(x) { const auto err = x; if( err != cudaSuccess ) { printf("Error: %d in line %d\n", err, __LINE__); exit(-1); } } template<typename U> struct CuTensorTypeTraits; template<> struct CuTensorTypeTraits<double> { static const cudaDataType_t cudaType = CUDA_R_64F; static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_64F; typedef double ScalarType; }; template<> struct CuTensorTypeTraits<float> { static const cudaDataType_t cudaType = CUDA_R_32F; static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_32F; typedef float ScalarType; }; template<> struct CuTensorTypeTraits<__half> { static const cudaDataType_t cudaType = CUDA_R_16F; static const cutensorComputeType_t cutensorType = CUTENSOR_COMPUTE_32F; typedef float ScalarType; }; template<typename ComputeType, typename IntType, int kMaxNumModes_> struct Einsum { static const std::vector<IntType> emptyVec; Einsum(const std::string &equation, const std::vector<IntType> &A_shape, const std::vector<IntType> &B_shape = emptyVec) : numModesA_(A_shape.size()), numModesB_(B_shape.size()), numModesC_(0), isInitialized_(false) { const auto arrow_pos = equation.find("->"); const auto comma_pos = equation.find(","); const auto dots = equation.find("..."); const bool isBroadcast = (dots != std::string::npos); const bool isImplicit = (arrow_pos == std::string::npos); if (isBroadcast) // TODO { return; } const bool usesB = (comma_pos != std::string::npos); size_t a_start = 0; size_t a_end = isImplicit ? ((comma_pos == std::string::npos) ? equation.size() : comma_pos) : ((comma_pos == std::string::npos) ? arrow_pos : comma_pos); size_t b_start = usesB ? comma_pos + 1 : 0; size_t b_end = usesB ? (isImplicit ? equation.size() : arrow_pos) : 0; size_t c_start = isImplicit ? 
equation.size() : arrow_pos + 2; size_t c_end = equation.size(); char modeA[kMaxNumModes_ + 2]; uint32_t numModesA = 0; for (int i = a_start; i < a_end && numModesA < kMaxNumModes_ + 2; ++i){ if (equation.at(i) != ' ') // skip spaces { modeA[numModesA++] = equation.at(i); } } char modeB[kMaxNumModes_ + 2]; uint32_t numModesB = 0; for (int i = b_start; i < b_end && numModesB < kMaxNumModes_ + 2; ++i){ if (equation.at(i) != ' ') // skip spaces { modeB[numModesB++] = equation.at(i); } } char modeC[kMaxNumModes_ + 2]; uint32_t numModesC = 0; for (int i = c_start; i < c_end && numModesC < kMaxNumModes_ + 2; ++i){ if (equation.at(i) != ' ') // skip spaces { modeC[numModesC++] = equation.at(i); } } if ((numModesA != numModesA_) || (numModesB != numModesB_)) { // substring size and shape don't match return; } if (numModesA_ > kMaxNumModes_ || numModesB_ > kMaxNumModes_) { // too many modes return; } /** * Copy all modes from modeA to modeC if they don't appear in modeB */ auto copyModesIf = [](const char* modeA, uint32_t numModesA, const char* modeB, uint32_t numModesB, char* modeC, uint32_t &numModesC) { for (uint32_t i = 0; i < numModesA; i++) { auto mode = modeA[i]; bool found = false; for(uint32_t j=0; j < numModesB; ++j){ if(mode == modeB[j]) { found = true; break; } } if (!found) // is non-contracted mode { modeC[numModesC++] = mode; if (numModesC > kMaxNumModes_) { // too many modes return false; } } } return true; }; std::array<char, kMaxNumModes_+1> implicitModeC; char* redirectModeC; if (isImplicit) { // we have to copy all non-contracted modes from A over to C if (copyModesIf(modeA, numModesA_, modeB, numModesB_, implicitModeC.data(), numModesC_) == false) { return; } // we have to copy all non-contracted modes from B over to C if (copyModesIf(modeB, numModesB_, modeA, numModesA_, implicitModeC.data(), numModesC_) == false) { return; } std::sort(implicitModeC.begin(), std::next(implicitModeC.begin(), numModesC_)); // modes are sorted w.r.t. 
lexical order implicitModeC[numModesC_] = '\0'; redirectModeC = implicitModeC.data(); } else { redirectModeC = modeC; numModesC_ = numModesC; } for (uint32_t i = 0; i < numModesA_; i++) { modesA_[i] = modeA[numModesA_ - i - 1]; extentA_[i] = A_shape[numModesA_ - i - 1]; } for (uint32_t i = 0; i < numModesB_; i++) { modesB_[i] = modeB[numModesB_ - i - 1]; extentB_[i] = B_shape[numModesB_ - i - 1]; } for (uint32_t i = 0; i < numModesC_; i++) { const auto mode = redirectModeC[numModesC_ - i - 1]; modesC_[i] = mode; bool found = false; for (uint32_t j=0; j < numModesA_; ++j) { if (modesA_[j] == mode) { extentC_[i] = extentA_[j]; found = true; break; } } for (uint32_t j=0; !found && j < numModesB_; ++j) { if (modesB_[j] == mode) { extentC_[i] = extentB_[j]; break; } } } isInitialized_ = true; } size_t getWorksize() const { return kWorksize_; } std::vector<IntType> getOutputShape() const { if (!isInitialized_) return {}; std::vector<IntType> extentC(numModesC_); for (int i=0; i < numModesC_; ++i) { extentC[i] = extentC_.at(numModesC_ - i - 1); } return extentC; } /** * Computes the einsum call A,B->C * * \param[in] A_raw device pointer of A * \param[in] B_raw device pointer of B * \param[out] C_raw device pointer of C * \param[out] wor_raw device pointer to the scratchpad memory * Dispatch to contraction */ bool execute(const cutensorHandle_t *handle, const void* A_raw, const void* B_raw, void* C_raw, void *work_raw, cudaStream_t stream) const { if (!isInitialized_) return false; cudaDataType_t cudaType = CuTensorTypeTraits<ComputeType>::cudaType; cutensorComputeType_t computeType = CuTensorTypeTraits<ComputeType>::cutensorType; cutensorTensorDescriptor_t descA; HANDLE_ERROR(cutensorInitTensorDescriptor(handle, &descA, numModesA_, extentA_.data(), NULL /* = stride */, cudaType, CUTENSOR_OP_IDENTITY)); cutensorTensorDescriptor_t descC; HANDLE_ERROR(cutensorInitTensorDescriptor(handle, &descC, numModesC_, extentC_.data(), NULL /* = stride*/, cudaType, CUTENSOR_OP_IDENTITY)); uint32_t alignmentRequirementA; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, A_raw, &descA, &alignmentRequirementA)); uint32_t alignmentRequirementC; HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, C_raw, &descC, &alignmentRequirementC)); cutensorTensorDescriptor_t descB; uint32_t alignmentRequirementB; if (numModesB_ > 0) { // dispatch to contraction HANDLE_ERROR(cutensorInitTensorDescriptor(handle, &descB, numModesB_, extentB_.data(), NULL /* = stride*/, cudaType, CUTENSOR_OP_IDENTITY)); HANDLE_ERROR(cutensorGetAlignmentRequirement(handle, B_raw, &descB, &alignmentRequirementB)); cutensorContractionDescriptor_t desc; HANDLE_ERROR(cutensorInitContractionDescriptor(handle, &desc, &descA, modesA_.data(), alignmentRequirementA, &descB, modesB_.data(), alignmentRequirementB, &descC, modesC_.data(), alignmentRequirementC, &descC, modesC_.data(), alignmentRequirementC, computeType)); cutensorAlgo_t algo = CUTENSOR_ALGO_DEFAULT; cutensorContractionFind_t find; HANDLE_ERROR(cutensorInitContractionFind( handle, &find, algo)); cutensorContractionPlan_t plan; HANDLE_ERROR(cutensorInitContractionPlan(handle, &plan, &desc, &find, kWorksize_)); typename CuTensorTypeTraits<ComputeType>::ScalarType alpha = 1; typename CuTensorTypeTraits<ComputeType>::ScalarType beta = 0; HANDLE_ERROR(cutensorContraction(handle, &plan, (void*) &alpha, A_raw, B_raw, (void*) &beta, C_raw, C_raw, work_raw, kWorksize_, stream)); } else { // dispatch to reduction typename CuTensorTypeTraits<ComputeType>::ScalarType alpha = 1; typename 
CuTensorTypeTraits<ComputeType>::ScalarType beta = 0; HANDLE_ERROR(cutensorReduction(handle, (const void*)&alpha, A_raw, &descA, modesA_.data(), (const void*)&beta, A_raw, &descC, modesC_.data(), // beta == 0 => will not be used C_raw, &descC, modesC_.data(), CUTENSOR_OP_ADD, computeType, work_raw, kWorksize_, stream)); } return true; } bool isInitialized() const { return isInitialized_; } private: static const size_t kWorksize_ = 1024ULL * 1024ULL * 8ULL * 128ULL; uint32_t numModesA_; uint32_t numModesB_; uint32_t numModesC_; bool isInitialized_; std::array<int, kMaxNumModes_> modesA_; std::array<int, kMaxNumModes_> modesB_; std::array<int, kMaxNumModes_> modesC_; std::array<int64_t, kMaxNumModes_> extentA_; std::array<int64_t, kMaxNumModes_> extentB_; std::array<int64_t, kMaxNumModes_> extentC_; }; void einsum(cutensorHandle_t *handle, const std::vector<int> &A_shape, const std::vector<int> &B_shape, const std::string &subscripts) { constexpr int kMaxNumModes_ = 40; // maximal number of modes supported by cuTENSOR typedef float Compute; Einsum<Compute, int, kMaxNumModes_> myEinsum(subscripts, A_shape, B_shape); if (!myEinsum.isInitialized()) { return; } size_t totalElementsA = 1; for (const auto e : A_shape) { totalElementsA *= e; } size_t totalElementsB = 1; for (const auto e : B_shape) { totalElementsB *= e; } auto C_shape = myEinsum.getOutputShape(); size_t totalElementsC = 1; for (const auto e : C_shape) { totalElementsC *= e; } void* A_raw, *B_raw, *output_raw, *workspace_raw; HANDLE_CUDA_ERROR(cudaMalloc(&A_raw, sizeof(Compute) * totalElementsA)); HANDLE_CUDA_ERROR(cudaMalloc(&B_raw, sizeof(Compute) * totalElementsB)); HANDLE_CUDA_ERROR(cudaMalloc(&output_raw, sizeof(Compute) * totalElementsC)); HANDLE_CUDA_ERROR(cudaMalloc(&workspace_raw, myEinsum.getWorksize())); auto ret = myEinsum.execute(handle, A_raw, B_raw, output_raw, workspace_raw, 0); cudaFree(A_raw); cudaFree(B_raw); cudaFree(output_raw); cudaFree(workspace_raw); if (!ret) { printf("%s: not supported\n", subscripts.c_str()); }else{ printf("%s: succeeded\n", subscripts.c_str()); } } int main() { cutensorHandle_t handle; cutensorInit(&handle); /********************** * Setup planCache (optional) **********************/ constexpr int32_t numCachelines = 1024; size_t sizeCache = numCachelines * sizeof(cutensorPlanCacheline_t); cutensorPlanCacheline_t* cachelines = (cutensorPlanCacheline_t*) malloc(sizeCache); HANDLE_ERROR( cutensorHandleAttachPlanCachelines(&handle, cachelines, numCachelines) ); einsum(&handle, {2, 4, 5}, {4, 8, 7}, "ijn,jmk->inkm"); // contraction (explict) einsum(&handle, {2, 4, 5}, {4, 8, 7}, "ijn,jmk"); // contraction (implicit) einsum(&handle, {2, 4, 5}, {}, "nij"); // permutation (implicit) einsum(&handle, {2, 4, 5}, {}, "nij->ijn"); // permutation (same as previous example, but explicit) einsum(&handle, {2, 4, 5}, {}, "nij->ji"); // reduction // Detach cache and free-up resources HANDLE_ERROR( cutensorHandleDetachPlanCachelines(&handle) ); if (cachelines) free (cachelines); return 0; }
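// Hedged usage sketch (not part of the original sample): the einsum() helper
// above only exercises plan construction and timing on uninitialized buffers.
// To obtain a real result, the Einsum wrapper can be driven directly -- copy
// the operands in, run execute(), and copy the contraction back out. The
// shapes, sizes, and function name below are illustrative; the scratch size
// comes from the wrapper's fixed kWorksize_.
bool matmul_via_einsum(const cutensorHandle_t* handle,
                       const std::vector<float>& A,   // 2 x 3, row-major
                       const std::vector<float>& B,   // 3 x 4, row-major
                       std::vector<float>& C)         // receives the 2 x 4 result
{
    Einsum<float, int, 12> plan("ij,jk->ik", {2, 3}, {3, 4});
    if (!plan.isInitialized()) return false;

    void *dA, *dB, *dC, *dWork;
    HANDLE_CUDA_ERROR(cudaMalloc(&dA, A.size() * sizeof(float)));
    HANDLE_CUDA_ERROR(cudaMalloc(&dB, B.size() * sizeof(float)));
    HANDLE_CUDA_ERROR(cudaMalloc(&dC, 2 * 4 * sizeof(float)));
    HANDLE_CUDA_ERROR(cudaMalloc(&dWork, plan.getWorksize()));

    HANDLE_CUDA_ERROR(cudaMemcpy(dA, A.data(), A.size() * sizeof(float), cudaMemcpyHostToDevice));
    HANDLE_CUDA_ERROR(cudaMemcpy(dB, B.data(), B.size() * sizeof(float), cudaMemcpyHostToDevice));

    bool ok = plan.execute(handle, dA, dB, dC, dWork, /*stream=*/0);

    C.resize(2 * 4);
    HANDLE_CUDA_ERROR(cudaMemcpy(C.data(), dC, C.size() * sizeof(float), cudaMemcpyDeviceToHost));

    cudaFree(dA); cudaFree(dB); cudaFree(dC); cudaFree(dWork);
    return ok;
}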
#include "utility.hpp" #include "norm.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { template <typename Tsrc, typename Tdst> __global__ void convertKernel(const Tsrc* src, int rows, int cols, int channels, int src_stride, const uchar* mask, int mask_stride, float* dst, int dst_stride, Tdst* norms_values, float alpha, float beta, NormTypes norm_type) { int threadIdx_x = threadIdx.x; int element_x = ((blockIdx.x << BLOCK_SHIFT) + threadIdx_x) << 2; int element_y = blockIdx.y; if (element_x >= cols) { return; } float scale, shift; if (norm_type == NORM_L1 || norm_type == NORM_INF) { scale = norms_values[0]; scale = scale > FLT_EPSILON ? alpha / scale : 0.f; } else if (norm_type == NORM_L2) { scale = sqrtf(norms_values[0]); scale = scale > FLT_EPSILON ? alpha / scale : 0.f; } else { // norm_type == NORM_MINMAX float src_max = norms_values[0]; float src_min = norms_values[1]; scale = src_max - src_min; scale = scale > FLT_EPSILON ? (beta - alpha) / scale : 0.f; shift = alpha - src_min * scale; } int offset; Tsrc* input; float* output; uchar* mask_row; float value0, value1, value2, value3; uchar mvalue0, mvalue1, mvalue2, mvalue3; for (; element_y < rows; element_y += gridDim.y) { offset = element_y * src_stride; input = (Tsrc*)((uchar*)src + offset); value0 = input[element_x]; value1 = input[element_x + 1]; value2 = input[element_x + 2]; value3 = input[element_x + 3]; value0 *= scale; value1 *= scale; value2 *= scale; value3 *= scale; if (norm_type == NORM_MINMAX) { value0 += shift; value1 += shift; value2 += shift; value3 += shift; } offset = element_y * dst_stride; output = (float*)((uchar*)dst + offset); if (element_x < cols - 3) { if (mask != nullptr) { mask_row = (uchar*)((uchar*)mask + element_y * mask_stride); mvalue0 = mask_row[element_x / channels]; mvalue1 = mask_row[(element_x + 1) / channels]; mvalue2 = mask_row[(element_x + 2) / channels]; mvalue3 = mask_row[(element_x + 3) / channels]; output[element_x] = mvalue0 > 0 ? value0 : 0; output[element_x + 1] = mvalue1 > 0 ? value1 : 0; output[element_x + 2] = mvalue2 > 0 ? value2 : 0; output[element_x + 3] = mvalue3 > 0 ? value3 : 0; } else { output[element_x] = value0; output[element_x + 1] = value1; output[element_x + 2] = value2; output[element_x + 3] = value3; } } else { if (mask != nullptr) { mask_row = (uchar*)((uchar*)mask + element_y * mask_stride); mvalue0 = mask_row[element_x / channels]; mvalue1 = mask_row[(element_x + 1) / channels]; mvalue2 = mask_row[(element_x + 2) / channels]; output[element_x] = mvalue0 > 0 ? value0 : 0; if (element_x < cols - 1) { output[element_x + 1] = mvalue1 > 0 ? value1 : 0; } if (element_x < cols - 2) { output[element_x + 2] = mvalue2 > 0 ? 
value2 : 0; } } else { output[element_x] = value0; if (element_x < cols - 1) { output[element_x + 1] = value1; } if (element_x < cols - 2) { output[element_x + 2] = value2; } } } } } inline void swap(float& alpha, float& beta) { float temp = alpha; alpha = beta; beta = temp; } RetCode normalize(const uchar* src, int rows, int cols, int channels, int src_stride, float* dst, int dst_stride, float alpha, float beta, NormTypes norm_type, const uchar* mask, int mask_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(rows >= 1 && cols >= 1); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar)); PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float)); PPL_ASSERT(norm_type == NORM_INF || norm_type == NORM_L1 || norm_type == NORM_L2 || norm_type == NORM_MINMAX); if (mask != nullptr) { PPL_ASSERT(mask_stride >= cols * (int)sizeof(uchar)); } cols *= channels; int grid_y, columns = divideUp(cols, 4, 2); dim3 block, grid; block.x = BLOCK_SIZE; block.y = 1; grid.x = divideUp(columns, BLOCK_SIZE, BLOCK_SHIFT); // Launchs about MAX_BLOCKS thread blocks on a GPU. grid_y = MAX_BLOCKS / grid.x; grid.y = (grid_y < rows) ? grid_y : rows; int blocks = grid.x * grid.y; long* norms_values; cudaError_t code; if (norm_type == NORM_MINMAX) { code = cudaMalloc(&norms_values, blocks * 2 * sizeof(long)); } else { code = cudaMalloc(&norms_values, blocks * sizeof(long)); } if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } if (norm_type == NORM_INF) { normLinfKernel<uchar, long><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } else if (norm_type == NORM_L1) { normL1Kernel<uchar, uint, long><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } else if (norm_type == NORM_L2) { normL2Kernel<uchar, long, long><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } else { // norm_type == NORM_MINMAX if (alpha > beta) { swap(alpha, beta); } MinMaxKernel<uchar, long><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } convertKernel<uchar, long><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, dst, dst_stride, norms_values, alpha, beta, norm_type); code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } cudaFree(norms_values); return RC_SUCCESS; } RetCode normalize(const float* src, int rows, int cols, int channels, int src_stride, float* dst, int dst_stride, float alpha, float beta, NormTypes norm_type, const uchar* mask, int mask_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(rows >= 1 && cols >= 1); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float)); PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float)); PPL_ASSERT(norm_type == NORM_INF || norm_type == NORM_L1 || norm_type == NORM_L2 || norm_type == NORM_MINMAX); if (mask != nullptr) { PPL_ASSERT(mask_stride >= cols * (int)sizeof(uchar)); } cols *= channels; int grid_y, columns = divideUp(cols, 4, 2); dim3 block, grid; block.x = BLOCK_SIZE; block.y = 1; grid.x = divideUp(columns, BLOCK_SIZE, 
BLOCK_SHIFT); // Launchs about MAX_BLOCKS thread blocks on a GPU. grid_y = MAX_BLOCKS / grid.x; grid.y = (grid_y < rows) ? grid_y : rows; int blocks = grid.x * grid.y; double* norms_values; cudaError_t code; if (norm_type == NORM_MINMAX) { code = cudaMalloc(&norms_values, blocks * 2 * sizeof(double)); } else { code = cudaMalloc(&norms_values, blocks * sizeof(double)); } if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } if (norm_type == NORM_INF) { normLinfKernel<float, double><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } else if (norm_type == NORM_L1) { normL1Kernel<float, float, double><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } else if (norm_type == NORM_L2) { normL2Kernel<float, float, double><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } else { // norm_type == NORM_MINMAX if (alpha > beta) { swap(alpha, beta); } MinMaxKernel<float, double><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, blocks, norms_values); } convertKernel<float, double><<<grid, block, 0, stream>>>(src, rows, cols, channels, src_stride, mask, mask_stride, dst, dst_stride, norms_values, alpha, beta, norm_type); code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } cudaFree(norms_values); return RC_SUCCESS; } template <> RetCode Normalize<uchar, 1>(cudaStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int outWidthStride, float* outData, float alpha, float beta, NormTypes normType, int maskWidthStride, const uchar* mask) { outWidthStride *= sizeof(float); RetCode code = normalize(inData, height, width, 1, inWidthStride, outData, outWidthStride, alpha, beta, normType, mask, maskWidthStride, stream); return code; } template <> RetCode Normalize<uchar, 3>(cudaStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int outWidthStride, float* outData, float alpha, float beta, NormTypes normType, int maskWidthStride, const uchar* mask) { outWidthStride *= sizeof(float); RetCode code = normalize(inData, height, width, 3, inWidthStride, outData, outWidthStride, alpha, beta, normType, mask, maskWidthStride, stream); return code; } template <> RetCode Normalize<uchar, 4>(cudaStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int outWidthStride, float* outData, float alpha, float beta, NormTypes normType, int maskWidthStride, const uchar* mask) { outWidthStride *= sizeof(float); RetCode code = normalize(inData, height, width, 4, inWidthStride, outData, outWidthStride, alpha, beta, normType, mask, maskWidthStride, stream); return code; } template <> RetCode Normalize<float, 1>(cudaStream_t stream, int height, int width, int inWidthStride, const float* inData, int outWidthStride, float* outData, float alpha, float beta, NormTypes normType, int maskWidthStride, const uchar* mask) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = normalize(inData, height, width, 1, inWidthStride, outData, outWidthStride, alpha, beta, normType, mask, maskWidthStride, stream); return code; } template <> RetCode Normalize<float, 3>(cudaStream_t stream, int height, int width, int inWidthStride, const float* inData, int outWidthStride, float* outData, 
float alpha, float beta, NormTypes normType, int maskWidthStride, const uchar* mask) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = normalize(inData, height, width, 3, inWidthStride, outData, outWidthStride, alpha, beta, normType, mask, maskWidthStride, stream); return code; } template <> RetCode Normalize<float, 4>(cudaStream_t stream, int height, int width, int inWidthStride, const float* inData, int outWidthStride, float* outData, float alpha, float beta, NormTypes normType, int maskWidthStride, const uchar* mask) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = normalize(inData, height, width, 4, inWidthStride, outData, outWidthStride, alpha, beta, normType, mask, maskWidthStride, stream); return code; } } // namespace cuda } // namespace cv } // namespace ppl
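// Hedged usage sketch (not part of the original source): the Normalize<T, nc>
// specializations above take width strides in elements and convert them to
// bytes internally, so a host caller only needs device buffers and the image
// geometry. Buffer and function names are illustrative, and the qualification
// ppl::cv::NORM_MINMAX for the NormTypes enumerator is an assumption about the
// public ppl.cv headers.
void normalize_minmax_demo(cudaStream_t stream) {
  const int height = 480, width = 640, channels = 3;
  const int elems = height * width * channels;

  float *d_src = nullptr, *d_dst = nullptr;
  cudaMalloc(&d_src, elems * sizeof(float));
  cudaMalloc(&d_dst, elems * sizeof(float));
  // ... upload the source image into d_src here ...

  // Rescale the whole image into [0, 1] based on its global min and max.
  auto rc = ppl::cv::cuda::Normalize<float, 3>(
      stream, height, width,
      width * channels, d_src,   // input stride, in float elements
      width * channels, d_dst,   // output stride, in float elements
      /*alpha=*/0.f, /*beta=*/1.f,
      ppl::cv::NORM_MINMAX,
      /*maskWidthStride=*/0, /*mask=*/nullptr);
  (void)rc;

  cudaFree(d_src);
  cudaFree(d_dst);
}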
//#include "../src/kat/on_device/constexpr_math.cuh" #include <kat/on_device/constexpr_math.cuh> //#include "../external/doctest/doctest.h" namespace kernels { } // namespace kernels namespace kce = kat::constexpr_; // TODO: What about invalid arguments? template <typename I> struct compile_time_execution_results { static_assert(kce::strictly_between<I>( I{ 0 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 1 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 4 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 5 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 6 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 8 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 9 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 10 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 11 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::strictly_between"); static_assert(kce::strictly_between<I>( I{ 123 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::strictly_between"); static_assert(kce::between_or_equal<I>( I{ 1 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 4 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 5 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 6 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 8 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 9 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 10 }, I{ 5 }, I{ 10 } ) == true, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 11 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::between_or_equal"); static_assert(kce::between_or_equal<I>( I{ 123 }, I{ 5 }, I{ 10 } ) == false, "kat::constexpr_::between_or_equal"); static_assert(kce::is_power_of_2<I>(I{ 1}) == true, "kat::constexpr_::is_power_of_2( 1) error"); static_assert(kce::is_power_of_2<I>(I{ 2}) == true, "kat::constexpr_::is_power_of_2( 2) error"); static_assert(kce::is_power_of_2<I>(I{ 4}) == true, "kat::constexpr_::is_power_of_2( 4) error"); static_assert(kce::is_power_of_2<I>(I{ 7}) == false, "kat::constexpr_::is_power_of_2( 7) error"); static_assert(kce::is_power_of_2<I>(I{32}) == true, "kat::constexpr_::is_power_of_2(32) error"); static_assert(kce::is_power_of_2<I>(I{33}) == false, "kat::constexpr_::is_power_of_2(33) error"); static_assert(kce::modular_increment<I>(I{ 0}, I{ 1}) == I{ 0 }, "kat::constexpr_::modular_increment error"); static_assert(kce::modular_increment<I>(I{ 1}, I{ 1}) == I{ 0 }, "kat::constexpr_::modular_increment error"); static_assert(kce::modular_increment<I>(I{ 0}, I{ 3}) == I{ 1 }, "kat::constexpr_::modular_increment error"); static_assert(kce::modular_increment<I>(I{ 1}, I{ 3}) == I{ 2 }, "kat::constexpr_::modular_increment error"); static_assert(kce::modular_increment<I>(I{ 2}, 
I{ 3}) == I{ 0 }, "kat::constexpr_::modular_increment error"); static_assert(kce::modular_increment<I>(I{ 3}, I{ 3}) == I{ 1 }, "kat::constexpr_::modular_increment error"); static_assert(kce::modular_increment<I>(I{ 4}, I{ 3}) == I{ 2 }, "kat::constexpr_::modular_increment error"); static_assert(kce::modular_decrement<I>(I{ 0}, I{ 1}) == I{ 0 }, "kat::constexpr_::modular_decrement error"); static_assert(kce::modular_decrement<I>(I{ 1}, I{ 1}) == I{ 0 }, "kat::constexpr_::modular_decrement error"); static_assert(kce::modular_decrement<I>(I{ 0}, I{ 3}) == I{ 2 }, "kat::constexpr_::modular_decrement error"); static_assert(kce::modular_decrement<I>(I{ 1}, I{ 3}) == I{ 0 }, "kat::constexpr_::modular_decrement error"); static_assert(kce::modular_decrement<I>(I{ 2}, I{ 3}) == I{ 1 }, "kat::constexpr_::modular_decrement error"); static_assert(kce::modular_decrement<I>(I{ 3}, I{ 3}) == I{ 2 }, "kat::constexpr_::modular_decrement error"); static_assert(kce::modular_decrement<I>(I{ 4}, I{ 3}) == I{ 0 }, "kat::constexpr_::modular_decrement error"); static_assert(kce::ipow<I>(I{ 0 }, 1 ) == I{ 0 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 0 }, 2 ) == I{ 0 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 0 }, 100 ) == I{ 0 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 1 }, 0 ) == I{ 1 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 1 }, 1 ) == I{ 1 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 1 }, 2 ) == I{ 1 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 1 }, 100 ) == I{ 1 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 3 }, 0 ) == I{ 1 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 3 }, 1 ) == I{ 3 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 3 }, 2 ) == I{ 9 }, "kat::constexpr_::ipow error"); static_assert(kce::ipow<I>(I{ 3 }, 4 ) == I{ 81 }, "kat::constexpr_::ipow error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 0 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 0 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 0 }, I{ 123 } ) == I{ 0 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 1 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 122 }, I{ 123 } ) == I{ 1 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 123 }, I{ 123 } ) == I{ 1 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::unsafe::div_rounding_up<I>( I{ 124 }, I{ 123 } ) == I{ 2 }, "kat::constexpr_::unsafe::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 0 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 0 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 0 }, I{ 123 } ) == I{ 0 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 1 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::div_rounding_up error"); 
static_assert(kce::div_rounding_up<I>( I{ 122 }, I{ 123 } ) == I{ 1 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 123 }, I{ 123 } ) == I{ 1 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 124 }, I{ 123 } ) == I{ 2 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( I{ 124 }, I{ 123 } ) == I{ 2 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( std::numeric_limits<I>::max() , std::numeric_limits<I>::max() - 1 ) == I{ 2 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::div_rounding_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ) == I{ 1 }, "kat::constexpr_::div_rounding_up error"); static_assert(kce::round_down<I>( I{ 0 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::round_down error"); static_assert(kce::round_down<I>( I{ 0 }, I{ 123 } ) == I{ 0 }, "kat::constexpr_::round_down error"); static_assert(kce::round_down<I>( I{ 1 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::round_down error"); static_assert(kce::round_down<I>( I{ 122 }, I{ 123 } ) == I{ 0 }, "kat::constexpr_::round_down error"); static_assert(kce::round_down<I>( I{ 123 }, I{ 123 } ) == I{ 123 }, "kat::constexpr_::round_down error"); static_assert(kce::round_down<I>( I{ 124 }, I{ 123 } ) == I{ 123 }, "kat::constexpr_::round_down error"); static_assert(kce::round_down_to_full_warps<I>( I{ 0 } ) == I{ 0 }, "kat::constexpr_::round_down_to_full_warps error"); static_assert(kce::round_down_to_full_warps<I>( I{ 1 } ) == I{ 0 }, "kat::constexpr_::round_down_to_full_warps error"); static_assert(kce::round_down_to_full_warps<I>( I{ 8 } ) == I{ 0 }, "kat::constexpr_::round_down_to_full_warps error"); static_assert(kce::round_down_to_full_warps<I>( I{ 16 } ) == I{ 0 }, "kat::constexpr_::round_down_to_full_warps error"); static_assert(kce::round_down_to_full_warps<I>( I{ 31 } ) == I{ 0 }, "kat::constexpr_::round_down_to_full_warps error"); static_assert(kce::round_down_to_full_warps<I>( I{ 32 } ) == I{ 32 }, "kat::constexpr_::round_down_to_full_warps error"); static_assert(kce::round_down_to_full_warps<I>( I{ 33 } ) == I{ 32 }, "kat::constexpr_::round_down_to_full_warps error"); static_assert(kce::round_down_to_full_warps<I>( I{ 125 } ) == I{ 96 }, "kat::constexpr_::round_down_to_full_warps error"); // TODO: Consider testing rounding-up with negative dividends static_assert(kce::unsafe::round_up<I>( I{ 0 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::unsafe::round_up<I>( I{ 0 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::unsafe::round_up<I>( I{ 0 }, I{ 123 } ) == I{ 0 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::unsafe::round_up<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::unsafe::round_up<I>( I{ 1 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::unsafe::round_up<I>( I{ 63 }, I{ 64 } ) == I{ 64 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::unsafe::round_up<I>( I{ 64 }, I{ 64 } ) == I{ 64 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::unsafe::round_up<I>( I{ 65 }, I{ 32 } ) == I{ 96 }, "kat::constexpr_::unsafe::round_up error"); static_assert(kce::round_up<I>( I{ 0 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::round_up error"); static_assert(kce::round_up<I>( I{ 0 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::round_up error"); 
static_assert(kce::round_up<I>( I{ 0 }, I{ 123 } ) == I{ 0 }, "kat::constexpr_::round_up error"); static_assert(kce::round_up<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::round_up error"); static_assert(kce::round_up<I>( I{ 1 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::round_up error"); static_assert(kce::round_up<I>( I{ 63 }, I{ 64 } ) == I{ 64 }, "kat::constexpr_::round_up error"); static_assert(kce::round_up<I>( I{ 64 }, I{ 64 } ) == I{ 64 }, "kat::constexpr_::round_up error"); static_assert(kce::round_up<I>( I{ 65 }, I{ 32 } ) == I{ 96 }, "kat::constexpr_::round_up error"); static_assert(kce::round_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ) == I{ std::numeric_limits<I>::max() }, "kat::constexpr_::round_up error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 2 }, I{ 1 } ) == I{ 2 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 3 }, I{ 1 } ) == I{ 3 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 4 }, I{ 1 } ) == I{ 4 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 123 }, I{ 1 } ) == I{ 123 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 1 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 2 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 3 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 4 }, I{ 2 } ) == I{ 4 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_down_to_power_of_2<I>( I{ 123 }, I{ 2 } ) == I{ 122 }, "kat::constexpr_::round_down_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ) == I{ 2 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ) == I{ 3 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ) == I{ 4 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 123 }, I{ 1 } ) == I{ 123 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ) == I{ 4 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ) == I{ 4 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_power_of_2<I>( I{ 123 }, I{ 2 } ) == I{ 124 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); 
static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ) == I{ 2 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ) == I{ 3 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ) == I{ 4 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ) == I{ 23 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ) == I{ 4 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ) == I{ 4 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::unsafe::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ) == I{ 64 }, "kat::constexpr_::unsafe::round_up_to_power_of_2 error"); static_assert(kce::round_up_to_full_warps<I>( I{ 0 } ) == I{ 0 }, "kat::constexpr_::round_up_to_full_warps error"); static_assert(kce::round_up_to_full_warps<I>( I{ 1 } ) == I{ 32 }, "kat::constexpr_::round_up_to_full_warps error"); static_assert(kce::round_up_to_full_warps<I>( I{ 8 } ) == I{ 32 }, "kat::constexpr_::round_up_to_full_warps error"); static_assert(kce::round_up_to_full_warps<I>( I{ 16 } ) == I{ 32 }, "kat::constexpr_::round_up_to_full_warps error"); static_assert(kce::round_up_to_full_warps<I>( I{ 31 } ) == I{ 32 }, "kat::constexpr_::round_up_to_full_warps error"); static_assert(kce::round_up_to_full_warps<I>( I{ 32 } ) == I{ 32 }, "kat::constexpr_::round_up_to_full_warps error"); static_assert(kce::round_up_to_full_warps<I>( I{ 33 } ) == I{ 64 }, "kat::constexpr_::round_up_to_full_warps error"); static_assert(kce::round_up_to_full_warps<I>( I{ 63 } ) == I{ 64 }, "kat::constexpr_::round_up_to_full_warps error"); #if __cplusplus >= 201402L static_assert(kce::gcd<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 2 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 1 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 2 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 8 }, I{ 4 } ) == I{ 4 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 4 }, I{ 8 } ) == I{ 4 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 10 }, I{ 6 } ) == I{ 2 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 120 }, I{ 70 } ) == I{ 10 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 70 }, I{ 120 } ) == I{ 10 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 97 }, I{ 120 } ) == I{ 1 }, "kat::constexpr_::gcd error"); #endif static_assert(kce::gcd<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 2 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 1 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 2 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 5 }, I{ 3 } ) == I{ 1 }, "kat::constexpr_::gcd error"); 
static_assert(kce::gcd<I>( I{ 8 }, I{ 4 } ) == I{ 4 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 4 }, I{ 8 } ) == I{ 4 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 10 }, I{ 6 } ) == I{ 2 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 120 }, I{ 70 } ) == I{ 10 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 70 }, I{ 120 } ) == I{ 10 }, "kat::constexpr_::gcd error"); static_assert(kce::gcd<I>( I{ 97 }, I{ 120 } ) == I{ 1 }, "kat::constexpr_::gcd error"); static_assert(kce::lcm<I>( I{ 1 }, I{ 1 } ) == I{ 1 }, "kat::constexpr_::lcm error"); static_assert(kce::lcm<I>( I{ 2 }, I{ 1 } ) == I{ 2 }, "kat::constexpr_::lcm error"); static_assert(kce::lcm<I>( I{ 1 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::lcm error"); static_assert(kce::lcm<I>( I{ 2 }, I{ 2 } ) == I{ 2 }, "kat::constexpr_::lcm error"); static_assert(kce::lcm<I>( I{ 5 }, I{ 3 } ) == I{ 15 }, "kat::constexpr_::lcm error"); static_assert(kce::lcm<I>( I{ 8 }, I{ 4 } ) == I{ 8 }, "kat::constexpr_::lcm error"); static_assert(kce::lcm<I>( I{ 4 }, I{ 8 } ) == I{ 8 }, "kat::constexpr_::lcm error"); static_assert(kce::lcm<I>( I{ 10 }, I{ 6 } ) == I{ 30 }, "kat::constexpr_::lcm error"); static_assert(kce::is_even<I>( I{ 0 } ) == true, "kat::constexpr_::is_even error"); static_assert(kce::is_even<I>( I{ 1 } ) == false, "kat::constexpr_::is_even error"); static_assert(kce::is_even<I>( I{ 2 } ) == true, "kat::constexpr_::is_even error"); static_assert(kce::is_even<I>( I{ 3 } ) == false, "kat::constexpr_::is_even error"); static_assert(kce::is_even<I>( I{ 123 } ) == false, "kat::constexpr_::is_even error"); static_assert(kce::is_even<I>( I{ 124 } ) == true, "kat::constexpr_::is_even error"); static_assert(kce::is_odd<I>( I{ 0 } ) == false, "kat::constexpr_::is_odd error"); static_assert(kce::is_odd<I>( I{ 1 } ) == true, "kat::constexpr_::is_odd error"); static_assert(kce::is_odd<I>( I{ 2 } ) == false, "kat::constexpr_::is_odd error"); static_assert(kce::is_odd<I>( I{ 3 } ) == true, "kat::constexpr_::is_odd error"); static_assert(kce::is_odd<I>( I{ 123 } ) == true, "kat::constexpr_::is_odd error"); static_assert(kce::is_odd<I>( I{ 124 } ) == false, "kat::constexpr_::is_odd error"); static_assert(kce::log2<I>( I{ 1 } ) == 0, "kat::constexpr_::log2 error"); static_assert(kce::log2<I>( I{ 2 } ) == 1, "kat::constexpr_::log2 error"); static_assert(kce::log2<I>( I{ 3 } ) == 1, "kat::constexpr_::log2 error"); static_assert(kce::log2<I>( I{ 4 } ) == 2, "kat::constexpr_::log2 error"); static_assert(kce::log2<I>( I{ 6 } ) == 2, "kat::constexpr_::log2 error"); static_assert(kce::log2<I>( I{ 7 } ) == 2, "kat::constexpr_::log2 error"); static_assert(kce::log2<I>( I{ 8 } ) == 3, "kat::constexpr_::log2 error"); static_assert(kce::log2<I>( I{ 127 } ) == 6, "kat::constexpr_::log2 error"); static_assert(kce::sqrt<I>( I{ 0 } ) == 0, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 1 } ) == 1, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 2 } ) == 1, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 3 } ) == 1, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 4 } ) == 2, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 5 } ) == 2, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 9 } ) == 3, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 10 } ) == 3, "kat::constexpr_::sqrt error"); static_assert(kce::sqrt<I>( I{ 127 } ) == 11, "kat::constexpr_::sqrt error"); static_assert(kce::div_by_power_of_2<I>( I{ 
0 }, I { 1 }) == I{ 0 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 1 }, I { 1 }) == I{ 1 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 111 }, I { 1 }) == I{ 111 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 0 }, I { 2 }) == I{ 0 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 1 }, I { 2 }) == I{ 0 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 2 }, I { 2 }) == I{ 1 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 3 }, I { 2 }) == I{ 1 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 4 }, I { 2 }) == I{ 2 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 111 }, I { 2 }) == I{ 55 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 0 }, I { 16 }) == I{ 0 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 1 }, I { 16 }) == I{ 0 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 15 }, I { 16 }) == I{ 0 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 16 }, I { 16 }) == I{ 1 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 17 }, I { 16 }) == I{ 1 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 32 }, I { 16 }) == I{ 2 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::div_by_power_of_2<I>( I{ 111 }, I { 16 }) == I{ 6 }, "kat::constexpr_::div_by_power_of_2 error"); static_assert(kce::divides<I>( I{ 1 }, I{ 0 } ) == true, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 2 }, I{ 0 } ) == true, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 3 }, I{ 0 } ) == true, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 1 }, I{ 1 } ) == true, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 2 }, I{ 1 } ) == false, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 3 }, I{ 1 } ) == false, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 1 }, I{ 2 } ) == true, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 2 }, I{ 2 } ) == true, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 3 }, I{ 2 } ) == false, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 4 }, I{ 2 } ) == false, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 6 }, I{ 9 } ) == false, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 9 }, I{ 6 } ) == false, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 4 }, I{ 24 } ) == true, "kat::constexpr_::divides error"); static_assert(kce::divides<I>( I{ 24 }, I{ 4 } ) == false, "kat::constexpr_::divides error"); static_assert(kce::is_divisible_by<I>( I{ 0 }, I{ 1 } ) == true, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 0 }, I{ 2 } ) == true, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 0 }, I{ 3 } ) == true, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 1 }, I{ 1 } ) == true, "kat::constexpr_::is_divisible_by error"); 
static_assert(kce::is_divisible_by<I>( I{ 1 }, I{ 2 } ) == false, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 1 }, I{ 3 } ) == false, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 2 }, I{ 1 } ) == true, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 2 }, I{ 2 } ) == true, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 2 }, I{ 3 } ) == false, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 2 }, I{ 4 } ) == false, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 9 }, I{ 6 } ) == false, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 6 }, I{ 9 } ) == false, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 24 }, I{ 4 } ) == true, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by<I>( I{ 4 }, I{ 24 } ) == false, "kat::constexpr_::is_divisible_by error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 1 } ) == true, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 2 } ) == true, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 1 } ) == true, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 2 } ) == false, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 1 } ) == true, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 2 } ) == true, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 4 } ) == false, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 24 }, I{ 4 } ) == true, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 72 }, I{ 16 } ) == false, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::is_divisible_by_power_of_2<I>( I{ 64 }, I{ 16 } ) == true, "kat::constexpr_::is_divisible_by_power_of_2 error"); static_assert(kce::power_of_2_divides<I>( I{ 1 }, I{ 0 } ) == true, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 2 }, I{ 0 } ) == true, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 1 }, I{ 1 } ) == true, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 2 }, I{ 1 } ) == false, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 1 }, I{ 2 } ) == true, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 2 }, I{ 2 } ) == true, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 4 }, I{ 2 } ) == false, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 4 }, I{ 24 } ) == true, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 16 }, I{ 72 } ) == false, "kat::constexpr_::power_of_2_divides error"); static_assert(kce::power_of_2_divides<I>( I{ 16 }, I{ 64 } ) == true, "kat::constexpr_::power_of_2_divides error"); 
static_assert(kce::log2_of_power_of_2<I>( I{ 1 } ) == I { 0 }, "kat::constexpr_::log2_of_power_of_2"); static_assert(kce::log2_of_power_of_2<I>( I{ 2 } ) == I { 1 }, "kat::constexpr_::log2_of_power_of_2"); static_assert(kce::log2_of_power_of_2<I>( I{ 4 } ) == I { 2 }, "kat::constexpr_::log2_of_power_of_2"); static_assert(kce::log2_of_power_of_2<I>( I{ 8 } ) == I { 3 }, "kat::constexpr_::log2_of_power_of_2"); static_assert(kce::log2_of_power_of_2<I>( I{ 16 } ) == I { 4 }, "kat::constexpr_::log2_of_power_of_2"); static_assert(kce::log2_of_power_of_2<I>( I{ 32 } ) == I { 5 }, "kat::constexpr_::log2_of_power_of_2"); static_assert(kce::log2_of_power_of_2<I>( I{ 64 } ) == I { 6 }, "kat::constexpr_::log2_of_power_of_2"); static_assert(kce::modulo_power_of_2<I>( I{ 0 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 1 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 2 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 3 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 4 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 5 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 63 }, I{ 1 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 0 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 1 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 2 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 3 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 4 }, I{ 2 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 5 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 63 }, I{ 2 } ) == I{ 1 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 0 }, I{ 4 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 1 }, I{ 4 } ) == I{ 1 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 2 }, I{ 4 } ) == I{ 2 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 3 }, I{ 4 } ) == I{ 3 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 4 }, I{ 4 } ) == I{ 0 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 5 }, I{ 4 } ) == I{ 1 }, "kat::constexpr_::modulo_power_of_2 error"); static_assert(kce::modulo_power_of_2<I>( I{ 63 }, I{ 4 } ) == I{ 3 }, "kat::constexpr_::modulo_power_of_2 error"); }; // TODO: // * Test between_or_equal and strictly_between with differing types for all 3 arguments // * Some floating-point tests // * gcd tests with values of different types // * Some tests with negative values #define INSTANTIATE_CONSTEXPR_MATH_TEST(_tp) \ compile_time_execution_results<_tp> UNIQUE_IDENTIFIER(test_struct_); \ MAP(INSTANTIATE_CONSTEXPR_MATH_TEST, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, 
uint64_t, char, short, int, long, long long, signed char, signed short, signed int, signed long, signed long long, unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long); TEST_SUITE("constexpr_math") { TEST_CASE_TEMPLATE("run-time on-host", T, int32_t, int64_t, float, double) { (void) 0; // Don't need to do anything } } // TEST_SUITE("constexpr_math")
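// Hedged illustration (not kat's actual code): the semantics pinned down by
// the static_asserts above are easy to restate. div_rounding_up is an
// overflow-safe ceiling division, and round_up_to_full_warps rounds a thread
// count up to a multiple of the 32-lane warp. A minimal standalone
// reimplementation that satisfies the same checks:
#include <cstdint>
#include <limits>

template <typename I>
constexpr I sketch_div_rounding_up(I dividend, I divisor) {
    // Quotient plus one iff there is a remainder; avoids the
    // dividend + divisor - 1 overflow of the naive formulation.
    return dividend / divisor + ((dividend % divisor == I{0}) ? I{0} : I{1});
}

template <typename I>
constexpr I sketch_round_up_to_full_warps(I x) {
    // warp_size == 32: add 31 and clear the low five bits.
    return (x + I{31}) & ~I{31};
}

static_assert(sketch_div_rounding_up<int>(124, 123) == 2, "ceiling division");
static_assert(sketch_div_rounding_up<int>(std::numeric_limits<int>::max() - 1,
                                          std::numeric_limits<int>::max()) == 1,
              "no overflow on large operands");
static_assert(sketch_round_up_to_full_warps<int>(33) == 64, "round up to a full warp");
static_assert(sketch_round_up_to_full_warps<int>(32) == 32, "already a full warp");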
#include <gsl/gsl_math.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_eigen.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_linalg.h> #include "int_lib/cints.h" #include "int_lib/crys.h" #include "typedef.h" #include "basis.h" #include "scf.h" #include "cuda_rys_sp.h" #include "cuda_rys_dp.h" int main(int argc, char* argv[]) { // use spherical harmonic d function? int use_5d = 1; // use double precision? int use_dp = 1; if (argc > 1) { for (int i = 1; i < argc; ++ i) { if (0 == strcmp(argv[i],"sp")) { use_dp = 0; } if (0 == strcmp(argv[i],"dp")) { use_dp = 1; } if (0 == strcmp(argv[i],"6d")) { use_5d = 0; } if (0 == strcmp(argv[i],"5d")) { use_5d = 1; } } } // initialize timer clock_t t0, t1; double time_in_sec, time_total, time_mat_J, time_mat_K; t0 = clock(); std::string time_txt (""); time_total = 0.0; time_mat_J = 0.0; time_mat_K = 0.0; Atom *p_atom = (Atom *)my_malloc(sizeof(Atom) * 1); Basis *p_basis = (Basis *)my_malloc(sizeof(Basis) * 1); //====== parse geom.dat ======== // get number of atoms p_atom->num = get_natoms(); fprintf(stdout, "Natoms = %d\n", p_atom->num); // atomic coordinates and atom name p_atom->pos = (double **)my_malloc(sizeof(double *) * p_atom->num); p_atom->name = (char **)my_malloc(sizeof(char *) * p_atom->num); for (int iatom = 0; iatom < p_atom->num; ++ iatom) { p_atom->pos[iatom] = (double *)my_malloc(sizeof(double) * CART_DIM); p_atom->name[iatom] = (char *)my_malloc(sizeof(char) * 5); } // nuclear charge p_atom->nuc_chg = (int *)my_malloc(sizeof(int) * p_atom->num); // read atomic positions, nuclear charge and atom name read_geom(p_atom); fprintf(stdout, "Coordinates in atomic unit:\n"); for (int iatom = 0; iatom < p_atom->num; ++ iatom) { fprintf(stdout, "%s (%.1f) %.10f %.10f %.10f\n", p_atom->name[iatom], (double)p_atom->nuc_chg[iatom], p_atom->pos[iatom][0], p_atom->pos[iatom][1], p_atom->pos[iatom][2]); } // nuclear repulsion energy double ene_nucl = calc_ene_nucl(p_atom); fprintf(stdout, "Nuclear repulsion = %-20.10f\n", ene_nucl); //====== parse basis.dat ======== // parse basis functions parse_basis(p_atom, p_basis, use_5d); fprintf(stdout, "System Nbasis = %d\n", p_basis->num); // basis function exponents, coefficients, and normalization factors p_basis->expon = (double **)my_malloc(sizeof(double *) * p_basis->num); p_basis->coef = (double **)my_malloc(sizeof(double *) * p_basis->num); p_basis->norm = (double **)my_malloc(sizeof(double *) * p_basis->num); // number of primitive functions in each contracted funciton p_basis->nprims = (int *)my_malloc(sizeof(int) * p_basis->num); // Cartesian coordinates and l,m,n numbers p_basis->xbas = (double *)my_malloc(sizeof(double) * p_basis->num); p_basis->ybas = (double *)my_malloc(sizeof(double) * p_basis->num); p_basis->zbas = (double *)my_malloc(sizeof(double) * p_basis->num); p_basis->lx = (int **)my_malloc(sizeof(int *) * p_basis->num); p_basis->ly = (int **)my_malloc(sizeof(int *) * p_basis->num); p_basis->lz = (int **)my_malloc(sizeof(int *) * p_basis->num); // read basis set (also calculate normalization factors) read_basis(p_atom, p_basis, use_5d); #ifdef DEBUG print_basis(p_basis); #endif t1 = clock(); time_in_sec = (t1 - t0) / (double)CLOCKS_PER_SEC; time_txt += "Time_Basis = " + std::to_string(time_in_sec) + " sec\n"; time_total += time_in_sec; t0 = t1; //====== one-electron integrals ======== // overlap, kinetic energy and nuclear attraction integral gsl_matrix *S = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *T = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *V = 
gsl_matrix_alloc(p_basis->num, p_basis->num); for (int a = 0; a < p_basis->num; ++ a) { for (int b = 0; b <= a; ++ b) { // overlap double s = calc_int_overlap(p_basis, a, b); // kinetic energy double t = calc_int_kinetic(p_basis, a, b); // nuclear repulsion double v = calc_int_nuc_attr(p_basis, a, b, p_atom); // save one-electron integrals in matrices gsl_matrix_set(S, a, b, s); gsl_matrix_set(T, a, b, t); gsl_matrix_set(V, a, b, v); if (a != b) { gsl_matrix_set(S, b, a, s); gsl_matrix_set(T, b, a, t); gsl_matrix_set(V, b, a, v); } } } t1 = clock(); time_in_sec = (t1 - t0) / (double)CLOCKS_PER_SEC; time_txt += "Time_1e_Ints = " + std::to_string(time_in_sec) + " sec\n"; time_total += time_in_sec; t0 = t1; //====== allocate memory for arrays on host ======== // number of primitive basis functions (pbf) int n_pbf = 0; for (int a = 0; a < p_basis->num; ++ a) { n_pbf += p_basis->nprims[a]; } // number of unique pbf pairs int n_pbf_combi = n_pbf * (n_pbf + 1) / 2; size_t n_PBF_bytes = sizeof(double) * n_pbf; size_t n_PBF_bytes_int = sizeof(int) * n_pbf; size_t n_PI_bytes = sizeof(double) * n_pbf_combi; // pbf_xlec contains information of each pbf: x,y,z, lx,ly,lz, expon, coef*norm // pbf_to_cbf returns index of contracted basis function (cbf) of each pbf double *h_pbf_xlec = (double *)my_malloc(n_PBF_bytes * 8); int *h_pbf_to_cbf = (int *)my_malloc(n_PBF_bytes_int); // mat_J_PI and mat_K_PI are primitive J and K matrices double *h_mat_J_PI = (double *)my_malloc(n_PI_bytes); double *h_mat_K_PI = (double *)my_malloc(n_PI_bytes); // counter for pbf_xlec; i_pbf for pbf_to_cbf int counter = 0; int i_pbf = 0; for (int a = 0; a < p_basis->num; ++ a) { for (int i = 0; i < p_basis->nprims[a]; ++ i) { h_pbf_to_cbf[i_pbf] = a; ++ i_pbf; h_pbf_xlec[counter] = p_basis->xbas[a]; ++ counter; h_pbf_xlec[counter] = p_basis->ybas[a]; ++ counter; h_pbf_xlec[counter] = p_basis->zbas[a]; ++ counter; h_pbf_xlec[counter] = (double)p_basis->lx[a][i]; ++ counter; h_pbf_xlec[counter] = (double)p_basis->ly[a][i]; ++ counter; h_pbf_xlec[counter] = (double)p_basis->lz[a][i]; ++ counter; h_pbf_xlec[counter] = p_basis->expon[a][i]; ++ counter; h_pbf_xlec[counter] = p_basis->coef[a][i] * p_basis->norm[a][i]; ++ counter; // note that 'norm' is absorbed into 'coef' } } assert(counter == n_pbf * 8); // number of unique pairs of contracted basis functions int n_combi = p_basis->num * (p_basis->num + 1) / 2; size_t n_CI_bytes = sizeof(double) * n_combi; // D: density matrix // J: Coulomb matrix // K: exchange matrix // Q: Schwartz pre-screening matrix double *h_mat_D = (double *)my_malloc(n_CI_bytes); double *h_mat_J = (double *)my_malloc(n_CI_bytes); double *h_mat_K = (double *)my_malloc(n_CI_bytes); double *h_mat_Q = (double *)my_malloc(n_CI_bytes); //====== allocate memory for arrays on device ======== // initialize arrays on device double *dev_pbf_xlec; int *dev_pbf_to_cbf; double *dev_mat_D, *dev_mat_Q, *dev_mat_J_PI, *dev_mat_K_PI; // memory usage on device size_t mem_on_dev = n_PBF_bytes*8 + n_PBF_bytes_int + n_PI_bytes*2 + n_CI_bytes*2; fprintf(stdout, "Mem_on_Device = "); if (mem_on_dev > 1000000000) { fprintf(stdout, "%zu GB\n", mem_on_dev / 1000000000); } else if (mem_on_dev > 1000000) { fprintf(stdout, "%zu MB\n", mem_on_dev / 1000000); } else if (mem_on_dev > 1000) { fprintf(stdout, "%zu KB\n", mem_on_dev / 1000); } else { fprintf(stdout, "%zu B\n", mem_on_dev); } // allocate memories for arrays on device my_cuda_safe(hipMalloc((void**)&dev_pbf_xlec, n_PBF_bytes * 8),"alloc_pbf_xlec"); 
my_cuda_safe(hipMalloc((void**)&dev_pbf_to_cbf, n_PBF_bytes_int),"alloc_pbf_to_cbf"); my_cuda_safe(hipMalloc((void**)&dev_mat_J_PI, n_PI_bytes),"alloc_mat_J_PI"); my_cuda_safe(hipMalloc((void**)&dev_mat_K_PI, n_PI_bytes),"alloc_mat_K_PI"); my_cuda_safe(hipMalloc((void**)&dev_mat_D, n_CI_bytes),"alloc_D"); my_cuda_safe(hipMalloc((void**)&dev_mat_Q, n_CI_bytes),"alloc_Q"); // copy data from host to device my_cuda_safe(hipMemcpy(dev_pbf_xlec, h_pbf_xlec, n_PBF_bytes * 8, hipMemcpyHostToDevice),"mem_pbf_xlec"); my_cuda_safe(hipMemcpy(dev_pbf_to_cbf, h_pbf_to_cbf, n_PBF_bytes_int, hipMemcpyHostToDevice),"mem_pbf_to_cbf"); t1 = clock(); time_in_sec = (t1 - t0) / (double)CLOCKS_PER_SEC; time_txt += "Time_2e_Prep = " + std::to_string(time_in_sec) + " sec\n"; time_total += time_in_sec; t0 = t1; //====== start SCF calculation ======== // NOTE: assume zero charge and closed-shell electronics structure int n_elec = 0; for (int iatom = 0; iatom < p_atom->num; ++ iatom) { n_elec += p_atom->nuc_chg[iatom]; } if (n_elec % 2 != 0) { fprintf(stderr, "Error: Number of electrons (%d) is not even!\n", n_elec); } int n_occ = n_elec / 2; // get core Hamiltonian gsl_matrix *H_core = gsl_matrix_alloc(p_basis->num, p_basis->num); sum_H_core(p_basis->num, H_core, T, V); // get S^-1/2 gsl_matrix *S_invsqrt = gsl_matrix_alloc(p_basis->num, p_basis->num); diag_overlap(p_basis->num, S, S_invsqrt); #ifdef DEBUG printf("S:\n"); my_print_matrix(S); printf("T:\n"); my_print_matrix(T); printf("V:\n"); my_print_matrix(V); printf("H_core:\n"); my_print_matrix(H_core); printf("S^-1/2:\n"); my_print_matrix(S_invsqrt); #endif // matrices, vector and variables to be used in SCF gsl_matrix *D = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *D_prev = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *D_diff = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *Fock = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *Fock_prev = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *Coef = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_vector *emo = gsl_vector_alloc(p_basis->num); double ene_elec, ene_total, ene_prev; // Coulomb(J) and exchange(K) matrices gsl_matrix *J = gsl_matrix_alloc(p_basis->num, p_basis->num); gsl_matrix *K = gsl_matrix_alloc(p_basis->num, p_basis->num); // initialize density matrix gsl_matrix_set_zero(D_prev); gsl_matrix_set_zero(D); ene_prev = 0.0; // Generalized Wolfsberg-Helmholtz initial guess init_guess_GWH(p_basis, H_core, S, Fock); Fock_to_Coef(p_basis->num, Fock, S_invsqrt, Coef, emo); Coef_to_Dens(p_basis->num, n_occ, Coef, D_prev); gsl_matrix_memcpy(D_diff, D_prev); gsl_matrix_memcpy(Fock_prev, Fock); // DIIS error and Fock matrices double ***diis_err = (double ***)my_malloc(sizeof(double **) * MAX_DIIS_DIM); double ***diis_Fock = (double ***)my_malloc(sizeof(double **) * MAX_DIIS_DIM); int idiis, ibasis; for (idiis = 0; idiis < MAX_DIIS_DIM; ++ idiis) { diis_err[idiis] = (double **)my_malloc(sizeof(double *) * p_basis->num); diis_Fock[idiis] = (double **)my_malloc(sizeof(double *) * p_basis->num); for (ibasis = 0; ibasis < p_basis->num; ++ ibasis) { diis_err[idiis][ibasis] = (double *)my_malloc(sizeof(double) * p_basis->num); diis_Fock[idiis][ibasis] = (double *)my_malloc(sizeof(double) * p_basis->num); } } // DIIS index and dimension int diis_index = 0; int diis_dim = 0; double delta_DIIS; fprintf(stdout, "%5s %20s %20s %20s %20s\n", "Iter", "E_total", "delta_E", "rms_D", "delta_DIIS"); // mat_Q: sqrt(ab|ab) for prescreening of two-electron integrals for (int 
a = 0; a < p_basis->num; ++ a) { for (int b = 0; b <= a; ++ b) { h_mat_Q[ij2intindex(a,b)] = calc_int_eri_rys(p_basis, a, b, a, b); } } my_cuda_safe(hipMemcpy(dev_mat_Q, h_mat_Q, n_CI_bytes, hipMemcpyHostToDevice),"mem_Q"); t1 = clock(); time_in_sec = (t1 - t0) / (double)CLOCKS_PER_SEC; time_txt += "Time_SCF_Init = " + std::to_string(time_in_sec) + " sec\n"; time_total += time_in_sec; t0 = t1; // start SCF iterations int iter = 0; while (1) { /*------------------------------------* * SCF procedure: * Form new Fock matrix * F' = S^-1/2 * F * S^-1/2 * diagonalize F' matrix to get C' * C = S^-1/2 * C' * compute new density matrix *------------------------------------*/ // when iter > 0, use incremental Fock matrix formation and DIIS int use_incr_fock = iter; int use_diis = 0; if (p_basis->num > 5) { use_diis = iter; } // sometimes DIIS does not work well // copy density matrix to device for (int a = 0; a < p_basis->num; ++ a) { for (int b = 0; b <= a; ++ b) { if (use_incr_fock) { h_mat_D[ij2intindex(a,b)] = gsl_matrix_get(D_diff,a,b); } else { h_mat_D[ij2intindex(a,b)] = gsl_matrix_get(D_prev,a,b); } } } my_cuda_safe(hipMemcpy(dev_mat_D, h_mat_D, n_CI_bytes, hipMemcpyHostToDevice),"mem_D"); // create 8x8 thread blocks dim3 block_size(BLOCKSIZE,BLOCKSIZE); // configure a two dimensional grid dim3 grid_size(n_pbf,n_pbf); // timer for J and K matrices clock_t t2,t3; t2 = clock(); // use 1T1PI for J and K matrices if (use_dp) { hipLaunchKernelGGL(cuda_mat_J_PI_dp, grid_size, block_size, 0, 0, dev_pbf_xlec, dev_pbf_to_cbf, n_pbf, dev_mat_D, dev_mat_J_PI, dev_mat_Q); } else { hipLaunchKernelGGL(cuda_mat_J_PI, grid_size, block_size, 0, 0, dev_pbf_xlec, dev_pbf_to_cbf, n_pbf, dev_mat_D, dev_mat_J_PI, dev_mat_Q); } my_cuda_safe(hipMemcpy(h_mat_J_PI, dev_mat_J_PI, n_PI_bytes, hipMemcpyDeviceToHost),"mem_mat_J_PI"); t3 = clock(); time_in_sec = (t3 - t2) / (double)CLOCKS_PER_SEC; time_mat_J += time_in_sec; t2 = t3; if (use_dp) { hipLaunchKernelGGL(cuda_mat_K_PI_dp, grid_size, block_size, 0, 0, dev_pbf_xlec, dev_pbf_to_cbf, n_pbf, dev_mat_D, dev_mat_K_PI, dev_mat_Q); } else { hipLaunchKernelGGL(cuda_mat_K_PI, grid_size, block_size, 0, 0, dev_pbf_xlec, dev_pbf_to_cbf, n_pbf, dev_mat_D, dev_mat_K_PI, dev_mat_Q); } my_cuda_safe(hipMemcpy(h_mat_K_PI, dev_mat_K_PI, n_PI_bytes, hipMemcpyDeviceToHost),"mem_mat_K_PI"); t3 = clock(); time_in_sec = (t3 - t2) / (double)CLOCKS_PER_SEC; time_mat_K += time_in_sec; t2 = t3; // sum up primitive J and K matrices to contracted ones for (int a = 0; a < p_basis->num; ++ a) { for (int b = 0; b <= a; ++ b) { int ab = ij2intindex(a,b); h_mat_J[ab] = 0.0; h_mat_K[ab] = 0.0; } } for (int i = 0; i < n_pbf; ++ i) { int a = h_pbf_to_cbf[i]; for (int j = 0; j < n_pbf; ++ j) { int b = h_pbf_to_cbf[j]; if (a < b) { continue; } int ab = ij2intindex(a,b); int ij = ij2intindex(i,j); h_mat_J[ab] += h_mat_J_PI[ij]; h_mat_K[ab] += h_mat_K_PI[ij]; } } // use J and K matrix from GPU for (int a = 0; a < p_basis->num; ++ a) { for (int b = 0; b < p_basis->num; ++ b) { int ab = ij2intindex(a,b); gsl_matrix_set(J,a,b,h_mat_J[ab]); gsl_matrix_set(K,a,b,h_mat_K[ab]); } } #ifdef DEBUG printf("J:\n"); my_print_matrix(J); printf("K:\n"); my_print_matrix(K); #endif if (use_incr_fock) { form_Fock(p_basis->num, Fock_prev, J, K, Fock); } else { form_Fock(p_basis->num, H_core, J, K, Fock); } // save Fock_prev at this point, so as not to mix with DIIS gsl_matrix_memcpy(Fock_prev, Fock); // DIIS if (use_diis) { update_Fock_DIIS(&diis_dim, &diis_index, &delta_DIIS, Fock, D_prev, S, p_basis, diis_err, 
diis_Fock); } // update density matrix and energies Fock_to_Coef(p_basis->num, Fock, S_invsqrt, Coef, emo); Coef_to_Dens(p_basis->num, n_occ, Coef, D); ene_elec = get_elec_ene(p_basis->num, D, H_core, Fock); ene_total = ene_nucl + ene_elec; #ifdef DEBUG printf("F:\n"); my_print_matrix(Fock); printf("C:\n"); my_print_matrix(Coef); printf("P:\n"); my_print_matrix(D); #endif // check convergence double delta_E = ene_total - ene_prev; double rms_D = 0.0; double dd_max = 0.0; int mu, nu; for (mu = 0; mu < p_basis->num; ++ mu) { for (nu = 0; nu < p_basis->num; ++ nu) { double dd = gsl_matrix_get(D, mu, nu) - gsl_matrix_get(D_prev, mu, nu); gsl_matrix_set(D_diff, mu, nu, dd); rms_D += dd * dd; if (fabs(dd) > dd_max) { dd_max = fabs(dd); } } } rms_D = sqrt(rms_D) / p_basis->num; fprintf(stdout, "%5d %20.10f", iter, ene_total); if (iter > 0) { fprintf(stdout, " %20.10f %20.10f", delta_E, rms_D); } if (use_diis && iter > 1) { fprintf(stdout, " %20.10f", delta_DIIS); } fprintf(stdout, "\n"); // convergence criteria if (fabs(delta_E/ene_total) < 1.0e-9 && rms_D < 1.0e-7 && dd_max < 1.0e-6) { break; } // update energy and density matrix for the next iteration ene_prev = ene_total; gsl_matrix_memcpy(D_prev, D); // count iterations ++ iter; } // SCF converged fprintf(stdout, "SCF converged! E_total = %20.10f\n", ene_total); t1 = clock(); time_in_sec = (t1 - t0) / (double)CLOCKS_PER_SEC; time_txt += "Time_SCF_Conv = " + std::to_string(time_in_sec) + " sec\n"; time_total += time_in_sec; t0 = t1; // print MO information fprintf(stdout, "%5s %10s %15s %12s\n", "MO", "State", "E(Eh)", "E(eV)"); for (ibasis = 0; ibasis < p_basis->num; ++ ibasis) { char occ[10]; if (ibasis < n_occ) { strcpy(occ, "occ."); } else { strcpy(occ, "virt."); } double ener = gsl_vector_get(emo, ibasis); fprintf(stdout, "%5d %10s %15.5f %12.2f\n", ibasis + 1, occ, ener, ener * HARTREE2EV); } //====== free device memories ======== hipFree(dev_mat_D); hipFree(dev_mat_Q); hipFree(dev_mat_J_PI); hipFree(dev_mat_K_PI); //====== free host memories ======== free(h_mat_D); free(h_mat_Q); free(h_mat_J); free(h_mat_K); // free DIIS error and Fock matrices for (idiis = 0; idiis < MAX_DIIS_DIM; ++ idiis) { for (ibasis = 0; ibasis < p_basis->num; ++ ibasis) { free(diis_err[idiis][ibasis]); free(diis_Fock[idiis][ibasis]); } free(diis_err[idiis]); free(diis_Fock[idiis]); } free(diis_err); free(diis_Fock); // free arrays for one- and two-electron integral gsl_matrix_free(S); gsl_matrix_free(T); gsl_matrix_free(V); // free matrices and vector for SCF gsl_matrix_free(H_core); gsl_matrix_free(S_invsqrt); gsl_matrix_free(D_prev); gsl_matrix_free(Fock); gsl_matrix_free(Coef); gsl_matrix_free(D); gsl_vector_free(emo); gsl_matrix_free(D_diff); gsl_matrix_free(Fock_prev); gsl_matrix_free(J); gsl_matrix_free(K); // free arrays for geometry for (int iatom = 0; iatom < p_atom->num; ++ iatom) { free(p_atom->pos[iatom]); free(p_atom->name[iatom]); } free(p_atom->pos); free(p_atom->name); free(p_atom->nuc_chg); free(p_atom); // free arrays for basis set for (ibasis = 0; ibasis < p_basis->num; ++ ibasis) { free(p_basis->expon[ibasis]); free(p_basis->coef[ibasis]); free(p_basis->lx[ibasis]); free(p_basis->ly[ibasis]); free(p_basis->lz[ibasis]); free(p_basis->norm[ibasis]); } free(p_basis->expon); free(p_basis->coef); free(p_basis->lx); free(p_basis->ly); free(p_basis->lz); free(p_basis->norm); free(p_basis->xbas); free(p_basis->ybas); free(p_basis->zbas); free(p_basis->nprims); free(p_basis); t1 = clock(); time_in_sec = (t1 - t0) / (double)CLOCKS_PER_SEC; time_txt += 
"Time_Finalize = " + std::to_string(time_in_sec) + " sec\n"; time_total += time_in_sec; t0 = t1; std::cout << time_txt; std::cout << "Total time used " << time_total << " sec\n"; std::cout << "Mat_J time used " << time_mat_J << " sec\n"; std::cout << "Mat_K time used " << time_mat_K << " sec\n"; //====== the end of program ======== return 0; }
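The loop above implements the Roothaan step spelled out in its SCF comment block: form F' = S^-1/2 * F * S^-1/2, diagonalize F' for C' and the orbital energies, back-transform C = S^-1/2 * C', and rebuild the closed-shell density. In the program those steps are hidden inside Fock_to_Coef and Coef_to_Dens; the following is only a minimal GSL sketch of one such step, with an illustrative name (roothaan_step is not part of the program above) and assuming a symmetric S^-1/2 as produced by diag_overlap.

/* Minimal sketch of one Roothaan step, following the SCF comment block:
 *   F' = X^T F X with X = S^-1/2,  F' C' = C' e,  C = X C',
 *   D_{mu,nu} = 2 * sum_{i in occ} C_{mu,i} C_{nu,i}   (closed shell)
 * Illustrative only; not the program's Fock_to_Coef / Coef_to_Dens. */
#include <gsl/gsl_blas.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>

static void roothaan_step(int n, int n_occ,
                          const gsl_matrix *Fock, const gsl_matrix *S_invsqrt,
                          gsl_matrix *Coef, gsl_vector *emo, gsl_matrix *D)
{
    gsl_matrix *tmp    = gsl_matrix_alloc(n, n);
    gsl_matrix *Fprime = gsl_matrix_alloc(n, n);
    gsl_matrix *Cprime = gsl_matrix_alloc(n, n);

    /* F' = X F X (X = S^-1/2 is symmetric, so X^T = X) */
    gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, Fock, S_invsqrt, 0.0, tmp);
    gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, S_invsqrt, tmp, 0.0, Fprime);

    /* diagonalize F' (Fprime is scratch and gets overwritten) */
    gsl_eigen_symmv_workspace *w = gsl_eigen_symmv_alloc(n);
    gsl_eigen_symmv(Fprime, emo, Cprime, w);
    gsl_eigen_symmv_sort(emo, Cprime, GSL_EIGEN_SORT_VAL_ASC);
    gsl_eigen_symmv_free(w);

    /* back-transform: C = X C' */
    gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, S_invsqrt, Cprime, 0.0, Coef);

    /* closed-shell density from the n_occ lowest orbitals */
    for (int mu = 0; mu < n; ++mu) {
        for (int nu = 0; nu < n; ++nu) {
            double d = 0.0;
            for (int i = 0; i < n_occ; ++i) {
                d += 2.0 * gsl_matrix_get(Coef, mu, i) * gsl_matrix_get(Coef, nu, i);
            }
            gsl_matrix_set(D, mu, nu, d);
        }
    }

    gsl_matrix_free(tmp);
    gsl_matrix_free(Fprime);
    gsl_matrix_free(Cprime);
}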
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <curand_kernel.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cufft.h> #include "runtime/device/gpu/cuda_common.h" #define TWO_DIVIDED_BY_SQRT_PI 1.1283791670218446 #define CONSTANT_kB 0.00198716 #define CONSTANT_Pi 3.1415926535897932f static dim3 thread_LJ(8, 32); __constant__ float XRD3D_Ma[4] = {1.0 / 6.0, -0.5, 0.5, -1.0 / 6.0}; __constant__ float XRD3D_Mb[4] = {0, 0.5, -1, 0.5}; __constant__ float XRD3D_Mc[4] = {0, 0.5, 0, -0.5}; __constant__ float XRD3D_Md[4] = {0, 1.0 / 6.0, 4.0 / 6.0, 1.0 / 6.0}; __constant__ float XRD3D_dMa[4] = {0.5, -1.5, 1.5, -0.5}; __constant__ float XRD3D_dMb[4] = {0, 1, -2, 1}; __constant__ float XRD3D_dMc[4] = {0, 0.5, 0, -0.5}; struct VECTOR { float x; float y; float z; }; struct INT_VECTOR { int int_x; int int_y; int int_z; }; struct UNSIGNED_INT_VECTOR { unsigned int uint_x; unsigned int uint_y; unsigned int uint_z; }; struct NEIGHBOR_LIST { int atom_numbers; int *atom_serial; }; struct UINT_VECTOR_LJ_TYPE { unsigned int uint_x; unsigned int uint_y; unsigned int uint_z; int LJ_type; float charge; }; struct ATOM_NEAR { int *atom_serial; }; struct GRID_BUCKET { int *atom_serial; }; struct GRID_POINTER { int *grid_serial; }; struct VIRTUAL_TYPE_0 { float virtual_atom; float from_1; float h_double; }; struct VIRTUAL_TYPE_1 { float virtual_atom; float from_1; float from_2; float a; }; struct VIRTUAL_TYPE_2 { float virtual_atom; float from_1; float from_2; float from_3; float a; float b; }; struct VIRTUAL_TYPE_3 { float virtual_atom; float from_1; float from_2; float from_3; float d; float k; }; struct CONSTRAIN_PAIR { int atom_i_serial; int atom_j_serial; float constant_r; float constrain_k; }; __device__ __host__ static inline VECTOR operator-(const VECTOR &veca, const VECTOR &vecb) { VECTOR vec; vec.x = veca.x - vecb.x; vec.y = veca.y - vecb.y; vec.z = veca.z - vecb.z; return vec; } __device__ __host__ static inline VECTOR Get_Periodic_Displacement(const UNSIGNED_INT_VECTOR uvec_a, const UNSIGNED_INT_VECTOR uvec_b, const VECTOR scaler) { VECTOR dr; dr.x = (static_cast<int>(uvec_a.uint_x - uvec_b.uint_x)) * scaler.x; dr.y = (static_cast<int>(uvec_a.uint_y - uvec_b.uint_y)) * scaler.y; dr.z = (static_cast<int>(uvec_a.uint_z - uvec_b.uint_z)) * scaler.z; return dr; } __device__ __host__ static inline VECTOR Get_Periodic_Displacement(const UINT_VECTOR_LJ_TYPE uvec_a, const UINT_VECTOR_LJ_TYPE uvec_b, const VECTOR scaler) { VECTOR dr; dr.x = (static_cast<int>(uvec_a.uint_x - uvec_b.uint_x)) * scaler.x; dr.y = (static_cast<int>(uvec_a.uint_y - uvec_b.uint_y)) * scaler.y; dr.z = (static_cast<int>(uvec_a.uint_z - uvec_b.uint_z)) * scaler.z; return dr; } __device__ __host__ static inline VECTOR Get_Periodic_Displacement(const VECTOR vec_a, const VECTOR vec_b, const VECTOR box_length) { VECTOR dr; dr = vec_a - vec_b; dr.x = dr.x - floorf(dr.x / box_length.x + 0.5) * box_length.x; dr.y = dr.y - floorf(dr.y / box_length.y + 0.5) * box_length.y; dr.z = dr.z - floorf(dr.z / box_length.z + 0.5) * box_length.z; return dr; } __device__ __host__ static inline VECTOR Get_Periodic_Displacement(const VECTOR vec_a, const VECTOR vec_b, const VECTOR box_length, const VECTOR box_length_inverse) { VECTOR dr; dr = vec_a - vec_b; dr.x = dr.x - floorf(dr.x * box_length_inverse.x + 0.5) * box_length.x; dr.y = dr.y - floorf(dr.y * box_length_inverse.y + 0.5) * box_length.y; dr.z = dr.z - floorf(dr.z * box_length_inverse.z + 0.5) * box_length.z; return dr; } __device__ 
__host__ static inline VECTOR operator+(const VECTOR &veca, const VECTOR &vecb) { VECTOR vec; vec.x = veca.x + vecb.x; vec.y = veca.y + vecb.y; vec.z = veca.z + vecb.z; return vec; } __device__ __host__ static inline float operator*(const VECTOR &veca, const VECTOR &vecb) { return veca.x * vecb.x + veca.y * vecb.y + veca.z * vecb.z; } __device__ __host__ static inline VECTOR operator*(const float &a, const VECTOR &vecb) { VECTOR vec; vec.x = a * vecb.x; vec.y = a * vecb.y; vec.z = a * vecb.z; return vec; } __device__ __host__ static inline VECTOR operator-(const VECTOR &vecb) { VECTOR vec; vec.x = -vecb.x; vec.y = -vecb.y; vec.z = -vecb.z; return vec; } __device__ __host__ static inline VECTOR operator^(const VECTOR &veca, const VECTOR &vecb) { VECTOR vec; vec.x = veca.y * vecb.z - veca.z * vecb.y; vec.y = veca.z * vecb.x - veca.x * vecb.z; vec.z = veca.x * vecb.y - veca.y * vecb.x; return vec; } __device__ __host__ static inline float normfloat(const float *x, const float *y, int i, int j) { float s = 0; s += (x[3 * i + 0] - y[3 * j + 0]) * (x[3 * i + 0] - y[3 * j + 0]); s += (x[3 * i + 1] - y[3 * j + 1]) * (x[3 * i + 1] - y[3 * j + 1]); s += (x[3 * i + 2] - y[3 * j + 2]) * (x[3 * i + 2] - y[3 * j + 2]); return s; } __global__ static void construct_neighbor_list_kernel(int atom_numbers, int max_neighbor_numbers, int *nl_atom_numbers, int *nl_atom_serial, NEIGHBOR_LIST *nl) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) { nl[i].atom_numbers = nl_atom_numbers[i]; nl[i].atom_serial = nl_atom_serial + i * max_neighbor_numbers; } } __global__ static void construct_atom_near(int atom_numbers, int near_numbers, int *atom_serial, ATOM_NEAR *an) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) { an[i].atom_serial = atom_serial + i * near_numbers; } } static inline bool Malloc_Safely(void **address, size_t size) { address[0] = NULL; address[0] = reinterpret_cast<void *>(malloc(size)); if (address[0] != NULL) { return true; } else { printf("malloc failed!\n"); getchar(); return false; } } static inline bool Cuda_Malloc_Safely(void **address, size_t size) { cudaError_t cuda_error = cudaMalloc(&address[0], size); if (cuda_error == 0) { return true; } else { printf("cudaMalloc failed! 
error %d\n", cuda_error); getchar(); return false; } } __global__ static void construct_constrain_pair(int constrain_pair_numbers, const int *atom_i_serials, const int *atom_j_serials, const float *constant_rs, const float *constrain_ks, CONSTRAIN_PAIR *constrain_pair) { int atom_i = blockDim.x * blockIdx.x + threadIdx.x; if (atom_i < constrain_pair_numbers) { constrain_pair[atom_i].atom_i_serial = atom_i_serials[atom_i]; constrain_pair[atom_i].atom_j_serial = atom_j_serials[atom_i]; constrain_pair[atom_i].constant_r = constant_rs[atom_i]; constrain_pair[atom_i].constrain_k = constrain_ks[atom_i]; } } __global__ static void Copy_Crd_To_New_Crd_Start(const int atom_numbers, const UNSIGNED_INT_VECTOR *crd, UINT_VECTOR_LJ_TYPE *new_crd, const int *LJ_type, const float *charge) { int atom_i = blockDim.x * blockIdx.x + threadIdx.x; if (atom_i < atom_numbers) { new_crd[atom_i].uint_x = crd[atom_i].uint_x; new_crd[atom_i].uint_y = crd[atom_i].uint_y; new_crd[atom_i].uint_z = crd[atom_i].uint_z; new_crd[atom_i].LJ_type = LJ_type[atom_i]; new_crd[atom_i].charge = charge[atom_i]; } } // void Constrain_Force_Cycle_With_Virial(int atom_numbers, int constrain_pair_numbers, const unsigned int *uint_crd_f, // const float *scaler_f, float *constrain_pair_f, const float *pair_dr_f, // const int *atom_i_serials, const int *atom_j_serials, const float // *constant_rs, const float *constrain_ks, float *test_frc_f, float // *d_atom_virial, cudaStream_t stream); __global__ static void Rand_Normal(const int float4_numbers, curandStatePhilox4_32_10_t *rand_state, float4 *rand_float4) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < float4_numbers) { rand_float4[i] = curand_normal4(&rand_state[i]); } } __global__ static void Setup_Rand_Normal_Kernel(const int float4_numbers, curandStatePhilox4_32_10_t *rand_state, const int seed) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ if (id < float4_numbers) { curand_init(seed, id, 0, &rand_state[id]); } } __global__ static void Reset_List(const int element_numbers, int *list, const int replace_element) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < element_numbers) { list[i] = replace_element; } } __global__ static void Reset_List(const int element_numbers, float *list, const float replace_element) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < element_numbers) { list[i] = replace_element; } } __global__ static void Sum_Of_List(const int element_numbers, const float *list, float *sum) { if (threadIdx.x == 0) { sum[0] = 0.; } __syncthreads(); float lin = 0.; for (int i = threadIdx.x; i < element_numbers; i = i + blockDim.x) { lin = lin + list[i]; } atomicAdd(sum, lin); } __global__ static void Scale_List(const int element_numbers, float *list, float scaler) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < element_numbers) { list[i] = list[i] * scaler; } } __global__ static void Copy_List(const int element_numbers, const int *origin_list, int *list) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < element_numbers) { list[i] = origin_list[i]; } } __global__ static void Copy_List(const int element_numbers, const float *origin_list, float *list) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < element_numbers) { list[i] = origin_list[i]; } } __global__ static void Print(const size_t size, const float *input_x) { for (size_t i = 0; i < size; i++) { printf("%f\n", input_x[i]); } return; } __global__ static void Print(const size_t size, const int 
*input_x) { for (size_t i = 0; i < size; i++) { printf("%d\n", input_x[i]); } return; } __device__ static VECTOR Make_Vector_Not_Exceed_Value(VECTOR vector, const float value) { return fminf(1.0, value * rnorm3df(vector.x, vector.y, vector.z)) * vector; } #endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_COMMON_SPONGE_H_
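The Get_Periodic_Displacement overloads above all reduce to the same per-component minimum-image wrap, dr -= floorf(dr / box + 0.5) * box, which keeps each component in [-box/2, box/2); the unsigned-integer overloads instead rely on wrap-around of the unsigned coordinate subtraction. Below is a small standalone host-side sketch of the float version; Vec3 and minimum_image are local to this example, not the header's types.

// Standalone illustration of the minimum-image convention used by the
// float overloads of Get_Periodic_Displacement above. Local types only.
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static Vec3 minimum_image(Vec3 a, Vec3 b, Vec3 box) {
    Vec3 dr = { a.x - b.x, a.y - b.y, a.z - b.z };
    // wrap each component into [-box/2, box/2)
    dr.x -= floorf(dr.x / box.x + 0.5f) * box.x;
    dr.y -= floorf(dr.y / box.y + 0.5f) * box.y;
    dr.z -= floorf(dr.z / box.z + 0.5f) * box.z;
    return dr;
}

int main() {
    Vec3 box = { 30.0f, 30.0f, 30.0f };
    Vec3 a = { 1.0f, 1.0f, 1.0f };
    Vec3 b = { 29.0f, 29.0f, 29.0f };   // near the opposite face of the box
    Vec3 dr = minimum_image(a, b, box);
    // Expect roughly (2, 2, 2) rather than the raw (-28, -28, -28).
    printf("dr = (%f, %f, %f)\n", dr.x, dr.y, dr.z);
    return 0;
}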
#define TPB 320 __constant__ uint32_t c_PaddedMessage80[32]; // padded message (80 bytes + padding) // #ifdef NOASM #include "cuda_x11_aes_noasm.cu" // #else // #include "cuda_x11_aes.cu" // #endif static __device__ __forceinline__ void AES_ROUND_NOKEY( const uint32_t*const __restrict__ sharedMemory, uint32_t &x0, uint32_t &x1, uint32_t &x2, uint32_t &x3) { aes_round(sharedMemory, x0, x1, x2, x3, x0, x1, x2, x3); } static __device__ __forceinline__ void KEY_EXPAND_ELT( const uint32_t*const __restrict__ sharedMemory, uint32_t &k0, uint32_t &k1, uint32_t &k2, uint32_t &k3) { uint32_t y0, y1, y2, y3; aes_round(sharedMemory, k0, k1, k2, k3, y0, y1, y2, y3); k0 = y1; k1 = y2; k2 = y3; k3 = y0; } static __device__ __forceinline__ void shavite_gpu_init(uint32_t *sharedMemory) { /* each thread startup will fill a uint32 */ if (threadIdx.x < 256) { /* each thread startup will fill a uint32 */ sharedMemory[threadIdx.x] = d_AES0[threadIdx.x]; sharedMemory[threadIdx.x + 256] = ROL8(d_AES0[threadIdx.x]); sharedMemory[threadIdx.x + 512] = ROL16(d_AES0[threadIdx.x]); sharedMemory[threadIdx.x + 768] = ROL24(d_AES0[threadIdx.x]); // sharedMemory[threadIdx.x + 64 * 2 ] = d_AES0[threadIdx.x + 64 * 2]; // sharedMemory[threadIdx.x + 64 * 2 + 256] = d_AES1[threadIdx.x + 64 * 2]; // sharedMemory[threadIdx.x + 64 * 2 + 512] = d_AES2[threadIdx.x + 64 * 2]; // sharedMemory[threadIdx.x + 64 * 2 + 768] = d_AES3[threadIdx.x + 64 * 2]; } } __global__ __launch_bounds__(TPB, 3) void x11_shavite512_gpu_hash_64(uint32_t threads, uint32_t *const __restrict__ g_hash) { __shared__ __align__(128) uint32_t sharedMemory[1024]; shavite_gpu_init(sharedMemory); __syncthreads(); const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if(thread < threads) { uint32_t *Hash = &g_hash[thread * 16]; uint32_t rk[32]; uint32_t state[16]; rk[0] = Hash[0]; uint32_t x0 = Hash[0] ^ 0xD1901A06; rk[1] = Hash[1]; uint32_t x1 = Hash[1] ^ 0x430AE307; rk[2] = Hash[2]; uint32_t x2 = Hash[2] ^ 0xB29F5CD1; rk[3] = Hash[3]; uint32_t x3 = Hash[3] ^ 0xDF07FBFC; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); for(int i = 4; i < 16; i += 4) { rk[i ] = Hash[i ]; x0 ^= Hash[i ]; rk[i + 1] = Hash[i + 1]; x1 ^= Hash[i + 1]; rk[i + 2] = Hash[i + 2]; x2 ^= Hash[i + 2]; rk[i + 3] = Hash[i + 3]; x3 ^= Hash[i + 3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); } state[0] = x0 ^ 0x72FCCDD8; state[1] = x1 ^ 0x79CA4727; state[2] = x2 ^ 0x128A077B; state[3] = x3 ^ 0x40D55AEC; // 1 KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[3] ^= (0x02000000UL ^ 0xFFFFFFFFUL); //rk[31]; rk[0] ^= 512; // rk[3] ^= 0xFFFFFFFF; x0 = state[0] ^ rk[0]; x1 = state[1] ^ rk[1]; x2 = state[2] ^ rk[2]; x3 = state[3] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[ 8] = 0xBCFBF352; state[ 9] = 0x8B2064DD; state[10] = 0x6BB1D446; state[11] = 0x6BF6D76B; state[12] = x0 ^ 0xE275EADE; 
state[13] = x1 ^ 0x502D9FCD; state[14] = x2 ^ 0xB9357178; state[15] = x3 ^ 0x022A4B9A; rk[16] = rk[12] ^ 0x63636363UL; rk[17] = rk[13] ^ 0x63636363UL; rk[18] = rk[14] ^ 0x63636363UL; rk[19] = rk[15] ^ 0x8acdcd24UL; rk[20] = rk[12]; rk[21] = rk[13]; rk[22] = rk[14]; rk[23] = rk[15] ^ 0xE9AEAE47UL; rk[24] = rk[16]; rk[25] = rk[17]; rk[26] = rk[18]; rk[27] = rk[15] ^ 0xA2F1D930UL; rk[28] = rk[12]; rk[29] = rk[13]; rk[30] = rk[14]; rk[31] = rk[23]; x0 = rk[12] ^ 0xDF989031UL; x1 = rk[13] ^ 0xE84307BEUL; x2 = rk[14] ^ 0x08D2B725UL; x3 = rk[15] ^ 0xE13B1A4FUL; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[4] = x0 ^ 0xD1901A06; state[5] = x1 ^ 0x430AE307; state[6] = x2 ^ 0xB29F5CD1; state[7] = x3 ^ 0xDF07FBFC; rk[0] ^= rk[25]; x0 = state[12] ^ rk[0]; rk[1] ^= rk[26]; x1 = state[13] ^ rk[1]; rk[2] ^= rk[27]; x2 = state[14] ^ rk[2]; rk[3] ^= rk[28]; x3 = state[15] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[8] ^= x0; state[9] ^= x1; state[10] ^= x2; state[11] ^= x3; rk[16] ^= rk[9]; x0 = state[4] ^ rk[16]; rk[17] ^= rk[10]; x1 = state[5] ^ rk[17]; rk[18] ^= rk[11]; x2 = state[6] ^ rk[18]; rk[19] ^= rk[12]; x3 = state[7] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[0] ^= x0; state[1] ^= x1; state[2] ^= x2; state[3] ^= x3; /* round 3, 7, 11 */ KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = state[8] ^ rk[0]; x1 = state[9] ^ rk[1]; x2 = state[10] ^ rk[2]; x3 = state[11] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; 
AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[4] ^= x0; state[5] ^= x1; state[6] ^= x2; state[7] ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = state[0] ^ rk[16]; x1 = state[1] ^ rk[17]; x2 = state[2] ^ rk[18]; x3 = state[3] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[12] ^= x0; state[13] ^= x1; state[14] ^= x2; state[15] ^= x3; /* round 4, 8, 12 */ rk[0] ^= rk[25]; x0 = state[4] ^ rk[0]; rk[1] ^= rk[26]; x1 = state[5] ^ rk[1]; rk[2] ^= rk[27]; x2 = state[6] ^ rk[2]; rk[3] ^= rk[28]; x3 = state[7] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[0] ^= x0; state[1] ^= x1; state[2] ^= x2; state[3] ^= x3; rk[16] ^= rk[9]; x0 = state[12] ^ rk[16]; rk[17] ^= rk[10]; x1 = state[13] ^ rk[17]; rk[18] ^= rk[11]; x2 = state[14] ^ rk[18]; rk[19] ^= rk[12]; x3 = state[15] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[8] ^= x0; state[9] ^= x1; state[10] ^= x2; state[11] ^= x3; // 2 KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = state[0] ^ rk[0]; x1 = state[1] ^ rk[1]; x2 = state[2] ^ rk[2]; x3 = state[3] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; rk[7] ^= ~512; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], 
rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[12] ^= x0; state[13] ^= x1; state[14] ^= x2; state[15] ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = state[8] ^ rk[16]; x1 = state[9] ^ rk[17]; x2 = state[10] ^ rk[18]; x3 = state[11] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[4] ^= x0; state[5] ^= x1; state[6] ^= x2; state[7] ^= x3; rk[0] ^= rk[25]; x0 = state[12] ^ rk[0]; rk[1] ^= rk[26]; x1 = state[13] ^ rk[1]; rk[2] ^= rk[27]; x2 = state[14] ^ rk[2]; rk[3] ^= rk[28]; x3 = state[15] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[8] ^= x0; state[9] ^= x1; state[10] ^= x2; state[11] ^= x3; rk[16] ^= rk[9]; x0 = state[4] ^ rk[16]; rk[17] ^= rk[10]; x1 = state[5] ^ rk[17]; rk[18] ^= rk[11]; x2 = state[6] ^ rk[18]; rk[19] ^= rk[12]; x3 = state[7] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[0] ^= x0; state[1] ^= x1; state[2] ^= x2; state[3] ^= x3; /* round 3, 7, 11 */ KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = state[8] ^ rk[0]; x1 = state[9] ^ rk[1]; x2 = state[10] ^ rk[2]; x3 = state[11] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= 
rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[4] ^= x0; state[5] ^= x1; state[6] ^= x2; state[7] ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = state[0] ^ rk[16]; x1 = state[1] ^ rk[17]; x2 = state[2] ^ rk[18]; x3 = state[3] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[12] ^= x0; state[13] ^= x1; state[14] ^= x2; state[15] ^= x3; /* round 4, 8, 12 */ rk[0] ^= rk[25]; x0 = state[4] ^ rk[0]; rk[1] ^= rk[26]; x1 = state[5] ^ rk[1]; rk[2] ^= rk[27]; x2 = state[6] ^ rk[2]; rk[3] ^= rk[28]; x3 = state[7] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[0] ^= x0; state[1] ^= x1; state[2] ^= x2; state[3] ^= x3; rk[16] ^= rk[9]; x0 = state[12] ^ rk[16]; rk[17] ^= rk[10]; x1 = state[13] ^ rk[17]; rk[18] ^= rk[11]; x2 = state[14] ^ rk[18]; rk[19] ^= rk[12]; x3 = state[15] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[8] ^= x0; state[9] ^= x1; state[10] ^= x2; state[11] ^= x3; // 3 KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = state[0] ^ rk[0]; x1 = state[1] ^ rk[1]; x2 = state[2] ^ rk[2]; x3 = state[3] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); 
KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[12] ^= x0; state[13] ^= x1; state[14] ^= x2; state[15] ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = state[8] ^ rk[16]; x1 = state[9] ^ rk[17]; x2 = state[10] ^ rk[18]; x3 = state[11] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= ~rk[27]; rk[30] ^= 512; // rk[31] ^= 0xFFFFFFFF; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[4] ^= x0; state[5] ^= x1; state[6] ^= x2; state[7] ^= x3; rk[0] ^= rk[25]; x0 = state[12] ^ rk[0]; rk[1] ^= rk[26]; x1 = state[13] ^ rk[1]; rk[2] ^= rk[27]; x2 = state[14] ^ rk[2]; rk[3] ^= rk[28]; x3 = state[15] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[8] ^= x0; state[9] ^= x1; state[10] ^= x2; state[11] ^= x3; rk[16] ^= rk[9]; x0 = state[4] ^ rk[16]; rk[17] ^= rk[10]; x1 = state[5] ^ rk[17]; rk[18] ^= rk[11]; x2 = state[6] ^ rk[18]; rk[19] ^= rk[12]; x3 = state[7] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[0] ^= x0; state[1] ^= x1; state[2] ^= x2; state[3] ^= x3; /* round 3, 7, 11 */ KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = state[8] ^ rk[0]; x1 = state[9] ^ rk[1]; x2 = state[10] ^ rk[2]; x3 = state[11] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); 
rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[4] ^= x0; state[5] ^= x1; state[6] ^= x2; state[7] ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = state[0] ^ rk[16]; x1 = state[1] ^ rk[17]; x2 = state[2] ^ rk[18]; x3 = state[3] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[12] ^= x0; state[13] ^= x1; state[14] ^= x2; state[15] ^= x3; /* round 4, 8, 12 */ rk[0] ^= rk[25]; x0 = state[4] ^ rk[0]; rk[1] ^= rk[26]; x1 = state[5] ^ rk[1]; rk[2] ^= rk[27]; x2 = state[6] ^ rk[2]; rk[3] ^= rk[28]; x3 = state[7] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[0] ^= x0; state[1] ^= x1; state[2] ^= x2; state[3] ^= x3; rk[16] ^= rk[9]; x0 = state[12] ^ rk[16]; rk[17] ^= rk[10]; x1 = state[13] ^ rk[17]; rk[18] ^= rk[11]; x2 = state[14] ^ rk[18]; rk[19] ^= rk[12]; x3 = state[15] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[8] ^= x0; state[9] ^= x1; state[10] ^= x2; state[11] ^= x3; /* round 13 */ KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = state[0] ^ rk[0]; x1 = state[1] ^ rk[1]; x2 = state[2] ^ 
rk[2]; x3 = state[3] ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[12] ^= x0; state[13] ^= x1; state[14] ^= x2; state[15] ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = state[8] ^ rk[16]; x1 = state[9] ^ rk[17]; x2 = state[10] ^ rk[18]; x3 = state[11] ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21] ^ 512; rk[26] ^= rk[22]; rk[27] ^= ~rk[23]; //^ 0xFFFFFFFF; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); state[4] ^= x0; state[5] ^= x1; state[6] ^= x2; state[7] ^= x3; Hash[0] = 0x72FCCDD8 ^ state[8]; Hash[1] = 0x79CA4727 ^ state[9]; Hash[2] = 0x128A077B ^ state[10]; Hash[3] = 0x40D55AEC ^ state[11]; Hash[4] = 0xD1901A06 ^ state[12]; Hash[5] = 0x430AE307 ^ state[13]; Hash[6] = 0xB29F5CD1 ^ state[14]; Hash[7] = 0xDF07FBFC ^ state[15]; Hash[8] = 0x8E45D73D ^ state[0]; Hash[9] = 0x681AB538 ^ state[1]; Hash[10] = 0xBDE86578 ^ state[2]; Hash[11] = 0xDD577E47 ^ state[3]; Hash[12] = 0xE275EADE ^ state[4]; Hash[13] = 0x502D9FCD ^ state[5]; Hash[14] = 0xB9357178 ^ state[6]; Hash[15] = 0x022A4B9A ^ state[7]; } } __device__ __forceinline__ static void c512(const uint32_t*const __restrict__ sharedMemory, uint32_t *const __restrict__ state, uint32_t *const __restrict__ msg) { uint32_t p0, p1, p2, p3, p4, p5, p6, p7; uint32_t p8, p9, pA, pB, pC, pD, pE, pF; uint32_t x0, x1, x2, x3; uint32_t rk[32]; uint32_t i; const uint32_t counter = 640; p0 = state[0x0]; p1 = state[0x1]; p2 = state[0x2]; p3 = state[0x3]; p4 = state[0x4]; p5 = state[0x5]; p6 = state[0x6]; p7 = state[0x7]; p8 = state[0x8]; p9 = state[0x9]; pA = state[0xA]; pB = state[0xB]; pC = state[0xC]; pD = state[0xD]; pE = state[0xE]; pF = state[0xF]; x0 = state[0x4]; x1 = state[0x5]; x2 = state[0x6]; x3 = state[0x7]; #pragma unroll for(i = 0; i<16; i += 4) { rk[i] = msg[i]; x0 ^= msg[i]; rk[i + 1] = msg[i + 1]; x1 ^= msg[i + 1]; rk[i + 2] = msg[i + 2]; x2 ^= msg[i + 2]; rk[i + 3] = msg[i + 3]; x3 ^= msg[i + 3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); } p0 ^= x0; p1 ^= x1; p2 ^= x2; p3 ^= x3; x0 = pC; x1 = pD; x2 = pE; x3 = pF; #pragma unroll for(i = 16; i<32; i += 4) { rk[i] = msg[i]; x0 ^= msg[i]; rk[i + 1] = msg[i + 1]; x1 ^= msg[i + 
1]; rk[i + 2] = msg[i + 2]; x2 ^= msg[i + 2]; rk[i + 3] = msg[i + 3]; x3 ^= msg[i + 3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); } p8 ^= x0; p9 ^= x1; pA ^= x2; pB ^= x3; // 1 KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= ~rk[31]; rk[0] ^= counter; //rk[3] ^= 0xFFFFFFFF; x0 = p0 ^ rk[0]; x1 = p1 ^ rk[1]; x2 = p2 ^ rk[2]; x3 = p3 ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); pC ^= x0; pD ^= x1; pE ^= x2; pF ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = p8 ^ rk[16]; x1 = p9 ^ rk[17]; x2 = pA ^ rk[18]; x3 = pB ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p4 ^= x0; p5 ^= x1; p6 ^= x2; p7 ^= x3; rk[0] ^= rk[25]; x0 = pC ^ rk[0]; rk[1] ^= rk[26]; x1 = pD ^ rk[1]; rk[2] ^= rk[27]; x2 = pE ^ rk[2]; rk[3] ^= rk[28]; x3 = pF ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p8 ^= x0; p9 ^= x1; pA ^= x2; pB ^= x3; rk[16] ^= rk[9]; x0 = p4 ^ rk[16]; rk[17] ^= rk[10]; x1 = p5 ^ rk[17]; rk[18] ^= rk[11]; x2 = p6 ^ rk[18]; rk[19] ^= rk[12]; x3 = p7 ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; 
rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p0 ^= x0; p1 ^= x1; p2 ^= x2; p3 ^= x3; /* round 3, 7, 11 */ KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = p8 ^ rk[0]; x1 = p9 ^ rk[1]; x2 = pA ^ rk[2]; x3 = pB ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p4 ^= x0; p5 ^= x1; p6 ^= x2; p7 ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = p0 ^ rk[16]; x1 = p1 ^ rk[17]; x2 = p2 ^ rk[18]; x3 = p3 ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); pC ^= x0; pD ^= x1; pE ^= x2; pF ^= x3; /* round 4, 8, 12 */ rk[0] ^= rk[25]; x0 = p4 ^ rk[0]; rk[1] ^= rk[26]; x1 = p5 ^ rk[1]; rk[2] ^= rk[27]; x2 = p6 ^ rk[2]; rk[3] ^= rk[28]; x3 = p7 ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p0 ^= x0; p1 ^= x1; p2 ^= x2; p3 ^= x3; rk[16] ^= rk[9]; x0 = pC ^ rk[16]; rk[17] ^= rk[10]; x1 = pD ^ rk[17]; rk[18] ^= rk[11]; x2 = pE ^ rk[18]; rk[19] ^= rk[12]; x3 = pF ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, 
x0, x1, x2, x3); p8 ^= x0; p9 ^= x1; pA ^= x2; pB ^= x3; // 2 KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = p0 ^ rk[0]; x1 = p1 ^ rk[1]; x2 = p2 ^ rk[2]; x3 = p3 ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; rk[7] ^= ~counter; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); pC ^= x0; pD ^= x1; pE ^= x2; pF ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = p8 ^ rk[16]; x1 = p9 ^ rk[17]; x2 = pA ^ rk[18]; x3 = pB ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p4 ^= x0; p5 ^= x1; p6 ^= x2; p7 ^= x3; rk[0] ^= rk[25]; x0 = pC ^ rk[0]; rk[1] ^= rk[26]; x1 = pD ^ rk[1]; rk[2] ^= rk[27]; x2 = pE ^ rk[2]; rk[3] ^= rk[28]; x3 = pF ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p8 ^= x0; p9 ^= x1; pA ^= x2; pB ^= x3; rk[16] ^= rk[9]; x0 = p4 ^ rk[16]; rk[17] ^= rk[10]; x1 = p5 ^ rk[17]; rk[18] ^= rk[11]; x2 = p6 ^ rk[18]; rk[19] ^= rk[12]; x3 = p7 ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p0 ^= x0; p1 ^= x1; p2 ^= x2; p3 ^= x3; /* round 3, 7, 11 */ 
KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = p8 ^ rk[0]; x1 = p9 ^ rk[1]; x2 = pA ^ rk[2]; x3 = pB ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p4 ^= x0; p5 ^= x1; p6 ^= x2; p7 ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = p0 ^ rk[16]; x1 = p1 ^ rk[17]; x2 = p2 ^ rk[18]; x3 = p3 ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); pC ^= x0; pD ^= x1; pE ^= x2; pF ^= x3; /* round 4, 8, 12 */ rk[0] ^= rk[25]; x0 = p4 ^ rk[0]; rk[1] ^= rk[26]; x1 = p5 ^ rk[1]; rk[2] ^= rk[27]; x2 = p6 ^ rk[2]; rk[3] ^= rk[28]; x3 = p7 ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p0 ^= x0; p1 ^= x1; p2 ^= x2; p3 ^= x3; rk[16] ^= rk[9]; x0 = pC ^ rk[16]; rk[17] ^= rk[10]; x1 = pD ^ rk[17]; rk[18] ^= rk[11]; x2 = pE ^ rk[18]; rk[19] ^= rk[12]; x3 = pF ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p8 ^= x0; p9 ^= x1; pA ^= x2; pB ^= x3; // 3 KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; 
rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = p0 ^ rk[0]; x1 = p1 ^ rk[1]; x2 = p2 ^ rk[2]; x3 = p3 ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); pC ^= x0; pD ^= x1; pE ^= x2; pF ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = p8 ^ rk[16]; x1 = p9 ^ rk[17]; x2 = pA ^ rk[18]; x3 = pB ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= ~rk[27]; rk[30] ^= counter; //rk[31] ^= 0xFFFFFFFF; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p4 ^= x0; p5 ^= x1; p6 ^= x2; p7 ^= x3; rk[0] ^= rk[25]; x0 = pC ^ rk[0]; rk[1] ^= rk[26]; x1 = pD ^ rk[1]; rk[2] ^= rk[27]; x2 = pE ^ rk[2]; rk[3] ^= rk[28]; x3 = pF ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p8 ^= x0; p9 ^= x1; pA ^= x2; pB ^= x3; rk[16] ^= rk[9]; x0 = p4 ^ rk[16]; rk[17] ^= rk[10]; x1 = p5 ^ rk[17]; rk[18] ^= rk[11]; x2 = p6 ^ rk[18]; rk[19] ^= rk[12]; x3 = p7 ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p0 ^= x0; p1 ^= x1; p2 ^= x2; p3 ^= x3; /* round 3, 7, 11 */ KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] 
^= rk[31]; x0 = p8 ^ rk[0]; x1 = p9 ^ rk[1]; x2 = pA ^ rk[2]; x3 = pB ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p4 ^= x0; p5 ^= x1; p6 ^= x2; p7 ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = p0 ^ rk[16]; x1 = p1 ^ rk[17]; x2 = p2 ^ rk[18]; x3 = p3 ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21]; rk[26] ^= rk[22]; rk[27] ^= rk[23]; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); pC ^= x0; pD ^= x1; pE ^= x2; pF ^= x3; /* round 4, 8, 12 */ rk[0] ^= rk[25]; x0 = p4 ^ rk[0]; rk[1] ^= rk[26]; x1 = p5 ^ rk[1]; rk[2] ^= rk[27]; x2 = p6 ^ rk[2]; rk[3] ^= rk[28]; x3 = p7 ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[4] ^= rk[29]; x0 ^= rk[4]; rk[5] ^= rk[30]; x1 ^= rk[5]; rk[6] ^= rk[31]; x2 ^= rk[6]; rk[7] ^= rk[0]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[8] ^= rk[1]; x0 ^= rk[8]; rk[9] ^= rk[2]; x1 ^= rk[9]; rk[10] ^= rk[3]; x2 ^= rk[10]; rk[11] ^= rk[4]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[12] ^= rk[5]; x0 ^= rk[12]; rk[13] ^= rk[6]; x1 ^= rk[13]; rk[14] ^= rk[7]; x2 ^= rk[14]; rk[15] ^= rk[8]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p0 ^= x0; p1 ^= x1; p2 ^= x2; p3 ^= x3; rk[16] ^= rk[9]; x0 = pC ^ rk[16]; rk[17] ^= rk[10]; x1 = pD ^ rk[17]; rk[18] ^= rk[11]; x2 = pE ^ rk[18]; rk[19] ^= rk[12]; x3 = pF ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[20] ^= rk[13]; x0 ^= rk[20]; rk[21] ^= rk[14]; x1 ^= rk[21]; rk[22] ^= rk[15]; x2 ^= rk[22]; rk[23] ^= rk[16]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[24] ^= rk[17]; x0 ^= rk[24]; rk[25] ^= rk[18]; x1 ^= rk[25]; rk[26] ^= rk[19]; x2 ^= rk[26]; rk[27] ^= rk[20]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); rk[28] ^= rk[21]; x0 ^= rk[28]; rk[29] ^= rk[22]; x1 ^= rk[29]; rk[30] ^= rk[23]; x2 ^= rk[30]; rk[31] ^= rk[24]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p8 ^= x0; p9 ^= x1; pA ^= x2; pB ^= x3; /* round 13 */ KEY_EXPAND_ELT(sharedMemory, rk[0], rk[1], rk[2], rk[3]); rk[0] ^= rk[28]; rk[1] ^= rk[29]; rk[2] ^= rk[30]; rk[3] ^= rk[31]; x0 = p0 ^ rk[0]; x1 = p1 ^ rk[1]; x2 = p2 ^ rk[2]; x3 = 
p3 ^ rk[3]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[4], rk[5], rk[6], rk[7]); rk[4] ^= rk[0]; rk[5] ^= rk[1]; rk[6] ^= rk[2]; rk[7] ^= rk[3]; x0 ^= rk[4]; x1 ^= rk[5]; x2 ^= rk[6]; x3 ^= rk[7]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[8], rk[9], rk[10], rk[11]); rk[8] ^= rk[4]; rk[9] ^= rk[5]; rk[10] ^= rk[6]; rk[11] ^= rk[7]; x0 ^= rk[8]; x1 ^= rk[9]; x2 ^= rk[10]; x3 ^= rk[11]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[12], rk[13], rk[14], rk[15]); rk[12] ^= rk[8]; rk[13] ^= rk[9]; rk[14] ^= rk[10]; rk[15] ^= rk[11]; x0 ^= rk[12]; x1 ^= rk[13]; x2 ^= rk[14]; x3 ^= rk[15]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); pC ^= x0; pD ^= x1; pE ^= x2; pF ^= x3; KEY_EXPAND_ELT(sharedMemory, rk[16], rk[17], rk[18], rk[19]); rk[16] ^= rk[12]; rk[17] ^= rk[13]; rk[18] ^= rk[14]; rk[19] ^= rk[15]; x0 = p8 ^ rk[16]; x1 = p9 ^ rk[17]; x2 = pA ^ rk[18]; x3 = pB ^ rk[19]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[20], rk[21], rk[22], rk[23]); rk[20] ^= rk[16]; rk[21] ^= rk[17]; rk[22] ^= rk[18]; rk[23] ^= rk[19]; x0 ^= rk[20]; x1 ^= rk[21]; x2 ^= rk[22]; x3 ^= rk[23]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[24], rk[25], rk[26], rk[27]); rk[24] ^= rk[20]; rk[25] ^= rk[21] ^ counter; rk[26] ^= rk[22]; rk[27] ^= ~rk[23]; //^ 0xFFFFFFFF; x0 ^= rk[24]; x1 ^= rk[25]; x2 ^= rk[26]; x3 ^= rk[27]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); KEY_EXPAND_ELT(sharedMemory, rk[28], rk[29], rk[30], rk[31]); rk[28] ^= rk[24]; rk[29] ^= rk[25]; rk[30] ^= rk[26]; rk[31] ^= rk[27]; x0 ^= rk[28]; x1 ^= rk[29]; x2 ^= rk[30]; x3 ^= rk[31]; AES_ROUND_NOKEY(sharedMemory, x0, x1, x2, x3); p4 ^= x0; p5 ^= x1; p6 ^= x2; p7 ^= x3; state[0x0] ^= p8; state[0x1] ^= p9; state[0x2] ^= pA; state[0x3] ^= pB; state[0x4] ^= pC; state[0x5] ^= pD; state[0x6] ^= pE; state[0x7] ^= pF; state[0x8] ^= p0; state[0x9] ^= p1; state[0xA] ^= p2; state[0xB] ^= p3; state[0xC] ^= p4; state[0xD] ^= p5; state[0xE] ^= p6; state[0xF] ^= p7; } __global__ __launch_bounds__(TPB, 3) void x11_shavite512_gpu_hash_80(uint32_t threads, uint32_t startNounce, void *outputHash) { __shared__ uint32_t sharedMemory[1024]; if(threadIdx.x < 256) { sharedMemory[threadIdx.x] = d_AES0[threadIdx.x]; sharedMemory[threadIdx.x + 256] = ROL8(d_AES0[threadIdx.x]); sharedMemory[threadIdx.x + 512] = ROL16(d_AES0[threadIdx.x]); sharedMemory[threadIdx.x + 768] = ROL24(d_AES0[threadIdx.x]); } __syncthreads(); const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if(thread < threads) { const uint32_t nounce = startNounce + thread; // kopiere init-state uint32_t state[16] = { 0x72FCCDD8, 0x79CA4727, 0x128A077B, 0x40D55AEC, 0xD1901A06, 0x430AE307, 0xB29F5CD1, 0xDF07FBFC, 0x8E45D73D, 0x681AB538, 0xBDE86578, 0xDD577E47, 0xE275EADE, 0x502D9FCD, 0xB9357178, 0x022A4B9A }; uint32_t msg[32]; #pragma unroll for(int i = 0; i<31; i++) { msg[i] = c_PaddedMessage80[i]; } msg[19] = cuda_swab32(nounce); msg[20] = 0x80; msg[27] = 0x2800000; msg[31] = 0x2000000; c512(sharedMemory, state, msg); uint32_t *outHash = (uint32_t *)outputHash + 16 * thread; #pragma unroll 16 for(int i = 0; i<16; i++) outHash[i] = state[i]; } //thread < threads } __host__ void x11_shavite512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash) { // berechne wie viele Thread Blocks wir brauchen dim3 grid((threads + TPB-1)/TPB); dim3 block(TPB); x11_shavite512_gpu_hash_64<<<grid, block, 0, 
gpustream[thr_id]>>>(threads, d_hash); CUDA_SAFE_CALL(cudaGetLastError()); }

__host__ void x11_shavite512_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_outputHash)
{
	// compute how many thread blocks we need
	dim3 grid((threads + TPB-1)/TPB);
	dim3 block(TPB);
	x11_shavite512_gpu_hash_80<<<grid, block, 0, gpustream[thr_id]>>>(threads, startNounce, d_outputHash);
}

__host__ void x11_shavite512_setBlock_80(int thr_id, void *pdata)
{
	// Provide the message with padding;
	// only the correct nonce still has to be inserted, starting at byte 76.
	unsigned char PaddedMessage[128];
	memcpy(PaddedMessage, pdata, 80);
	memset(PaddedMessage+80, 0, 48);
	cudaMemcpyToSymbolAsync(c_PaddedMessage80, PaddedMessage, 32 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice, gpustream[thr_id]);
}
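/* Usage sketch (not part of the original file): a minimal host loop showing how the 80-byte entry
   points above would typically be driven. The names scan_range_sketch, first_nonce and
   threads_per_launch are hypothetical; only x11_shavite512_setBlock_80 /
   x11_shavite512_cpu_hash_80 come from this file. */
static void scan_range_sketch(int thr_id, uint32_t *pdata /* 80-byte block header */,
                              uint32_t first_nonce, uint32_t threads_per_launch,
                              uint32_t *d_hash /* device buffer, 16 words per thread */)
{
	// Upload the padded 80-byte header once; only the nonce changes between launches.
	x11_shavite512_setBlock_80(thr_id, pdata);

	for (uint32_t nonce = first_nonce; nonce < first_nonce + 4 * threads_per_launch;
	     nonce += threads_per_launch) {
		// Each launch hashes `threads_per_launch` consecutive nonces starting at `nonce`
		// and writes one 512-bit digest per thread into d_hash.
		x11_shavite512_cpu_hash_80(thr_id, threads_per_launch, nonce, d_hash);
		// ...the next algorithm in the X11 chain would consume d_hash here...
	}
}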
#include <cudf/column/column_device_view.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/utilities/bit.hpp> #include <io/utilities/block_utils.cuh> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <cub/cub.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/tuple.h> namespace cudf { namespace io { namespace orc { namespace gpu { using cudf::detail::device_2dspan; constexpr int scratch_buffer_size = 512 * 4; // Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2 // Workaround replaces zero-length patch lists by a dummy zero patch constexpr bool zero_pll_war = true; static __device__ __constant__ int64_t kORCTimeToUTC = 1420070400; // Seconds from January 1st, 1970 to January 1st, 2015 struct byterle_enc_state_s { uint32_t literal_run; uint32_t repeat_run; volatile uint32_t rpt_map[(512 / 32) + 1]; }; struct intrle_enc_state_s { uint32_t literal_run; uint32_t delta_run; uint32_t literal_mode; uint32_t literal_w; uint32_t hdr_bytes; uint32_t pl_bytes; volatile uint32_t delta_map[(512 / 32) + 1]; }; struct strdata_enc_state_s { uint32_t char_count; uint32_t lengths_red[(512 / 32)]; const char* str_data[512]; }; struct orcenc_state_s { uint32_t cur_row; // Current row in group uint32_t present_rows; // # of rows in present buffer uint32_t present_out; // # of rows in present buffer that have been flushed uint32_t nrows; // # of rows in current batch uint32_t numvals; // # of non-zero values in current batch (<=nrows) uint32_t numlengths; // # of non-zero values in DATA2 batch uint32_t nnz; // Running count of non-null values encoder_chunk_streams stream; EncChunk chunk; uint32_t strm_pos[CI_NUM_STREAMS]; uint8_t valid_buf[512]; // valid map bits union { byterle_enc_state_s byterle; intrle_enc_state_s intrle; strdata_enc_state_s strenc; StripeDictionary dict_stripe; } u; union { uint8_t u8[scratch_buffer_size]; // gblock_vminscratch buffer uint32_t u32[scratch_buffer_size / 4]; } buf; union { uint8_t u8[2048]; uint32_t u32[1024]; int32_t i32[1024]; uint64_t u64[1024]; int64_t i64[1024]; } vals; union { uint8_t u8[2048]; uint32_t u32[1024]; uint64_t u64[1024]; } lengths; }; static inline __device__ uint32_t zigzag(uint32_t v) { return v; } static inline __device__ uint32_t zigzag(int32_t v) { int32_t s = (v >> 31); return ((v ^ s) * 2) - s; } static inline __device__ uint64_t zigzag(uint64_t v) { return v; } static inline __device__ uint64_t zigzag(int64_t v) { int64_t s = (v < 0) ? 1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ __uint128_t zigzag(__int128_t v) { int64_t s = (v < 0) ? 
1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; } static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; } /** * @brief Raw data output * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] count number of bytes to encode * @param[in] t thread id */ template <StreamIndexType cid, uint32_t inmask> static __device__ void StoreBytes( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t count, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; while (count > 0) { uint32_t n = min(count, 512); if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; } dst += n; inpos += n; count -= n; } __syncthreads(); if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } } /** * @brief ByteRLE encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * * @return number of input values encoded */ template <StreamIndexType cid, uint32_t inmask> static __device__ uint32_t ByteRLE( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; while (numvals > 0) { uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run, maxvals = min(numvals, 512); if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map; __syncthreads(); if (t == 0) { // Find the start of an identical 3-byte sequence // TBD: The two loops below could be eliminated using more ballot+ffs using warp0 literal_run = 0; repeat_run = 0; while (literal_run < maxvals) { uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1]; uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1); if (mask) { uint32_t literal_run_ofs = __ffs(mask) - 1; literal_run += literal_run_ofs; repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1)); if (repeat_run + literal_run_ofs == 32) { while (next == ~0) { uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1; next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0; repeat_run += 32; } repeat_run += __ffs(~next) - 1; } repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals)); if (repeat_run < 3) { literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0; repeat_run = 0; } break; } rpt_map = next; literal_run += 32; } if (repeat_run >= 130) { // Limit large runs to multiples of 130 repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 
2 * 130 : 130; } else if (literal_run && literal_run + repeat_run == maxvals) { repeat_run = 0; // Try again at next iteration } s->u.byterle.repeat_run = repeat_run; s->u.byterle.literal_run = min(literal_run, maxvals); } __syncthreads(); literal_run = s->u.byterle.literal_run; if (!flush && literal_run == numvals) { literal_run &= ~0x7f; if (!literal_run) break; } if (literal_run > 0) { uint32_t num_runs = (literal_run + 0x7f) >> 7; if (t < literal_run) { uint32_t run_id = t >> 7; uint32_t run = min(literal_run - run_id * 128, 128); if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run; dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += num_runs + literal_run; out_cnt += literal_run; numvals -= literal_run; inpos += literal_run; } repeat_run = s->u.byterle.repeat_run; if (repeat_run > 0) { while (repeat_run >= 130) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = 0x7f; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += 130; numvals -= 130; inpos += 130; repeat_run -= 130; } if (!flush && repeat_run == numvals) { // Wait for more data in case we can continue the run later break; } if (repeat_run >= 3) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = repeat_run - 3; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += repeat_run; numvals -= repeat_run; inpos += repeat_run; } } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } return out_cnt; } /** * @brief Maps the symbol size in bytes to RLEv2 5-bit length code */ static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = { 0, 7, 15, 23, 27, 28, 29, 30, 31}; /** * @brief Encode a varint value, return the number of bytes written */ static inline __device__ uint32_t StoreVarint(uint8_t* dst, __uint128_t v) { uint32_t bytecnt = 0; for (;;) { auto c = static_cast<uint32_t>(v & 0x7f); v >>= 7u; if (v == 0) { dst[bytecnt++] = c; break; } else { dst[bytecnt++] = c + 0x80; } } return bytecnt; } template <class T> static inline __device__ void StoreBytesBigEndian(uint8_t* dst, T v, uint32_t w) { for (uint32_t i = 0, b = w * 8; i < w; ++i) { b -= 8; dst[i] = static_cast<uint8_t>(v >> b); } } // Combine and store bits for symbol widths less than 8 static inline __device__ void StoreBitsBigEndian( uint8_t* dst, uint32_t v, uint32_t w, int num_vals, int t) { if (t <= (num_vals | 0x1f)) { uint32_t mask; if (w <= 1) { v = (v << 1) | (shuffle_xor(v, 1) & 0x1); v = (v << 2) | (shuffle_xor(v, 2) & 0x3); v = (v << 4) | (shuffle_xor(v, 4) & 0xf); mask = 0x7; } else if (w <= 2) { v = (v << 2) | (shuffle_xor(v, 1) & 0x3); v = (v << 4) | (shuffle_xor(v, 2) & 0xf); mask = 0x3; } else // if (w <= 4) { v = (v << 4) | (shuffle_xor(v, 1) & 0xf); mask = 0x1; } if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); } } } /** * @brief Integer RLEv2 encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * @param[in] temp_storage shared memory storage to perform block reduce * * @return number of input values encoded */ template <StreamIndexType cid, class T, bool is_signed, uint32_t inmask, int block_size, typename Storage> 
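// Added note: the literal/delta split computed in IntegerRLE maps onto ORC's four RLEv2
// sub-encodings via the top two bits of the header byte it emits: 00 SHORT_REPEAT (runs of
// 3..10 identical values), 01 DIRECT (literal_mode 1), 10 PATCHED_BASE (literal_mode 2) and
// 11 DELTA (literal_mode 3 and the general delta runs).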
static __device__ uint32_t IntegerRLE( orcenc_state_s* s, const T* inbuf, uint32_t inpos, uint32_t numvals, int t, Storage& temp_storage) { using block_reduce = cub::BlockReduce<T, block_size>; uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; __shared__ volatile uint64_t block_vmin; while (numvals > 0) { T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0; uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512), literal_run, delta_run; if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map; __syncthreads(); if (!t) { // Find the start of the next delta run (2 consecutive values with the same delta) literal_run = delta_run = 0; while (literal_run < maxvals) { if (delta_map != 0) { uint32_t literal_run_ofs = __ffs(delta_map) - 1; literal_run += literal_run_ofs; delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1)); if (literal_run_ofs + delta_run == 32) { for (;;) { uint32_t delta_idx = (literal_run + delta_run) >> 5; delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0; if (delta_map != ~0) break; delta_run += 32; } delta_run += __ffs(~delta_map) - 1; } delta_run += 2; break; } literal_run += 32; delta_map = s->u.intrle.delta_map[(literal_run >> 5)]; } literal_run = min(literal_run, maxvals); s->u.intrle.literal_run = literal_run; s->u.intrle.delta_run = min(delta_run, maxvals - literal_run); } __syncthreads(); literal_run = s->u.intrle.literal_run; // Find minimum and maximum values if (literal_run > 0) { // Find min & max T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max(); T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min(); uint32_t literal_mode, literal_w; vmin = block_reduce(temp_storage).Reduce(vmin, cub::Min()); __syncthreads(); vmax = block_reduce(temp_storage).Reduce(vmax, cub::Max()); if (t == 0) { uint32_t mode1_w, mode2_w; typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2; block_vmin = static_cast<uint64_t>(vmin); if constexpr (sizeof(T) > 4) { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7); mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7); } else { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3); mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3); } // Decide between mode1 & mode2 (also mode3 for length=2 repeat) if (vrange_mode2 == 0 && mode1_w > 1) { // Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >= // 3) uint32_t bytecnt = 2; dst[0] = 0xC0 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; bytecnt += StoreVarint(dst + 2, vrange_mode1); dst[bytecnt++] = 0; // Zero delta s->u.intrle.literal_mode = 3; s->u.intrle.literal_w = bytecnt; } else { uint32_t range, w; if (mode1_w > mode2_w && (literal_run - 1) * (mode1_w - mode2_w) > 4) { s->u.intrle.literal_mode = 2; w = mode2_w; range = (uint32_t)vrange_mode2; } else { s->u.intrle.literal_mode = 1; w = mode1_w; range = (uint32_t)vrange_mode1; } if (w == 1) w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 
2 : 1; else w <<= 3; // bytes -> bits s->u.intrle.literal_w = w; } } __syncthreads(); vmin = static_cast<T>(block_vmin); literal_mode = s->u.intrle.literal_mode; literal_w = s->u.intrle.literal_w; if (literal_mode == 1) { // Direct mode if (!t) { dst[0] = 0x40 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; } dst += 2; typename std::make_unsigned<T>::type zzv0 = v0; if (t < literal_run) { zzv0 = zigzag(v0); } if (literal_w < 8) { StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t); } else if (t < literal_run) { StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3)); } } else if (literal_mode == 2) { // Patched base mode if (!t) { uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1; vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin; bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7)) : (4 - min(CountLeadingBytes32(vmax << bv_scale), 3)); if (zero_pll_war) { // Insert a dummy zero patch pll = 1; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0; } else { pll = 0; } dst[0] = 0x80 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw]; dst[3] = ((pgw - 1) << 5) | pll; if (is_signed) { vmax >>= 1; vmax |= vmin & ((T)1 << (bw * 8 - 1)); } StoreBytesBigEndian(dst + 4, vmax, bw); s->u.intrle.hdr_bytes = 4 + bw; s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3; } __syncthreads(); dst += s->u.intrle.hdr_bytes; v0 -= (t < literal_run) ? vmin : 0; if (literal_w < 8) StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t); else if (t < literal_run) StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3)); dst += s->u.intrle.pl_bytes; } else { // Delta mode dst += literal_w; literal_w = 0; } dst += (literal_run * literal_w + 7) >> 3; numvals -= literal_run; inpos += literal_run; out_cnt += literal_run; __syncthreads(); } delta_run = s->u.intrle.delta_run; if (delta_run > 0) { if (t == literal_run) { int64_t delta = (int64_t)v1 - (int64_t)v0; uint64_t delta_base = zigzag(v0); if (delta == 0 && delta_run >= 3 && delta_run <= 10) { // Short repeat uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7); dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3); for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) { b -= 8; dst[1 + i] = static_cast<uint8_t>(delta_base >> b); } s->u.intrle.hdr_bytes = 1 + delta_bw; } else { // Delta uint64_t delta_u = zigzag(delta); uint32_t bytecnt = 2; dst[0] = 0xC0 + ((delta_run - 1) >> 8); dst[1] = (delta_run - 1) & 0xff; bytecnt += StoreVarint(dst + bytecnt, delta_base); bytecnt += StoreVarint(dst + bytecnt, delta_u); s->u.intrle.hdr_bytes = bytecnt; } } __syncthreads(); dst += s->u.intrle.hdr_bytes; numvals -= delta_run; inpos += delta_run; out_cnt += delta_run; } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } __syncthreads(); return out_cnt; } /** * @brief Store a group of strings as a single concatenated string * * @param[in] dst destination buffer * @param[in] strenc string encoder state * @param[in] len(t) string length (per thread) * @param[in] t thread id */ static __device__ void StoreStringData(uint8_t* dst, strdata_enc_state_s* strenc, uint32_t len, int t) { // Start with summing up all the lengths uint32_t pos = len; 
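// Added note: this is a two-level inclusive prefix sum over the per-thread string lengths:
// a shuffle-based scan inside each 32-thread warp first, then (in the t < 32 block further
// down) a scan over the per-warp totals parked in strenc->lengths_red, so each thread ends
// up with the byte offset at which its own string must be copied.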
uint32_t wt = t & 0x1f; for (uint32_t n = 1; n < 32; n <<= 1) { uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1)); pos += (wt & n) ? tmp : 0; } if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; } dst += pos - len; __syncthreads(); if (t < 32) { uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0; uint32_t wpos = wlen; for (uint32_t n = 1; n < 16; n <<= 1) { uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1)); wpos += (wt & n) ? tmp : 0; } if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; } if (wt == 0xf) { strenc->char_count = wpos; // Update stream position } } __syncthreads(); // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive character at a time // rather than have each thread to a memcpy if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); } } /** * @brief In-place conversion from lengths to positions * * @param[in] vals input values * @param[in] numvals number of values * @param[in] t thread id */ template <class T> inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)]; } } template <int block_size, typename Storage> static __device__ void encode_null_mask(orcenc_state_s* s, bitmask_type const* pushdown_mask, Storage& scan_storage, int t) { if (s->stream.ids[CI_PRESENT] < 0) return; auto const column = *s->chunk.column; while (s->present_rows < s->chunk.null_mask_num_rows or s->numvals > 0) { // Number of rows read so far auto present_rows = s->present_rows; // valid_buf capacity is byte per thread in block auto const buf_available_bits = encode_block_size * 8 - s->numvals; // Number of rows for the block to process in this iteration auto const nrows = min(s->chunk.null_mask_num_rows - present_rows, buf_available_bits); // Number of rows for this thread to process in this iteration auto const t_nrows = min(max(static_cast<int32_t>(nrows) - t * 8, 0), 8); auto const row = s->chunk.null_mask_start_row + present_rows + t * 8; auto get_mask_byte = [&](bitmask_type const* mask, size_type offset) -> uint8_t { if (t_nrows == 0) return 0; if (mask == nullptr) return 0xff; auto const begin_offset = row + offset; auto const end_offset = min(begin_offset + 8, offset + column.size()); auto const mask_word = cudf::detail::get_mask_offset_word(mask, 0, begin_offset, end_offset); return mask_word & 0xff; }; uint8_t pd_byte = (1 << t_nrows) - 1; uint32_t pd_set_cnt = t_nrows; uint32_t offset = t_nrows != 0 ? 
t * 8 : nrows; if (pushdown_mask != nullptr) { pd_byte = get_mask_byte(pushdown_mask, 0) & ((1 << t_nrows) - 1); pd_set_cnt = __popc(pd_byte); // Scan the number of valid bits to get dst offset for each thread cub::BlockScan<uint32_t, block_size>(scan_storage).ExclusiveSum(pd_set_cnt, offset); } auto const mask_byte = get_mask_byte(column.null_mask(), column.offset()); auto dst_offset = offset + s->nnz; auto vbuf_bit_idx = [](int row) { // valid_buf is a circular buffer with validity of 8 rows in each element return row % (encode_block_size * 8); }; if (dst_offset % 8 == 0 and pd_set_cnt == 8) { s->valid_buf[vbuf_bit_idx(dst_offset) / 8] = mask_byte; } else { for (auto bit_idx = 0; bit_idx < t_nrows; ++bit_idx) { // skip bits where pushdown mask is not set if (not(pd_byte & (1 << bit_idx))) continue; if (mask_byte & (1 << bit_idx)) { set_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } else { clear_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } } } __syncthreads(); if (t == block_size - 1) { // Number of loaded rows, available for encode s->numvals += offset + pd_set_cnt; // Number of loaded rows (different from present_rows because of pushdown masks) s->nnz += offset + pd_set_cnt; } present_rows += nrows; if (!t) { s->present_rows = present_rows; } __syncthreads(); // RLE encode the present stream if (s->numvals > ((present_rows < s->chunk.null_mask_num_rows) ? 130 * 8 : 0)) { auto const flush = (present_rows < s->chunk.null_mask_num_rows) ? 0 : 7; auto const nbytes_out = (s->numvals + flush) / 8; auto const nrows_encoded = ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, s->present_out / 8, nbytes_out, flush, t) * 8; if (!t) { // Number of rows encoded so far s->present_out += nrows_encoded; s->numvals -= min(s->numvals, nrows_encoded); } __syncthreads(); } } // reset shared state if (t == 0) { s->nnz = 0; } } /** * @brief Encode column data * * @param[in] chunks encoder chunks device array [column][rowgroup] * @param[in, out] streams chunk streams device array [column][rowgroup] */ // blockDim {`encode_block_size`,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ union { typename cub::BlockScan<uint32_t, block_size>::TempStorage scan_u32; typename cub::BlockReduce<int32_t, block_size>::TempStorage i32; typename cub::BlockReduce<int64_t, block_size>::TempStorage i64; typename cub::BlockReduce<uint32_t, block_size>::TempStorage u32; typename cub::BlockReduce<uint64_t, block_size>::TempStorage u64; } temp_storage; orcenc_state_s* const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t group_id = blockIdx.y; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[col_id][group_id]; s->stream = streams[col_id][group_id]; s->cur_row = 0; s->present_rows = 0; s->present_out = 0; s->numvals = 0; s->numlengths = 0; s->nnz = 0; s->strm_pos[CI_DATA] = 0; s->strm_pos[CI_PRESENT] = 0; s->strm_pos[CI_INDEX] = 0; // Dictionary data is encoded in a separate kernel s->strm_pos[CI_DATA2] = s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DATA2] : 0; s->strm_pos[CI_DICTIONARY] = s->chunk.encoding_kind == DICTIONARY_V2 ? 
s->stream.lengths[CI_DICTIONARY] : 0; } __syncthreads(); auto const pushdown_mask = [&]() -> cudf::bitmask_type const* { auto const parent_index = s->chunk.column->parent_index; if (!parent_index.has_value()) return nullptr; return chunks[parent_index.value()][0].column->pushdown_mask; }(); encode_null_mask<block_size>(s, pushdown_mask, temp_storage.scan_u32, t); __syncthreads(); auto const column = *s->chunk.column; while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) { // Fetch non-null values auto const length_stream_only = s->chunk.type_kind == LIST or s->chunk.type_kind == MAP; if (not length_stream_only && s->stream.data_ptrs[CI_DATA] == nullptr) { // Pass-through __syncthreads(); if (!t) { s->cur_row = s->chunk.num_rows; s->strm_pos[CI_DATA] = s->chunk.num_rows * s->chunk.dtype_len; } } else if (s->cur_row < s->chunk.num_rows) { uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024; uint32_t nrows = min(min(s->chunk.num_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), encode_block_size); auto const row = s->chunk.start_row + s->cur_row + t; auto const is_value_valid = [&]() { if (t >= nrows) return false; return bit_value_or(pushdown_mask, column.offset() + row, true) and bit_value_or(column.null_mask(), column.offset() + row, true); }(); s->buf.u32[t] = is_value_valid ? 1u : 0u; // TODO: Could use a faster reduction relying on _popc() for the initial phase lengths_to_positions(s->buf.u32, encode_block_size, t); __syncthreads(); if (is_value_valid) { int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1); switch (s->chunk.type_kind) { case INT: case DATE: case FLOAT: s->vals.u32[nz_idx] = column.element<uint32_t>(row); break; case DOUBLE: case LONG: s->vals.u64[nz_idx] = column.element<uint64_t>(row); break; case SHORT: s->vals.u32[nz_idx] = column.element<uint16_t>(row); break; case BOOLEAN: case BYTE: s->vals.u8[nz_idx] = column.element<uint8_t>(row); break; case TIMESTAMP: { int64_t ts = column.element<int64_t>(row); int32_t ts_scale = powers_of_ten[9 - min(s->chunk.scale, 9)]; int64_t seconds = ts / ts_scale; int64_t nanos = (ts - seconds * ts_scale); // There is a bug in the ORC spec such that for negative timestamps, it is understood // between the writer and reader that nanos will be adjusted to their positive component // but the negative seconds will be left alone. 
This means that -2.6 is encoded as // seconds = -2 and nanos = 1+(-0.6) = 0.4 // This leads to an error in decoding time where -1 < time (s) < 0 // Details: https://github.com/rapidsai/cudf/pull/5529#issuecomment-648768925 if (nanos < 0) { nanos += ts_scale; } s->vals.i64[nz_idx] = seconds - kORCTimeToUTC; if (nanos != 0) { // Trailing zeroes are encoded in the lower 3-bits uint32_t zeroes = 0; nanos *= powers_of_ten[min(s->chunk.scale, 9)]; if (!(nanos % 100)) { nanos /= 100; zeroes = 1; while (zeroes < 7 && !(nanos % 10)) { nanos /= 10; zeroes++; } } nanos = (nanos << 3) + zeroes; } s->lengths.u64[nz_idx] = nanos; break; } case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { uint32_t dict_idx = s->chunk.dict_index[row]; if (dict_idx > 0x7fffffffu) { dict_idx = s->chunk.dict_index[dict_idx & 0x7fffffffu]; } s->vals.u32[nz_idx] = dict_idx; } else { string_view value = column.element<string_view>(row); s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data(); s->lengths.u32[nz_idx] = value.size_bytes(); } break; // Reusing the lengths array for the scale stream // Note: can be written in a faster manner, given that all values are equal case DECIMAL: s->lengths.u32[nz_idx] = zigzag(s->chunk.scale); break; case LIST: case MAP: { auto const& offsets = column.child(lists_column_view::offsets_column_index); // Compute list length from the offsets s->lengths.u32[nz_idx] = offsets.element<size_type>(row + 1 + column.offset()) - offsets.element<size_type>(row + column.offset()); } break; default: break; } } __syncthreads(); if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) { // Store string data uint32_t nz = s->buf.u32[511]; uint32_t nz_idx = (s->nnz + t) & 0x3ff; uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0; StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t); if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; } __syncthreads(); } else if (s->chunk.type_kind == BOOLEAN) { // bool8 -> 8x bool1 uint32_t nz = s->buf.u32[511]; uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3; if (t < n) { uint32_t idx8 = (s->nnz & ~7) + (t << 3); s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) | ((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) | ((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) | ((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) | ((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) | ((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) | ((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) | ((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0); } __syncthreads(); } if (!t) { uint32_t nz = s->buf.u32[511]; s->nnz += nz; s->numvals += nz; s->numlengths += (s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == LIST || s->chunk.type_kind == MAP || (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)) ? nz : 0; s->cur_row += nrows; } __syncthreads(); // Encode values if (s->numvals > 0) { uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 
7 : 0, n; switch (s->chunk.type_kind) { case SHORT: case INT: case DATE: n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>( s, s->vals.i32, s->nnz - s->numvals, s->numvals, t, temp_storage.i32); break; case LONG: case TIMESTAMP: n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>( s, s->vals.i64, s->nnz - s->numvals, s->numvals, t, temp_storage.i64); break; case BYTE: n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t); break; case BOOLEAN: n = ByteRLE<CI_DATA, 0x1ff>(s, s->lengths.u8, (s->nnz - s->numvals + flush) >> 3, (s->numvals + flush) >> 3, flush, t) * 8; break; case FLOAT: StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t); n = s->numvals; break; case DOUBLE: StoreBytes<CI_DATA, 0x1fff>( s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t); n = s->numvals; break; case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>( s, s->vals.u32, s->nnz - s->numvals, s->numvals, t, temp_storage.u32); } else { n = s->numvals; } break; case DECIMAL: { if (is_value_valid) { auto const id = column.type().id(); __uint128_t const zz_val = id == type_id::DECIMAL32 ? zigzag(column.element<int32_t>(row)) : id == type_id::DECIMAL64 ? zigzag(column.element<int64_t>(row)) : zigzag(column.element<__int128_t>(row)); auto const offset = (row == s->chunk.start_row) ? 0 : s->chunk.decimal_offsets[row - 1]; StoreVarint(s->stream.data_ptrs[CI_DATA] + offset, zz_val); } n = s->numvals; } break; default: n = s->numvals; break; } __syncthreads(); if (!t) { s->numvals -= min(n, s->numvals); } } // Encode secondary stream values if (s->numlengths > 0) { uint32_t n; switch (s->chunk.type_kind) { case TIMESTAMP: n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>( s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u64); break; case DECIMAL: case LIST: case MAP: case STRING: n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u32); break; default: n = s->numlengths; break; } __syncthreads(); if (!t) { s->numlengths -= min(n, s->numlengths); } } } __syncthreads(); } __syncthreads(); if (t <= CI_PRESENT && s->stream.ids[t] >= 0) { // Update actual compressed length // (not needed for decimal data, whose exact size is known before encode) if (!(t == CI_DATA && s->chunk.type_kind == DECIMAL)) streams[col_id][group_id].lengths[t] = s->strm_pos[t]; if (!s->stream.data_ptrs[t]) { streams[col_id][group_id].data_ptrs[t] = static_cast<uint8_t*>(const_cast<void*>(column.head())) + (column.offset() + s->chunk.start_row) * s->chunk.dtype_len; } } } /** * @brief Encode column dictionaries * * @param[in] stripes Stripe dictionaries device array [stripe][string_column] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeStringDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ typename cub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage; orcenc_state_s* const s = &state_g; uint32_t stripe_id = blockIdx.x; uint32_t cid = (blockIdx.y) ? 
CI_DICTIONARY : CI_DATA2; int t = threadIdx.x; if (t == 0) s->u.dict_stripe = stripes[stripe_id]; __syncthreads(); auto const strm_ptr = &streams[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; if (t == 0) { s->chunk = chunks[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; s->stream = *strm_ptr; s->strm_pos[cid] = 0; s->numlengths = 0; s->nrows = s->u.dict_stripe.num_strings; s->cur_row = 0; } auto const string_column = s->u.dict_stripe.leaf_column; auto const dict_data = s->u.dict_stripe.dict_data; __syncthreads(); if (s->chunk.encoding_kind != DICTIONARY_V2) { return; // This column isn't using dictionary encoding -> bail out } while (s->cur_row < s->nrows || s->numlengths != 0) { uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512)); uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0; if (cid == CI_DICTIONARY) { // Encoding string contents const char* ptr = nullptr; uint32_t count = 0; if (t < numvals) { auto string_val = string_column->element<string_view>(string_idx); ptr = string_val.data(); count = string_val.size_bytes(); } s->u.strenc.str_data[t] = ptr; StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY], &s->u.strenc, (ptr) ? count : 0, t); if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; } } else { // Encoding string lengths uint32_t count = (t < numvals) ? static_cast<uint32_t>(string_column->element<string_view>(string_idx).size_bytes()) : 0; uint32_t nz_idx = (s->cur_row + t) & 0x3ff; if (t < numvals) s->lengths.u32[nz_idx] = count; __syncthreads(); if (s->numlengths + numvals > 0) { uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->cur_row, s->numlengths + numvals, t, temp_storage); __syncthreads(); if (!t) { s->numlengths += numvals; s->numlengths -= min(n, s->numlengths); } } } if (t == 0) { s->cur_row += numvals; } __syncthreads(); } if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; } } /** * @brief Merge chunked column data into a single contiguous stream * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in,out] streams List of encoder chunk streams [column][rowgroup] */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) StripeStream ss; __shared__ __align__(16) encoder_chunk_streams strm0; __shared__ uint8_t* volatile ck_curptr_g; __shared__ uint32_t volatile ck_curlen_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; strm0 = streams[ss.column_id][ss.first_chunk_id]; } __syncthreads(); auto const cid = ss.stream_type; auto dst_ptr = strm0.data_ptrs[cid] + strm0.lengths[cid]; for (auto group = ss.first_chunk_id + 1; group < ss.first_chunk_id + ss.num_chunks; ++group) { uint8_t* src_ptr; uint32_t len; if (t == 0) { src_ptr = streams[ss.column_id][group].data_ptrs[cid]; len = streams[ss.column_id][group].lengths[cid]; if (src_ptr != dst_ptr) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; } ck_curptr_g = src_ptr; ck_curlen_g = len; } __syncthreads(); src_ptr = ck_curptr_g; len = ck_curlen_g; if (len > 0 && src_ptr != dst_ptr) { for (uint32_t i = 0; i < len; i += 1024) { uint8_t v = (i + t < len) ? 
src_ptr[i + t] : 0; __syncthreads(); if (i + t < len) { dst_ptr[i + t] = v; } } } dst_ptr += len; __syncthreads(); } if (!t) { strm_desc[stripe_id][stream_id].stream_size = dst_ptr - strm0.data_ptrs[cid]; } } /** * @brief Initializes compression input/output structures * * @param[in] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[out] inputs Per-block compression input buffers * @param[out] outputs Per-block compression output buffers * @param[out] statuses Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {256,1,1} __global__ void __launch_bounds__(256) gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc, device_2dspan<encoder_chunk_streams> streams, // const? device_span<device_span<uint8_t const>> inputs, device_span<device_span<uint8_t>> outputs, device_span<decompress_status> statuses, uint8_t* compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ uint8_t* volatile uncomp_base_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks; uint8_t *src, *dst; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type]; } __syncthreads(); src = uncomp_base_g; dst = compressed_bfr + ss.bfr_offset; num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1; for (uint32_t b = t; b < num_blocks; b += 256) { uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); inputs[ss.first_block + b] = {src + b * comp_blk_size, blk_size}; outputs[ss.first_block + b] = { dst + b * (BLOCK_HEADER_SIZE + max_comp_blk_size) + BLOCK_HEADER_SIZE, max_comp_blk_size}; statuses[ss.first_block + b] = {blk_size, 1, 0}; } } /** * @brief Compacts compressed blocks in a single contiguous stream, and update 3-byte block length *fields * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[out] inputs Per-block compression input buffers * @param[out] outputs Per-block compression output buffers * @param[out] statuses Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc, device_span<device_span<uint8_t const> const> inputs, device_span<device_span<uint8_t> const> outputs, device_span<decompress_status> statuses, uint8_t* compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ const uint8_t* volatile comp_src_g; __shared__ uint32_t volatile comp_len_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks, b, blk_size; const uint8_t* src; uint8_t* dst; if (t == 0) ss = strm_desc[stripe_id][stream_id]; __syncthreads(); num_blocks = (ss.stream_size > 0) ? 
(ss.stream_size - 1) / comp_blk_size + 1 : 0; dst = compressed_bfr + ss.bfr_offset; b = 0; do { if (t == 0) { auto const src_len = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); auto dst_len = (statuses[ss.first_block + b].status == 0) ? statuses[ss.first_block + b].bytes_written : src_len; uint32_t blk_size24{}; if (dst_len >= src_len) { // Copy from uncompressed source src = inputs[ss.first_block + b].data(); statuses[ss.first_block + b].bytes_written = src_len; dst_len = src_len; blk_size24 = dst_len * 2 + 1; } else { // Compressed block src = outputs[ss.first_block + b].data(); blk_size24 = dst_len * 2 + 0; } dst[0] = static_cast<uint8_t>(blk_size24 >> 0); dst[1] = static_cast<uint8_t>(blk_size24 >> 8); dst[2] = static_cast<uint8_t>(blk_size24 >> 16); comp_src_g = src; comp_len_g = dst_len; } __syncthreads(); src = comp_src_g; blk_size = comp_len_g; dst += 3; // skip over length written by thread0 if (src != dst) { for (uint32_t i = 0; i < blk_size; i += 1024) { uint8_t v = (i + t < blk_size) ? src[i + t] : 0; __syncthreads(); if (i + t < blk_size) { dst[i + t] = v; } } } dst += blk_size; __syncthreads(); } while (++b < num_blocks); // Update stripe stream with the compressed size if (t == 0) { strm_desc[stripe_id][stream_id].stream_size = static_cast<uint32_t>(dst - (compressed_bfr + ss.bfr_offset)); } } void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams, rmm::cuda_stream_view stream) { dim3 dim_block(encode_block_size, 1); // `encode_block_size` threads per chunk dim3 dim_grid(chunks.size().first, chunks.size().second); gpuEncodeOrcColumnData<encode_block_size> <<<dim_grid, dim_block, 0, stream.value()>>>(chunks, streams); } void EncodeStripeDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, uint32_t num_string_columns, uint32_t num_stripes, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(512, 1); // 512 threads per dictionary dim3 dim_grid(num_string_columns * num_stripes, 2); gpuEncodeStringDictionaries<512> <<<dim_grid, dim_block, 0, stream.value()>>>(stripes, chunks, enc_streams); } void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(1024, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); gpuCompactOrcDataStreams<<<dim_grid, dim_block, 0, stream.value()>>>(strm_desc, enc_streams); } void CompressOrcDataStreams(uint8_t* compressed_data, uint32_t num_compressed_blocks, CompressionKind compression, uint32_t comp_blk_size, uint32_t max_comp_blk_size, device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, device_span<device_span<uint8_t const>> comp_in, device_span<device_span<uint8_t>> comp_out, device_span<decompress_status> comp_stat, rmm::cuda_stream_view stream) { dim3 dim_block_init(256, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); gpuInitCompressionBlocks<<<dim_grid, dim_block_init, 0, stream.value()>>>(strm_desc, enc_streams, comp_in, comp_out, comp_stat, compressed_data, comp_blk_size, max_comp_blk_size); if (compression == SNAPPY) { if (detail::nvcomp_integration::is_stable_enabled()) { try { size_t temp_size; nvcompStatus_t nvcomp_status = nvcompBatchedSnappyCompressGetTempSize( num_compressed_blocks, comp_blk_size, nvcompBatchedSnappyDefaultOpts, &temp_size); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, 
"Error in getting snappy compression scratch size"); rmm::device_buffer scratch(temp_size, stream); rmm::device_uvector<void const*> uncompressed_data_ptrs(num_compressed_blocks, stream); rmm::device_uvector<size_t> uncompressed_data_sizes(num_compressed_blocks, stream); rmm::device_uvector<void*> compressed_data_ptrs(num_compressed_blocks, stream); rmm::device_uvector<size_t> compressed_bytes_written(num_compressed_blocks, stream); auto comp_it = thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin()); thrust::transform( rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(auto const& in) { return thrust::make_tuple(in.data(), in.size()); }); thrust::transform(rmm::exec_policy(stream), comp_out.begin(), comp_out.end(), compressed_data_ptrs.begin(), [] __device__(auto const& out) { return out.data(); }); nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(), uncompressed_data_sizes.data(), max_comp_blk_size, num_compressed_blocks, scratch.data(), scratch.size(), compressed_data_ptrs.data(), compressed_bytes_written.data(), nvcompBatchedSnappyDefaultOpts, stream.value()); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression"); thrust::transform(rmm::exec_policy(stream), compressed_bytes_written.begin(), compressed_bytes_written.end(), comp_stat.begin(), [] __device__(size_t size) { decompress_status status{}; status.bytes_written = size; return status; }); } catch (...) { // If we reach this then there was an error in compressing so set an error status for each // block thrust::for_each(rmm::exec_policy(stream), comp_stat.begin(), comp_stat.end(), [] __device__(decompress_status & stat) { stat.status = 1; }); }; } else { gpu_snap(comp_in, comp_out, comp_stat, stream); } } dim3 dim_block_compact(1024, 1); gpuCompactCompressedBlocks<<<dim_grid, dim_block_compact, 0, stream.value()>>>( strm_desc, comp_in, comp_out, comp_stat, compressed_data, comp_blk_size, max_comp_blk_size); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
the_stack
#if (USE_TEXTURE) #define LOAD_FLOAT(i) tex1Dfetch<float>(texFloat, i) #define SET_FLOAT_BASE #else #define LOAD_FLOAT(i) d_Src[i] #define SET_FLOAT_BASE #endif //////////////////////////////////////////////////////////////////////////////// /// Position convolution kernel center at (0, 0) in the image //////////////////////////////////////////////////////////////////////////////// __global__ void padKernel_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int kernelH, int kernelW, int kernelY, int kernelX #if (USE_TEXTURE) , cudaTextureObject_t texFloat #endif ) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; if (y < kernelH && x < kernelW) { int ky = y - kernelY; if (ky < 0) { ky += fftH; } int kx = x - kernelX; if (kx < 0) { kx += fftW; } d_Dst[ky * fftW + kx] = LOAD_FLOAT(y * kernelW + x); } } //////////////////////////////////////////////////////////////////////////////// // Prepare data for "pad to border" addressing mode //////////////////////////////////////////////////////////////////////////////// __global__ void padDataClampToBorder_kernel(float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelH, int kernelW, int kernelY, int kernelX #if (USE_TEXTURE) , cudaTextureObject_t texFloat #endif ) { const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; const int borderH = dataH + kernelY; const int borderW = dataW + kernelX; if (y < fftH && x < fftW) { int dy, dx; if (y < dataH) { dy = y; } if (x < dataW) { dx = x; } if (y >= dataH && y < borderH) { dy = dataH - 1; } if (x >= dataW && x < borderW) { dx = dataW - 1; } if (y >= borderH) { dy = 0; } if (x >= borderW) { dx = 0; } d_Dst[y * fftW + x] = LOAD_FLOAT(dy * dataW + dx); } } //////////////////////////////////////////////////////////////////////////////// // Modulate Fourier image of padded data by Fourier image of padded kernel // and normalize by FFT size //////////////////////////////////////////////////////////////////////////////// inline __device__ void mulAndScale(fComplex &a, const fComplex &b, const float &c) { fComplex t = {c * (a.x * b.x - a.y * b.y), c * (a.y * b.x + a.x * b.y)}; a = t; } __global__ void modulateAndNormalize_kernel(fComplex *d_Dst, fComplex *d_Src, int dataSize, float c) { const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= dataSize) { return; } fComplex a = d_Src[i]; fComplex b = d_Dst[i]; mulAndScale(a, b, c); d_Dst[i] = a; } //////////////////////////////////////////////////////////////////////////////// // 2D R2C / C2R post/preprocessing kernels //////////////////////////////////////////////////////////////////////////////// #if (USE_TEXTURE) #define LOAD_FCOMPLEX(i) tex1Dfetch<fComplex>(texComplex, i) #define LOAD_FCOMPLEX_A(i) tex1Dfetch<fComplex>(texComplexA, i) #define LOAD_FCOMPLEX_B(i) tex1Dfetch<fComplex>(texComplexB, i) #define SET_FCOMPLEX_BASE #define SET_FCOMPLEX_BASE_A #define SET_FCOMPLEX_BASE_B #else #define LOAD_FCOMPLEX(i) d_Src[i] #define LOAD_FCOMPLEX_A(i) d_SrcA[i] #define LOAD_FCOMPLEX_B(i) d_SrcB[i] #define SET_FCOMPLEX_BASE #define SET_FCOMPLEX_BASE_A #define SET_FCOMPLEX_BASE_B #endif inline __device__ void spPostprocessC2C(fComplex &D1, fComplex &D2, const fComplex &twiddle) { float A1 = 0.5f * (D1.x + D2.x); float B1 = 0.5f * (D1.y - D2.y); float A2 = 0.5f * (D1.y + D2.y); float B2 = 0.5f * (D1.x - D2.x); D1.x = A1 + (A2 * twiddle.x + B2 * twiddle.y); D1.y = (A2 * twiddle.y - B2 * twiddle.x) + B1; D2.x = A1 - (A2 * twiddle.x 
+ B2 * twiddle.y); D2.y = (A2 * twiddle.y - B2 * twiddle.x) - B1; } // Premultiply by 2 to account for 1.0 / (DZ * DY * DX) normalization inline __device__ void spPreprocessC2C(fComplex &D1, fComplex &D2, const fComplex &twiddle) { float A1 = /* 0.5f * */ (D1.x + D2.x); float B1 = /* 0.5f * */ (D1.y - D2.y); float A2 = /* 0.5f * */ (D1.y + D2.y); float B2 = /* 0.5f * */ (D1.x - D2.x); D1.x = A1 - (A2 * twiddle.x - B2 * twiddle.y); D1.y = (B2 * twiddle.x + A2 * twiddle.y) + B1; D2.x = A1 + (A2 * twiddle.x - B2 * twiddle.y); D2.y = (B2 * twiddle.x + A2 * twiddle.y) - B1; } inline __device__ void getTwiddle(fComplex &twiddle, float phase) { __sincosf(phase, &twiddle.y, &twiddle.x); } inline __device__ uint mod(uint a, uint DA) { //(DA - a) % DA, assuming a <= DA return a ? (DA - a) : a; } static inline uint factorRadix2(uint &log2N, uint n) { if (!n) { log2N = 0; return 0; } else { for (log2N = 0; n % 2 == 0; n /= 2, log2N++) ; return n; } } inline __device__ void udivmod(uint &dividend, uint divisor, uint &rem) { #if (!POWER_OF_TWO) rem = dividend % divisor; dividend /= divisor; #else rem = dividend & (divisor - 1); dividend >>= (__ffs(divisor) - 1); #endif } __global__ void spPostprocess2D_kernel(fComplex *d_Dst, fComplex *d_Src, uint DY, uint DX, uint threadCount, uint padding, float phaseBase #if (USE_TEXTURE) , cudaTextureObject_t texComplex #endif ) { const uint threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= threadCount) { return; } uint x, y, i = threadId; udivmod(i, DX / 2, x); udivmod(i, DY, y); // Avoid overwrites in columns DX / 2 by different threads if ((x == 0) && (y > DY / 2)) { return; } const uint srcOffset = i * DY * DX; const uint dstOffset = i * DY * (DX + padding); // Process x = [0 .. DX / 2 - 1] U [DX / 2 + 1 .. DX] { const uint loadPos1 = srcOffset + y * DX + x; const uint loadPos2 = srcOffset + mod(y, DY) * DX + mod(x, DX); const uint storePos1 = dstOffset + y * (DX + padding) + x; const uint storePos2 = dstOffset + mod(y, DY) * (DX + padding) + (DX - x); fComplex D1 = LOAD_FCOMPLEX(loadPos1); fComplex D2 = LOAD_FCOMPLEX(loadPos2); fComplex twiddle; getTwiddle(twiddle, phaseBase * (float)x); spPostprocessC2C(D1, D2, twiddle); d_Dst[storePos1] = D1; d_Dst[storePos2] = D2; } // Process x = DX / 2 if (x == 0) { const uint loadPos1 = srcOffset + y * DX + DX / 2; const uint loadPos2 = srcOffset + mod(y, DY) * DX + DX / 2; const uint storePos1 = dstOffset + y * (DX + padding) + DX / 2; const uint storePos2 = dstOffset + mod(y, DY) * (DX + padding) + DX / 2; fComplex D1 = LOAD_FCOMPLEX(loadPos1); fComplex D2 = LOAD_FCOMPLEX(loadPos2); // twiddle = getTwiddle(phaseBase * (DX / 2)) = exp(dir * j * PI / 2) fComplex twiddle = {0, (phaseBase > 0) ? 1.0f : -1.0f}; spPostprocessC2C(D1, D2, twiddle); d_Dst[storePos1] = D1; d_Dst[storePos2] = D2; } } __global__ void spPreprocess2D_kernel(fComplex *d_Dst, fComplex *d_Src, uint DY, uint DX, uint threadCount, uint padding, float phaseBase #if (USE_TEXTURE) , cudaTextureObject_t texComplex #endif ) { const uint threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= threadCount) { return; } uint x, y, i = threadId; udivmod(i, DX / 2, x); udivmod(i, DY, y); // Avoid overwrites in columns 0 and DX / 2 by different threads (lower and // upper halves) if ((x == 0) && (y > DY / 2)) { return; } const uint srcOffset = i * DY * (DX + padding); const uint dstOffset = i * DY * DX; // Process x = [0 .. DX / 2 - 1] U [DX / 2 + 1 .. 
DX] { const uint loadPos1 = srcOffset + y * (DX + padding) + x; const uint loadPos2 = srcOffset + mod(y, DY) * (DX + padding) + (DX - x); const uint storePos1 = dstOffset + y * DX + x; const uint storePos2 = dstOffset + mod(y, DY) * DX + mod(x, DX); fComplex D1 = LOAD_FCOMPLEX(loadPos1); fComplex D2 = LOAD_FCOMPLEX(loadPos2); fComplex twiddle; getTwiddle(twiddle, phaseBase * (float)x); spPreprocessC2C(D1, D2, twiddle); d_Dst[storePos1] = D1; d_Dst[storePos2] = D2; } // Process x = DX / 2 if (x == 0) { const uint loadPos1 = srcOffset + y * (DX + padding) + DX / 2; const uint loadPos2 = srcOffset + mod(y, DY) * (DX + padding) + DX / 2; const uint storePos1 = dstOffset + y * DX + DX / 2; const uint storePos2 = dstOffset + mod(y, DY) * DX + DX / 2; fComplex D1 = LOAD_FCOMPLEX(loadPos1); fComplex D2 = LOAD_FCOMPLEX(loadPos2); // twiddle = getTwiddle(phaseBase * (DX / 2)) = exp(-dir * j * PI / 2) fComplex twiddle = {0, (phaseBase > 0) ? 1.0f : -1.0f}; spPreprocessC2C(D1, D2, twiddle); d_Dst[storePos1] = D1; d_Dst[storePos2] = D2; } } //////////////////////////////////////////////////////////////////////////////// // Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D //////////////////////////////////////////////////////////////////////////////// __global__ void spProcess2D_kernel(fComplex *d_Dst, fComplex *d_SrcA, fComplex *d_SrcB, uint DY, uint DX, uint threadCount, float phaseBase, float c #if (USE_TEXTURE) , cudaTextureObject_t texComplexA, cudaTextureObject_t texComplexB #endif ) { const uint threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= threadCount) { return; } uint x, y, i = threadId; udivmod(i, DX, x); udivmod(i, DY / 2, y); const uint offset = i * DY * DX; // Avoid overwrites in rows 0 and DY / 2 by different threads (left and right // halves) Otherwise correctness for in-place transformations is affected if ((y == 0) && (x > DX / 2)) { return; } fComplex twiddle; // Process y = [0 .. DY / 2 - 1] U [DY - (DY / 2) + 1 .. DY - 1] { const uint pos1 = offset + y * DX + x; const uint pos2 = offset + mod(y, DY) * DX + mod(x, DX); fComplex D1 = LOAD_FCOMPLEX_A(pos1); fComplex D2 = LOAD_FCOMPLEX_A(pos2); fComplex K1 = LOAD_FCOMPLEX_B(pos1); fComplex K2 = LOAD_FCOMPLEX_B(pos2); getTwiddle(twiddle, phaseBase * (float)x); spPostprocessC2C(D1, D2, twiddle); spPostprocessC2C(K1, K2, twiddle); mulAndScale(D1, K1, c); mulAndScale(D2, K2, c); spPreprocessC2C(D1, D2, twiddle); d_Dst[pos1] = D1; d_Dst[pos2] = D2; } if (y == 0) { const uint pos1 = offset + (DY / 2) * DX + x; const uint pos2 = offset + (DY / 2) * DX + mod(x, DX); fComplex D1 = LOAD_FCOMPLEX_A(pos1); fComplex D2 = LOAD_FCOMPLEX_A(pos2); fComplex K1 = LOAD_FCOMPLEX_B(pos1); fComplex K2 = LOAD_FCOMPLEX_B(pos2); spPostprocessC2C(D1, D2, twiddle); spPostprocessC2C(K1, K2, twiddle); mulAndScale(D1, K1, c); mulAndScale(D2, K2, c); spPreprocessC2C(D1, D2, twiddle); d_Dst[pos1] = D1; d_Dst[pos2] = D2; } }
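The kernels above are element-wise over the padded FFT grid, so the host side only needs a launch configuration plus the 1/(fftH*fftW) scale that the unnormalized inverse transform requires (per the "normalize by FFT size" comment). A hedged sketch of such a wrapper for modulateAndNormalize_kernel follows, assuming USE_TEXTURE is disabled; the wrapper name and the block size of 256 are choices made here, not part of the original sample.

// Hedged host-side sketch: launch modulateAndNormalize_kernel over the R2C spectrum
// of an fftH x fftW real image (fftH * (fftW / 2 + 1) complex elements) and fold in
// the 1 / (fftH * fftW) normalization needed before the inverse C2R transform.
void modulateAndNormalize(fComplex *d_Dst, fComplex *d_Src, int fftH, int fftW) {
  const int dataSize = fftH * (fftW / 2 + 1);
  const int blockSize = 256;
  const int gridSize = (dataSize + blockSize - 1) / blockSize;  // ceil(dataSize / blockSize)
  modulateAndNormalize_kernel<<<gridSize, blockSize>>>(
      d_Dst, d_Src, dataSize, 1.0f / static_cast<float>(fftH * fftW));
}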
the_stack
using namespace std; namespace gpu { __global__ void unitarysingle( gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim, gpu_qsize_t Block, gpu_qstate_t matrix_real00, gpu_qstate_t matrix_real01, gpu_qstate_t matrix_real10, gpu_qstate_t matrix_real11, gpu_qstate_t matrix_imag00, gpu_qstate_t matrix_imag01, gpu_qstate_t matrix_imag10, gpu_qstate_t matrix_imag11) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number gpu_qsize_t BlockNum = idx / Block; gpu_qsize_t BlockInt = idx % Block; gpu_qsize_t realIdx = BlockNum * 2 * Block + BlockInt; gpu_qsize_t corIdx = realIdx + Block; if (corIdx < Dim) { gpu_qstate_t X1 = psireal[realIdx]; gpu_qstate_t X2 = psireal[corIdx]; gpu_qstate_t Y1 = psiimag[realIdx]; gpu_qstate_t Y2 = psiimag[corIdx]; psireal[realIdx] = matrix_real00 * X1 - matrix_imag00 * Y1 + matrix_real01 * X2 - matrix_imag01 * Y2; psireal[corIdx] = matrix_real10 * X1 - matrix_imag10 * Y1 + matrix_real11 * X2 - matrix_imag11 * Y2; psiimag[realIdx] = matrix_real00 * Y1 + matrix_imag00 * X1 + matrix_real01 * Y2 + matrix_imag01 * X2; psiimag[corIdx] = matrix_real10 * Y1 + matrix_imag10 * X1 + matrix_real11 * Y2 + matrix_imag11 * X2; } } __global__ void controlunitarysingle( gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim, gpu_qsize_t target_qubit, gpu_qsize_t controller_mask, gpu_qstate_t matrix_real00, gpu_qstate_t matrix_real01, gpu_qstate_t matrix_real10, gpu_qstate_t matrix_real11, gpu_qstate_t matrix_imag00, gpu_qstate_t matrix_imag01, gpu_qstate_t matrix_imag10, gpu_qstate_t matrix_imag11 ) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number if ( idx < Dim && ((idx & controller_mask) == controller_mask) && ((idx & target_qubit) == target_qubit) ) { gpu_qsize_t corIdx = idx; //1 gpu_qsize_t realIdx = corIdx - target_qubit; //0 gpu_qstate_t X1 = psireal[realIdx]; gpu_qstate_t X2 = psireal[corIdx]; gpu_qstate_t Y1 = psiimag[realIdx]; gpu_qstate_t Y2 = psiimag[corIdx]; psireal[realIdx] = matrix_real00 * X1 - matrix_imag00 * Y1 + matrix_real01 * X2 - matrix_imag01 * Y2; psireal[corIdx] = matrix_real10 * X1 - matrix_imag10 * Y1 + matrix_real11 * X2 - matrix_imag11 * Y2; psiimag[realIdx] = matrix_real00 * Y1 + matrix_imag00 * X1 + matrix_real01 * Y2 + matrix_imag01 * X2; psiimag[corIdx] = matrix_real10 * Y1 + matrix_imag10 * X1 + matrix_real11 * Y2 + matrix_imag11 * X2; } } __global__ void unitarydouble( gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim, gpu_qsize_t Block1, gpu_qsize_t Block2, gpu_qstate_t real0000, gpu_qstate_t real0001, gpu_qstate_t real0010, gpu_qstate_t real0011, gpu_qstate_t real0100, gpu_qstate_t real0101, gpu_qstate_t real0110, gpu_qstate_t real0111, gpu_qstate_t real1000, gpu_qstate_t real1001, gpu_qstate_t real1010, gpu_qstate_t real1011, gpu_qstate_t real1100, gpu_qstate_t real1101, gpu_qstate_t real1110, gpu_qstate_t real1111, gpu_qstate_t imag0000, gpu_qstate_t imag0001, gpu_qstate_t imag0010, gpu_qstate_t imag0011, gpu_qstate_t imag0100, gpu_qstate_t imag0101, gpu_qstate_t imag0110, gpu_qstate_t imag0111, gpu_qstate_t imag1000, gpu_qstate_t imag1001, gpu_qstate_t imag1010, gpu_qstate_t imag1011, gpu_qstate_t imag1100, gpu_qstate_t imag1101, gpu_qstate_t imag1110, gpu_qstate_t imag1111) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; gpu_qsize_t Idx00, Idx01, Idx10, Idx11; if (Block1 > Block2) { Idx10 = (idx / (Block1 / 2)) * 2 * Block1 + Block1 + (idx % (Block1 / 2) / Block2) * 2 * Block2 + idx % Block2; } else { Idx10 = (idx / (Block2 / 2)) * 2 * Block2 + (idx % (Block2 
/ 2) / Block1) * 2 * Block1 + Block1 + idx % Block1; } Idx00 = Idx10 - Block1; Idx01 = Idx00 + Block2; Idx11 = Idx10 + Block2; if (Idx11 < Dim) { gpu_qstate_t X00 = psireal[Idx00]; gpu_qstate_t X01 = psireal[Idx01]; gpu_qstate_t X10 = psireal[Idx10]; gpu_qstate_t X11 = psireal[Idx11]; gpu_qstate_t Y00 = psiimag[Idx00]; gpu_qstate_t Y01 = psiimag[Idx01]; gpu_qstate_t Y10 = psiimag[Idx10]; gpu_qstate_t Y11 = psiimag[Idx11]; psireal[Idx00] = real0000 * X00 - imag0000 * Y00 + real0001 * X01 - imag0001 * Y01 + real0010 * X10 - imag0010 * Y10 + real0011 * X11 - imag0011 * Y11; psiimag[Idx00] = imag0000 * X00 + real0000 * Y00 + imag0001 * X01 + real0001 * Y01 + imag0010 * X10 + real0010 * Y10 + imag0011 * X11 + real0011 * Y11; psireal[Idx01] = real0100 * X00 - imag0100 * Y00 + real0101 * X01 - imag0101 * Y01 + real0110 * X10 - imag0110 * Y10 + real0111 * X11 - imag0111 * Y11; psiimag[Idx01] = imag0100 * X00 + real0100 * Y00 + imag0101 * X01 + real0101 * Y01 + imag0110 * X10 + real0110 * Y10 + imag0111 * X11 + real0111 * Y11; psireal[Idx10] = real1000 * X00 - imag1000 * Y00 + real1001 * X01 - imag1001 * Y01 + real1010 * X10 - imag1010 * Y10 + real1011 * X11 - imag1011 * Y11; psiimag[Idx10] = imag1000 * X00 + real1000 * Y00 + imag1001 * X01 + real1001 * Y01 + imag1010 * X10 + real1010 * Y10 + imag1011 * X11 + real1011 * Y11; psireal[Idx11] = real1100 * X00 - imag1100 * Y00 + real1101 * X01 - imag1101 * Y01 + real1110 * X10 - imag1110 * Y10 + real1111 * X11 - imag1111 * Y11; psiimag[Idx11] = imag1100 * X00 + real1100 * Y00 + imag1101 * X01 + real1101 * Y01 + imag1110 * X10 + real1110 * Y10 + imag1111 * X11 + real1111 * Y11; } } __global__ void controlunitarydouble( gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim, gpu_qsize_t controller_mask, gpu_qsize_t control_qubit, gpu_qsize_t target_qubit, gpu_qstate_t real0000, gpu_qstate_t real0001, gpu_qstate_t real0010, gpu_qstate_t real0011, gpu_qstate_t real0100, gpu_qstate_t real0101, gpu_qstate_t real0110, gpu_qstate_t real0111, gpu_qstate_t real1000, gpu_qstate_t real1001, gpu_qstate_t real1010, gpu_qstate_t real1011, gpu_qstate_t real1100, gpu_qstate_t real1101, gpu_qstate_t real1110, gpu_qstate_t real1111, gpu_qstate_t imag0000, gpu_qstate_t imag0001, gpu_qstate_t imag0010, gpu_qstate_t imag0011, gpu_qstate_t imag0100, gpu_qstate_t imag0101, gpu_qstate_t imag0110, gpu_qstate_t imag0111, gpu_qstate_t imag1000, gpu_qstate_t imag1001, gpu_qstate_t imag1010, gpu_qstate_t imag1011, gpu_qstate_t imag1100, gpu_qstate_t imag1101, gpu_qstate_t imag1110, gpu_qstate_t imag1111) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number if ( idx < Dim && ((idx & controller_mask) == controller_mask) && ((idx & control_qubit) == control_qubit) && ((idx & target_qubit) == target_qubit) ) { gpu_qsize_t Idx00 = idx - control_qubit - target_qubit; gpu_qsize_t Idx01 = Idx00 - control_qubit; gpu_qsize_t Idx10 = Idx00 - target_qubit; gpu_qsize_t Idx11 = idx; gpu_qstate_t X00 = psireal[Idx00]; gpu_qstate_t X01 = psireal[Idx01]; gpu_qstate_t X10 = psireal[Idx10]; gpu_qstate_t X11 = psireal[Idx11]; gpu_qstate_t Y00 = psiimag[Idx00]; gpu_qstate_t Y01 = psiimag[Idx01]; gpu_qstate_t Y10 = psiimag[Idx10]; gpu_qstate_t Y11 = psiimag[Idx11]; psireal[Idx00] = real0000 * X00 - imag0000 * Y00 + real0001 * X01 - imag0001 * Y01 + real0010 * X10 - imag0010 * Y10 + real0011 * X11 - imag0011 * Y11; psiimag[Idx00] = imag0000 * X00 + real0000 * Y00 + imag0001 * X01 + real0001 * Y01 + imag0010 * X10 + real0010 * Y10 + imag0011 * X11 + real0011 * Y11; 
psireal[Idx01] = real0100 * X00 - imag0100 * Y00 + real0101 * X01 - imag0101 * Y01 + real0110 * X10 - imag0110 * Y10 + real0111 * X11 - imag0111 * Y11; psiimag[Idx01] = imag0100 * X00 + real0100 * Y00 + imag0101 * X01 + real0101 * Y01 + imag0110 * X10 + real0110 * Y10 + imag0111 * X11 + real0111 * Y11; psireal[Idx10] = real1000 * X00 - imag1000 * Y00 + real1001 * X01 - imag1001 * Y01 + real1010 * X10 - imag1010 * Y10 + real1011 * X11 - imag1011 * Y11; psiimag[Idx10] = imag1000 * X00 + real1000 * Y00 + imag1001 * X01 + real1001 * Y01 + imag1010 * X10 + real1010 * Y10 + imag1011 * X11 + real1011 * Y11; psireal[Idx11] = real1100 * X00 - imag1100 * Y00 + real1101 * X01 - imag1101 * Y01 + real1110 * X10 - imag1110 * Y10 + real1111 * X11 - imag1111 * Y11; psiimag[Idx11] = imag1100 * X00 + real1100 * Y00 + imag1101 * X01 + real1101 * Y01 + imag1110 * X10 + real1110 * Y10 + imag1111 * X11 + real1111 * Y11; } }
__global__ void initState(gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number if (idx < Dim && idx != 0) { psireal[idx] = 0; psiimag[idx] = 0; } if (0 == idx) { psireal[0] = 1; psiimag[0] = 0; } }
__global__ void qubitprob(gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim, gpu_qsize_t Block, gpu_qstate_t *pr) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; gpu_qsize_t bid = blockIdx.x, tid = threadIdx.x; gpu_qsize_t BlockNum = idx / Block; gpu_qsize_t BlockInt = idx % Block; gpu_qsize_t realIdx = BlockNum * 2 * Block + BlockInt; gpu_qsize_t corIdx = realIdx + Block; extern __shared__ gpu_qstate_t dprob[]; dprob[tid] = 0; if (corIdx < Dim) { dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx]; __syncthreads(); gpu_qsize_t offset = 1, mask = 1; while (offset < kThreadDim) { if ((tid & mask) == 0) { dprob[tid] += dprob[tid + offset]; } offset += offset; mask = offset + mask; __syncthreads(); } if (tid == 0) { pr[bid] = dprob[0]; } } }
__global__ void probsumnew1(gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qstate_t *probtemp, size_t num1, size_t m, size_t Dim, size_t * block) { size_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number size_t index1, index = 0, index2, k, s; gpu_qstate_t temp = 0; index1 = num1 + idx; //index1: basis index over the measured qubits handled by this thread if (index1 < (1u << m)) { for (size_t j = 0; j < m; j++) { index += block[j] * ((index1 >> j) % 2); }//index: offset of the corresponding basis state in the full state vector for (size_t i = 0; i < Dim / (1u << m); i++) { index2 = i; for (size_t j = 0; j < m; j++) { s = index2 / block[j]; k = index2 % block[j]; index2 = s * 2 * block[j] + k; } index2 += index; temp += psireal[index2] * psireal[index2] + psiimag[index2] * psiimag[index2]; } probtemp[idx] = temp; } }
__global__ void probsum(gpu_qstate_t *pr, gpu_qstate_t *prob) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number if (0 == idx) { gpu_qstate_t dprob = 0; for (int i = 0; i < gridDim.x; i++) { dprob += pr[i]; } *prob = dprob; } }//checked and can be optimized
__global__ void qubitcollapse0(gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim, gpu_qsize_t Block, gpu_qstate_t coef) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number gpu_qsize_t BlockNum = idx / Block; gpu_qsize_t BlockInt = idx % Block; gpu_qsize_t realIdx = BlockNum * 2 * Block + BlockInt; gpu_qsize_t corIdx = realIdx + Block; if (corIdx < Dim) { gpu_qstate_t X1 = psireal[realIdx]; gpu_qstate_t Y1 = psiimag[realIdx]; psireal[realIdx] = X1 * coef; psireal[corIdx] = 0; psiimag[realIdx] = Y1 * coef; psiimag[corIdx] = 0; } }//checked
__global__ void qubitcollapse1(gpu_qstate_t * psireal, gpu_qstate_t * psiimag, gpu_qsize_t Dim, gpu_qsize_t Block, gpu_qstate_t coef) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number gpu_qsize_t BlockNum = idx / Block; gpu_qsize_t BlockInt = idx % Block; gpu_qsize_t realIdx = BlockNum * 2 * Block + BlockInt; gpu_qsize_t corIdx = realIdx + Block; if (corIdx < Dim) { gpu_qstate_t X2 = psireal[corIdx]; gpu_qstate_t Y2 = psiimag[corIdx]; psireal[realIdx] = 0; psireal[corIdx] = X2 * coef; psiimag[realIdx] = 0; psiimag[corIdx] = Y2 * coef; } }//checked
/************************************************************************************** psireal / psiimag: state vector (real / imaginary parts) pro: output, per-block partial probabilities block: stride of each target qubit m: number of target qubits dec: offset encoding the target qubit state ****************************************************************************************/
__global__ void multiprob(gpu_qstate_t *psireal, gpu_qstate_t *psiimag, gpu_qsize_t Dim, gpu_qstate_t *pro, gpu_qsize_t *block, gpu_qsize_t m, gpu_qsize_t dec) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number gpu_qsize_t bid = blockIdx.x, tid = threadIdx.x; //gpu_qsize_t BlockNum = idx / Block; //gpu_qsize_t BlockInt = idx% Block; extern __shared__ gpu_qstate_t dprob[]; dprob[tid] = 0; gpu_qsize_t i, j, k; if (idx < Dim / (1 << m)) { gpu_qsize_t index = idx; for (i = 0; i < m; i++) { j = index / block[i]; k = index % block[i]; index = j * 2 * block[i] + k; } //index: basis state with all target qubits set to 0 gpu_qsize_t realIdx = index + dec; //basis state whose probability is accumulated dprob[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx]; __syncthreads();//tree reduction in shared memory int offset = 1, mask = 1; while (offset < kThreadDim) { if ((tid & mask) == 0) { dprob[tid] += dprob[tid + offset]; } offset += offset; mask = offset + mask; __syncthreads(); } //after the reduction only thread 0 writes; dprob[0] holds this block's partial probability if (tid == 0) { pro[bid] = dprob[0]; //summing pro[] on the host afterwards gives the requested probability } } }
__global__ void pmeasure_many_target(gpu_qstate_t* psireal, gpu_qstate_t* psiimag, double *result, gpu_qsize_t qnum_mask, gpu_qsize_t result_size, gpu_qsize_t Dim) { gpu_qsize_t bid = blockIdx.x; gpu_qsize_t tid = threadIdx.x; gpu_qsize_t idx = blockDim.x*bid + tid; // One thread per result entry (result_size entries in total). // Intended for pmeasure over many target qubits (more than about 10), where the number of outcomes is large. result[idx] = 0; if (idx < result_size) { for (gpu_qsize_t i = 0; i < Dim / result_size; ++i) { gpu_qsize_t realIdx = 0; gpu_qsize_t copy_i = i; // i is consumed bit by bit below, so work on a copy gpu_qsize_t copy_idx = idx; // same for idx // Build realIdx by interleaving the bits of copy_i and copy_idx according to qnum_mask. // Example: // qnum_mask : 00100100 // copy_i = abcdef // copy_idx = xy // // realIdx should be abxcdyef // flag counts how many bits Dim has (Dim = 100000000 in binary means 8 mask bits to scan); // set_digit marks the bit position currently being filled, i.e. realIdx = set_digit * (?) + realIdx // If the current mask bit is 0, the next bit comes from copy_i (take copy_i & 1, then shift right); // if the mask bit is 1, the next bit comes from copy_idx (likewise). gpu_qsize_t set_digit = 1; gpu_qsize_t qnum_mask_copy = qnum_mask; int loops = 0; for (gpu_qsize_t flag = Dim; flag != 1; flag >>= 1) { loops++; if ((qnum_mask_copy & 1) == 0) { realIdx += (set_digit *(copy_i & 1)); copy_i >>= 1; } else { realIdx += (set_digit *(copy_idx & 1)); copy_idx >>= 1; } set_digit <<= 1; qnum_mask_copy >>= 1; } result[idx] += psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx]; } } }
__global__ void pmeasure_one_target( gpu_qstate_t* psireal, gpu_qstate_t* psiimag, double *result, gpu_qsize_t qnum_mask, size_t result_idx, gpu_qsize_t result_dim, gpu_qsize_t Dim) { gpu_qsize_t idx = blockDim.x*blockIdx.x + threadIdx.x; //thread number gpu_qsize_t bid = blockIdx.x, tid = threadIdx.x; extern __shared__ double dprob_result[]; dprob_result[tid] = 0; if (idx < (Dim>>result_dim)) { gpu_qsize_t copy_idx = idx; gpu_qsize_t copy_result_idx = result_idx; // Build realIdx by interleaving the bits of copy_idx and copy_result_idx according to qnum_mask. // Example: // qnum_mask : 00100100 // idx = abcdef // result_idx = xy // // realIdx should be abxcdyef // If the current mask bit is 0, the next bit comes from copy_idx (take copy_idx & 1, then shift right); // if the mask bit is 1, the next bit comes from copy_result_idx (likewise). gpu_qsize_t realIdx = 0; gpu_qsize_t set_digit = 1; gpu_qsize_t qnum_mask_copy = qnum_mask; int loops = 0; for (gpu_qsize_t flag = Dim; flag != 1; flag >>= 1) { loops++; if ((qnum_mask_copy & 1) == 0) { realIdx += (set_digit *(copy_idx & 1)); copy_idx >>= 1; } else { realIdx += (set_digit *(copy_result_idx & 1)); copy_result_idx >>= 1; } set_digit <<= 1; qnum_mask_copy >>= 1; } dprob_result[tid] = psireal[realIdx] * psireal[realIdx] + psiimag[realIdx] * psiimag[realIdx]; __syncthreads(); size_t offset = 1, mask = 1; while (offset < kThreadDim) { if ((tid & mask) == 0) { dprob_result[tid] += dprob_result[tid + offset]; } offset += offset; mask = offset + mask; __syncthreads(); } if (tid == 0) { result[bid] = dprob_result[0]; } } }
double randGenerator() { int ia = 16807, im = 2147483647, iq = 127773, ir = 2836; /*define constant numbers for the 16807 (minimal standard) generator.*/ time_t rawtime; struct tm * timeinfo; time(&rawtime); timeinfo = localtime(&rawtime); static int irandseed = timeinfo->tm_year + 70 * (timeinfo->tm_mon + 1 + 12 * (timeinfo->tm_mday + 31 * (timeinfo->tm_hour + 23 * (timeinfo->tm_min + 59 * timeinfo->tm_sec)))); static int irandnewseed; if (ia*(irandseed%iq) - ir * (irandseed / iq) >= 0) irandnewseed = ia * (irandseed%iq) - ir * (irandseed / iq); else irandnewseed = ia * (irandseed%iq) - ir * (irandseed / iq) + im; irandseed = irandnewseed; return (double)irandnewseed / im; } } // namespace gpu
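unitarysingle above updates one amplitude pair per thread, with the pair separated by a stride Block = 2^target. A hedged launch sketch for applying a Hadamard gate is shown below; gpu_qstate_t, gpu_qsize_t and kThreadDim are taken from the surrounding simulator, while the wrapper name and the assumption that the real/imag state vectors already live on the device are introduced here.

#include <cmath>

// Hedged sketch: apply H = (1/sqrt(2)) [[1, 1], [1, -1]] to qubit `target` of an
// n-qubit state split into real/imag device arrays, using gpu::unitarysingle above.
void apply_hadamard(gpu_qstate_t *d_real, gpu_qstate_t *d_imag,
                    size_t qubit_num, size_t target, cudaStream_t stream) {
  const gpu_qsize_t Dim = gpu_qsize_t(1) << qubit_num;  // 2^n amplitudes
  const gpu_qsize_t Block = gpu_qsize_t(1) << target;   // stride between the paired amplitudes
  const gpu_qsize_t pairs = Dim / 2;                    // one thread per amplitude pair
  const unsigned int grid =
      static_cast<unsigned int>((pairs + kThreadDim - 1) / kThreadDim);
  const gpu_qstate_t h = static_cast<gpu_qstate_t>(1.0 / std::sqrt(2.0));
  gpu::unitarysingle<<<grid, kThreadDim, 0, stream>>>(
      d_real, d_imag, Dim, Block,
      h, h, h, -h,   // matrix_real00, 01, 10, 11 (H is purely real)
      0, 0, 0, 0);   // matrix_imag00, 01, 10, 11
}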
the_stack
#define START_IL_TIMER() start = std::chrono::high_resolution_clock::now(); #define END_IL_TIMER(x) \ stop = std::chrono::high_resolution_clock::now(); \ duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); \ x += duration; \ total_time += duration; #define PRINT_IL_TIMER(x) std::cout << #x << ": " << ((float)x.count()) / 1000000.0 << "s" << std::endl void tsnecuda::RunTsne(tsnecuda::Options &opt, tsnecuda::GpuOptions &gpu_opt) { auto start = std::chrono::high_resolution_clock::now(); auto stop = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start); auto total_time = duration; auto _time_initialization = duration; auto _time_knn = duration; auto _time_symmetry = duration; auto _time_init_low_dim = duration; auto _time_init_fft = duration; auto _time_precompute_2d = duration; auto _time_nbodyfft = duration; auto _time_compute_charges = duration; auto _time_other = duration; auto _time_norm = duration; auto _time_attr = duration; auto _time_apply_forces = duration; // Check the validity of the options file if (!opt.validate()) { std::cout << "E: Invalid options file. Terminating." << std::endl; return; } START_IL_TIMER(); if (opt.verbosity > 0) { std::cout << "Initializing cuda handles... " << std::flush; } // Construct the handles // TODO: Move this outside of the timing code, since RAPIDs is cheating by pre-initializing the handle. // TODO: Allow for multi-stream on the computation, since we can overlap portions of our computation to be quicker. cublasHandle_t dense_handle; CublasSafeCall(cublasCreate(&dense_handle)); cusparseHandle_t sparse_handle; CusparseSafeCall(cusparseCreate(&sparse_handle)); // TODO: Pre-allocate device memory, and look for the ability to reuse in our code // Set CUDA device properties // TODO: Add new GPUs to the gpu_opt, and tune for that. const int num_blocks = gpu_opt.sm_count; // Construct sparse matrix descriptor cusparseMatDescr_t sparse_matrix_descriptor; cusparseCreateMatDescr(&sparse_matrix_descriptor); cusparseSetMatType(sparse_matrix_descriptor, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(sparse_matrix_descriptor, CUSPARSE_INDEX_BASE_ZERO); // Setup some return information if we're working on snapshots // TODO: Add compile flag to remove snapshotting for timing parity int snap_num = 0; int snap_interval = 1; if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT) { snap_interval = opt.iterations / (opt.num_snapshots - 1); } // Get constants from options const int num_points = opt.num_points; // TODO: Warn if the number of neighbors is more than the number of points const int num_neighbors = (opt.num_neighbors < num_points) ? 
opt.num_neighbors : num_points; const float *high_dim_points = opt.points; const int high_dim = opt.num_dims; const float perplexity = opt.perplexity; const float perplexity_search_epsilon = opt.perplexity_search_epsilon; const float eta = opt.learning_rate; float momentum = opt.pre_exaggeration_momentum; float attr_exaggeration = opt.early_exaggeration; float normalization; // Allocate host memory // TODO: Pre-determine GPU/CPU memory requirements, since we will know them ahead of time, and can estimate // if you're going to run out of GPU memory // TODO: Investigate what it takes to use unified memory + Async fetch and execution float *knn_squared_distances = new float[num_points * num_neighbors]; memset(knn_squared_distances, 0, num_points * num_neighbors * sizeof(float)); long *knn_indices = new long[num_points * num_neighbors]; // Set cache configs // cudaFuncSetCacheConfig(tsnecuda::IntegrationKernel, cudaFuncCachePreferL1); // cudaFuncSetCacheConfig(tsnecuda::ComputePijxQijKernel, cudaFuncCachePreferShared); GpuErrorCheck(cudaDeviceSynchronize()); END_IL_TIMER(_time_initialization); START_IL_TIMER(); if (opt.verbosity > 0) { std::cout << "done.\nKNN Computation... " << std::flush; } // Compute approximate K Nearest Neighbors and squared distances // TODO: See if we can gain some time here by updating FAISS, and building better indicies // TODO: Add suport for arbitrary metrics on GPU (Introduced by recent FAISS computation) // TODO: Expose Multi-GPU computation (+ Add streaming memory support for GPU optimization) tsnecuda::util::KNearestNeighbors(gpu_opt, opt, knn_indices, knn_squared_distances, high_dim_points, high_dim, num_points, num_neighbors); thrust::device_vector<long> knn_indices_long_device(knn_indices, knn_indices + num_points * num_neighbors); thrust::device_vector<int> pij_indices_device(num_points * num_neighbors); tsnecuda::util::PostprocessNeighborIndices(gpu_opt, pij_indices_device, knn_indices_long_device, num_points, num_neighbors); // Max-norm the distances to avoid exponentiating by large numbers thrust::device_vector<float> knn_squared_distances_device(knn_squared_distances, knn_squared_distances + (num_points * num_neighbors)); tsnecuda::util::MaxNormalizeDeviceVector(knn_squared_distances_device); END_IL_TIMER(_time_knn); START_IL_TIMER(); if (opt.verbosity > 0) { std::cout << "done.\nComputing Pij matrix... 
" << std::endl; } // Search Perplexity thrust::device_vector<float> pij_non_symmetric_device(num_points * num_neighbors); tsnecuda::SearchPerplexity(gpu_opt, dense_handle, pij_non_symmetric_device, knn_squared_distances_device, perplexity, perplexity_search_epsilon, num_points, num_neighbors); // Clean up memory cudaDeviceSynchronize(); knn_squared_distances_device.clear(); knn_squared_distances_device.shrink_to_fit(); // knn_indices_long_device.clear(); // knn_indices_long_device.shrink_to_fit(); delete[] knn_squared_distances; delete[] knn_indices; // Symmetrize the pij matrix thrust::device_vector<float> pij_device(num_points * num_neighbors); tsnecuda::util::SymmetrizeMatrixV2(pij_device, pij_non_symmetric_device, pij_indices_device, num_points, num_neighbors); // Clean up memory pij_non_symmetric_device.clear(); pij_non_symmetric_device.shrink_to_fit(); // Declare memory thrust::device_vector<float> pij_workspace_device(num_points * num_neighbors * 2); thrust::device_vector<float> repulsive_forces_device(opt.num_points * 2, 0); thrust::device_vector<float> attractive_forces_device(opt.num_points * 2, 0); thrust::device_vector<float> gains_device(opt.num_points * 2, 1); thrust::device_vector<float> old_forces_device(opt.num_points * 2, 0); // for momentum thrust::device_vector<float> normalization_vec_device(opt.num_points); thrust::device_vector<float> ones_device(opt.num_points * 2, 1); // This is for reduce summing, etc. // thrust::device_vector<int> coo_indices_device(sparse_pij_device.size() * 2); // tsnecuda::util::Csr2Coo(gpu_opt, coo_indices_device, pij_row_ptr_device, // pij_col_ind_device, num_points, num_nonzero); END_IL_TIMER(_time_symmetry); START_IL_TIMER(); if (opt.verbosity > 0) { std::cout << "done.\nInitializing low dim points... " << std::flush; } // Initialize Low-Dim Points thrust::device_vector<float> points_device(num_points * 2); thrust::device_vector<float> random_vector_device(points_device.size()); std::default_random_engine generator(opt.random_seed); std::normal_distribution<float> distribution1(0.0, 1.0); thrust::host_vector<float> h_points_device(num_points * 2); // Initialize random noise vector for (int i = 0; i < h_points_device.size(); i++) h_points_device[i] = 0.001 * distribution1(generator); thrust::copy(h_points_device.begin(), h_points_device.end(), random_vector_device.begin()); // TODO: this will only work with gaussian init if (opt.initialization == tsnecuda::TSNE_INIT::UNIFORM) { // Random uniform initialization points_device = tsnecuda::util::RandomDeviceVectorInRange(generator, points_device.size(), -5, 5); } else if (opt.initialization == tsnecuda::TSNE_INIT::GAUSSIAN) { // Random gaussian initialization // Generate some Gaussian noise for the points for (int i = 0; i < h_points_device.size(); i++) h_points_device[i] = 0.0001 * distribution1(generator); thrust::copy(h_points_device.begin(), h_points_device.end(), points_device.begin()); } else if (opt.initialization == tsnecuda::TSNE_INIT::RESUME) { // Preinit from vector // Load from vector if (opt.preinit_data != nullptr) { thrust::copy(opt.preinit_data, opt.preinit_data + points_device.size(), points_device.begin()); } else { std::cerr << "E: Invalid initialization. Initialization points are null." 
<< std::endl; exit(1); } } else if (opt.initialization == tsnecuda::TSNE_INIT::VECTOR) { // Preinit from vector points only // Copy the pre-init data if (opt.preinit_data != nullptr) { thrust::copy(opt.preinit_data, opt.preinit_data + points_device.size(), points_device.begin()); } else { std::cerr << "E: Invalid initialization. Initialization points are null." << std::endl; exit(1); } } else { // Invalid initialization std::cerr << "E: Invalid initialization type specified." << std::endl; exit(1); } END_IL_TIMER(_time_init_low_dim); START_IL_TIMER(); if (opt.verbosity > 0) { std::cout << "done.\nInitializing CUDA memory... " << std::flush; } // FIT-TNSE Parameters int n_interpolation_points = 3; // float intervals_per_integer = 1; int min_num_intervals = 125; int N = num_points; // int D = 2; // The number of "charges" or s+2 sums i.e. number of kernel sums int n_terms = 4; int n_boxes_per_dim = min_num_intervals; // FFTW works faster on numbers that can be written as 2^a 3^b 5^c 7^d // 11^e 13^f, where e+f is either 0 or 1, and the other exponents are // arbitrary int allowed_n_boxes_per_dim[21] = {25, 36, 50, 55, 60, 65, 70, 75, 80, 85, 90, 96, 100, 110, 120, 130, 140, 150, 175, 200, 1125}; if (n_boxes_per_dim < allowed_n_boxes_per_dim[20]) { //Round up to nearest grid point int chosen_i; for (chosen_i = 0; allowed_n_boxes_per_dim[chosen_i] < n_boxes_per_dim; chosen_i++) ; n_boxes_per_dim = allowed_n_boxes_per_dim[chosen_i]; } int n_total_boxes = n_boxes_per_dim * n_boxes_per_dim; int total_interpolation_points = n_total_boxes * n_interpolation_points * n_interpolation_points; int n_fft_coeffs_half = n_interpolation_points * n_boxes_per_dim; int n_fft_coeffs = 2 * n_interpolation_points * n_boxes_per_dim; int n_interpolation_points_1d = n_interpolation_points * n_boxes_per_dim; // FIT-TSNE Device Vectors thrust::device_vector<int> point_box_idx_device(N); thrust::device_vector<float> x_in_box_device(N); thrust::device_vector<float> y_in_box_device(N); thrust::device_vector<float> y_tilde_values(total_interpolation_points * n_terms); thrust::device_vector<float> x_interpolated_values_device(N * n_interpolation_points); thrust::device_vector<float> y_interpolated_values_device(N * n_interpolation_points); thrust::device_vector<float> potentialsQij_device(N * n_terms); thrust::device_vector<float> w_coefficients_device(total_interpolation_points * n_terms); thrust::device_vector<float> all_interpolated_values_device( n_terms * n_interpolation_points * n_interpolation_points * N); thrust::device_vector<float> output_values( n_terms * n_interpolation_points * n_interpolation_points * N); thrust::device_vector<int> all_interpolated_indices( n_terms * n_interpolation_points * n_interpolation_points * N); thrust::device_vector<int> output_indices( n_terms * n_interpolation_points * n_interpolation_points * N); thrust::device_vector<float> chargesQij_device(N * n_terms); thrust::device_vector<float> box_lower_bounds_device(2 * n_total_boxes); thrust::device_vector<float> box_upper_bounds_device(2 * n_total_boxes); thrust::device_vector<float> kernel_tilde_device(n_fft_coeffs * n_fft_coeffs); thrust::device_vector<thrust::complex<float>> fft_kernel_tilde_device(2 * n_interpolation_points_1d * 2 * n_interpolation_points_1d); thrust::device_vector<float> fft_input(n_terms * n_fft_coeffs * n_fft_coeffs); thrust::device_vector<thrust::complex<float>> fft_w_coefficients(n_terms * n_fft_coeffs * (n_fft_coeffs / 2 + 1)); thrust::device_vector<float> fft_output(n_terms * n_fft_coeffs * n_fft_coeffs); // 
std::cout << "Floats allocated: " << n_terms * n_fft_coeffs * (n_fft_coeffs / 2 + 1) + 2 * n_terms * n_fft_coeffs * n_fft_coeffs + 2 * n_interpolation_points_1d * 2 * n_interpolation_points_1d + n_fft_coeffs * n_fft_coeffs + 4 * n_total_boxes + 2 * N * n_terms + total_interpolation_points * n_terms + 2 * N * n_interpolation_points + total_interpolation_points * n_terms + N + N + N + 4 * n_terms * n_interpolation_points * n_interpolation_points * N << std::endl; // Easier to compute denominator on CPU, so we should just calculate y_tilde_spacing on CPU also float h = 1 / (float)n_interpolation_points; float y_tilde_spacings[n_interpolation_points]; y_tilde_spacings[0] = h / 2; for (int i = 1; i < n_interpolation_points; i++) { y_tilde_spacings[i] = y_tilde_spacings[i - 1] + h; } float denominator[n_interpolation_points]; for (int i = 0; i < n_interpolation_points; i++) { denominator[i] = 1; for (int j = 0; j < n_interpolation_points; j++) { if (i != j) { denominator[i] *= y_tilde_spacings[i] - y_tilde_spacings[j]; } } } thrust::device_vector<float> y_tilde_spacings_device(y_tilde_spacings, y_tilde_spacings + n_interpolation_points); thrust::device_vector<float> denominator_device(denominator, denominator + n_interpolation_points); // Create the FFT Handles cufftHandle plan_kernel_tilde, plan_dft, plan_idft; CufftSafeCall(cufftCreate(&plan_kernel_tilde)); CufftSafeCall(cufftCreate(&plan_dft)); CufftSafeCall(cufftCreate(&plan_idft)); size_t work_size, work_size_dft, work_size_idft; int fft_dimensions[2] = {n_fft_coeffs, n_fft_coeffs}; CufftSafeCall(cufftMakePlan2d(plan_kernel_tilde, fft_dimensions[0], fft_dimensions[1], CUFFT_R2C, &work_size)); CufftSafeCall(cufftMakePlanMany(plan_dft, 2, fft_dimensions, NULL, 1, n_fft_coeffs * n_fft_coeffs, NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1), CUFFT_R2C, n_terms, &work_size_dft)); CufftSafeCall(cufftMakePlanMany(plan_idft, 2, fft_dimensions, NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1), NULL, 1, n_fft_coeffs * n_fft_coeffs, CUFFT_C2R, n_terms, &work_size_idft)); // Dump file float *host_ys = nullptr; std::ofstream dump_file; if (opt.get_dump_points()) { dump_file.open(opt.get_dump_file()); host_ys = new float[num_points * 2]; dump_file << num_points << " " << 2 << std::endl; } #ifndef NO_ZMQ bool send_zmq = opt.get_use_interactive(); zmq::context_t context(1); zmq::socket_t publisher(context, ZMQ_REQ); if (opt.get_use_interactive()) { // Try to connect to the socket if (opt.verbosity >= 1) std::cout << "Initializing Connection...." << std::endl; publisher.setsockopt(ZMQ_RCVTIMEO, opt.get_viz_timeout()); publisher.setsockopt(ZMQ_SNDTIMEO, opt.get_viz_timeout()); if (opt.verbosity >= 1) std::cout << "Waiting for connection to visualization for 10 secs...." << std::endl; publisher.connect(opt.get_viz_server()); // Send the number of points we should be expecting to the server std::string message = std::to_string(opt.num_points); send_zmq = publisher.send(message.c_str(), message.length()); // Wait for server reply zmq::message_t request; send_zmq = publisher.recv(&request); // If there's a time-out, don't bother. if (send_zmq) { if (opt.verbosity >= 1) std::cout << "Visualization connected!" << std::endl; } else { std::cout << "No Visualization Terminal, continuing..." << std::endl; send_zmq = false; } } #else if (opt.get_use_interactive()) std::cout << "This version is not built with ZMQ for interative viz. Rebuild with WITH_ZMQ=TRUE for viz." << std::endl; #endif if (opt.verbosity > 0) { std::cout << "done." 
<< std::endl; } END_IL_TIMER(_time_init_fft); // Support for infinite iteration for (size_t step = 0; step != opt.iterations; step++) { START_IL_TIMER(); // TODO: We might be able to write a kernel which does this more efficiently. It probably doesn't require much // TODO: but it could be done. float fill_value = 0; thrust::fill(w_coefficients_device.begin(), w_coefficients_device.end(), fill_value); thrust::fill(potentialsQij_device.begin(), potentialsQij_device.end(), fill_value); // Setup learning rate schedule if (step == opt.force_magnify_iters) { momentum = opt.post_exaggeration_momentum; attr_exaggeration = 1.0f; } END_IL_TIMER(_time_other); // Prepare the terms that we'll use to compute the sum i.e. the repulsive forces START_IL_TIMER(); tsnecuda::ComputeChargesQij(chargesQij_device, points_device, num_points, n_terms); END_IL_TIMER(_time_compute_charges); // Compute Minimax elements START_IL_TIMER(); auto minimax_iter = thrust::minmax_element(points_device.begin(), points_device.end()); float min_coord = minimax_iter.first[0]; float max_coord = minimax_iter.second[0]; // Compute the number of boxes in a single dimension and the total number of boxes in 2d // auto n_boxes_per_dim = static_cast<int>(fmax(min_num_intervals, (max_coord - min_coord) / intervals_per_integer)); tsnecuda::PrecomputeFFT2D( plan_kernel_tilde, max_coord, min_coord, max_coord, min_coord, n_boxes_per_dim, n_interpolation_points, box_lower_bounds_device, box_upper_bounds_device, kernel_tilde_device, fft_kernel_tilde_device); float box_width = ((max_coord - min_coord) / (float)n_boxes_per_dim); END_IL_TIMER(_time_precompute_2d); START_IL_TIMER(); tsnecuda::NbodyFFT2D( plan_dft, plan_idft, N, n_terms, n_boxes_per_dim, n_interpolation_points, fft_kernel_tilde_device, n_total_boxes, total_interpolation_points, min_coord, box_width, n_fft_coeffs_half, n_fft_coeffs, fft_input, fft_w_coefficients, fft_output, point_box_idx_device, x_in_box_device, y_in_box_device, points_device, box_lower_bounds_device, y_tilde_spacings_device, denominator_device, y_tilde_values, all_interpolated_values_device, output_values, all_interpolated_indices, output_indices, w_coefficients_device, chargesQij_device, x_interpolated_values_device, y_interpolated_values_device, potentialsQij_device); END_IL_TIMER(_time_nbodyfft); START_IL_TIMER(); // TODO: We can overlap the computation of the attractive and repulsive forces, this requires changing the // TODO: default streams of the code in both of these methods // TODO: See: https://stackoverflow.com/questions/24368197/getting-cuda-thrust-to-use-a-cuda-stream-of-your-choice // Make the negative term, or F_rep in the equation 3 of the paper normalization = tsnecuda::ComputeRepulsiveForces( repulsive_forces_device, normalization_vec_device, points_device, potentialsQij_device, num_points, n_terms); END_IL_TIMER(_time_norm); START_IL_TIMER(); // Calculate Attractive Forces // tsnecuda::ComputeAttractiveForces(gpu_opt, // sparse_handle, // sparse_matrix_descriptor, // attractive_forces_device, // sparse_pij_device, // pij_row_ptr_device, // pij_col_ind_device, // coo_indices_device, // points_device, // ones_device, // num_points, // num_nonzero); tsnecuda::ComputeAttractiveForcesV3(dense_handle, gpu_opt, attractive_forces_device, pij_device, pij_indices_device, pij_workspace_device, points_device, ones_device, num_points, num_neighbors); END_IL_TIMER(_time_attr); START_IL_TIMER(); // TODO: Add stream synchronization here. 
// Apply Forces tsnecuda::ApplyForces(gpu_opt, points_device, attractive_forces_device, repulsive_forces_device, gains_device, old_forces_device, eta, normalization, momentum, attr_exaggeration, num_points, num_blocks); // Compute the gradient norm float grad_norm = tsnecuda::util::L2NormDeviceVector(old_forces_device); thrust::fill(attractive_forces_device.begin(), attractive_forces_device.end(), 0.0f); if (grad_norm < opt.min_gradient_norm) { if (opt.verbosity >= 1) std::cout << "Reached minimum gradient norm: " << grad_norm << std::endl; break; } if (opt.verbosity >= 1 && step % opt.print_interval == 0) { std::cout << "[Step " << step << "] Avg. Gradient Norm: " << grad_norm << std::endl; } END_IL_TIMER(_time_apply_forces); #ifndef NO_ZMQ if (send_zmq) { zmq::message_t message(sizeof(float) * opt.num_points * 2); thrust::copy(points_device.begin(), points_device.end(), static_cast<float *>(message.data())); bool res = false; res = publisher.send(message); zmq::message_t request; res = publisher.recv(&request); if (!res) { std::cout << "Server Disconnected, Not sending anymore for this session." << std::endl; } send_zmq = res; } #endif if (opt.get_dump_points() && step % opt.get_dump_interval() == 0) { thrust::copy(points_device.begin(), points_device.end(), host_ys); for (int i = 0; i < opt.num_points; i++) { dump_file << host_ys[i] << " " << host_ys[i + num_points] << std::endl; } } // Handle snapshoting if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT && step % snap_interval == 0 && opt.return_data != nullptr) { thrust::copy(points_device.begin(), points_device.end(), snap_num * opt.num_points * 2 + opt.return_data); snap_num += 1; } } // End for loop CufftSafeCall(cufftDestroy(plan_kernel_tilde)); CufftSafeCall(cufftDestroy(plan_dft)); CufftSafeCall(cufftDestroy(plan_idft)); if (opt.verbosity > 0) { PRINT_IL_TIMER(_time_initialization); PRINT_IL_TIMER(_time_knn); PRINT_IL_TIMER(_time_symmetry); PRINT_IL_TIMER(_time_init_low_dim); PRINT_IL_TIMER(_time_init_fft); PRINT_IL_TIMER(_time_compute_charges); PRINT_IL_TIMER(_time_precompute_2d); PRINT_IL_TIMER(_time_nbodyfft); PRINT_IL_TIMER(_time_norm); PRINT_IL_TIMER(_time_attr); PRINT_IL_TIMER(_time_apply_forces); PRINT_IL_TIMER(_time_other); PRINT_IL_TIMER(total_time); } // Clean up the dump file if we are dumping points if (opt.get_dump_points()) { delete[] host_ys; dump_file.close(); } // Handle a once off return type if (opt.return_style == tsnecuda::RETURN_STYLE::ONCE && opt.return_data != nullptr) { thrust::copy(points_device.begin(), points_device.end(), opt.return_data); } // Handle snapshoting if (opt.return_style == tsnecuda::RETURN_STYLE::SNAPSHOT && opt.return_data != nullptr) { thrust::copy(points_device.begin(), points_device.end(), snap_num * opt.num_points * 2 + opt.return_data); } // Return some final values opt.trained = true; opt.trained_norm = normalization; return; }
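RunTsne snaps min_num_intervals up to the nearest entry of allowed_n_boxes_per_dim so the interpolation grid, and hence the FFT sizes, factor into small primes. A standalone restatement of that rounding step is sketched below for clarity; round_up_boxes_per_dim is a name introduced here, not part of tsnecuda.

#include <cstdio>

// Mirror of the box-count rounding inside RunTsne: round up to the next
// FFT-friendly grid size from allowed_n_boxes_per_dim, unless already past the table.
int round_up_boxes_per_dim(int n_boxes_per_dim) {
  static const int allowed[21] = {25, 36, 50,  55,  60,  65,  70,  75,  80,  85, 90,
                                  96, 100, 110, 120, 130, 140, 150, 175, 200, 1125};
  if (n_boxes_per_dim < allowed[20]) {
    int i = 0;
    while (allowed[i] < n_boxes_per_dim) ++i;  // first allowed value >= requested count
    return allowed[i];
  }
  return n_boxes_per_dim;
}

int main() {
  // With the default min_num_intervals of 125, RunTsne ends up with 130 boxes per dimension.
  std::printf("125 -> %d boxes per dim\n", round_up_boxes_per_dim(125));
  return 0;
}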
the_stack
extern "C" { #include <ccv.h> #include <ccv_internal.h> #include <nnc/ccv_nnc.h> #include <nnc/ccv_nnc_easy.h> #include <nnc/ccv_nnc_internal.h> } #include <nnc/gpu/ccv_nnc_compat.h> #ifdef HAVE_CUDNN static int _ccv_nnc_mul_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context) { assert(input_size == 2); cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context); const float p = cmd.info.blas.a[0]; static const float zero = 0, one = 1; if (inputs[1] == 0) { const ccv_nnc_cudnn_tensor_view_descriptor_t a = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)inputs[0]); const ccv_nnc_cudnn_tensor_view_descriptor_t c = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)outputs[0]); CUDNN_ENFORCE(cudnnTransformTensor(cudnn, &p, a.descriptor, a.data.u8, &zero, c.descriptor, c.data.u8)); ccv_nnc_cudnn_deinit_tensor_view_descriptor(a); ccv_nnc_cudnn_deinit_tensor_view_descriptor(c); return CCV_NNC_EXEC_SUCCESS; } ccv_nnc_tensor_view_t atv = ccv_nnc_get_tensor_view(inputs[0]); ccv_nnc_tensor_view_t btv = ccv_nnc_get_tensor_view(inputs[1]); ccv_nnc_tensor_view_t* tvs[] = { &atv, &btv }; ccv_nnc_tensor_view_alignment(tvs, 2); int adim[CCV_NNC_MAX_DIM_ALLOC]; ccv_nnc_tensor_view_get_dim(&atv, adim); int bdim[CCV_NNC_MAX_DIM_ALLOC]; ccv_nnc_tensor_view_get_dim(&btv, bdim); // If the input a doesn't match the output. We can do two things: // 1. If b matches, we switch; // 2. Otherwise, we change a's dimension and stride. cudnnOpTensorDescriptor_t mul = ccv_nnc_stream_context_get_op_tensor_descriptor(stream_context); ccv_nnc_cudnn_tensor_view_descriptor_t a; if (!ccv_nnc_tensor_view_check_dim((const ccv_nnc_tensor_view_t*)outputs[0], adim)) { if (ccv_nnc_tensor_view_check_dim((const ccv_nnc_tensor_view_t*)outputs[0], bdim)) { ccv_nnc_tensor_view_t t; CCV_SWAP(atv, btv, t); a = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &atv); } else { const ccv_nnc_cudnn_tensor_view_descriptor_t old_a = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &atv); void* const workspace = ccv_nnc_stream_context_get_workspace(stream_context, ccv_nnc_tensor_data_size(outputs[0]->info), CCV_TENSOR_GPU_MEMORY); ccv_nnc_tensor_t tensor = ccv_nnc_tensor(workspace, outputs[0]->info, 0); a = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)&tensor); cudnnSetOpTensorDescriptor(mul, CUDNN_OP_TENSOR_ADD, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN); CUDNN_ENFORCE(cudnnOpTensor(cudnn, mul, &zero, a.descriptor, a.data.u8, &one, old_a.descriptor, old_a.data.u8, &zero, a.descriptor, a.data.u8)); ccv_nnc_cudnn_deinit_tensor_view_descriptor(old_a); } } else a = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &atv); const ccv_nnc_cudnn_tensor_view_descriptor_t b = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &btv); const ccv_nnc_cudnn_tensor_view_descriptor_t c = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)outputs[0]); cudnnSetOpTensorDescriptor(mul, CUDNN_OP_TENSOR_MUL, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN); CUDNN_ENFORCE(cudnnOpTensor(cudnn, mul, &p, a.descriptor, a.data.u8, &one, b.descriptor, b.data.u8, &zero, c.descriptor, c.data.u8)); 
ccv_nnc_stream_context_return_op_tensor_descriptor(stream_context, mul); ccv_nnc_cudnn_deinit_tensor_view_descriptor(a); ccv_nnc_cudnn_deinit_tensor_view_descriptor(b); ccv_nnc_cudnn_deinit_tensor_view_descriptor(c); return CCV_NNC_EXEC_SUCCESS; } static int _ccv_nnc_mul_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context) { cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context); const float p = cmd.info.blas.a[0]; static const float zero = 0, one = 1; ccv_nnc_tensor_view_t* const a = (ccv_nnc_tensor_view_t*)outputs[0]; ccv_nnc_tensor_view_t* const b = output_size > 1 ? (ccv_nnc_tensor_view_t*)outputs[1] : 0; int gdim[CCV_NNC_MAX_DIM_ALLOC]; ccv_nnc_cudnn_tensor_view_descriptor_t acu; ccv_nnc_cudnn_tensor_view_descriptor_t gbcu; ccv_nnc_tensor_param_t info; int a_broadcast_b = 0, b_broadcast_a = 0; if (a) { ccv_nnc_tensor_view_t atv = ccv_nnc_get_tensor_view(outputs[0]); ccv_nnc_tensor_view_t gbtv = ccv_nnc_get_tensor_view(inputs[2]); ccv_nnc_tensor_view_t* tvs[] = { &atv, &gbtv }; ccv_nnc_tensor_view_alignment(tvs, 2); info = atv.info; const int nd = ccv_nnc_tensor_nd(atv.info.dim); int i; for (i = 0; i < nd; i++) { info.dim[i] = ccv_max(atv.info.dim[i], gbtv.info.dim[i]); if (atv.info.dim[i] > gbtv.info.dim[i]) b_broadcast_a = 1; if (gbtv.info.dim[i] > atv.info.dim[i]) a_broadcast_b = 1; } acu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &atv); gbcu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &gbtv); ccv_nnc_tensor_view_get_dim((ccv_nnc_tensor_view_t*)inputs[2], gdim); } const int reduce_a_dim = a ? !ccv_nnc_tensor_view_check_dim(a, gdim) : 0; ccv_nnc_cudnn_tensor_view_descriptor_t bcu; ccv_nnc_cudnn_tensor_view_descriptor_t gacu; if (b) { ccv_nnc_tensor_view_t gatv = ccv_nnc_get_tensor_view(inputs[1]); ccv_nnc_tensor_view_t btv = ccv_nnc_get_tensor_view(outputs[1]); ccv_nnc_tensor_view_t* tvs[] = { &gatv, &btv }; ccv_nnc_tensor_view_alignment(tvs, 2); if (!a) { info = gatv.info; const int nd = ccv_nnc_tensor_nd(gatv.info.dim); int i; for (i = 0; i < nd; i++) { info.dim[i] = ccv_max(gatv.info.dim[i], btv.info.dim[i]); if (gatv.info.dim[i] > btv.info.dim[i]) b_broadcast_a = 1; if (btv.info.dim[i] > gatv.info.dim[i]) a_broadcast_b = 1; } } gacu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &gatv); bcu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, &btv); ccv_nnc_tensor_view_get_dim((ccv_nnc_tensor_view_t*)inputs[1], gdim); } const int reduce_b_dim = b ? 
!ccv_nnc_tensor_view_check_dim(b, gdim) : 0; cudnnReduceTensorDescriptor_t reduce_sum; if (reduce_a_dim || reduce_b_dim) { reduce_sum = ccv_nnc_stream_context_get_reduce_tensor_descriptor(stream_context); cudnnSetReduceTensorDescriptor(reduce_sum, CUDNN_REDUCE_TENSOR_ADD, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN, CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_32BIT_INDICES); } size_t workspace_size = 0; void* workspace = 0; if (reduce_a_dim && !b_broadcast_a) { size_t a_workspace_size = 0; CUDNN_ENFORCE(cudnnGetReductionWorkspaceSize(cudnn, reduce_sum, gbcu.descriptor, acu.descriptor, &a_workspace_size)); if (a_workspace_size > workspace_size) workspace_size = a_workspace_size; } if (reduce_b_dim && !a_broadcast_b) { size_t b_workspace_size = 0; CUDNN_ENFORCE(cudnnGetReductionWorkspaceSize(cudnn, reduce_sum, gacu.descriptor, bcu.descriptor, &b_workspace_size)); if (b_workspace_size > workspace_size) workspace_size = b_workspace_size; } if (inputs[0] == 0) { cudnnOpTensorDescriptor_t add = 0; ccv_nnc_cudnn_tensor_view_descriptor_t tcu = {}; if ((reduce_a_dim && b_broadcast_a) || (reduce_b_dim && a_broadcast_b)) { add = ccv_nnc_stream_context_get_op_tensor_descriptor(stream_context); cudnnSetOpTensorDescriptor(add, CUDNN_OP_TENSOR_ADD, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN); if (a_broadcast_b && b_broadcast_a) { const ccv_nnc_tensor_t t = ccv_nnc_tensor(&workspace_size /* This is a placeholder. */, info, 0); tcu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)&t); if (reduce_a_dim) { size_t a_workspace_size = 0; CUDNN_ENFORCE(cudnnGetReductionWorkspaceSize(cudnn, reduce_sum, tcu.descriptor, acu.descriptor, &a_workspace_size)); if (a_workspace_size > workspace_size) workspace_size = a_workspace_size; } if (reduce_b_dim) { size_t b_workspace_size = 0; CUDNN_ENFORCE(cudnnGetReductionWorkspaceSize(cudnn, reduce_sum, tcu.descriptor, bcu.descriptor, &b_workspace_size)); if (b_workspace_size > workspace_size) workspace_size = b_workspace_size; } workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size + ccv_nnc_tensor_data_size(info), CCV_TENSOR_GPU_MEMORY); tcu.data.u8 = (uint8_t*)workspace + workspace_size; } else if (workspace_size) workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size, CCV_TENSOR_GPU_MEMORY); } else { if (workspace_size) workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size, CCV_TENSOR_GPU_MEMORY); } if (a) { if (reduce_a_dim) { if (b_broadcast_a) { if (a_broadcast_b) { // If b cannot be reduced to a (because, b broadcast to a, thus, some dimensions of b is smaller than a). // In that case, we have to broadcast b first before reduce it down. 
CUDNN_ENFORCE(cudnnOpTensor(cudnn, add, &zero, tcu.descriptor, tcu.data.u8, &one, gbcu.descriptor, gbcu.data.u8, &zero, tcu.descriptor, tcu.data.u8)); CUDNN_ENFORCE(cudnnReduceTensor(cudnn, reduce_sum, 0, 0, workspace, workspace_size, &p, tcu.descriptor, tcu.data.u8, &zero, acu.descriptor, acu.data.u8)); } else { CUDNN_ENFORCE(cudnnOpTensor(cudnn, add, &zero, acu.descriptor, acu.data.u8, &p, gbcu.descriptor, gbcu.data.u8, &zero, acu.descriptor, acu.data.u8)); } } else { CUDNN_ENFORCE(cudnnReduceTensor(cudnn, reduce_sum, 0, 0, workspace, workspace_size, &p, gbcu.descriptor, gbcu.data.u8, &zero, acu.descriptor, acu.data.u8)); } } else { CUDNN_ENFORCE(cudnnTransformTensor(cudnn, &p, gbcu.descriptor, gbcu.data.u8, &zero, acu.descriptor, acu.data.u8)); } ccv_nnc_cudnn_deinit_tensor_view_descriptor(acu); ccv_nnc_cudnn_deinit_tensor_view_descriptor(gbcu); } if (b) { if (reduce_b_dim) { if (a_broadcast_b) { if (b_broadcast_a) { // If a cannot be reduced to b (because, a broadcast to b, thus, some dimensions of a is smaller than b). // In that case, we have to broadcast a first before reduce it down. CUDNN_ENFORCE(cudnnOpTensor(cudnn, add, &zero, tcu.descriptor, tcu.data.u8, &one, gacu.descriptor, gacu.data.u8, &zero, tcu.descriptor, tcu.data.u8)); CUDNN_ENFORCE(cudnnReduceTensor(cudnn, reduce_sum, 0, 0, workspace, workspace_size, &p, tcu.descriptor, tcu.data.u8, &zero, bcu.descriptor, bcu.data.u8)); } else { CUDNN_ENFORCE(cudnnOpTensor(cudnn, add, &zero, bcu.descriptor, bcu.data.u8, &p, gacu.descriptor, gacu.data.u8, &zero, bcu.descriptor, bcu.data.u8)); } } else { CUDNN_ENFORCE(cudnnReduceTensor(cudnn, reduce_sum, 0, 0, workspace, workspace_size, &p, gacu.descriptor, gacu.data.u8, &zero, bcu.descriptor, bcu.data.u8)); } } else { CUDNN_ENFORCE(cudnnTransformTensor(cudnn, &p, gacu.descriptor, gacu.data.u8, &zero, bcu.descriptor, bcu.data.u8)); } ccv_nnc_cudnn_deinit_tensor_view_descriptor(bcu); ccv_nnc_cudnn_deinit_tensor_view_descriptor(gacu); } if (add) ccv_nnc_stream_context_return_op_tensor_descriptor(stream_context, add); if (tcu.data.u8) ccv_nnc_cudnn_deinit_tensor_view_descriptor(tcu); if (reduce_a_dim || reduce_b_dim) ccv_nnc_stream_context_return_reduce_tensor_descriptor(stream_context, reduce_sum); return CCV_NNC_EXEC_SUCCESS; } ccv_nnc_tensor_view_t* const g = (ccv_nnc_tensor_view_t*)inputs[0]; const ccv_nnc_cudnn_tensor_view_descriptor_t gcu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, g); // Compute again to reduce from g. 
if (reduce_a_dim) { size_t a_workspace_size = 0; CUDNN_ENFORCE(cudnnGetReductionWorkspaceSize(cudnn, reduce_sum, gcu.descriptor, acu.descriptor, &a_workspace_size)); if (a_workspace_size > workspace_size) workspace_size = a_workspace_size; } if (reduce_b_dim) { size_t b_workspace_size = 0; CUDNN_ENFORCE(cudnnGetReductionWorkspaceSize(cudnn, reduce_sum, gcu.descriptor, bcu.descriptor, &b_workspace_size)); if (b_workspace_size > workspace_size) workspace_size = b_workspace_size; } if ((reduce_a_dim && a_broadcast_b) || (reduce_b_dim && b_broadcast_a)) workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size + ccv_nnc_tensor_data_size(g->info), CCV_TENSOR_GPU_MEMORY); else if (workspace_size) workspace = ccv_nnc_stream_context_get_workspace(stream_context, workspace_size, CCV_TENSOR_GPU_MEMORY); cudnnOpTensorDescriptor_t mul = 0; if (a) { if (!mul) { mul = ccv_nnc_stream_context_get_op_tensor_descriptor(stream_context); cudnnSetOpTensorDescriptor(mul, CUDNN_OP_TENSOR_MUL, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN); } if (reduce_a_dim && a_broadcast_b) { const ccv_nnc_tensor_t t = ccv_nnc_tensor((uint8_t*)workspace + workspace_size, g->info, 0); const ccv_nnc_cudnn_tensor_view_descriptor_t tcu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)&t); CUDNN_ENFORCE(cudnnOpTensor(cudnn, mul, &one, gcu.descriptor, gcu.data.u8, &one, gbcu.descriptor, gbcu.data.u8, &zero, tcu.descriptor, tcu.data.u8)); CUDNN_ENFORCE(cudnnReduceTensor(cudnn, reduce_sum, 0, 0, workspace, workspace_size, &p, tcu.descriptor, tcu.data.u8, &zero, acu.descriptor, acu.data.u8)); ccv_nnc_cudnn_deinit_tensor_view_descriptor(tcu); } else { CUDNN_ENFORCE(cudnnOpTensor(cudnn, mul, &one, gcu.descriptor, gcu.data.u8, &p, gbcu.descriptor, gbcu.data.u8, &zero, acu.descriptor, acu.data.u8)); } ccv_nnc_cudnn_deinit_tensor_view_descriptor(acu); ccv_nnc_cudnn_deinit_tensor_view_descriptor(gbcu); } if (b) { if (!mul) { mul = ccv_nnc_stream_context_get_op_tensor_descriptor(stream_context); cudnnSetOpTensorDescriptor(mul, CUDNN_OP_TENSOR_MUL, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN); } if (reduce_b_dim && b_broadcast_a) { const ccv_nnc_tensor_t t = ccv_nnc_tensor((uint8_t*)workspace + workspace_size, g->info, 0); const ccv_nnc_cudnn_tensor_view_descriptor_t tcu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)&t); CUDNN_ENFORCE(cudnnOpTensor(cudnn, mul, &one, gcu.descriptor, gcu.data.u8, &one, gacu.descriptor, gacu.data.u8, &zero, tcu.descriptor, tcu.data.u8)); CUDNN_ENFORCE(cudnnReduceTensor(cudnn, reduce_sum, 0, 0, workspace, workspace_size, &p, tcu.descriptor, tcu.data.u8, &zero, bcu.descriptor, bcu.data.u8)); ccv_nnc_cudnn_deinit_tensor_view_descriptor(tcu); } else { CUDNN_ENFORCE(cudnnOpTensor(cudnn, mul, &p, gacu.descriptor, gacu.data.u8, &p, gcu.descriptor, gcu.data.u8, &zero, bcu.descriptor, bcu.data.u8)); } ccv_nnc_cudnn_deinit_tensor_view_descriptor(gacu); ccv_nnc_cudnn_deinit_tensor_view_descriptor(bcu); } if (mul) ccv_nnc_stream_context_return_op_tensor_descriptor(stream_context, mul); ccv_nnc_cudnn_deinit_tensor_view_descriptor(gcu); if (reduce_a_dim || reduce_b_dim) ccv_nnc_stream_context_return_reduce_tensor_descriptor(stream_context, reduce_sum); return CCV_NNC_EXEC_SUCCESS; } static int _ccv_nnc_scalar_mul_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, 
ccv_nnc_stream_context_t* const stream_context) { assert(input_size >= 1); cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context); const float p = cmd.info.blas.a[0]; static const float zero = 0; const ccv_nnc_cudnn_tensor_view_descriptor_t a = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)inputs[0]); const ccv_nnc_cudnn_tensor_view_descriptor_t c = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, (const ccv_nnc_tensor_view_t*)outputs[0]); CUDNN_ENFORCE(cudnnTransformTensor(cudnn, &p, a.descriptor, a.data.u8, &zero, c.descriptor, c.data.u8)); ccv_nnc_cudnn_deinit_tensor_view_descriptor(a); ccv_nnc_cudnn_deinit_tensor_view_descriptor(c); return CCV_NNC_EXEC_SUCCESS; } static int _ccv_nnc_scalar_mul_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context) { cudnnHandle_t cudnn = ccv_nnc_stream_context_get_cudnn(stream_context); const float p = cmd.info.blas.a[0]; static const float zero = 0; ccv_nnc_tensor_view_t* const a = (ccv_nnc_tensor_view_t*)outputs[0]; ccv_nnc_cudnn_tensor_view_descriptor_t acu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, a); if (inputs[0] == 0) { CUDNN_ENFORCE(cudnnSetTensor(cudnn, acu.descriptor, acu.data.u8, &p)); ccv_nnc_cudnn_deinit_tensor_view_descriptor(acu); return CCV_NNC_EXEC_SUCCESS; } ccv_nnc_tensor_view_t* const g = (ccv_nnc_tensor_view_t*)inputs[0]; const ccv_nnc_cudnn_tensor_view_descriptor_t gcu = ccv_nnc_cudnn_get_tensor_view_descriptor_for_op(stream_context, g); CUDNN_ENFORCE(cudnnTransformTensor(cudnn, &p, gcu.descriptor, gcu.data.u8, &zero, acu.descriptor, acu.data.u8)); ccv_nnc_cudnn_deinit_tensor_view_descriptor(acu); ccv_nnc_cudnn_deinit_tensor_view_descriptor(gcu); return CCV_NNC_EXEC_SUCCESS; } #endif REGISTER_COMMAND_BACKEND(CCV_NNC_MUL_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN)(ccv_nnc_cmd_backend_registry_t* const registry) { #ifdef HAVE_CUDNN registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_CHWN; registry->tensor_datatypes = CCV_32F | CCV_16F; registry->tensor_memory = CCV_TENSOR_GPU_MEMORY; registry->algorithms = 1; registry->exec = _ccv_nnc_mul_forw; #endif } REGISTER_COMMAND_BACKEND(CCV_NNC_MUL_BACKWARD, CCV_NNC_BACKEND_GPU_CUDNN)(ccv_nnc_cmd_backend_registry_t* const registry) { #ifdef HAVE_CUDNN registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_CHWN; registry->tensor_datatypes = CCV_32F | CCV_16F; registry->tensor_memory = CCV_TENSOR_GPU_MEMORY; registry->algorithms = 1; registry->exec = _ccv_nnc_mul_back; #endif } REGISTER_COMMAND_BACKEND(CCV_NNC_SCALAR_MUL_FORWARD, CCV_NNC_BACKEND_GPU_CUDNN)(ccv_nnc_cmd_backend_registry_t* const registry) { #ifdef HAVE_CUDNN registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_CHWN; registry->tensor_datatypes = CCV_32F | CCV_16F; registry->tensor_memory = CCV_TENSOR_GPU_MEMORY; registry->algorithms = 1; registry->exec = _ccv_nnc_scalar_mul_forw; #endif } REGISTER_COMMAND_BACKEND(CCV_NNC_SCALAR_MUL_BACKWARD, CCV_NNC_BACKEND_GPU_CUDNN)(ccv_nnc_cmd_backend_registry_t* const registry) { #ifdef HAVE_CUDNN registry->tensor_formats = CCV_TENSOR_FORMAT_NHWC | CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_CHWN; registry->tensor_datatypes = CCV_32F | CCV_16F; registry->tensor_memory = 
CCV_TENSOR_GPU_MEMORY; registry->algorithms = 1; registry->exec = _ccv_nnc_scalar_mul_back; #endif }
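/* (Editor's note, added) A minimal host-side sketch of the backward rule that the
 * CCV_NNC_MUL_BACKWARD cuDNN path above implements, assuming c = p * a (*) b with
 * NumPy-style broadcasting: da = p * reduce_sum_to(shape(a), g (*) b) and
 * db = p * reduce_sum_to(shape(b), g (*) a); when no incoming gradient g is given,
 * it is treated as all ones. The helper below is hypothetical (not part of ccv) and
 * only covers the common [n x m] * [m] broadcast, as a plain-C reference for the
 * cudnnOpTensor / cudnnReduceTensor combination used above. */
static void mul_backward_db_reference(const float* const g, const float* const a, const float p, const int n, const int m, float* const db)
{
	int i, j;
	for (j = 0; j < m; j++)
		db[j] = 0;
	for (i = 0; i < n; i++)
		for (j = 0; j < m; j++)
			db[j] += p * g[i * m + j] * a[i * m + j]; // Reduce the batched product g*a back to b's shape
}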
////////////////////////////////////////////////////////////////////////////////
//!  Initialize the image array, ie, set all pixels to zero
//!  Essentially, this function has the same effect as the command:
//!   "cutilSafeCall(cudaMemcpy(image_device, image, image_bytes, cudaMemcpyHostToDevice))";
//!
//!  CUDA performs some initialization work the first time a GPU kernel is called.
//!  Therefore, calling a short kernel before the real particle tracking is performed
//!  may improve the accuracy of the timing measurements in the relevant kernel.
//!
//!       @param[in,out] image   Pointer to the image array.
//!       @param[in] pixels_per_image   Number of pixels in the image (ie, elements in the array).
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__global__ void init_image_array_GPU(unsigned long long int* image, int pixels_per_image)
{
  int my_pixel = threadIdx.x + blockIdx.x*blockDim.x;
  if (my_pixel < pixels_per_image)
  {
    // -- Set the current pixel to 0 and return, avoiding overflow
    image[my_pixel] = (unsigned long long int)(0);    // Initialize non-scatter image
    my_pixel += pixels_per_image;                     // (advance to next image)
    image[my_pixel] = (unsigned long long int)(0);    // Initialize Compton image
    my_pixel += pixels_per_image;                     // (advance to next image)
    image[my_pixel] = (unsigned long long int)(0);    // Initialize Rayleigh image
    my_pixel += pixels_per_image;                     // (advance to next image)
    image[my_pixel] = (unsigned long long int)(0);    // Initialize multi-scatter image
  }
}
#endif

////////////////////////////////////////////////////////////////////////////////
//!  Main function to simulate x-ray tracks inside a voxelized geometry.
//!  Secondary electrons are not simulated (in photoelectric and Compton
//!  events the energy is locally deposited).
//!
//!  The following global variables, in the GPU __constant__ memory, are used:
//!           voxel_data_CONST,
//!           source_data_CONST,
//!           detector_data_CONST,
//!           mfp_table_data_CONST.
//!
//!       @param[in] history_batch   Particle batch number (only used in the CPU version when CUDA is disabled; the GPU uses the built-in variable threadIdx)
//!       @param[in] num_p   Projection number in the CT simulation. This variable defines a specific angle and the corresponding source and detector will be used.
//!       @param[in] histories_per_thread   Number of histories to simulate for each call to this function (ie, for each GPU thread).
//!       @param[in] seed_input   Random number generator seed (the same seed is used to initialize the two MLCGs of RANECU).
//!       @param[in] voxel_mat_dens   Pointer to the voxel densities and material vector (the voxelized geometry), stored in GPU global memory.
//!       @param[in] mfp_Woodcock_table   Two-parameter table for the linear interpolation of the Woodcock mean free path (MFP) (stored in GPU global memory).
//!       @param[in] mfp_table_a   First element for the linear interpolation of the interaction mean free paths (stored in GPU global memory).
//!       @param[in] mfp_table_b   Second element for the linear interpolation of the interaction mean free paths (stored in GPU global memory).
//!       @param[in] rayleigh_table   Pointer to the table with the data required by the Rayleigh interaction sampling, stored in GPU global memory.
//!       @param[in] compton_table   Pointer to the table with the data required by the Compton interaction sampling, stored in GPU global memory.
//!       @param[in,out] image   Pointer to the image vector in the GPU global memory.
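//!
//!  (Added note) The "image" vector is assumed to hold four consecutive images
//!  (non-scatter, Compton, Rayleigh and multi-scatter), in the same order used by
//!  init_image_array_GPU above and by the scatter_state offset applied in tally_image below.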
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__global__ void track_particles(int histories_per_thread,
                                int num_p,     // For a CT simulation: allocate space for up to MAX_NUM_PROJECTIONS projections.
                                int seed_input,
                                unsigned long long int* image,
                                float2* voxel_mat_dens,
                                float2* mfp_Woodcock_table,
                                float3* mfp_table_a,
                                float3* mfp_table_b,
                                struct rayleigh_struct* rayleigh_table,
                                struct compton_struct* compton_table)
#else
           void track_particles(int history_batch,     // This variable is not required on the GPU; the GPU uses the thread ID
                                int histories_per_thread,
                                int num_p,
                                int seed_input,
                                unsigned long long int* image,
                                float2* voxel_mat_dens,
                                float2* mfp_Woodcock_table,
                                float3* mfp_table_a,
                                float3* mfp_table_b,
                                struct rayleigh_struct* rayleigh_table,
                                struct compton_struct* compton_table)
#endif
{
  // -- Declare the track state variables:
  float3 position, direction;
  float energy, step, prob, randno, mfp_density, mfp_Woodcock;
  float3 mfp_table_read_a, mfp_table_read_b;
  int2 seed;
  int index;
  int material0,        // Current material, starting at 0 for 1st material
      material_old;     // Flag to mark a material or energy change
  signed char scatter_state;    // Flag for scatter images: scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter.

  // -- Store the Compton table in shared memory from global memory:
  //    For Compton and Rayleigh the access to memory is not coherent and the caching capability does not speed up the accesses; it actually slows down the access to other data.
#ifdef USING_CUDA
  __shared__
#endif
  struct compton_struct cgco_SHARED;
#ifdef USING_CUDA
  __shared__
#endif
  float3 detector_center_SHARED;

#ifdef USING_CUDA
  if (0==threadIdx.x)      // First GPU thread copies the variables to shared memory
  {
#endif
    // -Copy the compton data to shared memory:
    cgco_SHARED = *compton_table;
    // -Store in shared memory the center of the detector straight ahead of the focal spot, for the present projection (used by "tally_image").
    detector_center_SHARED.x = source_data_CONST.position[num_p].x + source_data_CONST.direction[num_p].x * detector_data_CONST.sdd;
    detector_center_SHARED.y = source_data_CONST.position[num_p].y + source_data_CONST.direction[num_p].y * detector_data_CONST.sdd;
    detector_center_SHARED.z = source_data_CONST.position[num_p].z + source_data_CONST.direction[num_p].z * detector_data_CONST.sdd;
#ifdef USING_CUDA
  }
#endif

  // -- Initialize the RANECU generator in a position far away from the previous history:
#ifdef USING_CUDA
  init_PRNG((threadIdx.x + blockIdx.x*blockDim.x), histories_per_thread, seed_input, &seed);   // Using a 1D block
#else
  init_PRNG(history_batch, histories_per_thread, seed_input, &seed);
#endif

  // -- Store the energy bin and Woodcock MFP for the single energy of the source
  //    !!DeBuG!! Not valid for a source with an energy spectrum (this gives about ~10% speed up for a monoenergetic x-ray beam).
float2 mfp_Woodcock_read; #ifdef USING_CUDA __shared__ #endif int index_E0; #ifdef USING_CUDA __shared__ #endif float mfp_Woodcock_E0; #ifdef USING_CUDA if (0==threadIdx.x) { #endif index_E0 = (int)((source_data_CONST.energy - mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Truncating to int because initial energy always > e0 mfp_Woodcock_read = mfp_Woodcock_table[index_E0]; // Read the 2 parameters for the linear interpolation in a single read from global memory mfp_Woodcock_E0 = mfp_Woodcock_read.x + source_data_CONST.energy * mfp_Woodcock_read.y; // Interpolated minimum MFP for monoenergetic source #ifdef USING_CUDA } __syncthreads(); // Make sure all threads will see the initialized shared variable #endif // -- Loop for the "histories_per_thread" particles in the current history_batch: for( ; histories_per_thread>0; histories_per_thread--) { // printf("\n\n********* NEW HISTORY: %d [seeds: %d, %d]\n\n",histories_per_thread, seed.x, seed.y); fflush(stdout); int absvox = 1; // -- Call the source function to get a primary x ray: source(&num_p, &position, &direction, &energy, &seed, &absvox); scatter_state = (signed char)0; // Reset previous scatter state: new non-scattered particle loaded // -- Find the current energy bin by truncation: // index = (short int)((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); index = index_E0; // !!DeBuG!! Valid only for monoenergetic beam material_old = -1; // Reset previous material (negative materials are not allowed). // -- Get the minimum mfp at the current energy using linear interpolation (Woodcock trick): // float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read ffrom global memory // float mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP mfp_Woodcock = mfp_Woodcock_E0; // !!DeBuG!! Valid only for monoenergetic beam // *** X-ray interaction loop: for(;;) { if (absvox<0) break; // -- Primary particle was not pointing to the voxel region! (but may still be detected after moving in vacuum in a straight line). step = -(mfp_Woodcock)*logf(ranecu(&seed)); // Using the minimum MFP in the geometry for the input energy (Woodcock trick) position.x += step*direction.x; position.y += step*direction.y; position.z += step*direction.z; // -- Locate the new particle in the voxel geometry: absvox = locate_voxel(&position); // Get the voxel number at the current position. if (absvox<0) break; // -- Particle escaped the voxel region! ("index" is still >0 at this moment) float2 matdens = voxel_mat_dens[absvox]; // Get the voxel material and density in a single read from global memory material0 = matdens.x - 1; // Set the current material by truncation, and set 1st material to value '0'. 
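      // (Added note) Woodcock (delta-scattering) tracking is applied below: the photon is always
      // moved with the minimum MFP of the whole geometry at this energy (mfp_Woodcock), so no
      // voxel-boundary intersections have to be computed. At each tentative interaction site a
      // "delta" (do-nothing) event is accepted with probability 1 - mfp_Woodcock/mfp(material,E);
      // this is the "prob" comparison sampled a few lines below, built from the interpolated inverse-MFP tables.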
// -- Get the data for the linear interpolation of the interaction MFPs, in case the energy or material have changed: if (material0 != material_old) { mfp_table_read_a = mfp_table_a[index*(MAX_MATERIALS)+material0]; mfp_table_read_b = mfp_table_b[index*(MAX_MATERIALS)+material0]; material_old = material0; // Store the new material } // *** Apply Woodcock trick: mfp_density = mfp_Woodcock * matdens.y; // -- Calculate probability of delta scattering, using the total mean free path for the current material and energy (linear interpolation): prob = 1.0f - mfp_density * (mfp_table_read_a.x + energy * mfp_table_read_b.x); randno = ranecu(&seed); // Sample uniform PRN if (randno<prob) // [Checking delta scattering] { // *** No real event; continue jumping: continue; } else { prob += mfp_density * (mfp_table_read_a.y + energy * mfp_table_read_b.y); // Interpolate total Compton MFP ('y' component) if (randno<prob) // [Checking Compton scattering] { // *** Compton interaction: // -- Sample new direction and energy: double costh_Compton; // Energy lost in the Compton interaction and angular deflection. GCOa(&energy, &costh_Compton, &material0, &seed, &cgco_SHARED); rotate_double(&direction, costh_Compton, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed)); // -- Find the new energy interval: #ifdef USING_CUDA index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite) #else index = (int)(floor((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide) + 0.00001f); // Adding EPSILON to make sure truncation to int (towards minus infinite) is correct(?) #endif if (index<0) break; // Energy below minimum: particle will be absorbed (or rejected) outside the loop. // -- Get the Woodcock MFP for the new energy: mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read ffrom global memory mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP material_old = -2; // Set an impossible material to force an update of the MFPs data for the nex energy interval // -- Update scatter state: if (scatter_state==(signed char)0) scatter_state = (signed char)1; // Set scatter_state == 1: Compton scattered particle else scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle } else { prob += mfp_density * (mfp_table_read_a.z + energy * mfp_table_read_b.z); // Interpolate total Rayleigh MFP ('z' component) if (randno<prob) // [Checking Rayleigh scattering] { // *** Rayleigh interaction: // -- Sample angular deflection: double costh_Rayleigh; float pmax_current = rayleigh_table->pmax[(index+1)*MAX_MATERIALS+material0]; // Get max (ie, value for next bin?) cumul prob square form factor for Rayleigh sampling GRAa(&energy, &costh_Rayleigh, &material0, &pmax_current, &seed, rayleigh_table); rotate_double(&direction, costh_Rayleigh, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed)); // -- Update scatter state: if (scatter_state==(signed char)0) scatter_state = (signed char)2; // Set scatter_state == 1: Rayleigh scattered particle else scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle } else { // *** Photoelectric interaction (or pair production): absorb particle! 
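            // (Added note) "randno" has been compared against a running cumulative probability:
            // first the delta (do-nothing) term, then + Compton, then + Rayleigh. Reaching this
            // branch means the remaining photoelectric (or pair-production) share was sampled,
            // so the photon is absorbed and its history ends here.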
            index = -11;    // A negative "index" marks that the energy has to be deposited; the particle will not arrive at the detector
            break;
          }
        }
      }
    }

    if (index>-1)
    {
       // -- Particle escaped the voxels but was not absorbed, check if it will arrive at the detector and tally its energy:
       tally_image(&num_p, &energy, &position, &direction, &scatter_state, image, &detector_center_SHARED);
    }

  }   // [Continue with a new history]

}     // [All tracks simulated for this kernel call: return to CPU]


////////////////////////////////////////////////////////////////////////////////
//!  Tally a radiographic projection image.
//!  This function is called whenever a particle escapes the voxelized volume.
//!  The code checks if the particle would arrive at the detector if it kept
//!  moving in a straight line after exiting the voxels (assuming vacuum enclosure).
//!  An ideal image formation model is implemented: each pixel counts the total energy
//!  of the x rays that enter the pixel (100% detection efficiency for any energy).
//!  The image due to primaries and different kinds of scatter is tallied separately.
//!
//!  On the GPU, an atomicAdd() function is used to make sure that multiple threads do
//!  not update the same pixel at the same time, which would result in a loss of information.
//!  Since the atomicAdd function is only available for 'unsigned long long int' data,
//!  the float pixel values are scaled by a factor "SCALE_eV" defined in the header file
//!  (eg, #define SCALE_eV 10000.0f) and stored as unsigned long long integers in main
//!  memory.
//!
//!  WARNING! If the total tallied signal (for all particles) is larger than "1.8e19/SCALE_eV",
//!  there will be a bit overflow and the value will be reset to 0, giving bogus results.
//!
//!
//!       @param[in] energy          X-ray energy
//!       @param[in] position        Particle position
//!       @param[in] direction       Particle direction (cosine vectors)
//!       @param[in] scatter_state   Flag marking primaries, single Compton, single Rayleigh or multiple scattered radiation
//!       @param[out] image          Integer array containing the image, ie, the pixel values (in tenths of meV)
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void tally_image(int* num_p, float* energy, float3* position, float3* direction, signed char* scatter_state, unsigned long long int* image, float3* detector_center_SHARED)
{
  float dist_detector, rotated_position;

  if (detector_data_CONST.rotation_flag == 1)    // --> Initial source direction is not (0,1,0): detector has to be rotated to +Y to find the pixel number
  {
    // *** Find the distance from the current particle location (likely just after the edge of the voxel bbox) to the intersection with the detector plane:
    //     (NOTE: the particle was moved outside the voxel region and not turned back, therefore if the detector is very close to the voxels the distance could be negative and the algorithm could fail! However this is unlikely with the short mfp_Woodcock !!DeBuG!!).
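    // (Added note) The expression below is a standard ray-plane intersection: with n = source
    // direction (the detector plane normal), c = detector center and d = particle direction,
    // the distance from the current position r to the detector plane is
    //    dist_detector = n.(c - r) / n.d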
dist_detector = ( source_data_CONST.direction[*num_p].x * (detector_center_SHARED->x - position->x) + source_data_CONST.direction[*num_p].y * (detector_center_SHARED->y - position->y) + source_data_CONST.direction[*num_p].z * (detector_center_SHARED->z - position->z) ) / ( source_data_CONST.direction[*num_p].x * direction->x + source_data_CONST.direction[*num_p].y * direction->y + source_data_CONST.direction[*num_p].z * direction->z ); // *** Eliminate particles not moving towards the detector (dist<0) or that intersect the detector plane // too far away (max dist arbitrarily set to the source-to-detector distance): // !!DeBuG!! NOTE: This may give problems for big detectors very close to the source if (dist_detector<0.0f || dist_detector>(3.0f*detector_data_CONST.sdd)) return; // *** Translate the particle to the detector plane (we assume the detector is completely absorbent: 100% detection efficiency): position->x = position->x + dist_detector * direction->x; position->y = position->y + dist_detector * direction->y; position->z = position->z + dist_detector * direction->z; // *** Rotate the particle position vector to a reference system where the detector is perpendicular to the +Y axis to find out if the particle is inside a pixel: #ifdef USING_CUDA rotated_position = detector_data_CONST.rot_inv[*num_p][0]*position->x + detector_data_CONST.rot_inv[*num_p][1]*position->y + detector_data_CONST.rot_inv[*num_p][2]*position->z; // X coordinate int pixel_coord_x = __float2int_rd((rotated_position - detector_data_CONST.corner_min_rotated_to_Y[*num_p].x) * detector_data_CONST.inv_pixel_size_X); // Using CUDA intrinsic function to convert float to integer rounding down (towards minus infinite) if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_CONST.num_pixels.x)) { rotated_position = detector_data_CONST.rot_inv[*num_p][6]*position->x + detector_data_CONST.rot_inv[*num_p][7]*position->y + detector_data_CONST.rot_inv[*num_p][8]*position->z; // Z coordinate int pixel_coord_z = __float2int_rd((rotated_position - detector_data_CONST.corner_min_rotated_to_Y[*num_p].z) * detector_data_CONST.inv_pixel_size_Z); if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_CONST.num_pixels.y)) { // -- Particle enters the detector! Tally the particle energy in the corresponding pixel (in tenths of meV): // Using a CUDA atomic function (not available for global floats yet) to read and increase the pixel value in a single instruction, blocking interferences from other threads. // The offset for the primaries or scatter images are calculated considering that: // scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter. atomicAdd(( image + // Pointer to beginning of image array (int)(*scatter_state) * detector_data_CONST.total_num_pixels + // Offset to corresponding scatter image (pixel_coord_x + pixel_coord_z*(detector_data_CONST.num_pixels.x)) ), // Offset to the corresponding pixel __float2ull_rn((*energy)*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded. 
// The maximum unsigned long long int value is ~1.8e19: } } #else // CPU version (not using CUDA intrinsics: atomicAdd, fast type casting) rotated_position = detector_data_CONST.rot_inv[*num_p][0]*position->x + detector_data_CONST.rot_inv[*num_p][1]*position->y + detector_data_CONST.rot_inv[*num_p][2]*position->z; // X coordinate int pixel_coord_x = (int)(floor((rotated_position - detector_data_CONST.corner_min_rotated_to_Y[*num_p].x)*detector_data_CONST.inv_pixel_size_X) + 0.00001f); // Adding EPS to have correct truncation to int if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_CONST.num_pixels.x)) { rotated_position = detector_data_CONST.rot_inv[*num_p][6]*position->x + detector_data_CONST.rot_inv[*num_p][7]*position->y + detector_data_CONST.rot_inv[*num_p][8]*position->z; // Z coordinate int pixel_coord_z = (int)(floor((rotated_position - detector_data_CONST.corner_min_rotated_to_Y[*num_p].z)*detector_data_CONST.inv_pixel_size_Z) + 0.00001f); // Adding EPS to have correct truncation to int if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_CONST.num_pixels.y)) image[((int)(*scatter_state))*detector_data_CONST.total_num_pixels + pixel_coord_x + pixel_coord_z*(detector_data_CONST.num_pixels.x)] += (unsigned long long int)((*energy)*SCALE_eV + 0.5f); // Tally the particle energy in the pixel. This instruction is not thread-safe, but it is ok in sequential code. } #endif } else // (detector_data_CONST.rotation_flag != 1) --> Initial source direction is (0,1,0): pixel number and distance can be found easily { // !!DeBuG!! Check that the codes for +Y and rotated detector are equivalent and that the +Y case is faster than the general case (otherwise simplify the code). if (direction->y < 0.0001f) return; // *** Reject particles not moving towards the detector plane at +Y. dist_detector = (detector_center_SHARED->y - position->y)/(direction->y); // Distance to the intersection with the detector at +Y. if (dist_detector>(2.0f*detector_data_CONST.sdd)) return; // *** Eliminate particles that intersect the detector plane too far away #ifdef USING_CUDA int pixel_coord_x = __float2int_rd((position->x + dist_detector*direction->x - detector_data_CONST.corner_min_rotated_to_Y[*num_p].x)*detector_data_CONST.inv_pixel_size_X); if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_CONST.num_pixels.x)) { int pixel_coord_z = __float2int_rd((position->z + dist_detector*direction->z - detector_data_CONST.corner_min_rotated_to_Y[*num_p].z)*detector_data_CONST.inv_pixel_size_Z); if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_CONST.num_pixels.y)) atomicAdd( ( image + // Pointer to beginning of image array (int)(*scatter_state) * detector_data_CONST.total_num_pixels + // Offset to corresponding scatter image (pixel_coord_x + pixel_coord_z*(detector_data_CONST.num_pixels.x)) ), // Offset to the corresponding pixel __float2ull_rn((*energy)*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded. 
} #else // --Calculate the pixel the xray enters, truncating towards minus infinite and making sure the conversion to int is safe: float pixel_coord_x = floor((position->x + dist_detector*direction->x - detector_data_CONST.corner_min_rotated_to_Y[*num_p].x)*detector_data_CONST.inv_pixel_size_X); if ( (pixel_coord_x>-0.5f) && (pixel_coord_x<(detector_data_CONST.num_pixels.x-0.5f)) ) { float pixel_coord_z = floor((position->z + dist_detector*direction->z - detector_data_CONST.corner_min_rotated_to_Y[*num_p].z)*detector_data_CONST.inv_pixel_size_Z); if ( (pixel_coord_z>-0.5f) && (pixel_coord_z<(detector_data_CONST.num_pixels.y-0.5f)) ) image[((int)(*scatter_state))*detector_data_CONST.total_num_pixels + (int)(pixel_coord_x+0.001f)+ (int)(pixel_coord_z+0.001f)*detector_data_CONST.num_pixels.x] += (unsigned long long int)((*energy)*SCALE_eV + 0.5f); } #endif } } //////////////////////////////////////////////////////////////////////////////// //! Source that creates primary x rays, according to the defined source model. //! The particles are automatically moved to the surface of the voxel bounding box, //! to start the tracking inside a real material. If the sampled particle do not //! enter the voxels, it is init in the focal spot and the main program will check //! if it arrives at the detector or not. //! //! @param[in] source_data Structure describing the source. //! @param[out] position Initial particle position (particle transported inside the voxel bbox). //! @param[out] direction Sampled particle direction (cosine vectors). //! @param[out] energy Sampled energy of the new x ray. //! @param[in] seed Current seed of the random number generator, requiered to sample the movement direction. //! @param[out] absvox Set to <0 if primary particle will not cross the voxels, not changed otherwise (>0). //////////////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline void source(int* num_p, float3* position, float3* direction, float* energy, int2* seed, int* absvox) { // *** Assign initial energy (monoenergetic beam): *energy = source_data_CONST.energy; // *** Sample the initial x ray direction with a fan beam ("pyramidal" source): /* !!DeBuG!! OLD WRONG FAN BEAM: // --Sample uniform points on a plane at Y=1cm, centered at (0,+1,0), with the aperture given by the input fan beam: direction->x = source_data_CONST.tan_phi_semiaperture * (2.0f*ranecu(seed) - 1.0f); direction->y = 1.0f; direction->z = source_data_CONST.tan_theta_semiaperture * (2.0f*ranecu(seed) - 1.0f); */ do // Iterate sampling if the sampled direction is not acceptable to get a square field at the given phi (rejection sampling) //!!DeBuG!! Force square field for any phi!! { // Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat). direction->z = source_data_CONST.cos_theta_low + ranecu(seed)*source_data_CONST.D_cos_theta; // direction->z = w = cos(theta_sampled) register float phi_sampled = source_data_CONST.phi_low + ranecu(seed)*source_data_CONST.D_phi; register float sin_theta_sampled = sqrtf(1.0f - direction->z*direction->z); float sinphi_sampled, cosphi_sampled; #ifdef USING_CUDA sincos(phi_sampled, &sinphi_sampled,&cosphi_sampled); // Calculate the SIN and COS at the same time. #else sinphi_sampled = sin(phi_sampled); // Some CPU compilers will be able to use "sincos", but let's be safe. (!!DeBuG!!) 
cosphi_sampled = cos(phi_sampled); #endif direction->y = sin_theta_sampled * sinphi_sampled; direction->x = sin_theta_sampled * cosphi_sampled; } while( fabsf(direction->z/direction->y) > source_data_CONST.max_height_at_y1cm ); // !!DeBuG!! Force square field for any phi!! if (detector_data_CONST.rotation_flag == 1) { // --Initial beam not pointing to (0,1,0), apply rotation: register float direction_x_tmp = direction->x; register float direction_y_tmp = direction->y; direction->x = source_data_CONST.rot_fan[*num_p][0]*direction_x_tmp + source_data_CONST.rot_fan[*num_p][1]*direction_y_tmp + source_data_CONST.rot_fan[*num_p][2]*direction->z; direction->y = source_data_CONST.rot_fan[*num_p][3]*direction_x_tmp + source_data_CONST.rot_fan[*num_p][4]*direction_y_tmp + source_data_CONST.rot_fan[*num_p][5]*direction->z; direction->z = source_data_CONST.rot_fan[*num_p][6]*direction_x_tmp + source_data_CONST.rot_fan[*num_p][7]*direction_y_tmp + source_data_CONST.rot_fan[*num_p][8]*direction->z; } /* !!DeBuG!! OLD WRONG FAN BEAM: // --Normalize the sampled vector (since v==1, it will never be normalized by default): #ifdef USING_CUDA register float norm = rsqrtf(direction->x*direction->x + direction->y*direction->y + direction->z*direction->z); // Using the GPU reciprocal sqrt function #else register float norm = 1.0f/sqrtf(direction->x*direction->x + direction->y*direction->y + direction->z*direction->z); #endif direction->x = direction->x * norm; direction->y = direction->y * norm; direction->z = direction->z * norm; */ // // *** Sample initial direction with a cone beam: // !!DeBuG!! Cone beam source: // direction->x = source_data_CONST.direction[*num_p].x; // direction->y = source_data_CONST.direction[*num_p].y; // direction->z = source_data_CONST.direction[*num_p].z; // // -- Rotate the current direction according to the sampled angles (using double precision!) [2*pi=6.283185307...] // // !!DeBuG!! the variable "tan_phi_semiaperture" is assumed to contain "cos_aperture"!! "tan_theta_semiaperture" is never used here. // rotate_double( direction, // /*cos_theta=*/ source_data_CONST.tan_phi_semiaperture + (1.0f - source_data_CONST.tan_phi_semiaperture)*ranecu_double(seed), // /*phi=*/ 6.28318530717958647693*ranecu_double(seed) ); // *** Move the particle inside the voxel bounding box: if (detector_data_CONST.rotation_flag != 1) // Initial directions is (0,+1,0): move the particle to the plane Y=0 and do not check any other plane for intersection { float dist_y = (EPS_SOURCE - source_data_CONST.position[*num_p].y) / direction->y; if (dist_y > 0.0) { position->y = EPS_SOURCE; // Particle will be slightly inside the voxel Y=0 border position->x = source_data_CONST.position[*num_p].x + dist_y*(direction->x); // Particle may be inside or outside the voxels (source may be wider than the bbox). position->z = source_data_CONST.position[*num_p].z + dist_y*(direction->z); } else { // Source located after the phantom?? Return focal spot. position->x = source_data_CONST.position[*num_p].x; position->y = source_data_CONST.position[*num_p].y; position->z = source_data_CONST.position[*num_p].z; } if ( (position->x < 0.0f) || (position->x > voxel_data_CONST.size_bbox.x) || (position->y < 0.0f) || (position->y > voxel_data_CONST.size_bbox.y) || (position->z < 0.0f) || (position->z > voxel_data_CONST.size_bbox.z) ) (*absvox) = -111; // Particle not pointing to the voxel bbox: return absvox<0 to skip interaction sampling. 
} else // Source not pointing to (0,+1,0): check the 6 box planes for intersection { // -- Find the distance to the planes of the bbox: front or back planes depending on the sign of the movement direction vector. // An EPSILON distance is added to make sure the particles will be clearly inside the bbox, not on the surface. // Currently the source can not be located inside the bbox (particles will be moved to the edge anyway)! // The back lower vertex of the voxel bounding box is always located at the origin: (x0,y0,z0)=(0,0,0). // A negative distance means no intersection with that plane. float dist_y, dist_x, dist_z; // -Distance to the nearest Y plane: if ((direction->y) > EPS_SOURCE) // Moving to +Y: check distance to y=0 plane { // Check Y=0: if (source_data_CONST.position[*num_p].y > 0.0f) dist_y = INF; // No intersection with this plane possible: dist_y would be negative else dist_y = EPS_SOURCE + (-source_data_CONST.position[*num_p].y)/(direction->y); // dist_y > 0 for sure in this case } else if ((direction->y) < NEG_EPS_SOURCE) { // Check Y=voxel_data_CONST.size_bbox.y: if (source_data_CONST.position[*num_p].y < voxel_data_CONST.size_bbox.y) dist_y = INF; // No intersection with this plane possible: dist_y would be negative else dist_y = EPS_SOURCE + (voxel_data_CONST.size_bbox.y - source_data_CONST.position[*num_p].y)/(direction->y); // dist_y > 0 for sure in this case } else // (direction->y)~0 dist_y = INF; // Particle moving parallel to the plane: considering no interaction possible (dist = INFINITE) // -Distance to the nearest X plane: if ((direction->x) > EPS_SOURCE) { // Check X=0: if (source_data_CONST.position[*num_p].x > 0.0f) dist_x = INF; // No intersection with this plane possible: dist_x would be negative else dist_x = EPS_SOURCE + (-source_data_CONST.position[*num_p].x)/(direction->x); // dist_x > 0 for sure in this case } else if ((direction->x) < NEG_EPS_SOURCE) { // Check X=voxel_data_CONST.size_bbox.x: if (source_data_CONST.position[*num_p].x < voxel_data_CONST.size_bbox.x) dist_x = INF; // No intersection with this plane possible: dist_x would be negative else dist_x = EPS_SOURCE + (voxel_data_CONST.size_bbox.x - source_data_CONST.position[*num_p].x)/(direction->x); // dist_x > 0 for sure in this case } else dist_x = INF; // -Distance to the nearest Z plane: if ((direction->z) > EPS_SOURCE) { // Check Z=0: if (source_data_CONST.position[*num_p].z > 0.0f) dist_z = INF; // No intersection with this plane possible: dist_z would be negative else dist_z = EPS_SOURCE + (-source_data_CONST.position[*num_p].z)/(direction->z); // dist_z > 0 for sure in this case } else if ((direction->z) < NEG_EPS_SOURCE) { // Check Z=voxel_data_CONST.size_bbox.z: if (source_data_CONST.position[*num_p].z < voxel_data_CONST.size_bbox.z) dist_z = INF; // No intersection with this plane possible: dist_z would be negative else dist_z = EPS_SOURCE + (voxel_data_CONST.size_bbox.z - source_data_CONST.position[*num_p].z)/(direction->z); // dist_z > 0 for sure in this case } else dist_z = INF; // -Find the particle position inside the bbox, checking first the shortest distance planes. // If the particle doesn't intersect the bbox, the focal point position is returned and the main program will take care of the particle outside the voxels. for (;;) // (This loop will be iterated 4 times at most, checking the 3 walls in the order of smallest distance or no intersection.) 
    {
      if ((dist_y < INF_minus1) && (dist_y < dist_x) && (dist_y < dist_z))   // Y is the shortest distance
      {
        if (set_position(num_p, &dist_y, position, direction)==1)   // 1 == true
          break;      // Position found inside the voxel bbox, accept it.
        else
          dist_y = INF;
      }
      else if ((dist_x < INF_minus1) && (dist_x < dist_z))   // X is the shortest distance
      {
        if (set_position(num_p, &dist_x, position, direction)==1)
          break;
        else
          dist_x = INF;
      }
      else if (dist_z < INF_minus1)   // Z is the shortest distance
      {
        if (set_position(num_p, &dist_z, position, direction)==1)
          break;
        else
          dist_z = INF;
      }
      else
      {
        // Particle not pointing to the voxel bbox: return focal spot position and absvox<0.
        (*absvox) = -111;
        position->x = source_data_CONST.position[*num_p].x;
        position->y = source_data_CONST.position[*num_p].y;
        position->z = source_data_CONST.position[*num_p].z;
        break;
      }
    }
  }
}


////////////////////////////////////////////////////////////////////////////////
//!  Evaluate if the input distance will move the particle inside the voxels or
//!  if another distance has to be used.
//!
//!       @param[in] dist
//!       @param[in,out] position
//!       @return  1 (true) or 0 (false) integer value telling if the distance is acceptable or not.
//!
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline int set_position(int* num_p, float* dist, float3* position, float3* direction)
{
  int acceptable = 1;
  position->x = source_data_CONST.position[*num_p].x + (*dist) * direction->x;
  if ((position->x < 0.0f) || (position->x > voxel_data_CONST.size_bbox.x))
    acceptable = 0;     // Position outside the voxels, not acceptable
  else
  {
    position->y = source_data_CONST.position[*num_p].y + (*dist) * direction->y;
    if ((position->y < 0.0f) || (position->y > voxel_data_CONST.size_bbox.y))
      acceptable = 0;
    else
    {
      position->z = source_data_CONST.position[*num_p].z + (*dist) * direction->z;
      if ((position->z < 0.0f) || (position->z > voxel_data_CONST.size_bbox.z))
        acceptable = 0;
    }
  }
  return acceptable;
}


//! Upper limit of the number of random values sampled in a single track.
#define  LEAP_DISTANCE   256
//! Multipliers and moduli for the two MLCGs in RANECU.
#define  a1_RANECU       40014
#define  m1_RANECU  2147483563
#define  a2_RANECU       40692
#define  m2_RANECU  2147483399
////////////////////////////////////////////////////////////////////////////////
//! Initialize the pseudo-random number generator (PRNG) RANECU to a position
//! far away from the previous history (leap frog technique).
//!
//! Each calculated seed initiates a consecutive and disjoint sequence of
//! pseudo-random numbers with length LEAP_DISTANCE, that can be used
//! in a parallel simulation (Sequence Splitting parallelization method).
//! The basic equation behind the algorithm is:
//!    S(i+j) = (a**j * S(i)) MOD m = [(a**j MOD m)*S(i)] MOD m ,
//! which is described in:
//!    P L'Ecuyer, Commun. ACM 31 (1988) p.742
//!
//! This function has been adapted from "seedsMLCG.f", see:
//!    A Badal and J Sempau, Computer Physics Communications 175 (2006) p. 440-450
//!
//!       @param[in] history      Particle batch number.
//!       @param[in] seed_input   Initial PRNG seed input (used to initiate both MLCGs in RANECU).
//!       @param[out] seed        Initial PRNG seeds for the present history.
//!
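//!  (Added note) For batch/thread number i the generator is advanced by
//!  (i+1)*histories_per_thread*LEAP_DISTANCE draws before its first use, so the subsequences
//!  assigned to different threads stay disjoint as long as every history respects the
//!  LEAP_DISTANCE upper limit on the number of random values sampled per track.
//!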
//////////////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline void init_PRNG(int history_batch, int histories_per_thread, int seed_input, int2* seed) { // -- Move the RANECU generator to a unique position for the current batch of histories: // I have to use an "unsigned long long int" value to represent all the simulated histories in all previous batches // The maximum unsigned long long int value is ~1.8e19: if history >1.8e16 and LEAP_DISTANCE==1000, 'leap' will overflow. // **** 1st MLCG: unsigned long long int leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE); int y = 1; int z = a1_RANECU; // -- Calculate the modulo power '(a^leap)MOD(m)' using a divide-and-conquer algorithm adapted to modulo arithmetic for(;;) { // (A2) Halve n, and store the integer part and the residue if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2); { leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2); y = abMODm(m1_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m if (0==leap) break; // (A4) leap==0? ==> finish } else // (leap is even) { leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2); } z = abMODm(m1_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m } // AjMODm1 = y; // Exponentiation finished: AjMODm = expMOD = y = a^j // -- Compute and display the seeds S(i+j), from the present seed S(i), using the previously calculated value of (a^j)MOD(m): // S(i+j) = [(a**j MOD m)*S(i)] MOD m // S_i = abMODm(m,S_i,AjMODm) seed->x = abMODm(m1_RANECU, seed_input, y); // Using the input seed as the starting seed // **** 2nd MLCG (repeating the previous calculation for the 2nd MLCG parameters): leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE); y = 1; z = a2_RANECU; for(;;) { // (A2) Halve n, and store the integer part and the residue if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2); { leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2); y = abMODm(m2_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m if (0==leap) break; // (A4) leap==0? ==> finish } else // (leap is even) { leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2); } z = abMODm(m2_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m } // AjMODm2 = y; seed->y = abMODm(m2_RANECU, seed_input, y); // Using the input seed as the starting seed } ///////////////////////////////////////////////////////////////////// //! Calculate "(a1*a2) MOD m" with 32-bit integers and avoiding //! the possible overflow, using the Russian Peasant approach //! modulo m and the approximate factoring method, as described //! in: L'Ecuyer and Cote, ACM Trans. Math. Soft. 17 (1991). //! //! This function has been adapted from "seedsMLCG.f", see: //! Badal and Sempau, Computer Physics Communications 175 (2006) //! //! @param[in] m,a,s MLCG parameters //! @return (a1*a2) MOD m // // Input: 0 < a1 < m // 0 < a2 < m // // Return value: (a1*a2) MOD m // ///////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline int abMODm(int m, int a, int s) { // CAUTION: the input parameters are modified in the function but should not be returned to the calling function! (pass by value!) 
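  // (Added note) Both stages below keep every intermediate value inside a signed 32-bit int:
  // the Russian-peasant loop halves 'a' while doubling 's' modulo m, and the approximate
  // factoring step (L'Ecuyer & Cote 1991) completes the product once a <= 32768.
  // Worked check (hypothetical call): abMODm(2147483563, 40014, 12345) must return
  // (40014*12345) % 2147483563 = 493972830, a product small enough to verify directly in 64-bit arithmetic.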
int q, k; int p = -m; // p is always negative to avoid overflow when adding // ** Apply the Russian peasant method until "a =< 32768": while (a>32768) // We assume '32' bit integers (4 bytes): 2^(('32'-2)/2) = 32768 { if (0!=(a&1)) // Store 's' when 'a' is odd Equivalent code: if (1==(a%2)) { p += s; if (p>0) p -= m; } a >>= 1; // Half a (move bits 1 position right) Equivalent code: a = a/2; s = (s-m) + s; // Double s (MOD m) if (s<0) s += m; // (s is always positive) } // ** Employ the approximate factoring method (a is small enough to avoid overflow): q = (int) m / a; k = (int) s / q; s = a*(s-k*q)-k*(m-q*a); while (s<0) s += m; // ** Compute the final result: p += s; if (p<0) p += m; return p; } //////////////////////////////////////////////////////////////////////////////// //! Pseudo-random number generator (PRNG) RANECU returning a float value //! (single precision version). //! //! @param[in,out] seed PRNG seed (seed kept in the calling function and updated here). //! @return PRN double value in the open interval (0,1) //! //////////////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline float ranecu(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; #ifdef USING_CUDA return (__int2float_rn(i2)*4.65661305739e-10f); // 4.65661305739e-10 == 1/2147483563 #else return ((float)(i2)*4.65661305739e-10f); #endif } //////////////////////////////////////////////////////////////////////////////// //! Pseudo-random number generator (PRNG) RANECU returning a double value. //////////////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline double ranecu_double(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; #ifdef USING_CUDA return (__int2double_rn(i2)*4.6566130573917692e-10); #else return ((double)(i2)*4.6566130573917692e-10); #endif } //////////////////////////////////////////////////////////////////////////////// //! Find the voxel that contains the current position. //! //! @param[in] position Particle position //! @param[in] voxel_data Pointer to a structure containing the voxel number and size. //! @return Returns "absvox", the voxel number where the particle is //! located (negative if position outside the voxel bbox). //! 
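//!  (Added note) The absolute voxel index returned below is
//!  x + y*num_voxels.x + z*num_voxels.x*num_voxels.y, ie, the X coordinate changes fastest.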
//////////////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline int locate_voxel(float3* position) { if ( (position->y < EPS_SOURCE) || (position->y > (voxel_data_CONST.size_bbox.y - EPS_SOURCE)) || (position->x < EPS_SOURCE) || (position->x > (voxel_data_CONST.size_bbox.x - EPS_SOURCE)) || (position->z < EPS_SOURCE) || (position->z > (voxel_data_CONST.size_bbox.z - EPS_SOURCE)) ) { // -- Particle escaped the voxelized geometry (using EPS_SOURCE to avoid numerical precision errors): return -1; } // -- Particle inside the voxelized geometry, find current voxel: #ifdef USING_CUDA register int voxel_coord_x = __float2int_rd(position->x * voxel_data_CONST.inv_voxel_size.x); // Using CUDA function to convert float to integer rounding down (towards minus infinite) register int voxel_coord_y = __float2int_rd(position->y * voxel_data_CONST.inv_voxel_size.y); register int voxel_coord_z = __float2int_rd(position->z * voxel_data_CONST.inv_voxel_size.z); #else register int voxel_coord_x = (int)(position->x * voxel_data_CONST.inv_voxel_size.x); // Truncate float to integer register int voxel_coord_y = (int)(position->y * voxel_data_CONST.inv_voxel_size.y); register int voxel_coord_z = (int)(position->z * voxel_data_CONST.inv_voxel_size.z); #endif return (voxel_coord_x + voxel_coord_y*(voxel_data_CONST.num_voxels.x) + voxel_coord_z*(voxel_data_CONST.num_voxels.x)*(voxel_data_CONST.num_voxels.y)); } ////////////////////////////////////////////////////////////////////// //! Rotates a vector; the rotation is specified by giving //! the polar and azimuthal angles in the "self-frame", as //! determined by the vector to be rotated. //! This function is a literal translation from Fortran to C of //! PENELOPE (v. 2006) subroutine "DIRECT". //! //! @param[in,out] (u,v,w) input vector (=d) in the lab. frame; returns the rotated vector components in the lab. frame //! @param[in] costh cos(theta), angle between d before and after turn //! @param[in] phi azimuthal angle (rad) turned by d in its self-frame // // Output: // (u,v,w) -> rotated vector components in the lab. frame // // Comments: // -> (u,v,w) should have norm=1 on input; if not, it is // renormalized on output, provided norm>0. // -> The algorithm is based on considering the turned vector // d' expressed in the self-frame S', // d' = (sin(th)cos(ph), sin(th)sin(ph), cos(th)) // and then apply a change of frame from S' to the lab // frame. S' is defined as having its z' axis coincident // with d, its y' axis perpendicular to z and z' and its // x' axis equal to y'*z'. The matrix of the change is then // / uv/rho -v/rho u \ // S ->lab: | vw/rho u/rho v | , rho=(u^2+v^2)^0.5 // \ -rho 0 w / // -> When rho=0 (w=1 or -1) z and z' are parallel and the y' // axis cannot be defined in this way. Instead y' is set to // y and therefore either x'=x (if w=1) or x'=-x (w=-1) ////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline void rotate_double(float3* direction, double costh, double phi) // !!DeBuG!! The direction vector is single precision but the rotation is performed in doule precision for increased accuracy. { double DXY, NORM, cosphi, sinphi, SDT; DXY = direction->x*direction->x + direction->y*direction->y; #ifdef USING_CUDA sincos(phi, &sinphi,&cosphi); // Calculate the SIN and COS at the same time. #else sinphi = sin(phi); // Some CPU compilers will be able to use "sincos", but let's be safe. (!!DeBuG!!) 
cosphi = cos(phi); #endif // **** Ensure normalisation NORM = DXY + direction->z*direction->z; // !!DeBuG!! Check if it is really necessary to renormalize in a real simulation!! if (fabs(NORM-1.0)>1.0e-14) { NORM = 1.0/sqrt(NORM); direction->x = NORM*direction->x; direction->y = NORM*direction->y; direction->z = NORM*direction->z; DXY = direction->x*direction->x + direction->y*direction->y; } if (DXY>1.0e-28) { SDT = sqrt((1.0-costh*costh)/DXY); float direction_x_in = direction->x; direction->x = direction->x*costh + SDT*(direction_x_in*direction->z*cosphi-direction->y*sinphi); direction->y = direction->y*costh+SDT*(direction->y*direction->z*cosphi+direction_x_in*sinphi); direction->z = direction->z*costh-DXY*SDT*cosphi; } else { SDT = sqrt(1.0-costh*costh); direction->y = SDT*sinphi; if (direction->z>0.0) { direction->x = SDT*cosphi; direction->z = costh; } else { direction->x =-SDT*cosphi; direction->z =-costh; } } } ////////////////////////////////////////////////////////////////////// // *********************************************************************** // * Translation of PENELOPE's "SUBROUTINE GRAa" from FORTRAN77 to C * // *********************************************************************** //! Sample a Rayleigh interaction using the sampling algorithm //! used in PENELOPE 2006. //! //! @param[in] energy Particle energy (not modified with Rayleigh) //! @param[out] costh_Rayleigh Cosine of the angular deflection //! @param[in] material Current voxel material // // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // C PENELOPE/PENGEOM (version 2006) C // C Copyright (c) 2001-2006 C // C Universitat de Barcelona C // C Permission to use, copy, modify, distribute and sell this software C // C and its documentation for any purpose is hereby granted without C // C fee, provided that the above copyright notice appears in all C // C copies and that both that copyright notice and this permission C // C notice appear in all supporting documentation. The Universitat de C // C Barcelona makes no representations about the suitability of this C // C software for any purpose. It is provided "as is" without express C // C or implied warranty. C // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC ////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA __device__ #endif inline void GRAa(float *energy, double *costh_Rayleigh, int *mat, float *pmax_current, int2 *seed, struct rayleigh_struct* cgra) { /* **** Energy grid and interpolation constants for the current energy. */ double xmax = ((double)*energy) * 8.065535669099010e-5; // 8.065535669099010e-5 == 2.0*20.6074/510998.918 double x2max = min_value( (xmax*xmax) , ((double)cgra->xco[(*mat+1)*NP_RAYLEIGH - 1]) ); // Get the last tabulated value of xco for this mat if (xmax < 0.01) { do { *costh_Rayleigh = 1.0 - ranecu_double(seed) * 2.0; } while ( ranecu_double(seed) > (((*costh_Rayleigh)*(*costh_Rayleigh)+1.0)*0.5) ); return; } for(;;) // (Loop will iterate everytime the sampled value is rejected or above maximum) { double ru = ranecu_double(seed) * (double)(*pmax_current); // Pmax for the current energy is entered as a parameter /* **** Selection of the interval (binary search within pre-calculated limits). 
*/ int itn = (int)(ru * (NP_RAYLEIGH-1)); // 'itn' will never reach the last interval 'NP_RAYLEIGH-1', but this is how RITA is implemented in PENELOPE int i__ = (int)cgra->itlco[itn + (*mat)*NP_RAYLEIGH]; int j = (int)cgra->ituco[itn + (*mat)*NP_RAYLEIGH]; if ((j - i__) > 1) { do { register int k = (i__ + j)>>1; // >>1 == /2 if (ru > cgra->pco[k -1 + (*mat)*NP_RAYLEIGH]) i__ = k; else j = k; } while ((j - i__) > 1); } /* **** Sampling from the rational inverse cumulative distribution. */ int index = i__ - 1 + (*mat)*NP_RAYLEIGH; double rr = ru - cgra->pco[index]; double xx; if (rr > 1e-16) { double d__ = (double)(cgra->pco[index+1] - cgra->pco[index]); float aco_index = cgra->aco[index], bco_index = cgra->bco[index], xco_index = cgra->xco[index]; // Avoid multiple accesses to the same global variable xx = (double)xco_index + (double)(aco_index + 1.0f + bco_index)* d__* rr / (d__*d__ + (aco_index*d__ + bco_index*rr) * rr) * (double)(cgra->xco[index+1] - xco_index); } else { xx = cgra->xco[index]; } if (xx < x2max) { // Sampled value below maximum possible value: *costh_Rayleigh = 1.0 - 2.0 * xx / x2max; // !!DeBuG!! costh_Rayleigh in double precision, but not all intermediate steps are!? /* **** Rejection: */ if (ranecu_double(seed) < (((*costh_Rayleigh)*(*costh_Rayleigh) + 1.0)*0.5)) break; // Sample value not rejected! break loop and return. } } } /* graa */ ////////////////////////////////////////////////////////////////////////// // *********************************************************************** // * Translation of PENELOPE's "SUBROUTINE GCOa" from FORTRAN77 to C * // ********************************************************************* * //! Random sampling of incoherent (Compton) scattering of photons, using //! the sampling algorithm from PENELOPE 2006: //! Relativistic impulse approximation with analytical one-electron Compton profiles // !!DeBuG!! In penelope, Doppler broadening is not used for E greater than 5 MeV. // We don't use it in GPU to reduce the lines of code and prevent using COMMON/compos/ZT(M) //! @param[in,out] energy incident and final photon energy (eV) //! @param[out] costh_Compton cosine of the polar scattering angle //! @param[in] material Current voxel material //! @param[in] seed RANECU PRNG seed // // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // C PENELOPE/PENGEOM (version 2006) C // C Copyright (c) 2001-2006 C // C Universitat de Barcelona C // C Permission to use, copy, modify, distribute and sell this software C // C and its documentation for any purpose is hereby granted without C // C fee, provided that the above copyright notice appears in all C // C copies and that both that copyright notice and this permission C // C notice appear in all supporting documentation. The Universitat de C // C Barcelona makes no representations about the suitability of this C // C software for any purpose. It is provided "as is" without express C // C or implied warranty. 
C // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // // ************************************************************************ #ifdef USING_CUDA __device__ #endif inline void GCOa(float *energy, double *costh_Compton, int *mat, int2 *seed, struct compton_struct* cgco_SHARED) { float s, a1, s0, af, ek, ek2, ek3, tau, pzomc, taumin; float rn[MAX_SHELLS]; double cdt1; // Some variables used in PENELOPE have been eliminated to save register: float aux, taum2, fpzmax, a, a2, ek1 ,rni, xqc, fpz, pac[MAX_SHELLS]; int i__; int my_noscco = cgco_SHARED->noscco[*mat]; // Store the number of oscillators for the input material in a local variable #ifndef USING_CUDA static int warning_flag_1 = -1, warning_flag_2 = -1, warning_flag_3 = -1; // Write warnings for the CPU code, but only once. !!DeBuG!! #endif ek = *energy * 1.956951306108245e-6f; // (1.956951306108245e-6 == 1.0/510998.918) ek2 = ek * 2.f + 1.f; ek3 = ek * ek; // ek1 = ek3 - ek2 - 1.; taumin = 1.f / ek2; // taum2 = taumin * taumin; a1 = logf(ek2); // a2 = a1 + ek * 2. * (ek + 1.) * taum2; // a2 was used only once, code moved below !!DeBuG!! /* **** Incoherent scattering function for theta=PI. */ s0 = 0.0f; for (i__ = 0; i__ < my_noscco; i__++) { register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS]; if (temp < *energy) { register float aux = *energy * (*energy - temp) * 2.f; #ifdef USING_CUDA pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f; // 1.956951306108245e-6 = 1.0/510998.918f // !!DeBuG!! Version using the reciprocal of sqrt in CUDA: faster and more accurate!! #else pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f); #endif if (pzomc > 0.0f) temp = (0.707106781186545f+pzomc*1.4142135623731f) * (0.707106781186545f+pzomc*1.4142135623731f); else temp = (0.707106781186545f-pzomc*1.4142135623731f) * (0.707106781186545f-pzomc*1.4142135623731f); temp = 0.5f * expf(0.5f - temp); // Calculate EXP outside the IF to avoid branching if (pzomc > 0.0f) temp = 1.0f - temp; s0 += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp; } } /* **** Sampling tau. */ do { if (ranecu(seed)*/*a2=*/(a1+2.*ek*(ek+1.f)*taumin*taumin) < a1) { tau = powf(taumin, ranecu(seed)); // !!DeBuG!! "powf()" has a big error (7 ULP), the double version has only 2!! !!DeBuG!! } else { tau = sqrtf(1.f + ranecu(seed) * (taumin * taumin - 1.f)); } cdt1 = (double)(1.f-tau) / (((double)tau)*((double)*energy)*1.956951306108245e-6); // !!DeBuG!! The sampled COS will be double precision, but TAU is not!!! if (cdt1 > 2.0) cdt1 = 1.99999999; // !!DeBuG!! Make sure that precision error in POW, SQRT never gives cdt1>2 ==> costh_Compton<-1 /* **** Incoherent scattering function. */ s = 0.0f; for (i__ = 0; i__ < my_noscco; i__++) { register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS]; if (temp < *energy) { register float aux = (*energy) * (*energy - temp) * ((float)cdt1); if ((aux>1.0e-12f)||(temp>1.0e-12f)) // !!DeBuG!! Make sure the SQRT argument is never <0, and that we never get 0/0 -> NaN when aux=temp=0 !! { #ifdef USING_CUDA pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f; // 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!! 
#else pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f); #endif } else { pzomc = 0.002f; // !!DeBuG!! Using a rough approximation to a sample value of pzomc found using pure double precision: NOT RIGUROUS! But this code is expected to be used very seldom, only in extreme cases. #ifndef USING_CUDA if (warning_flag_1<0) { warning_flag_1 = +1; // Disable warning, do not show again printf(" [... Small numerical precision error detected computing \"pzomc\" in GCOa (this warning will not be repeated).]\n i__=%d, aux=%.14f, temp=%.14f, pzomc(forced)=%.14f, uico=%.14f, energy=%.7f, cgco_SHARED->fj0=%.14f, mat=%d, cdt1=%.14lf\n", (int)i__, aux, temp, pzomc, cgco_SHARED->uico[*mat+i__*MAX_MATERIALS], *energy, cgco_SHARED->fj0[*mat+i__*MAX_MATERIALS], (int)*mat, cdt1); // !!DeBuG!! } #endif } temp = pzomc * 1.4142135623731f; if (pzomc > 0.0f) temp = 0.5f - (temp + 0.70710678118654502f) * (temp + 0.70710678118654502f); // Calculate exponential argument else temp = 0.5f - (0.70710678118654502f - temp) * (0.70710678118654502f - temp); temp = 0.5f * expf(temp); // All threads will calculate the expf together if (pzomc > 0.0f) temp = 1.0f - temp; s += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp; rn[i__] = temp; } } } while( (ranecu(seed)*s0) > (s*(1.0f+tau*(/*ek1=*/(ek3 - ek2 - 1.0f)+tau*(ek2+tau*ek3)))/(ek3*tau*(tau*tau+1.0f))) ); // **** Rejection function *costh_Compton = 1.0 - cdt1; /* **** Target electron shell. */ for (;;) { register float temp = s*ranecu(seed); float pac = 0.0f; int ishell = my_noscco - 1; // First shell will have number 0 for (i__ = 0; i__ < (my_noscco-1); i__++) // !!DeBuG!! Iterate to (my_noscco-1) only: the last oscillator is excited in case all other fail (no point in double checking) ?? { pac += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * rn[i__]; // !!DeBuG!! pac[] is calculated on the fly to save registers! if (pac > temp) // pac[] is calculated on the fly to save registers! { ishell = i__; break; } } /* **** Projected momentum of the target electron. */ temp = ranecu(seed) * rn[ishell]; if (temp < 0.5f) { pzomc = (0.70710678118654502f - sqrtf(0.5f - logf(temp + temp))) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f); } else { pzomc = (sqrtf(0.5f - logf(2.0f - 2.0f*temp)) - 0.70710678118654502f) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f); } if (pzomc < -1.0f) { continue; // re-start the loop } /* **** F(EP) rejection. */ temp = tau * (tau - (*costh_Compton) * 2.f) + 1.f; // this variable was originally called "xqc" // af = sqrt( max_value(temp,1.0e-30f) ) * (tau * (tau - *costh_Compton) / max_value(temp,1.0e-30f) + 1.f); //!!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!! if (temp>1.0e-20f) // !!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!! { af = sqrtf(temp) * (tau * (tau - ((float)(*costh_Compton))) / temp + 1.f); } else { // When using single precision, it is possible (but very uncommon) to get costh_Compton==1 and tau==1; then temp is 0 and 'af' can not be calculated (0/0 -> nan). Analysing the results obtained using double precision, we found that 'af' would be almost 0 in this situation, with an "average" about ~0.002 (this is just a rough estimation, but using af=0 the value would never be rejected below). af = 0.00200f; // !!DeBuG!! // !!DeBuG!! #ifndef USING_CUDA if (warning_flag_2<0) { warning_flag_2 = +1; // Disable warning, do not show again printf(" [... 
Small numerical precision error detected computing \"af\" in GCOa (this warning will not be repeated)].\n xqc=%.14f, af(forced)=%.14f, tau=%.14f, costh_Compton=%.14lf\n", temp, af, tau, *costh_Compton);   // !!DeBuG!!
        }
      #endif
      }

      if (af > 0.0f)
      {
        temp = af * 0.2f + 1.f;   // this variable was originally called "fpzmax"
      }
      else
      {
        temp = 1.f - af * 0.2f;
      }

      if ( ranecu(seed)*temp < /*fpz =*/(af * max_value( min_value(pzomc,0.2f) , -0.2f ) + 1.f) )
      {
        break;
      }

    }   // [End of the target electron shell sampling loop]


    /* ****  Energy of the scattered photon. */
    {
      register float t, b1, b2, temp;
      t  = pzomc * pzomc;
      b1 = 1.f - t * tau * tau;
      b2 = 1.f - t * tau * ((float)(*costh_Compton));

      temp = sqrtf( fabsf(b2 * b2 - b1 * (1.0f - t)) );
      if (pzomc < 0.0f)
        temp *= -1.0f;   // !Error! energy may increase (slightly) due to inaccurate calculation!   !!DeBuG!!

      t = (tau / b1) * (b2 + temp);
      if (t > 1.0f)
      {
      #ifndef USING_CUDA
        if (warning_flag_3<0)
        {
          warning_flag_3 = +1;   // Disable warning, do not show again
          printf("\n [... a Compton event tried to increase the x-ray energy due to precision error. Keeping initial energy. (This warning will not be repeated.)]\n scaling=%.14f, costh_Compton=%.14lf\n", t, *costh_Compton);   // !!DeBuG!!
        }
      #endif
        t = 1.0f;   // !!DeBuG!! Avoid increasing energy by hand!!! not nice!!
      }

      (*energy) *= t;
      // (*energy) *= (tau / b1) * (b2 + temp);      // Original PENELOPE code
    }

}   // [End subroutine GCOa]
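// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original file): a plain
// matrix formulation of the S' -> lab change of frame documented above for
// rotate_double()/DIRECT. The Vec3 struct, rotate_reference() and main() are
// names invented for this example only; under those assumptions the snippet
// compiles standalone (e.g. with g++ or nvcc) and can be used to cross-check
// the expanded, branchy version against the rotation matrix in the comments.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };   // stand-in for float3, double precision for the check

// Rotate the unit vector d by (costh, phi), both given in d's self-frame S'.
static Vec3 rotate_reference(Vec3 d, double costh, double phi)
{
  const double sinth = std::sqrt(1.0 - costh * costh);
  const double dp[3] = { sinth * std::cos(phi), sinth * std::sin(phi), costh };   // d' in S'
  const double rho   = std::sqrt(d.x * d.x + d.y * d.y);
  if (rho < 1.0e-14)   // z and z' parallel: y'=y and x'=+/-x, as noted above
  {
    const double s = (d.z > 0.0) ? 1.0 : -1.0;
    return { s * dp[0], dp[1], s * dp[2] };
  }
  // Rows of the S' -> lab matrix: (uw/rho -v/rho u), (vw/rho u/rho v), (-rho 0 w)
  return { (d.x * d.z / rho) * dp[0] - (d.y / rho) * dp[1] + d.x * dp[2],
           (d.y * d.z / rho) * dp[0] + (d.x / rho) * dp[1] + d.y * dp[2],
           (-rho)            * dp[0]                        + d.z * dp[2] };
}

int main()
{
  Vec3 d = { 0.0, 0.0, 1.0 };                        // photon initially along +z
  Vec3 r = rotate_reference(d, std::cos(0.1), 0.3);  // small polar deflection, phi = 0.3 rad
  std::printf("rotated direction: (%.6f, %.6f, %.6f)\n", r.x, r.y, r.z);
  return 0;
}
// For unit-norm input this matrix form should agree with rotate_double() up to
// floating-point round-off; the kernel keeps the expanded scalar form, which
// needs no intermediate matrix in registers.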
#include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/group_normalization.hpp> #include <nbla/variable.hpp> // Kernels and ops #include <nbla/cuda/function/kernel/group_normalization.cuh> #include <nbla/cuda/function/kernel/normalization.cuh> #include <nbla/cuda/utils/reduce_ops/group_normalization.cuh> #include <nbla/cuda/utils/reduce_ops/welford.cuh> namespace nbla { template <typename T> void GroupNormalizationCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { GroupNormalization<T>::setup_impl(inputs, outputs); cuda_set_device(this->device_); const auto x = inputs[0]; const auto x_shape = x->shape(); const auto ndim = x->ndim(); // Setup input and output adaptor for channel-last memory format need_adaptor_ = ChannelFirstAdaptor::need_adaptor( inputs[0]->shape(), this->batch_axis_, this->channel_axis_); if (need_adaptor_) { adaptor_ = std::make_shared<ChannelFirstAdaptor>(); adaptor_->setup(inputs[0], &pre_adaptor_, &post_adaptor_, outputs[0], inputs[0]->shape(), this->batch_axis_, this->channel_axis_, this->ctx_); const auto c = this->batch_axis_.size(); channel_size_ = pre_adaptor_.shape()[c]; batch_size_ = pre_adaptor_.size() / pre_adaptor_.size(c); reduce_size_ = pre_adaptor_.size(c + 1) * (channel_size_ / this->num_groups_); inv_reduce_size_ = 1.0f / reduce_size_; outer_size_ = pre_adaptor_.size() / reduce_size_; } else { const auto c = this->channel_axis_; channel_size_ = x_shape[c]; batch_size_ = x->size() / x->size(c); reduce_size_ = x->size(c + 1) * (channel_size_ / this->num_groups_); inv_reduce_size_ = 1.0f / reduce_size_; outer_size_ = x->size() / reduce_size_; } //---------------- // Reshape buffers //---------------- // Batch stats var_.reshape({batch_size_ * channel_size_}, true); mean_.reshape({batch_size_ * channel_size_}, true); // Internal buffers for forward calculation a_.reshape({batch_size_ * channel_size_}, true); b_.reshape({batch_size_ * channel_size_}, true); // Internal buffers for backward calculation sum_dy_.reshape({batch_size_ * channel_size_}, true); sum_dyx_.reshape({batch_size_ * channel_size_}, true); gamma_invstd_.reshape({batch_size_ * channel_size_}, true); factor1_.reshape({batch_size_ * this->num_groups_}, true); factor2_.reshape({batch_size_ * this->num_groups_}, true); } template <typename T> void GroupNormalizationCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); // Currently, only channel-fist kernels are provided. Channel-last execution // is performed by transforming input and output memory format to // channel-first and using channel-first implementation. The transformation is // performed by ChannelFirstAdaptor. if (need_adaptor_) { // Transpose input to [B, C, H, W] memory format. adaptor_->convert_to_channel_first(inputs[0], &pre_adaptor_); auto channel_first_inputs = inputs; auto channel_first_outputs = outputs; channel_first_inputs[0] = &pre_adaptor_; channel_first_outputs[0] = &post_adaptor_; // Group normalization forward_channel_first(channel_first_inputs, channel_first_outputs); // Transpose output to original memory format. adaptor_->convert_from_channel_first(&post_adaptor_, outputs[0]); } else { forward_channel_first(inputs, outputs); } } template <typename T> void GroupNormalizationCuda<T>::forward_channel_first( const Variables &inputs, const Variables &outputs) { Variable *v_mean = &mean_; Variable *v_var = &var_; // Output mean and var when output_stats == true. 
if (outputs.size() == 3) { v_mean = outputs[1]; v_var = outputs[2]; } // Calculate mean and variance. { const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); Tc *mean = v_mean->cast_data_and_get_pointer<Tc>(this->ctx_, true); Tc *var = v_var->cast_data_and_get_pointer<Tc>(this->ctx_, true); const int num_threads = reduce_size_ < NBLA_CUDA_GN_NUM_THREADS ? CUDA_WARP_SIZE : NBLA_CUDA_GN_NUM_THREADS; const auto grid = std::min(outer_size_, static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); const auto block = num_threads; WelfordOp<Tc, Size_t> op(x, mean, var, reduce_size_); reduce_2d_x<<<grid, block>>>(op, outer_size_, reduce_size_); NBLA_CUDA_KERNEL_CHECK(); } // Calculate `a` and `b` for simplification of normalization formula // as `y = a * x + b`. { const auto beta_idx = 1; const auto gamma_idx = this->no_bias_ ? 1 : 2; const Tc *mean = v_mean->get_data_pointer<Tc>(this->ctx_); const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_); const Tc *beta = this->no_bias_ ? nullptr : inputs[beta_idx]->get_data_pointer<Tc>(this->ctx_); const Tc *gamma = this->no_scale_ ? nullptr : inputs[gamma_idx]->get_data_pointer<Tc>(this->ctx_); Tc *a = a_.cast_data_and_get_pointer<Tc>(this->ctx_, true); Tc *b = b_.cast_data_and_get_pointer<Tc>(this->ctx_, true); const auto block = NBLA_CUDA_GN_NUM_THREADS; const auto grid = std::min(NBLA_CEIL_SIZE_T_DIV(batch_size_ * channel_size_, NBLA_CUDA_GN_NUM_THREADS), static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); group_norm_forward_normalization_factor<<<grid, block>>>( batch_size_, channel_size_, this->num_groups_, mean, var, beta, gamma, a, b, this->eps_); NBLA_CUDA_KERNEL_CHECK(); } // Normalization by `y = a * x + b`. { const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *a = a_.get_data_pointer<Tc>(this->ctx_); const Tc *b = b_.get_data_pointer<Tc>(this->ctx_); Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); const Size_t size = inputs[0]->size(); const Size_t spatial_size = size / (batch_size_ * channel_size_); const Size_t num_threads = CUDA_WARP_SIZE * 2; const auto block = num_threads; const auto grid = std::min( NBLA_CEIL_SIZE_T_DIV(size, num_threads * NBLA_CUDA_GN_N_UNROLL), static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); group_norm_forward_normalization<Tc, Size_t, NBLA_CUDA_GN_N_UNROLL><<<grid, block>>>( size, spatial_size, x, a, b, y); NBLA_CUDA_KERNEL_CHECK(); // Clear internal buffers a_.data()->array()->clear(); b_.data()->array()->clear(); } } template <typename T> void GroupNormalizationCuda<T>::backward_impl( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || (inputs.size() > 1 && propagate_down[1]) || (inputs.size() > 2 && propagate_down[2]))) { return; } cuda_set_device(this->device_); if (need_adaptor_) { adaptor_->convert_from_channel_first_backward(&post_adaptor_, outputs[0], true, false); auto channel_first_inputs = inputs; auto channel_first_outputs = outputs; channel_first_inputs[0] = &pre_adaptor_; channel_first_outputs[0] = &post_adaptor_; auto channel_first_accum = accum; channel_first_accum[0] = false; backward_channel_first(channel_first_inputs, channel_first_outputs, propagate_down, channel_first_accum); post_adaptor_.data()->array()->clear(); post_adaptor_.grad()->array()->clear(); adaptor_->convert_to_channel_first_backward(inputs[0], &pre_adaptor_, propagate_down[0], accum[0]); pre_adaptor_.data()->array()->clear(); pre_adaptor_.grad()->array()->clear(); } else { backward_channel_first(inputs, outputs, 
propagate_down, accum); } } template <typename T> void GroupNormalizationCuda<T>::backward_channel_first( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { Variable *v_mean = &mean_; Variable *v_var = &var_; // Output mean and var when output_stats == true. if (outputs.size() == 3) { v_mean = outputs[1]; v_var = outputs[2]; } // Calculate sum of dy and dy*x for the following gradient calculation. { const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Tc *sum_dy = sum_dy_.cast_data_and_get_pointer<Tc>(this->ctx_, true); Tc *sum_dyx = sum_dyx_.cast_data_and_get_pointer<Tc>(this->ctx_, true); const Size_t size = inputs[0]->size(); const Size_t bc_size = batch_size_ * channel_size_; const Size_t spatial_size = size / bc_size; const auto num_threads = spatial_size < NBLA_CUDA_GN_NUM_THREADS ? CUDA_WARP_SIZE : NBLA_CUDA_GN_NUM_THREADS; const auto grid = std::min(bc_size, static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); const auto block = num_threads; GNGradOp<Tc, Size_t> op(x, dy, sum_dy, sum_dyx); reduce_2d_x<<<grid, block>>>(op, bc_size, spatial_size); NBLA_CUDA_KERNEL_CHECK(); } // Calculate gamma / sqrt(var) if (propagate_down[0]) { const auto gamma_idx = this->no_bias_ ? 1 : 2; const Tc *gamma = this->no_scale_ ? nullptr : inputs[gamma_idx]->get_data_pointer<Tc>(this->ctx_); const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_); Tc *gamma_invstd = gamma_invstd_.cast_data_and_get_pointer<Tc>(this->ctx_, true); const Size_t size = batch_size_ * channel_size_; const auto num_threads = CUDA_WARP_SIZE * 2; const auto grid = std::min( NBLA_CEIL_SIZE_T_DIV(size, num_threads * NBLA_CUDA_GN_N_UNROLL), static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); const auto block = num_threads; group_norm_backward_gamma_invstd<Tc, Size_t, NBLA_CUDA_GN_N_UNROLL><<<grid, block>>>( size, channel_size_, this->num_groups_, gamma, var, gamma_invstd, this->eps_); NBLA_CUDA_KERNEL_CHECK(); } // Calculate factor1 and factor2 if (propagate_down[0]) { const auto gamma_idx = this->no_bias_ ? 1 : 2; const Tc *gamma = this->no_scale_ ? nullptr : inputs[gamma_idx]->get_data_pointer<Tc>(this->ctx_); const Tc *mean = v_mean->get_data_pointer<Tc>(this->ctx_); const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_); const Tc *dmean = outputs.size() == 3 ? v_mean->get_grad_pointer<Tc>(this->ctx_) : nullptr; const Tc *dvar = outputs.size() == 3 ? v_var->get_grad_pointer<Tc>(this->ctx_) : nullptr; const Tc *sum_dy = sum_dy_.get_data_pointer<Tc>(this->ctx_); const Tc *sum_dyx = sum_dyx_.get_data_pointer<Tc>(this->ctx_); Tc *factor1 = factor1_.cast_data_and_get_pointer<Tc>(this->ctx_, true); Tc *factor2 = factor2_.cast_data_and_get_pointer<Tc>(this->ctx_, true); const Size_t size = inputs[0]->size(); const Size_t spatial_size = size / (batch_size_ * channel_size_); const auto num_threads = CUDA_WARP_SIZE * 2; dim3 grid; grid.x = std::min(batch_size_, static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); grid.y = std::min(static_cast<Size_t>(this->num_groups_), static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); dim3 block(num_threads); group_norm_backward_dx_factor<<<grid, block>>>( batch_size_, channel_size_, spatial_size, inv_reduce_size_, this->num_groups_, mean, var, dmean, dvar, gamma, sum_dy, sum_dyx, factor1, factor2, this->eps_); NBLA_CUDA_KERNEL_CHECK(); } // Calculate dx by `dx = gamma_invstd * dy + factor1 * x + factor2`. 
if (propagate_down[0]) { const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); const Tc *gamma_invstd = gamma_invstd_.get_data_pointer<Tc>(this->ctx_); const Tc *factor1 = factor1_.get_data_pointer<Tc>(this->ctx_); const Tc *factor2 = factor2_.get_data_pointer<Tc>(this->ctx_); Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); const Size_t size = inputs[0]->size(); const Size_t spatial_size = size / (batch_size_ * channel_size_); const Size_t num_threads = CUDA_WARP_SIZE * 2; const auto block = num_threads; const auto grid = std::min( NBLA_CEIL_SIZE_T_DIV(size, num_threads * NBLA_CUDA_GN_N_UNROLL), static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); auto kernel = accum[0] ? group_norm_backward_dx<true, Tc, Size_t, NBLA_CUDA_GN_N_UNROLL> : group_norm_backward_dx<false, Tc, Size_t, NBLA_CUDA_GN_N_UNROLL>; kernel<<<grid, block>>>(size, channel_size_, spatial_size, this->num_groups_, x, dy, gamma_invstd, factor1, factor2, dx); NBLA_CUDA_KERNEL_CHECK(); // Clear internal buffer gamma_invstd_.data()->array()->clear(); factor1_.data()->array()->clear(); factor2_.data()->array()->clear(); } // Calculate dbeta and dgamma. if ((inputs.size() > 1 && propagate_down[1]) || (inputs.size() > 2 && propagate_down[2])) { const auto beta_idx = 1; const auto gamma_idx = this->no_bias_ ? 1 : 2; const Tc *mean = v_mean->get_data_pointer<Tc>(this->ctx_); const Tc *var = v_var->get_data_pointer<Tc>(this->ctx_); const Tc *sum_dy = sum_dy_.get_data_pointer<Tc>(this->ctx_); const Tc *sum_dyx = sum_dyx_.get_data_pointer<Tc>(this->ctx_); Tc *dbeta = !this->no_bias_ && propagate_down[beta_idx] ? inputs[beta_idx]->cast_grad_and_get_pointer<Tc>( this->ctx_, !accum[beta_idx]) : nullptr; Tc *dgamma = !this->no_scale_ && propagate_down[gamma_idx] ? inputs[gamma_idx]->cast_grad_and_get_pointer<Tc>( this->ctx_, !accum[gamma_idx]) : nullptr; const auto block = NBLA_CUDA_GN_NUM_THREADS; const auto grid = std::min(NBLA_CEIL_SIZE_T_DIV(channel_size_, NBLA_CUDA_GN_NUM_THREADS), static_cast<Size_t>(NBLA_CUDA_GN_MAX_BLOCKS)); // Select kernels by accum combination. auto kernel = group_norm_backward_dbeta_dgamma<true, true, Tc, Size_t>; if (!this->no_bias_ && accum[beta_idx]) { kernel = !this->no_scale_ && accum[gamma_idx] ? group_norm_backward_dbeta_dgamma<true, true, Tc, Size_t> : group_norm_backward_dbeta_dgamma<true, false, Tc, Size_t>; } else { kernel = !this->no_scale_ && accum[gamma_idx] ? group_norm_backward_dbeta_dgamma<false, true, Tc, Size_t> : group_norm_backward_dbeta_dgamma<false, false, Tc, Size_t>; } kernel<<<grid, block>>>(batch_size_, channel_size_, this->num_groups_, mean, var, sum_dy, sum_dyx, dbeta, dgamma, this->eps_); NBLA_CUDA_KERNEL_CHECK(); } // Clear internal buffer sum_dy_.data()->array()->clear(); sum_dyx_.data()->array()->clear(); } }
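// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not nnabla API): one plausible reading of the
// forward path above -- per-group mean/variance first, then normalization
// fused into a single "y = a * x + b" pass. The name group_norm_reference, the
// [N, C, HW] layout and the exact factorization a = gamma / sqrt(var + eps),
// b = beta - a * mean are assumptions made for this example; the real factors
// are produced on the GPU by group_norm_forward_normalization_factor and may
// differ in detail.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>
#include <vector>

void group_norm_reference(const std::vector<float>& x, std::vector<float>& y,
                          int N, int C, int HW, int groups,
                          const std::vector<float>& gamma,
                          const std::vector<float>& beta, float eps)
{
  const int cpg = C / groups;   // channels per group
  for (int n = 0; n < N; ++n) {
    for (int g = 0; g < groups; ++g) {
      // 1) Mean / variance over one group (reduce_size = cpg * HW).
      double sum = 0.0, sqsum = 0.0;
      for (int c = g * cpg; c < (g + 1) * cpg; ++c)
        for (int s = 0; s < HW; ++s) {
          const double v = x[(n * C + c) * HW + s];
          sum += v; sqsum += v * v;
        }
      const double m   = sum / (cpg * HW);
      const double var = sqsum / (cpg * HW) - m * m;
      // 2) Fold gamma/beta/eps into per-channel a, b and apply y = a*x + b.
      for (int c = g * cpg; c < (g + 1) * cpg; ++c) {
        const float a = gamma[c] / std::sqrt(static_cast<float>(var) + eps);
        const float b = beta[c] - a * static_cast<float>(m);
        for (int s = 0; s < HW; ++s)
          y[(n * C + c) * HW + s] = a * x[(n * C + c) * HW + s] + b;
      }
    }
  }
}

int main()
{
  const int N = 1, C = 4, HW = 8, groups = 2;
  std::vector<float> x(N * C * HW, 1.0f), y(x.size(), 0.0f);
  std::vector<float> gamma(C, 1.0f), beta(C, 0.0f);
  x[3] = 5.0f;   // make the group statistics non-trivial
  group_norm_reference(x, y, N, C, HW, groups, gamma, beta, 1e-5f);
  std::printf("y[0]=%f y[3]=%f\n", y[0], y[3]);
  return 0;
}
// Folding mean, var, gamma and beta into a and b is what lets the CUDA version
// touch x and y exactly once in its second pass (the a_ and b_ buffers above).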
#include "utility.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { __global__ void warpPerspectiveLinearKernel(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, float coeffe0, float coeffe1, float coeffe2, float coeffe3, float coeffe4, float coeffe5, float coeffe6, float coeffe7, float coeffe8, uchar* dst, int dst_rows, int dst_cols, int dst_stride, BorderType border_type, uchar border_value) { int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x; int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float src_x = coeffe0 * element_x + coeffe1 * element_y + coeffe2; float src_y = coeffe3 * element_x + coeffe4 * element_y + coeffe5; float weight = coeffe6 * element_x + coeffe7 * element_y + coeffe8; src_x /= weight; src_y /= weight; int src_x0 = __float2int_rd(src_x); int src_y0 = __float2int_rd(src_y); int src_x1 = src_x0 + 1; int src_y1 = src_y0 + 1; if (border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_TRANSPARENT) { bool flag0 = src_y0 >= 0 && src_y0 < src_rows && src_x0 >= 0 && src_x0 < src_cols; bool flag1 = src_y0 >= 0 && src_y0 < src_rows && src_x1 >= 0 && src_x1 < src_cols; bool flag2 = src_y1 >= 0 && src_y1 < src_rows && src_x0 >= 0 && src_x0 < src_cols; bool flag3 = src_y1 >= 0 && src_y1 < src_rows && src_x1 >= 0 && src_x1 < src_cols; if ((border_type == BORDER_TYPE_TRANSPARENT) && ((!flag0) || (!flag1) || (!flag2) || (!flag3))) { return; } if (channels == 1) { uchar* input = (uchar*)(src + src_y0 * src_stride); uchar src_value0 = flag0 ? input[src_x0] : border_value; uchar src_value1 = flag1 ? input[src_x1] : border_value; float value0 = (src_x1 - src_x) * (src_y1 - src_y) * src_value0; float value1 = (src_x - src_x0) * (src_y1 - src_y) * src_value1; float sum = 0.f; sum += value0; sum += value1; input = (uchar*)(src + src_y1 * src_stride); src_value0 = flag2 ? input[src_x0] : border_value; src_value1 = flag3 ? input[src_x1] : border_value; value0 = (src_x1 - src_x) * (src_y - src_y0) * src_value0; value1 = (src_x - src_x0) * (src_y - src_y0) * src_value1; sum += value0; sum += value1; uchar* output = (uchar*)(dst + element_y * dst_stride); output[element_x] = saturateCast(sum); } else if (channels == 3) { uchar3 border_value1 = make_uchar3(border_value, border_value, border_value); uchar3* input = (uchar3*)(src + src_y0 * src_stride); uchar3 src_value0 = flag0 ? input[src_x0] : border_value1; uchar3 src_value1 = flag1 ? input[src_x1] : border_value1; float3 value0 = (src_x1 - src_x) * (src_y1 - src_y) * src_value0; float3 value1 = (src_x - src_x0) * (src_y1 - src_y) * src_value1; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value0; sum += value1; input = (uchar3*)(src + src_y1 * src_stride); src_value0 = flag2 ? input[src_x0] : border_value1; src_value1 = flag3 ? input[src_x1] : border_value1; value0 = (src_x1 - src_x) * (src_y - src_y0) * src_value0; value1 = (src_x - src_x0) * (src_y - src_y0) * src_value1; sum += value0; sum += value1; uchar3* output = (uchar3*)(dst + element_y * dst_stride); if (src_x > src_cols - 1 || src_y > src_rows - 1) { output[element_x] = border_value1; // align with npp. } else { output[element_x] = saturateCastVector<uchar3, float3>(sum); } } else { uchar4 border_value1 = make_uchar4(border_value, border_value, border_value, border_value); uchar4* input = (uchar4*)(src + src_y0 * src_stride); uchar4 src_value0 = flag0 ? input[src_x0] : border_value1; uchar4 src_value1 = flag1 ? 
input[src_x1] : border_value1; float4 value0 = (src_x1 - src_x) * (src_y1 - src_y) * src_value0; float4 value1 = (src_x - src_x0) * (src_y1 - src_y) * src_value1; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value0; sum += value1; input = (uchar4*)(src + src_y1 * src_stride); src_value0 = flag2 ? input[src_x0] : border_value1; src_value1 = flag3 ? input[src_x1] : border_value1; value0 = (src_x1 - src_x) * (src_y - src_y0) * src_value0; value1 = (src_x - src_x0) * (src_y - src_y0) * src_value1; sum += value0; sum += value1; uchar4* output = (uchar4*)(dst + element_y * dst_stride); output[element_x] = saturateCastVector<uchar4, float4>(sum); } } else if (border_type == BORDER_TYPE_REPLICATE) { float diff_x0 = src_x - src_x0; float diff_x1 = src_x1 - src_x; float diff_y0 = src_y - src_y0; float diff_y1 = src_y1 - src_y; src_x0 = clip(src_x0, 0, src_cols - 1); src_y0 = clip(src_y0, 0, src_rows - 1); src_x1 = clip(src_x1, 0, src_cols - 1); src_y1 = clip(src_y1, 0, src_rows - 1); if (channels == 1) { uchar* input = (uchar*)(src + src_y0 * src_stride); uchar src_value0 = input[src_x0]; uchar src_value1 = input[src_x1]; float value0 = diff_x1 * diff_y1 * src_value0; float value1 = diff_x0 * diff_y1 * src_value1; float sum = 0.f; sum += value0; sum += value1; input = (uchar*)(src + src_y1 * src_stride); src_value0 = input[src_x0]; src_value1 = input[src_x1]; value0 = diff_x1 * diff_y0 * src_value0; value1 = diff_x0 * diff_y0 * src_value1; sum += value0; sum += value1; uchar* output = (uchar*)(dst + element_y * dst_stride); output[element_x] = saturateCast(sum); } else if (channels == 3) { uchar3* input = (uchar3*)(src + src_y0 * src_stride); uchar3 src_value0 = input[src_x0]; uchar3 src_value1 = input[src_x1]; float3 value0 = diff_x1 * diff_y1 * src_value0; float3 value1 = diff_x0 * diff_y1 * src_value1; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value0; sum += value1; input = (uchar3*)(src + src_y1 * src_stride); src_value0 = input[src_x0]; src_value1 = input[src_x1]; value0 = diff_x1 * diff_y0 * src_value0; value1 = diff_x0 * diff_y0 * src_value1; sum += value0; sum += value1; uchar3* output = (uchar3*)(dst + element_y * dst_stride); output[element_x] = saturateCastVector<uchar3, float3>(sum); } else { uchar4* input = (uchar4*)(src + src_y0 * src_stride); uchar4 src_value0 = input[src_x0]; uchar4 src_value1 = input[src_x1]; float4 value0 = diff_x1 * diff_y1 * src_value0; float4 value1 = diff_x0 * diff_y1 * src_value1; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value0; sum += value1; input = (uchar4*)(src + src_y1 * src_stride); src_value0 = input[src_x0]; src_value1 = input[src_x1]; value0 = diff_x1 * diff_y0 * src_value0; value1 = diff_x0 * diff_y0 * src_value1; sum += value0; sum += value1; uchar4* output = (uchar4*)(dst + element_y * dst_stride); output[element_x] = saturateCastVector<uchar4, float4>(sum); } } else { } } __global__ void warpPerspectiveLinearKernel(const float* src, int src_rows, int src_cols, int channels, int src_stride, float coeffe0, float coeffe1, float coeffe2, float coeffe3, float coeffe4, float coeffe5, float coeffe6, float coeffe7, float coeffe8, float* dst, int dst_rows, int dst_cols, int dst_stride, BorderType border_type, float border_value) { int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x; int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float src_x = coeffe0 * element_x + coeffe1 * element_y + coeffe2; float src_y = coeffe3 * element_x + coeffe4 * 
element_y + coeffe5; float weight = coeffe6 * element_x + coeffe7 * element_y + coeffe8; src_x /= weight; src_y /= weight; int src_x0 = __float2int_rd(src_x); int src_y0 = __float2int_rd(src_y); int src_x1 = src_x0 + 1; int src_y1 = src_y0 + 1; if (border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_TRANSPARENT) { bool flag0 = src_y0 >= 0 && src_y0 < src_rows && src_x0 >= 0 && src_x0 < src_cols; bool flag1 = src_y0 >= 0 && src_y0 < src_rows && src_x1 >= 0 && src_x1 < src_cols; bool flag2 = src_y1 >= 0 && src_y1 < src_rows && src_x0 >= 0 && src_x0 < src_cols; bool flag3 = src_y1 >= 0 && src_y1 < src_rows && src_x1 >= 0 && src_x1 < src_cols; if ((border_type == BORDER_TYPE_TRANSPARENT) && ((!flag0) || (!flag1) || (!flag2) || (!flag3))) { return; } if (channels == 1) { float* input = (float*)(src + src_y0 * src_stride); float src_value0 = flag0 ? input[src_x0] : border_value; float src_value1 = flag1 ? input[src_x1] : border_value; float value0 = (src_x1 - src_x) * (src_y1 - src_y) * src_value0; float value1 = (src_x - src_x0) * (src_y1 - src_y) * src_value1; float sum = 0.f; sum += value0; sum += value1; input = (float*)(src + src_y1 * src_stride); src_value0 = flag2 ? input[src_x0] : border_value; src_value1 = flag3 ? input[src_x1] : border_value; value0 = (src_x1 - src_x) * (src_y - src_y0) * src_value0; value1 = (src_x - src_x0) * (src_y - src_y0) * src_value1; sum += value0; sum += value1; float* output = (float*)(dst + element_y * dst_stride); output[element_x] = sum; } else if (channels == 3) { float3 border_value1 = make_float3(border_value, border_value, border_value); float3* input = (float3*)(src + src_y0 * src_stride); float3 src_value0 = flag0 ? input[src_x0] : border_value1; float3 src_value1 = flag1 ? input[src_x1] : border_value1; float3 value0 = (src_x1 - src_x) * (src_y1 - src_y) * src_value0; float3 value1 = (src_x - src_x0) * (src_y1 - src_y) * src_value1; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value0; sum += value1; input = (float3*)(src + src_y1 * src_stride); src_value0 = flag2 ? input[src_x0] : border_value1; src_value1 = flag3 ? input[src_x1] : border_value1; value0 = (src_x1 - src_x) * (src_y - src_y0) * src_value0; value1 = (src_x - src_x0) * (src_y - src_y0) * src_value1; sum += value0; sum += value1; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { float4 border_value1 = make_float4(border_value, border_value, border_value, border_value); float4* input = (float4*)(src + src_y0 * src_stride); float4 src_value0 = flag0 ? input[src_x0] : border_value1; float4 src_value1 = flag1 ? input[src_x1] : border_value1; float4 value0 = (src_x1 - src_x) * (src_y1 - src_y) * src_value0; float4 value1 = (src_x - src_x0) * (src_y1 - src_y) * src_value1; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value0; sum += value1; input = (float4*)(src + src_y1 * src_stride); src_value0 = flag2 ? input[src_x0] : border_value1; src_value1 = flag3 ? 
input[src_x1] : border_value1; value0 = (src_x1 - src_x) * (src_y - src_y0) * src_value0; value1 = (src_x - src_x0) * (src_y - src_y0) * src_value1; sum += value0; sum += value1; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } else if (border_type == BORDER_TYPE_REPLICATE) { float diff_x0 = src_x - src_x0; float diff_x1 = src_x1 - src_x; float diff_y0 = src_y - src_y0; float diff_y1 = src_y1 - src_y; src_x0 = clip(src_x0, 0, src_cols - 1); src_y0 = clip(src_y0, 0, src_rows - 1); src_x1 = clip(src_x1, 0, src_cols - 1); src_y1 = clip(src_y1, 0, src_rows - 1); if (channels == 1) { float* input = (float*)(src + src_y0 * src_stride); float src_value0 = input[src_x0]; float src_value1 = input[src_x1]; float value0 = diff_x1 * diff_y1 * src_value0; float value1 = diff_x0 * diff_y1 * src_value1; float sum = 0.f; sum += value0; sum += value1; input = (float*)(src + src_y1 * src_stride); src_value0 = input[src_x0]; src_value1 = input[src_x1]; value0 = diff_x1 * diff_y0 * src_value0; value1 = diff_x0 * diff_y0 * src_value1; sum += value0; sum += value1; float* output = (float*)(dst + element_y * dst_stride); output[element_x] = sum; } else if (channels == 3) { float3* input = (float3*)(src + src_y0 * src_stride); float3 src_value0 = input[src_x0]; float3 src_value1 = input[src_x1]; float3 value0 = diff_x1 * diff_y1 * src_value0; float3 value1 = diff_x0 * diff_y1 * src_value1; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value0; sum += value1; input = (float3*)(src + src_y1 * src_stride); src_value0 = input[src_x0]; src_value1 = input[src_x1]; value0 = diff_x1 * diff_y0 * src_value0; value1 = diff_x0 * diff_y0 * src_value1; sum += value0; sum += value1; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { float4* input = (float4*)(src + src_y0 * src_stride); float4 src_value0 = input[src_x0]; float4 src_value1 = input[src_x1]; float4 value0 = diff_x1 * diff_y1 * src_value0; float4 value1 = diff_x0 * diff_y1 * src_value1; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value0; sum += value1; input = (float4*)(src + src_y1 * src_stride); src_value0 = input[src_x0]; src_value1 = input[src_x1]; value0 = diff_x1 * diff_y0 * src_value0; value1 = diff_x0 * diff_y0 * src_value1; sum += value0; sum += value1; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } else { } } template <typename T, typename Tn> __global__ void warpPerspectiveNPKernel(const T* src, int src_rows, int src_cols, int channels, int src_stride, float coeffe0, float coeffe1, float coeffe2, float coeffe3, float coeffe4, float coeffe5, float coeffe6, float coeffe7, float coeffe8, T* dst, int dst_rows, int dst_cols, int dst_stride, BorderType border_type, Tn border_value) { int element_x, element_y; if (sizeof(T) == 1) { element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x; element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y; } else { element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x; element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; } if (element_y >= dst_rows || element_x >= dst_cols) { return; } float src_x_float = coeffe0 * element_x + coeffe1 * element_y + coeffe2; float src_y_float = coeffe3 * element_x + coeffe4 * element_y + coeffe5; float weight = coeffe6 * element_x + coeffe7 * element_y + coeffe8; src_x_float /= weight; src_y_float /= weight; int src_x = src_x_float; int src_y = src_y_float; if (border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_TRANSPARENT) { Tn* output = 
(Tn*)(dst + element_y * dst_stride); if (src_x >= 0 && src_x < src_cols && src_y >= 0 && src_y < src_rows) { Tn* input = (Tn*)(src + src_y * src_stride); output[element_x] = input[src_x]; } else { output[element_x] = border_value; } } else if (border_type == BORDER_TYPE_REPLICATE) { src_x = clip(src_x, 0, src_cols - 1); src_y = clip(src_y, 0, src_rows - 1); Tn* input = (Tn*)(src + src_y * src_stride); Tn* output = (Tn*)(dst + element_y * dst_stride); output[element_x] = input[src_x]; } else { } } RetCode warpPerspective(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, const float* affine_matrix, uchar* dst, int dst_rows, int dst_cols, int dst_stride, InterpolationType interpolation, BorderType border_type, uchar border_value, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src != dst); PPL_ASSERT(affine_matrix != nullptr); PPL_ASSERT(src_rows >= 1 && src_cols >= 1); PPL_ASSERT(dst_rows >= 1 && dst_cols >= 1); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); PPL_ASSERT(interpolation == INTERPOLATION_TYPE_LINEAR || interpolation == INTERPOLATION_TYPE_NEAREST_POINT); PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_REPLICATE || border_type == BORDER_TYPE_TRANSPARENT); dim3 block, grid; block.x = kBlockDimX0; block.y = kBlockDimY0; grid.x = divideUp(dst_cols, kBlockDimX0, kBlockShiftX0); grid.y = divideUp(dst_rows, kBlockDimY0, kBlockShiftY0); if (interpolation == INTERPOLATION_TYPE_LINEAR) { warpPerspectiveLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value); } else if (interpolation == INTERPOLATION_TYPE_NEAREST_POINT) { if (channels == 1) { warpPerspectiveNPKernel<uchar, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value); } else if (channels == 3) { uchar3 border_value1 = make_uchar3(border_value, border_value, border_value); warpPerspectiveNPKernel<uchar, uchar3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value1); } else { uchar4 border_value1 = make_uchar4(border_value, border_value, border_value, border_value); warpPerspectiveNPKernel<uchar, uchar4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value1); } } else { } cudaError_t code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode warpPerspective(const float* src, int src_rows, int src_cols, int channels, int src_stride, 
const float* affine_matrix, float* dst, int dst_rows, int dst_cols, int dst_stride, InterpolationType interpolation, BorderType border_type, float border_value, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src != dst); PPL_ASSERT(affine_matrix != nullptr); PPL_ASSERT(src_rows >= 1 && src_cols >= 1); PPL_ASSERT(dst_rows >= 1 && dst_cols >= 1); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); PPL_ASSERT(interpolation == INTERPOLATION_TYPE_LINEAR || interpolation == INTERPOLATION_TYPE_NEAREST_POINT); PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_REPLICATE || border_type == BORDER_TYPE_TRANSPARENT); dim3 block, grid; block.x = kBlockDimX1; block.y = kBlockDimY1; grid.x = divideUp(dst_cols, kBlockDimX1, kBlockShiftX1); grid.y = divideUp(dst_rows, kBlockDimY1, kBlockShiftY1); if (interpolation == INTERPOLATION_TYPE_LINEAR) { warpPerspectiveLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value); } else if (interpolation == INTERPOLATION_TYPE_NEAREST_POINT) { if (channels == 1) { warpPerspectiveNPKernel<float, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value); } else if (channels == 3) { float3 border_value1 = make_float3(border_value, border_value, border_value); warpPerspectiveNPKernel<float, float3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value1); } else { float4 border_value1 = make_float4(border_value, border_value, border_value, border_value); warpPerspectiveNPKernel<float, float4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, affine_matrix[0], affine_matrix[1], affine_matrix[2], affine_matrix[3], affine_matrix[4], affine_matrix[5], affine_matrix[6], affine_matrix[7], affine_matrix[8], dst, dst_rows, dst_cols, dst_stride, border_type, border_value1); } } else { } cudaError_t code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode WarpPerspective<uchar, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData, const float* affineMatrix, InterpolationType interpolation, BorderType borderType, uchar borderValue) { RetCode code = warpPerspective(inData, inHeight, inWidth, 1, inWidthStride, affineMatrix, outData, outHeight, outWidth, outWidthStride, interpolation, borderType, borderValue, stream); return code; } template <> RetCode WarpPerspective<uchar, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData, 
const float* affineMatrix, InterpolationType interpolation, BorderType borderType, uchar borderValue) { RetCode code = warpPerspective(inData, inHeight, inWidth, 3, inWidthStride, affineMatrix, outData, outHeight, outWidth, outWidthStride, interpolation, borderType, borderValue, stream); return code; } template <> RetCode WarpPerspective<uchar, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData, const float* affineMatrix, InterpolationType interpolation, BorderType borderType, uchar borderValue) { RetCode code = warpPerspective(inData, inHeight, inWidth, 4, inWidthStride, affineMatrix, outData, outHeight, outWidth, outWidthStride, interpolation, borderType, borderValue, stream); return code; } template <> RetCode WarpPerspective<float, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData, const float* affineMatrix, InterpolationType interpolation, BorderType borderType, float borderValue) { RetCode code = warpPerspective(inData, inHeight, inWidth, 1, inWidthStride, affineMatrix, outData, outHeight, outWidth, outWidthStride, interpolation, borderType, borderValue, stream); return code; } template <> RetCode WarpPerspective<float, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData, const float* affineMatrix, InterpolationType interpolation, BorderType borderType, float borderValue) { RetCode code = warpPerspective(inData, inHeight, inWidth, 3, inWidthStride, affineMatrix, outData, outHeight, outWidth, outWidthStride, interpolation, borderType, borderValue, stream); return code; } template <> RetCode WarpPerspective<float, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData, const float* affineMatrix, InterpolationType interpolation, BorderType borderType, float borderValue) { RetCode code = warpPerspective(inData, inHeight, inWidth, 4, inWidthStride, affineMatrix, outData, outHeight, outWidth, outWidthStride, interpolation, borderType, borderValue, stream); return code; } } // namespace cuda } // namespace cv } // namespace ppl
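// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the ppl.cv API): the per-pixel
// work the linear kernels above perform for a single-channel float image with
// BORDER_TYPE_REPLICATE -- apply the 3x3 matrix with a perspective divide,
// then blend the four neighbours with the same bilinear weights. The function
// name and the "stride measured in floats" convention are assumptions made
// for this example.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <cstdio>

static float warp_perspective_bilinear_replicate(
    const float* src, int src_rows, int src_cols, int src_stride /* floats */,
    const float m[9], int dst_x, int dst_y)
{
  // Map the destination pixel back into the source image (perspective divide).
  const float w  =  m[6] * dst_x + m[7] * dst_y + m[8];
  const float sx = (m[0] * dst_x + m[1] * dst_y + m[2]) / w;
  const float sy = (m[3] * dst_x + m[4] * dst_y + m[5]) / w;
  const int x0 = static_cast<int>(std::floor(sx));
  const int y0 = static_cast<int>(std::floor(sy));
  const float dx = sx - x0, dy = sy - y0;
  auto clampi = [](int v, int lo, int hi) { return std::min(std::max(v, lo), hi); };
  const int cx0 = clampi(x0, 0, src_cols - 1), cx1 = clampi(x0 + 1, 0, src_cols - 1);
  const int cy0 = clampi(y0, 0, src_rows - 1), cy1 = clampi(y0 + 1, 0, src_rows - 1);
  // Same four weights as the kernels above: (x1-x)(y1-y), (x-x0)(y1-y), ...
  return (1.f - dx) * (1.f - dy) * src[cy0 * src_stride + cx0] +
         dx         * (1.f - dy) * src[cy0 * src_stride + cx1] +
         (1.f - dx) * dy         * src[cy1 * src_stride + cx0] +
         dx         * dy         * src[cy1 * src_stride + cx1];
}

int main()
{
  const float img[2 * 2]    = { 0.f, 1.f, 2.f, 3.f };           // 2x2 source, stride = 2
  const float identity[9]   = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };    // no warp
  std::printf("%f\n", warp_perspective_bilinear_replicate(img, 2, 2, 2, identity, 1, 1));
  return 0;
}
// For BORDER_TYPE_CONSTANT the kernels instead substitute border_value for any
// neighbour that falls outside the image rather than clamping its coordinates.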
template <class Vector> void TestScatterSimple(void) { Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 4); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestScatterSimple); template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator> void scatter(my_system &system, InputIterator1, InputIterator1, InputIterator2, RandomAccessIterator) { system.validate_dispatch(); } void TestScatterDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::scatter(sys, vec.begin(), vec.begin(), vec.begin(), vec.begin()); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestScatterDispatchExplicit); template<typename InputIterator1, typename InputIterator2, typename RandomAccessIterator> void scatter(my_tag, InputIterator1, InputIterator1, InputIterator2, RandomAccessIterator output) { *output = 13; } void TestScatterDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::scatter(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestScatterDispatchImplicit); template <typename T> void TestScatter(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), d_output.begin()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatter); template <typename T> void TestScatterToDiscardIterator(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), thrust::make_discard_iterator()); thrust::scatter(d_input.begin(), d_input.end(), d_map.begin(), thrust::make_discard_iterator()); // there's nothing to check -- just make sure it compiles } DECLARE_VARIABLE_UNITTEST(TestScatterToDiscardIterator); template <class Vector> void TestScatterIfSimple(void) { Vector flg(5); // predicate array Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; 
dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; thrust::scatter_if(src.begin(), src.end(), map.begin(), flg.begin(), dst.begin()); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 0); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestScatterIfSimple); template<typename InputIterator1, typename InputIterator2, typename InputIterator3, typename RandomAccessIterator> void scatter_if(my_system &system, InputIterator1, InputIterator1, InputIterator2, InputIterator3, RandomAccessIterator) { system.validate_dispatch(); } void TestScatterIfDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::scatter_if(sys, vec.begin(), vec.begin(), vec.begin(), vec.begin(), vec.begin()); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestScatterIfDispatchExplicit); template<typename InputIterator1, typename InputIterator2, typename InputIterator3, typename RandomAccessIterator> void scatter_if(my_tag, InputIterator1, InputIterator1, InputIterator2, InputIterator3, RandomAccessIterator output) { *output = 13; } void TestScatterIfDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::scatter_if(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.begin())); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestScatterIfDispatchImplicit); template <typename T> class is_even_scatter_if { public: __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template <typename T> void TestScatterIf(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>()); ASSERT_EQUAL(h_output, d_output); } DECLARE_VARIABLE_UNITTEST(TestScatterIf); template <typename T> void TestScatterIfToDiscardIterator(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input(n, (T) 1); thrust::device_vector<T> d_input(n, (T) 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) h_map[i] = h_map[i] % output_size; thrust::device_vector<unsigned int> d_map = h_map; thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), thrust::make_discard_iterator(), is_even_scatter_if<unsigned int>()); thrust::scatter_if(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), thrust::make_discard_iterator(), is_even_scatter_if<unsigned int>()); } DECLARE_VARIABLE_UNITTEST(TestScatterIfToDiscardIterator); template <typename Vector> void TestScatterCountingIterator(void) { Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); 
Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(source.begin(), source.end(), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestScatterCountingIterator); template <typename Vector> void TestScatterIfCountingIterator(void) { Vector source(10); thrust::sequence(source.begin(), source.end(), 0); Vector map(10); thrust::sequence(map.begin(), map.end(), 0); Vector stencil(10, 1); Vector output(10); // source has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), map.begin(), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // map has any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(source.begin(), source.end(), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); // source and map have any_system_tag thrust::fill(output.begin(), output.end(), 0); thrust::scatter_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10), thrust::make_counting_iterator(0), stencil.begin(), output.begin()); ASSERT_EQUAL(output, map); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestScatterIfCountingIterator);
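// ---------------------------------------------------------------------------
// Minimal usage sketch (assumes nvcc + Thrust; not part of the test suite):
// the semantics the tests above exercise. thrust::scatter writes src[i] to
// dst[map[i]]; thrust::scatter_if does the same only where the stencil element
// converts to true (or where a supplied predicate holds).
// ---------------------------------------------------------------------------
#include <thrust/device_vector.h>
#include <thrust/scatter.h>
#include <cstdio>

int main()
{
  const int h_src[5] = {0, 1, 2, 3, 4};
  const int h_map[5] = {6, 3, 1, 7, 2};
  const int h_flg[5] = {0, 1, 0, 1, 0};
  thrust::device_vector<int> src(h_src, h_src + 5);
  thrust::device_vector<int> map(h_map, h_map + 5);
  thrust::device_vector<int> flg(h_flg, h_flg + 5);
  thrust::device_vector<int> dst(8, 0);

  thrust::scatter(src.begin(), src.end(), map.begin(), dst.begin());
  // dst is now {0, 2, 4, 1, 0, 0, 0, 3}, matching TestScatterSimple.

  thrust::device_vector<int> dst_if(8, 0);
  thrust::scatter_if(src.begin(), src.end(), map.begin(), flg.begin(),
                     dst_if.begin());
  // Only elements with a non-zero stencil are written, so
  // dst_if = {0, 0, 0, 1, 0, 0, 0, 3}, matching TestScatterIfSimple.

  for (int i = 0; i < 8; ++i) std::printf("%d ", static_cast<int>(dst_if[i]));
  std::printf("\n");
  return 0;
}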
#include <dnn-utility.h> using namespace std; CURAND_STATE::CURAND_STATE(unsigned seed, int N): _states(NULL) { cudaMalloc ( &_states, N * N * sizeof( curandState ) ); setupCuRandState <<< 1, N * N >>> ( _states, seed ); CCE(cudaDeviceSynchronize()); } curandState* CURAND_STATE::get() const { return _states; } CURAND_STATE::~CURAND_STATE() { cudaFree(_states); } __global__ void setupCuRandState( curandState * state, unsigned long seed ) { int x = blockIdx.x*blockDim.x + threadIdx.x; curand_init ( seed, x, 0, &state[x] ); } inline __device__ void get_curand_normal(float& x, curandState* state) { x = curand_normal(state); } inline __device__ void get_curand_uniform(float& x, curandState* state) { x = curand_uniform(state); } inline __device__ void sample_gaussian(float& x, curandState* state) { x += curand_normal(state); } inline __device__ void sample_bernoulli(float& x, curandState* state) { x = (float) (x >= curand_uniform(state)); } template <Operation op> __global__ void element_wise_curand_kernel(float* const data, curandState* globalState, unsigned int rows, unsigned int cols) { int tx = threadIdx.x; int ty = threadIdx.y; // Matrix index int x = blockIdx.x*blockDim.x + tx; int y = blockIdx.y*blockDim.y + ty; if (x >= cols || y >= rows) return; int i = x * rows + y; int j = tx * blockDim.y + ty; op(data[i], globalState + j); __syncthreads(); } void sample(mat &prob, UNIT_TYPE type) { static CURAND_STATE state; ALLOCATE_GRIDS_AND_THREADS(prob.getCols(), prob.getRows()); switch (type) { case GAUSSIAN: element_wise_curand_kernel<sample_gaussian><<< grids, threads >>>(prob.getData(), state.get(), prob.getRows(), prob.getCols()); break; case BERNOULLI: element_wise_curand_kernel<sample_bernoulli><<< grids, threads >>>(prob.getData(), state.get(), prob.getRows(), prob.getCols()); break; } CCE(cudaDeviceSynchronize()); } mat randn(int m, int n) { #ifdef DEBUG // Use ext::randn (which is set to seed 0) to debug. mat x(m, n); ext::randn(x); return x; #else static CURAND_STATE state; mat x(m, n); ALLOCATE_GRIDS_AND_THREADS(n, m); element_wise_curand_kernel<get_curand_normal><<<grids, threads>>>(x.getData(), state.get(), m, n); CCE(cudaDeviceSynchronize()); return x; #endif } mat rand(int m, int n) { #ifdef DEBUG // Use ext::rand (which is set to seed 0) to debug. 
mat x(m, n); ext::rand(x); return x; #else static CURAND_STATE state; mat x(m, n); ALLOCATE_GRIDS_AND_THREADS(n, m); element_wise_curand_kernel<get_curand_uniform><<<grids, threads>>>(x.getData(), state.get(), m, n); CCE(cudaDeviceSynchronize()); return x; #endif } map<int, int> getLabelMapping(const hmat& labels) { map<int, int> classes; for (size_t i=0; i<labels.size(); ++i) classes[(int) labels[i]] = 1; int counter = 0; map<int, int>::iterator itr = classes.begin(); for (; itr != classes.end(); ++itr) itr->second = ++counter; return classes; } namespace ext { void rescale(mat& data, float lower, float upper) { float min = ext::min(data); float max = ext::max(data); float ratio = (upper - lower) / (max - min); data = (data - min) * ratio + lower; } float max(const mat& v) { thrust::device_ptr<float> vPtr(v.getData()); thrust::device_ptr<float> maxPtr = thrust::max_element(vPtr, vPtr + v.size()); thrust::host_vector<float> hMaxPtr(maxPtr, maxPtr + 1); return hMaxPtr[0]; } float min(const mat& v) { thrust::device_ptr<float> vPtr(v.getData()); thrust::device_ptr<float> minPtr = thrust::min_element(vPtr, vPtr + v.size()); thrust::host_vector<float> hMaxPtr(minPtr, minPtr + 1); return hMaxPtr[0]; } float max(const hmat& v) { float* m = thrust::max_element(v.getData(), v.getData() + v.size()); return *m; } float min(const hmat& v) { float* m = thrust::min_element(v.getData(), v.getData() + v.size()); return *m; } }; __global__ void compute_error_kernel(float* error, float* const target, float* const output, unsigned int rows, unsigned int cols) { int tx = threadIdx.x; int ty = threadIdx.y; // Matrix index int x = blockIdx.x*blockDim.x + tx; int y = blockIdx.y*blockDim.y + ty; if (x >= rows || y >= cols) return; int i = y * rows + x; // target[y] need to be 0-based error[i] = output[i] - (float) (target[y] == x); __syncthreads(); } mat getError(const mat& target, const mat& output, ERROR_MEASURE errorMeasure) { mat error(output.getRows(), output.getCols()); switch (errorMeasure) { case L2ERROR: // FIXME // error = ~output - target; // error = ~error; break; case CROSS_ENTROPY: ALLOCATE_GRIDS_AND_THREADS(error.getRows(), error.getCols()); compute_error_kernel<<< grids, threads >>>( error.getData(), target.getData(), output.getData(), error.getRows(), error.getCols()); CCE(cudaDeviceSynchronize()); break; } return error; } mat posteriorProb2Label(const mat& prob) { assert(prob.getCols() > 1); size_t rows = prob.getRows(), cols = prob.getCols(); hmat h_prob(prob); hmat h_labels(1, cols); for (size_t j=0; j<cols; ++j) { float max = -1e10; size_t maxIdx = 0; for (size_t i=0; i<rows; ++i) { if (h_prob(i, j) > max) { max = h_prob(i, j); maxIdx = i; } } h_labels[j] = maxIdx; } return h_labels; } vector<float> copyToHost(const mat& m) { vector<float> hm(m.size()); thrust::device_ptr<float> dPtr(m.getData()); thrust::copy(dPtr, dPtr + m.size(), hm.begin()); return hm; } size_t countDifference(const mat& m1, const mat& m2) { assert(m1.size() == m2.size()); size_t L = m1.size(); thrust::device_ptr<float> ptr1(m1.getData()); thrust::device_ptr<float> ptr2(m2.getData()); size_t nDiff = thrust::inner_product(ptr1, ptr1 + L, ptr2, 0.0, thrust::plus<float>(), thrust::not_equal_to<float>()); return nDiff; } size_t zeroOneError(const mat& prob, const mat& label) { assert(prob.getCols() == label.getRows()); assert(label.getCols() == 1); mat L = posteriorProb2Label(prob); return countDifference(L, label); } template <typename T> device_matrix<T> MaxPerRow(const device_matrix<T>& A) { device_matrix<T> At(~A); 
device_matrix<T> rmax(At.getCols(), 1); // allocate storage for per-row results and indices thrust::device_vector<T> row_indices(At.getCols()); // Originally, it compute row sums (thrust::plus) by summing values with equal // row indices. I replace thrust::plus with thrust::maximum and get rowmax thrust::reduce_by_key (thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(At.getRows())), thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(At.getRows())) + A.size(), thrust::device_ptr<T>(At.getData()), row_indices.begin(), thrust::device_ptr<T>(rmax.getData()), thrust::equal_to<T>(), thrust::maximum<T>()); return rmax; } template <typename T> __global__ void substract_max_per_row_kernel(T* const A, T* const rmax, unsigned int rows, unsigned int cols) { // Matrix index int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= cols || y >= rows) return; A[x * rows + y] -= rmax[y]; } template <typename T> void SubstractMaxPerRow(device_matrix<T>& x) { device_matrix<T> rmax = MaxPerRow(x); ALLOCATE_GRIDS_AND_THREADS(x.getCols(), x.getRows()); substract_max_per_row_kernel<float><<< grids, threads >>> (x.getData(), rmax.getData(), x.getRows(), x.getCols()); CCE(cudaDeviceSynchronize()); } template <typename T> device_matrix<T> operator & (const device_matrix<T>& A, const device_matrix<T>& B) { assert(A.getRows() == B.getRows() && A.getCols() == B.getCols()); device_matrix<T> C(A.getRows(), A.getCols()); thrust::device_ptr<T> aPtr(A.getData()); thrust::device_ptr<T> bPtr(B.getData()); thrust::device_ptr<T> cPtr(C.getData()); thrust::transform(aPtr, aPtr + A.size(), bPtr, cPtr, thrust::multiplies<T>()); return C; } template <typename T> device_matrix<T>& operator &= (device_matrix<T>& A, const device_matrix<T>& B) { A = A & B; return A; } template <typename T> device_matrix<T> exp(const device_matrix<T>& x) { return transform(x, func::exp<T>()); } template <typename T> device_matrix<T> log(const device_matrix<T>& x) { return transform(x, func::log<T>()); } template <typename T> device_matrix<T> log1pexp(const device_matrix<T>& x) { return transform(x, func::log_of_one_plus_exp<T>()); } template <typename T> device_matrix<T> sigmoid(const device_matrix<T>& x) { return transform(x, func::sigmoid<T>()); } template <typename T> device_matrix<T> d_sigmoid(const device_matrix<T>& x) { return transform(x, func::d_sigmoid<T>()); } template <typename T> device_matrix<T> tanh(const device_matrix<T>& x) { return transform(x, func::hyperbolic_tangent<T>()); } template <typename T> device_matrix<T> d_tanh(const device_matrix<T>& x) { return transform(x, func::d_hyperbolic_tangent<T>()); } template <typename T> device_matrix<T> relu(const device_matrix<T>& x) { return transform(x, func::max<T>(0.0f)); } template <typename T> device_matrix<T> is_greater(const device_matrix<T>& x, const T value) { return transform(x, func::greater<T>(value)); } template <typename T> device_matrix<T> softmax(const device_matrix<T>& x_t) { mat x(~x_t); x.resize(x.getRows(), x.getCols() - 1); SubstractMaxPerRow(x); x = exp(x); thrust::device_ptr<T> xPtr(x.getData()); mat sum = x * mat(x.getCols(), x.getCols(), 1); mat y(x.getRows(), x.getCols() + 1); thrust::transform(xPtr, xPtr + x.size(), thrust::device_ptr<T>(sum.getData()), thrust::device_ptr<T>(y.getData()), thrust::divides<T>()); return ~y; } /* ! \brief Sum all the elements in a matrix. 
* \fn sum_all(const device_matrix<T>& x) * \param x matrix x to be sum * return the result in host memory. */ template <typename T> T sum_all(const device_matrix<T>& x) { /*int r = x.getRows(), c = x.getCols(); mat d_s = mat(1, r, 1) * x * mat(c, 1, 1); return hmat(d_s)[0];*/ thrust::device_ptr<T> ptr(x.getData()); return thrust::reduce(ptr, ptr + x.size()); } /* \brief Explicit instantiation definition of template functions */ #define register_device_matrix_utility(T) \ template device_matrix<T> operator &<T> (const device_matrix<T>& A, const device_matrix<T>& B); \ template device_matrix<T>& operator &=<T> (device_matrix<T>& A, const device_matrix<T>& B); \ template device_matrix<T> add_bias<T>(const device_matrix<T>& A, const T value, bool add_new_column); \ template device_matrix<T> exp<T>(const device_matrix<T>& x); \ template device_matrix<T> log<T>(const device_matrix<T>& x); \ template device_matrix<T> log1pexp<T>(const device_matrix<T>& x); \ template device_matrix<T> sigmoid<T>(const device_matrix<T>& x); \ template device_matrix<T> d_sigmoid<T>(const device_matrix<T>& x); \ template device_matrix<T> tanh<T>(const device_matrix<T>& x); \ template device_matrix<T> d_tanh<T>(const device_matrix<T>& x); \ template device_matrix<T> softmax<T>(const device_matrix<T>& x); \ template device_matrix<T> relu(const device_matrix<T>& x); \ template device_matrix<T> is_greater(const device_matrix<T>& x, const T value); \ template device_matrix<T> MaxPerRow<T>(const device_matrix<T>& A); \ template T sum_all<T>(const device_matrix<T>& A); \ template void SubstractMaxPerRow<T>(device_matrix<T>& x); register_device_matrix_utility(float);
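MaxPerRow above adapts the classic Thrust row-sums recipe: key every element by its row index and swap thrust::plus for thrust::maximum in reduce_by_key. The self-contained sketch below shows that pattern in isolation; row_index is an illustrative stand-in for the library's linear_index_to_row_index functor, and the toy matrix is stored row-major so that equal keys are adjacent, which reduce_by_key requires (device_matrix appears to be column-major, which is why MaxPerRow transposes first).

#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <cstdio>

// Maps a linear index of a row-major matrix to its row number.
struct row_index {
    int cols;
    __host__ __device__ int operator()(int i) const { return i / cols; }
};

int main() {
    const int rows = 3, cols = 4;
    const float h[rows * cols] = {
        1, 0, 4, 6,   // row 0 -> max 6
        5, 7, 1, 2,   // row 1 -> max 7
        2, 3, 9, 8    // row 2 -> max 9
    };
    thrust::device_vector<float> d(h, h + rows * cols);

    thrust::device_vector<int>   out_keys(rows);
    thrust::device_vector<float> row_max(rows);

    // Key every element by its row; reducing each key segment with
    // thrust::maximum yields one maximum per row.
    thrust::reduce_by_key(
        thrust::make_transform_iterator(thrust::make_counting_iterator(0),
                                        row_index{cols}),
        thrust::make_transform_iterator(thrust::make_counting_iterator(0),
                                        row_index{cols}) + rows * cols,
        d.begin(),
        out_keys.begin(), row_max.begin(),
        thrust::equal_to<int>(), thrust::maximum<float>());

    for (int r = 0; r < rows; ++r)
        std::printf("row %d max = %g\n", r, (float)row_max[r]);
    return 0;
}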
the_stack
#include "common.h" #include "polish_E.h" // Some type definitions -- could get from common.h // typedef double Matches_5[5][3]; // typedef double Ematrix[3][3]; //---------------------------------------------------------------------------- // // This relies on a parametrization for an Essential Matrix as given // by the decomposition: // // Rx . Ry . Rz . E . Ru' . Rv' = I2 // // where Ru and Rv are rotations about the x and y axes. // // From this it follows that // // E = Rz' . Ry' . Rx' . I2 . Rv . Ru // // We also define // // U = Rz' . Ry' . Rx' // V = Ru' . Rv' // // Then E = U . I2 . V' // // The essential matrix satisfies q' . E . p = 0. So // // q' . U . I2 . V' . p = 0. // // We define pp = V' . p and qq = U' . q. Then // // qq' . I2 . pp = 0 // // More generally, if we define qq and pp as above, and find E0 such // that // qq' . E0 . pp = 0, // // then // // q' . (U . E0 . V) . p = 0 // // Hence, if // // E0 = U0 . I2 . V0' // = Rz' . Ry' . Rx' . I2 . Rv . Ru // // then, the update step is to replace // // U -> U . Rz'. Ry' . Rx' // V -> V . Ru' . Rv' // // We find that if qq' . I2 . pp = delta // then, we want to solve // // qq' . Rz' . Ry' . Rx' . I2 . Rv . Ru . pp = 0 // // To do this, we take derivatives about the origin (z = y = x = u = v = 0) // // We have to solve J . [dx, dy, dz, dv, du] = -delta // // Canonical order for the 5 rotations is x, y, z, v, u, which is the // order that they are applied to E in extracting the parameters. // Namely E = Rz . Ry . Rx . I2 . Rv' . Ru'. // //---------------------------------------------------------------------------- __host__ __device__ void Eprod (Ematrix U, Ematrix V, Ematrix E) { // Forms E = U . I2 . V' for (int i=0; i<3; i++) for (int j=0; j<3; j++) E[i][j] = U[i][0]*V[j][0] + U[i][1]*V[j][1]; } __host__ __device__ void printE (Ematrix E) { for (int i=0; i<3; i++) { for (int j=0; j<3; j++) printf (" %20.16f", E[i][j]); printf ("\n"); } printf ("\n"); } __host__ __device__ void printM (Matches_5 E) { for (int i=0; i<5; i++) { for (int j=0; j<3; j++) printf (" %20.16f", E[i][j]); printf ("\n"); } printf ("\n"); } __host__ __device__ void printA (double A[5][5]) { for (int i=0; i<5; i++) { for (int j=0; j<5; j++) printf (" %8.4f", A[i][j]); printf ("\n"); } printf ("\n"); } __host__ __device__ void printAb (double A[5][5], double b[5]) { for (int i=0; i<5; i++) { for (int j=0; j<5; j++) printf (" %8.4f", A[i][j]); printf (" | %8.4f\n", b[i]); } printf ("\n"); } __host__ __device__ inline void Gright (Ematrix E, int row1, int row2, double angle) { // Applies a Givens rotation to a matrix from the right // Multiplies A on the right by G'; E -> E . G' // Get cos and sin of angle double c = cos(angle); double s = sin(angle); // Now, carry out for (int i=0; i<3; i++) { double temp = E[i][row1] * c - E[i][row2] * s; E[i][row2] = E[i][row1] * s + E[i][row2] * c; E[i][row1] = temp; } } __host__ __device__ void Edecomp(Ematrix E, Ematrix U, Ematrix V) { // // Given an essential matrix E, // computes rotation matrices U and V such that E = U . I2 . 
V' // where I2 = diag(1, 1, 0) // // Parameters of the matrix double cx, cy, cz, cu, cv, sx, sy, sz, su, sv, temp, scale; // printE(E); //---------------- // Gleft (0, 1, 0) cz = E[0][0]; sz = -E[1][0]; // Element to be eliminated scale = sqrt(cz*cz + sz*sz); cz /= scale; sz /= scale; // Now, carry out for (int j=0; j<3; j++) { temp = E[0][j] * cz - E[1][j] * sz; E[1][j] = E[0][j] * sz + E[1][j] * cz; E[0][j] = temp; } // printE(E); //---------------- // Gleft (0, 2, 0) cy = E[0][0]; sy = -E[2][0]; // Element to be eliminated scale = sqrt(cy*cy + sy*sy); cy /= scale; sy /= scale; // Now, carry out for (int j=0; j<3; j++) { temp = E[0][j] * cy - E[2][j] * sy; E[2][j] = E[0][j] * sy + E[2][j] * cy; E[0][j] = temp; } // printE(E); //---------------- // Gleft (1, 2, 1) cx = E[1][1]; sx = -E[2][1]; // Element to be eliminated scale = sqrt(cx*cx + sx*sx); cx /= scale; sx /= scale; // Now, carry out -- no need to compute row 2 for (int j=1; j<3; j++) E[1][j] = E[1][j] * cx - E[2][j] * sx; // printE(E); // Compute the right matrix U[0][0] = cy*cz; U[0][1] = -cz*sx*sy + cx*sz; U[0][2] = cx*cz*sy + sx*sz; U[1][0] = -cy*sz; U[1][1] = cx*cz + sx*sy*sz; U[1][2] = cz*sx - cx*sy*sz; U[2][0] = -sy; U[2][1] = -cy*sx; U[2][2] = cx*cy; //----------------------- // Now, column operations // Gright(1, 2, 1) cu = E[1][1]; su = -E[1][2]; // Element to be eliminated scale = sqrt(cu*cu + su*su); cu /= scale; su /= scale; // Now, carry out E[0][2] = su * E[0][1] + cu * E[0][2]; // printE(E); // Gright(0, 2, 0) cv = E[0][0]; sv = -E[0][2]; // Element to be eliminated scale = sqrt(cv*cv + sv*sv); cv /= scale; sv /= scale; // Compute the right matrix V[0][0] = cv; V[0][1] = 0; V[0][2] = sv; V[1][0] = -su*sv; V[1][1] = cu; V[1][2] = cv*su; V[2][0] = -cu*sv; V[2][1] = -su; V[2][2] = cu*cv; // printE(E); // printE(U); // printE(V); } __host__ __device__ void Edecomp(Ematrix E, double parameters[5]) { // // Given an essential matrix E, // computes rotation matrices U and V such that E = U . I2 . 
V' // where I2 = diag(1, 1, 0) // // Parameters of the matrix double cx, cy, cz, cu, cv, sx, sy, sz, su, sv, temp, scale; // printE(E); //---------------- // Gleft (0, 1, 0) cz = E[0][0]; sz = -E[1][0]; // Element to be eliminated scale = sqrt(cz*cz + sz*sz); cz /= scale; sz /= scale; // Now, carry out for (int j=0; j<3; j++) { temp = E[0][j] * cz - E[1][j] * sz; E[1][j] = E[0][j] * sz + E[1][j] * cz; E[0][j] = temp; } // printE(E); //---------------- // Gleft (0, 2, 0) cy = E[0][0]; sy = -E[2][0]; // Element to be eliminated scale = sqrt(cy*cy + sy*sy); cy /= scale; sy /= scale; // Now, carry out for (int j=0; j<3; j++) { temp = E[0][j] * cy - E[2][j] * sy; E[2][j] = E[0][j] * sy + E[2][j] * cy; E[0][j] = temp; } // printE(E); //---------------- // Gleft (1, 2, 1) cx = E[1][1]; sx = -E[2][1]; // Element to be eliminated scale = sqrt(cx*cx + sx*sx); cx /= scale; sx /= scale; // Now, carry out -- no need to compute row 2 for (int j=1; j<3; j++) E[1][j] = E[1][j] * cx - E[2][j] * sx; // printE(E); //----------------------- // Now, column operations // Gright(1, 2, 1) cu = E[1][1]; su = -E[1][2]; // Element to be eliminated scale = sqrt(cu*cu + su*su); cu /= scale; su /= scale; // Now, carry out E[0][2] = su * E[0][1] + cu * E[0][2]; // printE(E); // Gright(0, 2, 0) cv = E[0][0]; sv = -E[0][2]; // Element to be eliminated scale = sqrt(cv*cv + sv*sv); cv /= scale; sv /= scale; // printE(E); // Fill in parameters parameters[0] = atan2(sx, cx); parameters[1] = atan2(sy, cy); parameters[2] = atan2(sz, cz); parameters[3] = atan2(su, cu); parameters[4] = atan2(sv, cv); } __host__ __device__ bool solve_5x5 (double A[5][5], double b[5]) { // First Gaussian-Jordan elimination to put in echelon form // printf ("In solve_5x5: A\n"); // printAb(A, b); // For debugging double Aorig[5][5]; memcpy (&(Aorig[0][0]), A, sizeof(Aorig)); double borig[5]; memcpy (&(borig[0]), &(b[0]), sizeof(borig)); // printf ("b\n"); // printf ("%8.4f %8.4f %8.4f %8.4f %8.4f\n",b[0],b[1],b[2],b[3],b[4]); const int nrows = 5, ncols = 5; for (int row=0; row<5; row++) { // Find the maximum element in the column const int col = row; double maxval = fabs(A[row][col]); int maxrow = row; // Find the maximum value in the column for (int i=row+1; i<nrows; i++) { double val = fabs(A[i][col]); if (val > maxval) { maxval = val; maxrow = i; } } // printf ("In solve_5x5: row %d\n", row); // Pivot if (row != maxrow) { // Swap rows row and maxrow double t; for (int j=col; j<ncols; j++) { t = A[row][j]; A[row][j] = A[maxrow][j]; A[maxrow][j] = t; } // Swap elements in the vector b t = b[row]; b[row] = b[maxrow]; b[maxrow] = t; } // printf ("In solve_5x5: after pivot\n"); // printAb(A, b); // Eliminate for (int i=row+1; i<nrows; i++) { double fac = A[i][col] / A[row][col]; for (int j=row+1; j<ncols; j++) A[i][j] -= fac * A[row][j]; b[i] -= fac * b[row]; } // printf ("In solve_5x5: after elim\n"); // printAb(A, b); } // printf ("In solve_5x5: B - in row-echelon-form\n"); // printAb(A, b); // Now it is in row-echelon form, so do back-factorization for (int i=nrows-1; i>=0; i--) { // Substitute previous values for (int j=i+1; j<ncols; j++) b[i] -= A[i][j] * b[j]; // Update the vector b b[i] /= A[i][i]; } // Check that all numbers are valid bool good = true; for (int i=0; i<5; i++) if (0) //! 
isfinite(b[i])) { b[i] = 0.0; good = false; } #ifdef RH_DEBUG // printf ("In solve_5x5: finished - Now test\n"); for (int i=0; i<5; i++) { double val = 0.0; for (int j=0; j<5; j++) val += Aorig[i][j] * b[j]; printf ("%13.4e %13.4e %13.4e %13.4e\n", val, borig[i], val - borig[i], b[i]); } #endif // Return success or not return good; } __host__ __device__ void update (Ematrix U, Ematrix V, double e[5]) { // Updates U to the new value // U -> U . Rz'. Ry' . Rx' // V -> V . Ru' . Rv' // Order of params is x, y, z, v, u // Gright(U, 0, 1, e[2]); // z // Gright(U, 0, 2, e[1]); // y // Gright(U, 1, 2, e[0]); // x // // // V -> V . Rv' . Ru' // Gright(V, 1, 2, e[4]); // u - wrong index! // Gright(V, 0, 2, e[3]); // v // DJC: Bug fix 20190409 // U <- U * Rz * Ry * Rx Gright(U, 0, 1, e[2]); // z Gright(U, 0, 2, e[1]); // y Gright(U, 1, 2, e[0]); // x // V <- V * Ru * Rv Gright(V, 1, 2, e[3]); // u Gright(V, 0, 2, e[4]); // v } __host__ __device__ double sq_error (Ematrix E, Matches_5 pin, Matches_5 qin) { // Computes the error for the points double errsq = 0.0; for (int pt=0; pt<5; pt++) { // Compute (q . E . p)^2 double err = 0.0; for (int i=0; i<3; i++) for (int j=0; j<3; j++) err += E[i][j]*qin[pt][i]*pin[pt][j]; errsq += err*err; } return errsq; } template<int n> __host__ __device__ double sq_error (Ematrix E, Matches_n<n> pin, Matches_n<n> qin) { // Computes the error for the points double errsq = 0.0; for (int pt=0; pt<n; pt++) { // Compute (q . E . p)^2 double err = 0.0; for (int i=0; i<3; i++) for (int j=0; j<3; j++) err += E[i][j]*qin[pt][i]*pin[pt][j]; errsq += err*err; } return errsq; } __host__ __device__ void polish_E (Ematrix E, Matches_5 pin, Matches_5 qin, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); // printf ("In polish_E\n"); // printf ("U and V\n"); // printE(U); // printE(V); // Go into a loop of polishing double oldemag = 1.0e6; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; Matches_5 p, q; for (int i=0; i<5; i++) for (int j=0; j<3; j++) { p[i][j] = pin[i][0]*V[0][j] + pin[i][1]*V[1][j] + pin[i][2]*V[2][j]; q[i][j] = qin[i][0]*U[0][j] + qin[i][1]*U[1][j] + qin[i][2]*U[2][j]; } // Print out the points // printf ("Transformed p and q\n"); // printM(p); // printM(q); // Form the vector double err[5], emag = 0.0; for (int i=0; i<5; i++) { err[i] = -(p[i][0]*q[i][0] + p[i][1]*q[i][1]); emag += err[i]*err[i]; } // Now, if emag is greater than old mag, then we stop if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); // printf ("E = \n"); // printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double J[5][5]; for (int j=0; j<5; j++) // j is the point number { J[j][0] = -p[j][1] * q[j][2]; // -py*qz J[j][1] = -p[j][0] * q[j][2]; // -px*qz J[j][2] = p[j][1] * q[j][0] - p[j][0] * q[j][1];// py*qx - px*qy J[j][3] = -p[j][2] * q[j][1]; // -pz*qx [DJC] -> -pz*qy J[j][4] = -p[j][2] * q[j][0]; // -pz*qy [DJC] -> -pz*qx } // Solve the equation solve_5x5 (J, err); // printf ("del = \n"); // for (int i=0; i<5; i++) // printf ("%12.4e ", err[i]); // printf ("\n"); // Now, build the new U and V from the params update (U, V, err); // printf ("U and V after update\n"); // printE(U); // printE(V); } } /* * Iterative least squares refinement of E matrix * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. 
I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. */ template<int n> __host__ __device__ void polish_E (Ematrix E, const double* pin, const double* qin, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); printf ("In polish_E\n"); printf ("U and V\n"); printE(U); printE(V); // Go into a loop of polishing double oldemag = 1.0e8; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; Matches_n<n> p, q; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points // printf ("Transformed p and q\n"); // printM(p); // printM(q); // Form the vector -J^T * err double JTerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTerr[i] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon JTerr[0] += -p[k][1] * q[k][2] * -errk; JTerr[1] += -p[k][0] * q[k][2] * -errk; JTerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk; JTerr[3] += -p[k][2] * q[k][1] * -errk; // -pz*qx [DJC] -> -pz*qy JTerr[4] += -p[k][2] * q[k][0] * -errk; // -pz*qx [DJC] -> -pz*qx } printf("JTerr \n"); for (int i = 0; i < 5; i++) printf("%f ", JTerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTerr[i]*JTerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); // printf ("E = \n"); // printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qx for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTJ[i][j] += Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTJ, JTerr); printf ("del = \n"); for (int i=0; i<5; i++) printf ("%12.4e ", JTerr[i]); printf ("\n"); // Now, build the new U and V from the params update (U, V, JTerr); printf ("U and V after update\n"); printE(U); printE(V); } } /* * Iteratively reweighted least squares refinement of E matrix * With Huber weighting for robustness J^T W J Delta = -J^T W epsilon * See: https://newonlinecourses.science.psu.edu/stat501/node/353/ * delta: scale parameter - size of residual when behaviour changes from quadratic to linear * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. 
*/ template<int n> __host__ __device__ void polish_E_huber (Ematrix E, double* pin, double* qin, const double delta, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); printf ("In polish_E\n"); printf ("U and V\n"); printE(U); printE(V); // Go into a loop of polishing double oldemag = 1.0e7; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; Matches_n<n> p, q; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points // printf ("Transformed p and q\n"); // printM(p); // printM(q); // Form the vector -J^T * err double JTWerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTWerr[i] = 0.0; // But just to be absolutely sure double weights[n]; for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon weights[k] = (fabs(errk) < delta) ? 1.0 : delta / fabs(errk); JTWerr[0] += -p[k][1] * q[k][2] * -errk * weights[k]; JTWerr[1] += -p[k][0] * q[k][2] * -errk * weights[k]; JTWerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk * weights[k]; JTWerr[3] += -p[k][2] * q[k][1] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qy JTWerr[4] += -p[k][2] * q[k][0] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qx } // printf("JTerr \n"); // for (int i = 0; i < 5; i++) // printf("%f ", JTWerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTWerr[i]*JTWerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); // printf ("E = \n"); // printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTWJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTWJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qx for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTWJ[i][j] += weights[k] * Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTWJ, JTWerr); // printf ("del = \n"); // for (int i=0; i<5; i++) // printf ("%12.4e ", JTWerr[i]); // printf ("\n"); // Now, build the new U and V from the params update (U, V, JTWerr); // printf ("U and V after update\n"); // printE(U); // printE(V); } } /* * Iteratively reweighted least squares refinement of E matrix * With Huber weighting for robustness J^T W J Delta = -J^T W epsilon * See: https://newonlinecourses.science.psu.edu/stat501/node/353/ * Scales residuals by tau = MAD / 0.6745 (Median Absolute Deviation), so that a fixed scale parameter * delta can be used (1.345), for 95% efficiency assuming the inliers have Gaussian noise * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. 
*/ template<int n> __host__ __device__ void polish_E_huber (Ematrix E, double* pin, double* qin, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); // printf ("In polish_E\n"); // printf ("U and V\n"); // printE(U); // printE(V); // Go into a loop of polishing double oldemag = 1.0e7; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; Matches_n<n> p, q; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points // printf ("Transformed p and q\n"); // printM(p); // printM(q); // Compute error vector and scale parameter tau // tau = median(|r_i - r_bar|) / 0.6745 // r_i = abs(err_i); r_bar = median({r_i}) double err[n]; double abs_err[n]; for (int k=0; k<n; k++) { // Loop over the points err[k] = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon abs_err[k] = fabs(err[k]); } int middle_index = 0.5 * n; // std::nth_element(abs_err, abs_err + middle_index, abs_err + n); // Use this for host-only code // double median_abs_err = abs_err[middle_index]; // Use this for host-only code double median_abs_err = quickselect(abs_err, middle_index, n); for (int k=0; k<n; k++) { // Loop over the points abs_err[k] = fabs(abs_err[k] - median_abs_err); } // std::nth_element(abs_err, abs_err + middle_index, abs_err + n); // Use this for host-only code // double tau = abs_err[middle_index] / 0.6745; // Use this for host-only code double tau = quickselect(abs_err, middle_index, n) / 0.6745; double delta = 1.345; // printf("median_abs_err: %f, tau: %f\n", median_abs_err, tau); // Form the vector -J^T * err double JTWerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTWerr[i] = 0.0; // But just to be absolutely sure double weights[n]; for (int k=0; k<n; k++) { // Loop over the points // double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon double errk = err[k]; weights[k] = (fabs(errk) < tau * delta) ? 
1.0 : tau * delta / fabs(errk); JTWerr[0] += -p[k][1] * q[k][2] * -errk * weights[k]; JTWerr[1] += -p[k][0] * q[k][2] * -errk * weights[k]; JTWerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk * weights[k]; JTWerr[3] += -p[k][2] * q[k][1] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qy JTWerr[4] += -p[k][2] * q[k][0] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qx } // printf("JTerr \n"); // for (int i = 0; i < 5; i++) // printf("%f ", JTWerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTWerr[i]*JTWerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop // printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); // printf ("E = \n"); // printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTWJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTWJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qx for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTWJ[i][j] += weights[k] * Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTWJ, JTWerr); // printf ("del = \n"); // for (int i=0; i<5; i++) // printf ("%12.4e ", JTWerr[i]); // printf ("\n"); // Now, build the new U and V from the params update (U, V, JTWerr); // printf ("U and V after update\n"); // printE(U); // printE(V); } } /* * Iteratively reweighted least squares refinement of E matrix * With parametric robust weighting J^T W J Delta = -J^T W epsilon * alpha in [0, 1], smooth transition between truncated L2 and Huber penalty functions * alpha = 0 ==> truncated L2 * alpha = 1 ==> Huber norm * See: https://newonlinecourses.science.psu.edu/stat501/node/353/ * delta: scale parameter - size of residual when behaviour changes from quadratic to linear * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. */ template<int n> __host__ __device__ void polish_E_robust_parametric (Ematrix E, const double* pin, const double* qin, const double delta, const double alpha, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); printf ("In polish_E\n"); printf ("U and V\n"); printE(U); printE(V); // Go into a loop of polishing double oldemag = 1.0e7; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; Matches_n<n> p, q; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points printf ("Transformed p and q\n"); printM(p); printM(q); // Form the vector -J^T * err double JTWerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTWerr[i] = 0.0; // But just to be absolutely sure double weights[n]; for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon weights[k] = (fabs(errk) < delta) ? 
1.0 : alpha * delta / fabs(errk); JTWerr[0] += -p[k][1] * q[k][2] * -errk * weights[k]; JTWerr[1] += -p[k][0] * q[k][2] * -errk * weights[k]; JTWerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk * weights[k]; JTWerr[3] += -p[k][2] * q[k][1] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qy JTWerr[4] += -p[k][2] * q[k][0] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qx } printf("JTerr \n"); for (int i = 0; i < 5; i++) printf("%f ", JTWerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTWerr[i]*JTWerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); printf ("E = \n"); printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTWJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTWJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qx for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTWJ[i][j] += weights[k] * Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTWJ, JTWerr); printf ("del = \n"); for (int i=0; i<5; i++) printf ("%12.4e ", JTWerr[i]); printf ("\n"); // Now, build the new U and V from the params update (U, V, JTWerr); printf ("U and V after update\n"); printE(U); printE(V); } } /* * Iteratively reweighted least squares refinement of E matrix * With parametric robust weighting J^T W J Delta = -J^T W epsilon * alpha in (-inf, 2], smooth transition between Welsch loss and L2 loss * alpha = -inf ==> Welsch * alpha = -2 ==> Geman-McClure * alpha = 1 ==> pseudo-Huber * alpha = 2 ==> L2 * See: https://newonlinecourses.science.psu.edu/stat501/node/353/ * See: https://arxiv.org/pdf/1701.03077.pdf * delta: scale parameter - size of residual when behaviour changes from quadratic to linear * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. */ template<int n> __host__ __device__ void polish_E_robust_parametric_barron (Ematrix E, const double* pin, const double* qin, const double delta, const double alpha, const int MaxReps) { double eps = 1e-5; double b = fabs(2.0 - alpha) + eps; double d = alpha >= 0.0 ? alpha + eps : alpha - eps; double delta2 = delta * delta; // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); printf ("In polish_E\n"); printf ("U and V\n"); printE(U); printE(V); // Go into a loop of polishing double oldemag = 1.0e7; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . 
V; Matches_n<n> p, q; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points printf ("Transformed p and q\n"); printM(p); printM(q); // Form the vector -J^T * err double JTWerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTWerr[i] = 0.0; // But just to be absolutely sure double weights[n]; for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon weights[k] = pow(errk * errk / delta2 / b + 1.0, 0.5 * d - 1.0) / delta2; JTWerr[0] += -p[k][1] * q[k][2] * -errk * weights[k]; JTWerr[1] += -p[k][0] * q[k][2] * -errk * weights[k]; JTWerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk * weights[k]; JTWerr[3] += -p[k][2] * q[k][1] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qy JTWerr[4] += -p[k][2] * q[k][0] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qx } printf("JTerr \n"); for (int i = 0; i < 5; i++) printf("%f ", JTWerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTWerr[i]*JTWerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); printf ("E = \n"); printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTWJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTWJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qx for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTWJ[i][j] += weights[k] * Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTWJ, JTWerr); printf ("del = \n"); for (int i=0; i<5; i++) printf ("%12.4e ", JTWerr[i]); printf ("\n"); // Now, build the new U and V from the params update (U, V, JTWerr); printf ("U and V after update\n"); printE(U); printE(V); } } /* * Basic non-recursive quickselect implementation * Modified from http://blog.teamleadnet.com/2012/07/quick-select-algorithm-find-kth-element.html */ __host__ __device__ double quickselect(double *array, int k, int n) { int from = 0; int to = n - 1; // if from == to we reached the kth element while (from < to) { int r = from; int w = to; double mid = array[(r + w) / 2]; // stop if the reader and writer meets while (r < w) { if (array[r] >= mid) { // put the large values at the end double tmp = array[w]; array[w] = array[r]; array[r] = tmp; w--; } else { // the value is smaller than the pivot, skip r++; } } // if we stepped up (r++) we need to step one down if (array[r] > mid) r--; // the r pointer is on the end of the first k elements if (k <= r) { to = r; } else { from = r + 1; } } return array[k]; } /* * Functions with Dynamic Memory Allocation */ /* * Iterative least squares refinement of E matrix * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. 
*/ __host__ void polish_E (Ematrix E, const double* pin, const double* qin, const int n, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); printf ("In polish_E\n"); printf ("U and V\n"); printE(U); printE(V); // std::vector<std::array<double, 3>> p(n); // std::vector<std::array<double, 3>> q(n); double (*p)[3] = new double[n][3]; double (*q)[3] = new double[n][3]; // Go into a loop of polishing double oldemag = 1.0e8; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points // printf ("Transformed p and q\n"); // printM(p); // printM(q); // Form the vector -J^T * err double JTerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTerr[i] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon JTerr[0] += -p[k][1] * q[k][2] * -errk; JTerr[1] += -p[k][0] * q[k][2] * -errk; JTerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk; JTerr[3] += -p[k][2] * q[k][1] * -errk; // -pz*qx [DJC] -> -pz*qy JTerr[4] += -p[k][2] * q[k][0] * -errk; // -pz*qx [DJC] -> -pz*qx } printf("JTerr \n"); for (int i = 0; i < 5; i++) printf("%f ", JTerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTerr[i]*JTerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); // printf ("E = \n"); // printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qy for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTJ[i][j] += Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTJ, JTerr); printf ("del = \n"); for (int i=0; i<5; i++) printf ("%12.4e ", JTerr[i]); printf ("\n"); // Now, build the new U and V from the params update (U, V, JTerr); printf ("U and V after update\n"); printE(U); printE(V); } // Release dynamically-allocated memory delete[] p; delete[] q; } /* * Iteratively reweighted least squares refinement of E matrix * With Huber weighting for robustness J^T W J Delta = -J^T W epsilon * See: https://newonlinecourses.science.psu.edu/stat501/node/353/ * delta: scale parameter - size of residual when behaviour changes from quadratic to linear * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. 
*/ __host__ void polish_E_huber (Ematrix E, const double* pin, const double* qin, const int n, const double delta, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); printf ("In polish_E\n"); printf ("U and V\n"); printE(U); printE(V); // std::vector<std::array<double, 3>> p(n); // std::vector<std::array<double, 3>> q(n); // std::vector<double> weights(n); double (*p)[3] = new double[n][3]; double (*q)[3] = new double[n][3]; double *weights = new double[n]; // Go into a loop of polishing double oldemag = 1.0e7; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points printf ("Transformed p and q\n"); printM(p); printM(q); // Form the vector -J^T * err double JTWerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTWerr[i] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon weights[k] = (fabs(errk) < delta) ? 1.0 : delta / fabs(errk); JTWerr[0] += -p[k][1] * q[k][2] * -errk * weights[k]; JTWerr[1] += -p[k][0] * q[k][2] * -errk * weights[k]; JTWerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk * weights[k]; JTWerr[3] += -p[k][2] * q[k][1] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qy JTWerr[4] += -p[k][2] * q[k][0] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qx } printf("JTerr \n"); for (int i = 0; i < 5; i++) printf("%f ", JTWerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTWerr[i]*JTWerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); printf ("E = \n"); printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTWJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTWJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qx for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTWJ[i][j] += weights[k] * Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTWJ, JTWerr); printf ("del = \n"); for (int i=0; i<5; i++) printf ("%12.4e ", JTWerr[i]); printf ("\n"); // Now, build the new U and V from the params update (U, V, JTWerr); printf ("U and V after update\n"); printE(U); printE(V); } // Release dynamically-allocated memory delete[] p; delete[] q; delete[] weights; } /* * Iteratively reweighted least squares refinement of E matrix * With parametric robust weighting J^T W J Delta = -J^T W epsilon * alpha in [0, 1], smooth transition between truncated L2 and Huber penalty functions * alpha = 0 ==> truncated L2 * alpha = 1 ==> Huber norm * See: https://newonlinecourses.science.psu.edu/stat501/node/353/ * delta: scale parameter - size of residual when behaviour changes from quadratic to linear * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 
paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. */ //template<typename T> //__host__ void polish_E_robust_parametric (Ematrix E, const T *pin, const T *qin, const int n, const T delta, const T alpha, const int MaxReps) __host__ void polish_E_robust_parametric (Ematrix E, const double *pin, const double *qin, const int n, const double delta, const double alpha, const int MaxReps) { // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); // printf ("In polish_E\n"); // printf ("U and V\n"); // printE(U); // printE(V); // std::vector<std::array<T, 3>> p(n); // std::vector<std::array<T, 3>> q(n); // std::vector<T> weights(n); double (*p)[3] = new double[n][3]; double (*q)[3] = new double[n][3]; double *weights = new double[n]; // Go into a loop of polishing double oldemag = 1.0e20; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points // printf ("Transformed p and q\n"); // printM(p); // printM(q); // Form the vector -J^T * err double JTWerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTWerr[i] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon weights[k] = (fabs(errk) < delta) ? 1.0 : alpha * delta / fabs(errk); JTWerr[0] += -p[k][1] * q[k][2] * -errk * weights[k]; JTWerr[1] += -p[k][0] * q[k][2] * -errk * weights[k]; JTWerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk * weights[k]; JTWerr[3] += -p[k][2] * q[k][1] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qy JTWerr[4] += -p[k][2] * q[k][0] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qx } // printf("JTerr \n"); // for (int i = 0; i < 5; i++) // printf("%f ", JTWerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTWerr[i]*JTWerr[i]; //emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop // printf("emag %.30f \n", emag); // if (emag > oldemag) break; if (emag < 1e-20) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTWJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTWJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qx for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTWJ[i][j] += weights[k] * Jk[i] * Jk[j]; } } } // Solve the equation solve_5x5 (JTWJ, JTWerr); // printf ("del = \n"); // for (int i=0; i<5; i++) // printf ("%12.4e ", JTWerr[i]); // printf ("\n"); // Now, build the new U and V from the params update (U, V, JTWerr); // printf ("U and V after update\n"); // printE(U); // printE(V); } // Release dynamically-allocated memory delete[] p; delete[] q; delete[] weights; } /* * Iteratively reweighted least squares refinement of E matrix * With parametric robust weighting 
J^T W J Delta = -J^T W epsilon * alpha in (-inf, 2], smooth transition between Welsch loss and L2 loss * alpha = -inf ==> Welsch * alpha = -2 ==> Geman-McClure * alpha = 1 ==> pseudo-Huber * alpha = 2 ==> L2 * See: https://newonlinecourses.science.psu.edu/stat501/node/353/ * See: https://arxiv.org/pdf/1701.03077.pdf * delta: scale parameter - size of residual when behaviour changes from quadratic to linear * Note that the Jacobian for each point is slightly different to that in Eq (13) of the PAMI'12 paper, * up to some sign changes and swaps. I'm taking the values in the above function as gospel, for now. * Presumably because the decomposition is defined slightly differently. */ __host__ void polish_E_robust_parametric_barron (Ematrix E, const double* pin, const double* qin, const int n, const double delta, const double alpha, const int MaxReps) { double eps = 1e-5; double b = fabs(2.0 - alpha) + eps; double d = alpha >= 0.0 ? alpha + eps : alpha - eps; double delta2 = delta * delta; // Decompose the matrix Ematrix U, V; Edecomp (E, U, V); printf ("In polish_E\n"); printf ("U and V\n"); printE(U); printE(V); // std::vector<std::array<double, 3>> p(n); // std::vector<std::array<double, 3>> q(n); // std::vector<double> weights(n); double (*p)[3] = new double[n][3]; double (*q)[3] = new double[n][3]; double *weights = new double[n]; // Go into a loop of polishing double oldemag = 1.0e7; // Large value for (int rep=0; ; rep++) { // Multiply out the matches: q <- q . U; p <- p . V; for (int i=0; i<n; i++) for (int j=0; j<3; j++) { p[i][j] = pin[2*i+0]*V[0][j] + pin[2*i+1]*V[1][j] + 1.0*V[2][j]; q[i][j] = qin[2*i+0]*U[0][j] + qin[2*i+1]*U[1][j] + 1.0*U[2][j]; } // Print out the points printf ("Transformed p and q\n"); printM(p); printM(q); // Form the vector -J^T * err double JTWerr[5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) JTWerr[i] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double errk = p[k][0]*q[k][0] + p[k][1]*q[k][1]; // epsilon weights[k] = pow(errk * errk / delta2 / b + 1.0, 0.5 * d - 1.0) / delta2; JTWerr[0] += -p[k][1] * q[k][2] * -errk * weights[k]; JTWerr[1] += -p[k][0] * q[k][2] * -errk * weights[k]; JTWerr[2] += (p[k][1] * q[k][0] - p[k][0] * q[k][1]) * -errk * weights[k]; JTWerr[3] += -p[k][2] * q[k][1] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qy JTWerr[4] += -p[k][2] * q[k][0] * -errk * weights[k]; // -pz*qx [DJC] -> -pz*qx } printf("JTerr \n"); for (int i = 0; i < 5; i++) printf("%f ", JTWerr[i]); double emag = 0.0; for (int i=0; i<5; i++) emag += JTWerr[i]*JTWerr[i]; emag /= n; // divide error by n, to make it smaller // Now, if emag is greater than old mag, then we stop printf("emag %f \n", emag); if (emag > oldemag) break; oldemag = emag; // Compute the E from the present values of U, V Eprod (U, V, E); printf ("E = \n"); printE(E); // Here is the break point, after it has really tried MaxReps changes if (rep == MaxReps) break; // Now, form a matrix double JTWJ[5][5] = {}; // Sets all elements to 0 for (int i=0; i<5; i++) for (int j=0; j<5; j++) JTWJ[i][j] = 0.0; // But just to be absolutely sure for (int k=0; k<n; k++) { // Loop over the points double Jk[5]; Jk[0] = -p[k][1] * q[k][2]; // -py*qz Jk[1] = -p[k][0] * q[k][2]; // -px*qz Jk[2] = p[k][1] * q[k][0] - p[k][0] * q[k][1];// py*qx - px*qy Jk[3] = -p[k][2] * q[k][1]; // -pz*qx [DJC] -> -pz*qy Jk[4] = -p[k][2] * q[k][0]; // -pz*qy [DJC] -> -pz*qy for (int i=0; i<5; i++) { for (int j=0; j<5; j++) { JTWJ[i][j] += weights[k] * Jk[i] * Jk[j]; } } } // 
Solve the equation solve_5x5 (JTWJ, JTWerr); printf ("del = \n"); for (int i=0; i<5; i++) printf ("%12.4e ", JTWerr[i]); printf ("\n"); // Now, build the new U and V from the params update (U, V, JTWerr); printf ("U and V after update\n"); printE(U); printE(V); } // Release dynamically-allocated memory delete[] p; delete[] q; delete[] weights; }
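Each of the refinement routines above runs the same IRLS loop: compute a per-point residual eps_k in the rotated frames, form a robust weight w_k, solve J^T W J * delta = -J^T W * eps with solve_5x5, and apply delta through update(). The sketch below isolates just the two weight formulas, copied from those loops; huber_weight and barron_weight are illustrative helper names, not part of polish_E.h or common.h.

#include <cmath>
#include <cstdio>

// Huber weight: quadratic region for |r| < delta, linear beyond it,
// which gives w = delta / |r| (same expression as the loops above).
double huber_weight(double r, double delta) {
    return (std::fabs(r) < delta) ? 1.0 : delta / std::fabs(r);
}

// General robust-loss weight used in polish_E_robust_parametric_barron:
// alpha = 2 behaves like L2, alpha = -2 like Geman-McClure, alpha -> -inf
// like Welsch; delta sets the scale of the quadratic region.
double barron_weight(double r, double delta, double alpha, double eps = 1e-5) {
    double b      = std::fabs(2.0 - alpha) + eps;
    double d      = (alpha >= 0.0) ? alpha + eps : alpha - eps;
    double delta2 = delta * delta;
    return std::pow(r * r / delta2 / b + 1.0, 0.5 * d - 1.0) / delta2;
}

int main() {
    // Small residuals keep full weight; large ones are progressively damped.
    const double residuals[] = {0.01, 0.1, 1.0, 10.0};
    for (double r : residuals)
        std::printf("r = %5.2f   huber w = %.4f   barron(alpha=-2) w = %.4f\n",
                    r, huber_weight(r, 0.1), barron_weight(r, 0.1, -2.0));
    return 0;
}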
the_stack
#include <limits> #include <cmath> // HACK TESTING #include <iostream> using std::cout; using std::endl; using std::max; using std::min; template<typename I, typename F> inline __device__ F clip(F x) { return min_gpu(max_gpu(x,F(minval_gpu<I>())),F(maxval_gpu<I>())); } template<typename F> inline __device__ F clip_4bit(F x) { return min_gpu(max_gpu(x,F(-7)),F(7)); } template<typename F> inline __device__ F clip_2bit(F x) { return min_gpu(max_gpu(x,F(-1)),F(1)); } template<typename F> inline __device__ F clip_1bit(F x) { return x >= F(0) ? F(1) : F(0); } template<typename IType, typename SType, typename OType> __device__ void guantize(IType ival, SType scale, OType& oval) { oval = OType(rint(clip<OType>(ival*scale))); } template<typename IType, typename SType, typename OType> struct GuantizeFunctor { SType scale; bool byteswap_in; bool byteswap_out; GuantizeFunctor(SType scale_, bool byteswap_in_, bool byteswap_out_) : scale(scale_), byteswap_in(byteswap_in_), byteswap_out(byteswap_out_) {} __device__ void operator()(IType ival, OType& oval) const { if( byteswap_in ) { byteswap_gpu(ival, &ival); } guantize(ival, scale, oval); if( byteswap_out ) { byteswap_gpu(oval, &oval); } } }; template<typename T, typename U, typename Func, typename Size> __global__ void foreach_simple_gpu(T const* in, U* out, Size nelement, Func func) { Size v0 = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x; if( v0 < nelement ) { func(in[v0], out[v0]); } } template<typename T, typename U, typename Func, typename Size> inline void launch_foreach_simple_gpu(T const* in, U* out, Size nelement, Func func, cudaStream_t stream=0) { dim3 block(512, 1); // TODO: Tune this Size first = std::min((nelement-1)/block.x+1, 65535ul); Size secnd = std::min((nelement - first*block.x) / first + 1, 65535ul); if( block.x*first > nelement ) { secnd = 1; } dim3 grid(first, secnd); /* cout << " Block size is " << block.x << " by " << block.y << endl; cout << " Grid size is " << grid.x << " by " << grid.y << endl; cout << " Maximum size is " << block.x*grid.x*grid.y << endl; if( block.x*grid.x*grid.y >= nelement ) { cout << " -> Valid" << endl; } */ void* args[] = {&in, &out, &nelement, &func}; BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)foreach_simple_gpu<T,U,Func,Size>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } template<typename T, typename Func, typename Size> __global__ void foreach_simple_gpu_4bit(T const* in, int8_t* out, Size nelement, Func func) { Size v0 = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x; T tempR; T tempI; int8_t tempO; if( v0 < nelement ) { tempR = in[2*v0+0]; tempI = in[2*v0+1]; if(func.byteswap_in) { byteswap_gpu(tempR, &tempR); byteswap_gpu(tempI, &tempI); } tempO = (((int8_t(rint(clip_4bit(tempR*func.scale)))*16) ) & 0xF0) | \ (((int8_t(rint(clip_4bit(tempI*func.scale)))*16) >> 4) & 0x0F); if(func.byteswap_out) { byteswap_gpu(tempO, &tempO); } out[v0] = tempO; } } template<typename T, typename Func, typename Size> inline void launch_foreach_simple_gpu_4bit(T const* in, int8_t* out, Size nelement, Func func, cudaStream_t stream=0) { nelement /= 2; dim3 block(512, 1); // TODO: Tune this Size first = std::min((nelement-1)/block.x+1, 65535ul); Size secnd = std::min((nelement - first*block.x) / first + 1, 65535ul); if( block.x*first > nelement ) { secnd = 1; } dim3 grid(first, secnd); /* cout << " Block size is " << block.x << " by " << block.y << endl; cout << " Grid size is " << grid.x << " by " << grid.y << endl; cout << " Maximum size is " << 
block.x*grid.x*grid.y << endl; if( block.x*grid.x*grid.y >= nelement ) { cout << " -> Valid" << endl; } */ void* args[] = {&in, &out, &nelement, &func}; BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)foreach_simple_gpu_4bit<T,Func,Size>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } template<typename T, typename Func, typename Size> __global__ void foreach_simple_gpu_2bit(T const* in, int8_t* out, Size nelement, Func func) { Size v0 = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x; T tempA; T tempB; T tempC; T tempD; int8_t tempO; if( v0 < nelement ) { tempA = in[4*v0+0]; tempB = in[4*v0+1]; tempC = in[4*v0+2]; tempD = in[4*v0+3]; if(func.byteswap_in) { byteswap_gpu(tempA, &tempA); byteswap_gpu(tempB, &tempB); byteswap_gpu(tempC, &tempC); byteswap_gpu(tempD, &tempD); } tempO = (((int8_t(rint(clip_2bit(tempA*func.scale)))*64) ) & 0xC0) | \ (((int8_t(rint(clip_2bit(tempB*func.scale)))*64) >> 2) & 0x30) | \ (((int8_t(rint(clip_2bit(tempC*func.scale)))*64) >> 4) & 0x0C) | \ (((int8_t(rint(clip_2bit(tempD*func.scale)))*64) >> 6) & 0x03); if(func.byteswap_out) { byteswap_gpu(tempO, &tempO); } out[v0] = tempO; } } template<typename T, typename Func, typename Size> inline void launch_foreach_simple_gpu_2bit(T const* in, int8_t* out, Size nelement, Func func, cudaStream_t stream=0) { nelement /= 4; dim3 block(512, 1); // TODO: Tune this Size first = std::min((nelement-1)/block.x+1, 65535ul); Size secnd = std::min((nelement - first*block.x) / first + 1, 65535ul); if( block.x*first > nelement ) { secnd = 1; } dim3 grid(first, secnd); /* cout << " Block size is " << block.x << " by " << block.y << endl; cout << " Grid size is " << grid.x << " by " << grid.y << endl; cout << " Maximum size is " << block.x*grid.x*grid.y << endl; if( block.x*grid.x*grid.y >= nelement ) { cout << " -> Valid" << endl; } */ void* args[] = {&in, &out, &nelement, &func}; BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)foreach_simple_gpu_2bit<T,Func,Size>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } template<typename T, typename Func, typename Size> __global__ void foreach_simple_gpu_1bit(T const* in, int8_t* out, Size nelement, Func func) { Size v0 = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x; T tempA; T tempB; T tempC; T tempD; T tempE; T tempF; T tempG; T tempH; int8_t tempO; if( v0 < nelement ) { tempA = in[8*v0+0]; tempB = in[8*v0+1]; tempC = in[8*v0+2]; tempD = in[8*v0+3]; tempE = in[8*v0+4]; tempF = in[8*v0+5]; tempG = in[8*v0+6]; tempH = in[8*v0+7]; if(func.byteswap_in) { byteswap_gpu(tempA, &tempA); byteswap_gpu(tempB, &tempB); byteswap_gpu(tempC, &tempC); byteswap_gpu(tempD, &tempD); byteswap_gpu(tempE, &tempE); byteswap_gpu(tempF, &tempF); byteswap_gpu(tempG, &tempG); byteswap_gpu(tempH, &tempH); } tempO = (((int8_t(rint(clip_1bit(tempA*func.scale)))*128) ) & 0x08) | \ (((int8_t(rint(clip_1bit(tempB*func.scale)))*128) >> 1) & 0x04) | \ (((int8_t(rint(clip_1bit(tempC*func.scale)))*128) >> 2) & 0x02) | \ (((int8_t(rint(clip_1bit(tempD*func.scale)))*128) >> 3) & 0x10) | \ (((int8_t(rint(clip_1bit(tempE*func.scale)))*128) >> 4) & 0x08) | \ (((int8_t(rint(clip_1bit(tempF*func.scale)))*128) >> 5) & 0x04) | \ (((int8_t(rint(clip_1bit(tempG*func.scale)))*128) >> 6) & 0x02) | \ (((int8_t(rint(clip_1bit(tempH*func.scale)))*128) >> 7) & 0x01); if(func.byteswap_out) { byteswap_gpu(tempO, &tempO); } out[v0] = tempO; } } template<typename T, typename Func, typename Size> inline void launch_foreach_simple_gpu_1bit(T const* in, int8_t* out, Size nelement, Func 
func, cudaStream_t stream=0) { nelement /= 8; dim3 block(512, 1); // TODO: Tune this Size first = std::min((nelement-1)/block.x+1, 65535ul); Size secnd = std::min((nelement - first*block.x) / first + 1, 65535ul); if( block.x*first > nelement ) { secnd = 1; } dim3 grid(first, secnd); /* cout << " Block size is " << block.x << " by " << block.y << endl; cout << " Grid size is " << grid.x << " by " << grid.y << endl; cout << " Maximum size is " << block.x*grid.x*grid.y << endl; if( block.x*grid.x*grid.y >= nelement ) { cout << " -> Valid" << endl; } */ void* args[] = {&in, &out, &nelement, &func}; BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)foreach_simple_gpu_1bit<T,Func,Size>, grid, block, &args[0], 0, stream), BF_STATUS_INTERNAL_ERROR); } // Instantiation - gunatize functors used in quantize.cpp //// unsigned template class GuantizeFunctor<float,float,uint8_t>; template class GuantizeFunctor<float,double,uint8_t>; template class GuantizeFunctor<float,float,uint16_t>; template class GuantizeFunctor<float,double,uint16_t>; template class GuantizeFunctor<float,float,uint32_t>; template class GuantizeFunctor<float,double,uint32_t>; //// signed template class GuantizeFunctor<float,float,int8_t>; template class GuantizeFunctor<float,double,int8_t>; template class GuantizeFunctor<float,float,int16_t>; template class GuantizeFunctor<float,double,int16_t>; template class GuantizeFunctor<float,float,int32_t>; template class GuantizeFunctor<float,double,int32_t>; // Instantiation - launch_foreach_simple_gpu_1bit calls used in quantize.cpp template void launch_foreach_simple_gpu_1bit<float,GuantizeFunctor<float,float,uint8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,float,uint8_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu_1bit<float,GuantizeFunctor<float,double,uint8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,double,uint8_t> func, cudaStream_t stream); // Instantiation - launch_foreach_simple_gpu_2bit calls used in quantize.cpp template void launch_foreach_simple_gpu_2bit<float,GuantizeFunctor<float,float,uint8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,float,uint8_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu_2bit<float,GuantizeFunctor<float,double,uint8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,double,uint8_t> func, cudaStream_t stream); // Instantiation - launch_foreach_simple_gpu_4bit calls used in quantize.cpp template void launch_foreach_simple_gpu_4bit<float,GuantizeFunctor<float,float,uint8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,float,uint8_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu_4bit<float,GuantizeFunctor<float,double,uint8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,double,uint8_t> func, cudaStream_t stream); // Instantiation - launch_foreach_simple_gpu calls used in quantize.cpp //// unsigned template void launch_foreach_simple_gpu<float,uint8_t,GuantizeFunctor<float,float,uint8_t>,size_t>(float const* in, uint8_t* out, size_t nelement, GuantizeFunctor<float,float,uint8_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,uint8_t,GuantizeFunctor<float,double,uint8_t>,size_t>(float const* in, uint8_t* out, size_t nelement, GuantizeFunctor<float,double,uint8_t> func, cudaStream_t stream); template void 
launch_foreach_simple_gpu<float,uint16_t,GuantizeFunctor<float,float,uint16_t>,size_t>(float const* in, uint16_t* out, size_t nelement, GuantizeFunctor<float,float,uint16_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,uint16_t,GuantizeFunctor<float,double,uint16_t>,size_t>(float const* in, uint16_t* out, size_t nelement, GuantizeFunctor<float,double,uint16_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,uint32_t,GuantizeFunctor<float,float,uint32_t>,size_t>(float const* in, uint32_t* out, size_t nelement, GuantizeFunctor<float,float,uint32_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,uint32_t,GuantizeFunctor<float,double,uint32_t>,size_t>(float const* in, uint32_t* out, size_t nelement, GuantizeFunctor<float,double,uint32_t> func, cudaStream_t stream); //// signed template void launch_foreach_simple_gpu<float,int8_t,GuantizeFunctor<float,float,int8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,float,int8_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,int8_t,GuantizeFunctor<float,double,int8_t>,size_t>(float const* in, int8_t* out, size_t nelement, GuantizeFunctor<float,double,int8_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,int16_t,GuantizeFunctor<float,float,int16_t>,size_t>(float const* in, int16_t* out, size_t nelement, GuantizeFunctor<float,float,int16_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,int16_t,GuantizeFunctor<float,double,int16_t>,size_t>(float const* in, int16_t* out, size_t nelement, GuantizeFunctor<float,double,int16_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,int32_t,GuantizeFunctor<float,float,int32_t>,size_t>(float const* in, int32_t* out, size_t nelement, GuantizeFunctor<float,float,int32_t> func, cudaStream_t stream); template void launch_foreach_simple_gpu<float,int32_t,GuantizeFunctor<float,double,int32_t>,size_t>(float const* in, int32_t* out, size_t nelement, GuantizeFunctor<float,double,int32_t> func, cudaStream_t stream);
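The 4-bit path in this file packs two clipped samples into one output byte. As a clarifying aid, here is a host-side reference of that packing (a sketch, not part of the original file): the pair is presumably an interleaved real/imaginary sample, per the tempR/tempI naming in the kernel; each component is scaled, rounded, clipped to [-7, 7], and stored with the first component in the high nibble and the second in the low nibble. clip4 and pack4bit are names introduced here for illustration.

// Host-side reference of the 4-bit complex packing used by foreach_simple_gpu_4bit.
#include <cstdint>
#include <cmath>
#include <algorithm>
#include <cstdio>

// Round to nearest and clip to the signed 4-bit range [-7, 7].
static inline int8_t clip4(float x) {
    return static_cast<int8_t>(std::lrintf(std::min(std::max(x, -7.0f), 7.0f)));
}

static uint8_t pack4bit(float first, float second, float scale) {
    const int8_t hi = clip4(first  * scale);   // goes into the high nibble
    const int8_t lo = clip4(second * scale);   // goes into the low nibble
    // Mirrors the kernel: *16 shifts the 4-bit value into the high nibble, and the second
    // value is shifted back down before masking (arithmetic right shift assumed, as on the GPU).
    return static_cast<uint8_t>(((hi * 16) & 0xF0) | (((lo * 16) >> 4) & 0x0F));
}

int main() {
    // Example: (3.2, -1.7) at unit scale packs to 0x3E: 3 in the high nibble, -2 (0xE) in the low.
    std::printf("0x%02X\n", pack4bit(3.2f, -1.7f, 1.0f));
    return 0;
}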
the_stack
* \file * cub::BlockRegionReduceByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key. */ #pragma once #include <iterator> #include "device_scan_types.cuh" #include "../../block/block_load.cuh" #include "../../block/block_store.cuh" #include "../../block/block_scan.cuh" #include "../../block/block_exchange.cuh" #include "../../block/block_discontinuity.cuh" #include "../../grid/grid_queue.cuh" #include "../../iterator/cache_modified_input_iterator.cuh" #include "../../iterator/constant_input_iterator.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Tuning policy types ******************************************************************************/ /** * Parameterizable tuning policy type for BlockRegionReduceByKey */ template < int _BLOCK_THREADS, ///< Threads per thread block int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements bool _TWO_PHASE_SCATTER, ///< Whether or not to coalesce output values in shared memory before scattering them to global BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use struct BlockRegionReduceByKeyPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) TWO_PHASE_SCATTER = _TWO_PHASE_SCATTER, ///< Whether or not to coalesce output values in shared memory before scattering them to global }; static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use }; /****************************************************************************** * Tile status interface types ******************************************************************************/ /** * Tile status interface for reduction by key. * */ template < typename Value, typename Offset, bool SINGLE_WORD = (Traits<Value>::PRIMITIVE) && (sizeof(Value) + sizeof(Offset) < 16)> struct ReduceByKeyTileLookbackStatus; /** * Tile status interface for reduction by key, specialized for scan status and value types that * cannot be combined into one machine word. */ template < typename Value, typename Offset> struct ReduceByKeyTileLookbackStatus<Value, Offset, false> : TileLookbackStatus<ItemOffsetPair<Value, Offset> > { typedef TileLookbackStatus<ItemOffsetPair<Value, Offset> > SuperClass; /// Constructor __host__ __device__ __forceinline__ ReduceByKeyTileLookbackStatus() : SuperClass() {} }; /** * Tile status interface for reduction by key, specialized for scan status and value types that * can be combined into one machine word that can be read/written coherently in a single access. 
*/ template < typename Value, typename Offset> struct ReduceByKeyTileLookbackStatus<Value, Offset, true> { typedef ItemOffsetPair<Value, Offset> ItemOffsetPair; // Constants enum { PAIR_SIZE = sizeof(Value) + sizeof(Offset), TXN_WORD_SIZE = 1 << Log2<PAIR_SIZE + 1>::VALUE, STATUS_WORD_SIZE = TXN_WORD_SIZE - PAIR_SIZE, TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS, }; // Status word type typedef typename If<(STATUS_WORD_SIZE == 8), long long, typename If<(STATUS_WORD_SIZE == 4), int, typename If<(STATUS_WORD_SIZE == 2), short, char>::Type>::Type>::Type StatusWord; // Status word type typedef typename If<(TXN_WORD_SIZE == 16), longlong2, typename If<(TXN_WORD_SIZE == 8), long long, int>::Type>::Type TxnWord; // Device word type (for when sizeof(Value) == sizeof(Offset)) struct TileDescriptorBigStatus { Offset offset; Value value; StatusWord status; }; // Device word type (for when sizeof(Value) != sizeof(Offset)) struct TileDescriptorLittleStatus { Value value; StatusWord status; Offset offset; }; // Device word type typedef typename If< (sizeof(Value) == sizeof(Offset)), TileDescriptorBigStatus, TileDescriptorLittleStatus>::Type TileDescriptor; // Device storage TileDescriptor *d_tile_status; /// Constructor __host__ __device__ __forceinline__ ReduceByKeyTileLookbackStatus() : d_tile_status(NULL) {} /// Initializer __host__ __device__ __forceinline__ cudaError_t Init( int num_tiles, ///< [in] Number of tiles void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t temp_storage_bytes) ///< [in] Size in bytes of \t d_temp_storage allocation { d_tile_status = reinterpret_cast<TileDescriptor*>(d_temp_storage); return cudaSuccess; } /** * Compute device memory needed for tile status */ __host__ __device__ __forceinline__ static cudaError_t AllocationSize( int num_tiles, ///< [in] Number of tiles size_t &temp_storage_bytes) ///< [out] Size in bytes of \t d_temp_storage allocation { temp_storage_bytes = (num_tiles + TILE_STATUS_PADDING) * sizeof(TileDescriptor); // bytes needed for tile status descriptors return cudaSuccess; } /** * Initialize (from device) */ __device__ __forceinline__ void InitializeStatus(int num_tiles) { int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tile_idx < num_tiles) { // Not-yet-set d_tile_status[TILE_STATUS_PADDING + tile_idx].status = StatusWord(LOOKBACK_TILE_INVALID); } if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING)) { // Padding d_tile_status[threadIdx.x].status = StatusWord(LOOKBACK_TILE_OOB); } } /** * Update the specified tile's inclusive value and corresponding status */ __device__ __forceinline__ void SetInclusive(int tile_idx, ItemOffsetPair tile_inclusive) { TileDescriptor tile_descriptor; tile_descriptor.status = LOOKBACK_TILE_INCLUSIVE; tile_descriptor.value = tile_inclusive.value; tile_descriptor.offset = tile_inclusive.offset; TxnWord alias; *reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor; ThreadStore<STORE_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx), alias); } /** * Update the specified tile's partial value and corresponding status */ __device__ __forceinline__ void SetPartial(int tile_idx, ItemOffsetPair tile_partial) { TileDescriptor tile_descriptor; tile_descriptor.status = LOOKBACK_TILE_PARTIAL; tile_descriptor.value = tile_partial.value; tile_descriptor.offset = tile_partial.offset; TxnWord alias; *reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor; 
ThreadStore<STORE_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx), alias); } /** * Wait for the corresponding tile to become non-invalid */ __device__ __forceinline__ void WaitForValid( int tile_idx, StatusWord &status, ItemOffsetPair &value) { // Use warp-any to determine when all threads have valid status TxnWord alias = ThreadLoad<LOAD_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx)); TileDescriptor tile_descriptor = reinterpret_cast<TileDescriptor&>(alias); while ((tile_descriptor.status == LOOKBACK_TILE_INVALID)) { alias = ThreadLoad<LOAD_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx)); tile_descriptor = reinterpret_cast<TileDescriptor&>(alias); } status = tile_descriptor.status; value.value = tile_descriptor.value; value.offset = tile_descriptor.offset; } }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ /** * \brief BlockRegionReduceByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key across a region of tiles */ template < typename BlockRegionReduceByKeyPolicy, ///< Parameterized BlockRegionReduceByKeyPolicy tuning policy type typename KeyInputIterator, ///< Random-access input iterator type for keys typename KeyOutputIterator, ///< Random-access output iterator type for keys typename ValueInputIterator, ///< Random-access input iterator type for values typename ValueOutputIterator, ///< Random-access output iterator type for values typename EqualityOp, ///< Key equality operator type typename ReductionOp, ///< Value reduction operator type typename Offset> ///< Signed integer type for global offsets struct BlockRegionReduceByKey { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Data type of key iterator typedef typename std::iterator_traits<KeyInputIterator>::value_type Key; // Data type of value iterator typedef typename std::iterator_traits<ValueInputIterator>::value_type Value; // Tile status descriptor interface type typedef ReduceByKeyTileLookbackStatus<Value, Offset> TileLookbackStatus; // Constants enum { BLOCK_THREADS = BlockRegionReduceByKeyPolicy::BLOCK_THREADS, WARPS = BLOCK_THREADS / CUB_PTX_WARP_THREADS, ITEMS_PER_THREAD = BlockRegionReduceByKeyPolicy::ITEMS_PER_THREAD, TWO_PHASE_SCATTER = (BlockRegionReduceByKeyPolicy::TWO_PHASE_SCATTER) && (ITEMS_PER_THREAD > 1), TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, // Whether or not the scan operation has a zero-valued identity value (true if we're performing addition on a primitive type) HAS_IDENTITY_ZERO = (Equals<ReductionOp, cub::Sum>::VALUE) && (Traits<Value>::PRIMITIVE), // Whether or not to sync after loading data SYNC_AFTER_LOAD = (BlockRegionReduceByKeyPolicy::LOAD_ALGORITHM != BLOCK_LOAD_DIRECT), // Whether or not this is run-length-encoding with a constant iterator as values IS_RUN_LENGTH_ENCODE = (Equals<ValueInputIterator, ConstantInputIterator<Value, size_t> >::VALUE) || (Equals<ValueInputIterator, ConstantInputIterator<Value, int> >::VALUE) || (Equals<ValueInputIterator, ConstantInputIterator<Value, unsigned int> >::VALUE), }; // Cache-modified input iterator wrapper type for keys typedef typename If<IsPointer<KeyInputIterator>::VALUE, 
CacheModifiedInputIterator<BlockRegionReduceByKeyPolicy::LOAD_MODIFIER, Key, Offset>, // Wrap the native input pointer with CacheModifiedValueInputIterator KeyInputIterator>::Type // Directly use the supplied input iterator type WrappedKeyInputIterator; // Cache-modified input iterator wrapper type for values typedef typename If<IsPointer<ValueInputIterator>::VALUE, CacheModifiedInputIterator<BlockRegionReduceByKeyPolicy::LOAD_MODIFIER, Value, Offset>, // Wrap the native input pointer with CacheModifiedValueInputIterator ValueInputIterator>::Type // Directly use the supplied input iterator type WrappedValueInputIterator; // Value-offset tuple type for scanning (maps accumulated values to segment index) typedef ItemOffsetPair<Value, Offset> ValueOffsetPair; // Reduce-value-by-segment scan operator struct ReduceByKeyOp { ReductionOp op; ///< Wrapped reduction operator /// Constructor __device__ __forceinline__ ReduceByKeyOp(ReductionOp op) : op(op) {} /// Scan operator (specialized for sum on primitive types) __device__ __forceinline__ ValueOffsetPair operator()( const ValueOffsetPair &first, ///< First partial reduction const ValueOffsetPair &second, ///< Second partial reduction Int2Type<true> has_identity_zero) ///< Whether the operation has a zero-valued identity { Value select = (second.offset) ? 0 : first.value; ValueOffsetPair retval; retval.offset = first.offset + second.offset; retval.value = op(select, second.value); return retval; } /// Scan operator (specialized for reductions without zero-valued identity) __device__ __forceinline__ ValueOffsetPair operator()( const ValueOffsetPair &first, ///< First partial reduction const ValueOffsetPair &second, ///< Second partial reduction Int2Type<false> has_identity_zero) ///< Whether the operation has a zero-valued identity { ValueOffsetPair retval; retval.offset = first.offset + second.offset; if (second.offset) { retval.value = second.value; return retval; } else { retval.value = op(first.value, second.value); return retval; } /* // Alternate expression below uses more registers, slower ValueOffsetPair retval; retval.offset = first.offset + second.offset; retval.value = (second.offset) ? second.value : // The second partial reduction spans a segment reset, so it's value aggregate becomes the running aggregate op(first.value, second.value); // The second partial reduction does not span a reset, so accumulate both into the running aggregate return retval; */ } /// Scan operator __device__ __forceinline__ ValueOffsetPair operator()( const ValueOffsetPair &first, ///< First partial reduction const ValueOffsetPair &second) ///< Second partial reduction { return (*this)(first, second, Int2Type<HAS_IDENTITY_ZERO>()); } }; // Parameterized BlockLoad type for keys typedef BlockLoad< WrappedKeyInputIterator, BlockRegionReduceByKeyPolicy::BLOCK_THREADS, BlockRegionReduceByKeyPolicy::ITEMS_PER_THREAD, BlockRegionReduceByKeyPolicy::LOAD_ALGORITHM> BlockLoadKeys; // Parameterized BlockLoad type for values typedef BlockLoad< WrappedValueInputIterator, BlockRegionReduceByKeyPolicy::BLOCK_THREADS, BlockRegionReduceByKeyPolicy::ITEMS_PER_THREAD, (IS_RUN_LENGTH_ENCODE) ? 
BLOCK_LOAD_DIRECT : (BlockLoadAlgorithm) BlockRegionReduceByKeyPolicy::LOAD_ALGORITHM> BlockLoadValues; // Parameterized BlockExchange type for locally compacting items as part of a two-phase scatter typedef BlockExchange< Key, BLOCK_THREADS, ITEMS_PER_THREAD> BlockExchangeKeys; // Parameterized BlockExchange type for locally compacting items as part of a two-phase scatter typedef BlockExchange< Value, BLOCK_THREADS, ITEMS_PER_THREAD> BlockExchangeValues; // Parameterized BlockDiscontinuity type for keys typedef BlockDiscontinuity<Key, BLOCK_THREADS> BlockDiscontinuityKeys; // Parameterized BlockScan type typedef BlockScan< ValueOffsetPair, BlockRegionReduceByKeyPolicy::BLOCK_THREADS, BlockRegionReduceByKeyPolicy::SCAN_ALGORITHM> BlockScanAllocations; // Callback type for obtaining tile prefix during block scan typedef LookbackBlockPrefixCallbackOp< ValueOffsetPair, ReduceByKeyOp, TileLookbackStatus> LookbackPrefixCallbackOp; // Shared memory type for this threadblock struct _TempStorage { union { struct { typename BlockScanAllocations::TempStorage scan; // Smem needed for tile scanning typename LookbackPrefixCallbackOp::TempStorage prefix; // Smem needed for cooperative prefix callback typename BlockDiscontinuityKeys::TempStorage discontinuity; // Smem needed for discontinuity detection typename BlockLoadKeys::TempStorage load_keys; // Smem needed for loading keys Offset tile_idx; // Shared tile index Offset tile_num_flags_prefix; // Exclusive tile prefix }; // Smem needed for loading values typename BlockLoadValues::TempStorage load_values; // Smem needed for compacting values typename BlockExchangeValues::TempStorage exchange_values; // Smem needed for compacting keys typename BlockExchangeKeys::TempStorage exchange_keys; }; }; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- _TempStorage &temp_storage; ///< Reference to temp_storage WrappedKeyInputIterator d_keys_in; ///< Input keys KeyOutputIterator d_keys_out; ///< Output keys WrappedValueInputIterator d_values_in; ///< Input values ValueOutputIterator d_values_out; ///< Output values InequalityWrapper<EqualityOp> inequality_op; ///< Key inequality operator ReduceByKeyOp scan_op; ///< Reduce-value-by flag scan operator Offset num_items; ///< Total number of input items //--------------------------------------------------------------------- // Constructor //--------------------------------------------------------------------- // Constructor __device__ __forceinline__ BlockRegionReduceByKey( TempStorage &temp_storage, ///< Reference to temp_storage KeyInputIterator d_keys_in, ///< Input keys KeyOutputIterator d_keys_out, ///< Output keys ValueInputIterator d_values_in, ///< Input values ValueOutputIterator d_values_out, ///< Output values EqualityOp equality_op, ///< Key equality operator ReductionOp reduction_op, ///< Value reduction operator Offset num_items) ///< Total number of input items : temp_storage(temp_storage.Alias()), d_keys_in(d_keys_in), d_keys_out(d_keys_out), d_values_in(d_values_in), d_values_out(d_values_out), inequality_op(equality_op), scan_op(reduction_op), num_items(num_items) {} //--------------------------------------------------------------------- // Block scan utility methods //--------------------------------------------------------------------- /** * Scan with identity (first tile) */ 
__device__ __forceinline__ void ScanBlock( ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], ValueOffsetPair &block_aggregate, Int2Type<true> has_identity) { ValueOffsetPair identity; identity.value = 0; identity.offset = 0; BlockScanAllocations(temp_storage.scan).ExclusiveScan(values_and_segments, values_and_segments, identity, scan_op, block_aggregate); } /** * Scan without identity (first tile). Without an identity, the first output item is undefined. * */ __device__ __forceinline__ void ScanBlock( ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], ValueOffsetPair &block_aggregate, Int2Type<false> has_identity) { BlockScanAllocations(temp_storage.scan).ExclusiveScan(values_and_segments, values_and_segments, scan_op, block_aggregate); } /** * Scan with identity (subsequent tile) */ __device__ __forceinline__ void ScanBlock( ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], ValueOffsetPair &block_aggregate, LookbackPrefixCallbackOp &prefix_op, Int2Type<true> has_identity) { ValueOffsetPair identity; identity.value = 0; identity.offset = 0; BlockScanAllocations(temp_storage.scan).ExclusiveScan(values_and_segments, values_and_segments, identity, scan_op, block_aggregate, prefix_op); } /** * Scan without identity (subsequent tile). Without an identity, the first output item is undefined. */ __device__ __forceinline__ void ScanBlock( ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], ValueOffsetPair &block_aggregate, LookbackPrefixCallbackOp &prefix_op, Int2Type<false> has_identity) { BlockScanAllocations(temp_storage.scan).ExclusiveScan(values_and_segments, values_and_segments, scan_op, block_aggregate, prefix_op); } //--------------------------------------------------------------------- // Zip utility methods //--------------------------------------------------------------------- template <bool LAST_TILE> __device__ __forceinline__ void ZipValuesAndFlags( Offset num_remaining, Value (&values)[ITEMS_PER_THREAD], Offset (&flags)[ITEMS_PER_THREAD], ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD]) { // Zip values and flags #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { // Unset flags for out-of-bounds keys if ((LAST_TILE) && (Offset(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_remaining)) flags[ITEM] = 0; values_and_segments[ITEM].value = values[ITEM]; values_and_segments[ITEM].offset = flags[ITEM]; } } //--------------------------------------------------------------------- // Scatter utility methods //--------------------------------------------------------------------- /** * Scatter flagged items to output offsets (specialized for direct scattering) * * The exclusive scan causes each head flag to be paired with the previous * value aggregate. 
As such: * - The scatter offsets must be decremented for value value aggregates * - The first tile does not scatter the first flagged value (it is undefined from the exclusive scan) * - If the tile is partially-full, we need to scatter the first out-of-bounds value (which aggregates all valid values in the last segment) * */ template <bool LAST_TILE, bool FIRST_TILE, int ITEM> __device__ __forceinline__ void ScatterDirect( Offset num_remaining, Key (&keys)[ITEMS_PER_THREAD], ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], Offset (&flags)[ITEMS_PER_THREAD], Offset tile_num_flags, Int2Type<ITEM> iteration) { // Scatter key if (flags[ITEM]) { d_keys_out[values_and_segments[ITEM].offset] = keys[ITEM]; } bool is_first_flag = FIRST_TILE && (ITEM == 0) && (threadIdx.x == 0); bool is_oob_value = (LAST_TILE) && (Offset(threadIdx.x * ITEMS_PER_THREAD) + ITEM == num_remaining); // Scatter value reduction if (((flags[ITEM] || is_oob_value)) && (!is_first_flag)) { d_values_out[values_and_segments[ITEM].offset - 1] = values_and_segments[ITEM].value; } ScatterDirect<LAST_TILE, FIRST_TILE>(num_remaining, keys, values_and_segments, flags, tile_num_flags, Int2Type<ITEM + 1>()); } template <bool LAST_TILE, bool FIRST_TILE> __device__ __forceinline__ void ScatterDirect( Offset num_remaining, Key (&keys)[ITEMS_PER_THREAD], ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], Offset (&flags)[ITEMS_PER_THREAD], Offset tile_num_flags, Int2Type<ITEMS_PER_THREAD> iteration) {} /** * Scatter flagged items to output offsets (specialized for two-phase scattering) * * The exclusive scan causes each head flag to be paired with the previous * value aggregate. As such: * - The scatter offsets must be decremented for value value aggregates * - The first tile does not scatter the first flagged value (it is undefined from the exclusive scan) * - If the tile is partially-full, we need to scatter the first out-of-bounds value (which aggregates all valid values in the last segment) * */ template <bool LAST_TILE, bool FIRST_TILE> __device__ __forceinline__ void ScatterTwoPhase( Offset num_remaining, Key (&keys)[ITEMS_PER_THREAD], ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], Offset (&flags)[ITEMS_PER_THREAD], Offset tile_num_flags, Offset tile_num_flags_prefix) { int local_ranks[ITEMS_PER_THREAD]; Value values[ITEMS_PER_THREAD]; // Share exclusive tile prefix if (threadIdx.x == 0) { temp_storage.tile_num_flags_prefix = tile_num_flags_prefix; } __syncthreads(); // Load exclusive tile prefix in all threads tile_num_flags_prefix = temp_storage.tile_num_flags_prefix; __syncthreads(); // Compute local scatter ranks #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { local_ranks[ITEM] = values_and_segments[ITEM].offset - tile_num_flags_prefix; } // Compact keys in shared memory BlockExchangeKeys(temp_storage.exchange_keys).ScatterToStriped(keys, local_ranks, flags); // Scatter keys StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys_out + tile_num_flags_prefix, keys, tile_num_flags); // Unzip values and set flag for first oob item in last tile #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { values[ITEM] = values_and_segments[ITEM].value; if (FIRST_TILE) local_ranks[ITEM]--; if (LAST_TILE && (Offset(threadIdx.x * ITEMS_PER_THREAD) + ITEM == num_remaining)) flags[ITEM] = 1; } // Unset first flag in first tile if (FIRST_TILE && (threadIdx.x == 0)) flags[0] = 0; __syncthreads(); // Compact values in shared memory 
BlockExchangeValues(temp_storage.exchange_values).ScatterToStriped(values, local_ranks, flags); // Number to output Offset exchange_count = tile_num_flags; if (LAST_TILE && (num_remaining < TILE_ITEMS)) exchange_count++; if (FIRST_TILE) { exchange_count--; } else { tile_num_flags_prefix--; } // Scatter values StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values_out + tile_num_flags_prefix, values, exchange_count); __syncthreads(); } /** * Scatter flagged items */ template <bool LAST_TILE, bool FIRST_TILE> __device__ __forceinline__ void Scatter( Offset num_remaining, Key (&keys)[ITEMS_PER_THREAD], ValueOffsetPair (&values_and_segments)[ITEMS_PER_THREAD], Offset (&flags)[ITEMS_PER_THREAD], Offset tile_num_flags, Offset tile_num_flags_prefix) { // Do a one-phase scatter if (a) two-phase is disabled or (b) the average number of selected items per thread is less than one if ((TWO_PHASE_SCATTER) && ((tile_num_flags >> Log2<BLOCK_THREADS>::VALUE) > 0)) { ScatterTwoPhase<LAST_TILE, FIRST_TILE>( num_remaining, keys, values_and_segments, flags, tile_num_flags, tile_num_flags_prefix); } else { ScatterDirect<LAST_TILE, FIRST_TILE>( num_remaining, keys, values_and_segments, flags, tile_num_flags, Int2Type<0>()); } } //--------------------------------------------------------------------- // Cooperatively scan a device-wide sequence of tiles with other CTAs //--------------------------------------------------------------------- /** * Process a tile of input (dynamic domino scan) */ template < bool LAST_TILE> __device__ __forceinline__ ValueOffsetPair ConsumeTile( Offset num_items, ///< Total number of global input items Offset num_remaining, ///< Number of global input items remaining (including this tile) int tile_idx, ///< Tile index Offset block_offset, ///< Tile offset TileLookbackStatus &tile_status) ///< Global list of tile status { Key keys[ITEMS_PER_THREAD]; // Tile keys Value values[ITEMS_PER_THREAD]; // Tile values Offset flags[ITEMS_PER_THREAD]; // Segment head flags ValueOffsetPair values_and_segments[ITEMS_PER_THREAD]; // Zipped values and segment flags|indices ValueOffsetPair running_total; // Running count of segments and current value aggregate (including this tile) // Load keys and values if (LAST_TILE) { BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in + block_offset, keys, num_remaining); } else { BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in + block_offset, keys); } if (SYNC_AFTER_LOAD) __syncthreads(); // Load values if (LAST_TILE) BlockLoadValues(temp_storage.load_values).Load(d_values_in + block_offset, values, num_remaining); else BlockLoadValues(temp_storage.load_values).Load(d_values_in + block_offset, values); if (SYNC_AFTER_LOAD) __syncthreads(); if (tile_idx == 0) { // First tile // Set head flags. 
First tile sets the first flag for the first item BlockDiscontinuityKeys(temp_storage.discontinuity).FlagHeads(flags, keys, inequality_op); // Zip values and flags ZipValuesAndFlags<LAST_TILE>(num_remaining, values, flags, values_and_segments); // Exclusive scan of values and flags ValueOffsetPair block_aggregate; ScanBlock(values_and_segments, block_aggregate, Int2Type<HAS_IDENTITY_ZERO>()); // Update tile status if this is not the last tile if (!LAST_TILE && (threadIdx.x == 0)) tile_status.SetInclusive(0, block_aggregate); // Set offset for first scan output if (!HAS_IDENTITY_ZERO && (threadIdx.x == 0)) values_and_segments[0].offset = 0; running_total = block_aggregate; // Scatter flagged items Scatter<LAST_TILE, true>(num_remaining, keys, values_and_segments, flags, block_aggregate.offset, 0); } else { // Not first tile // Obtain the last key in the previous tile to compare with Key tile_predecessor_key = (threadIdx.x == 0) ? d_keys_in[block_offset - 1] : ZeroInitialize<Key>(); // Set head flags BlockDiscontinuityKeys(temp_storage.discontinuity).FlagHeads(flags, keys, inequality_op, tile_predecessor_key); // Zip values and flags ZipValuesAndFlags<LAST_TILE>(num_remaining, values, flags, values_and_segments); // Exclusive scan of values and flags ValueOffsetPair block_aggregate; LookbackPrefixCallbackOp prefix_op(tile_status, temp_storage.prefix, scan_op, tile_idx); ScanBlock(values_and_segments, block_aggregate, prefix_op, Int2Type<HAS_IDENTITY_ZERO>()); running_total = prefix_op.inclusive_prefix; // Scatter flagged items Scatter<LAST_TILE, false>(num_remaining, keys, values_and_segments, flags, block_aggregate.offset, prefix_op.exclusive_prefix.offset); } return running_total; } /** * Dequeue and scan tiles of items as part of a dynamic domino scan */ template <typename NumSegmentsIterator> ///< Output iterator type for recording number of items selected __device__ __forceinline__ void ConsumeRegion( int num_tiles, ///< Total number of input tiles GridQueue<int> queue, ///< Queue descriptor for assigning tiles of work to thread blocks TileLookbackStatus &tile_status, ///< Global list of tile status NumSegmentsIterator d_num_segments) ///< Output pointer for total number of segments identified { #if (CUB_PTX_VERSION <= 130) // Blocks are launched in increasing order, so just assign one tile per block int tile_idx = (blockIdx.y * 32 * 1024) + blockIdx.x; // Current tile index Offset block_offset = Offset(TILE_ITEMS) * tile_idx; // Global offset for the current tile Offset num_remaining = num_items - block_offset; // Remaining items (including this tile) if (num_remaining > TILE_ITEMS) { // Full tile ConsumeTile<false>(num_items, num_remaining, tile_idx, block_offset, tile_status); } else if (num_remaining > 0) { // Last tile ValueOffsetPair running_total = ConsumeTile<true>(num_items, num_remaining, tile_idx, block_offset, tile_status); // Output the total number of items selected if (threadIdx.x == 0) { *d_num_segments = running_total.offset; // If the last tile is a whole tile, the inclusive prefix contains accumulated value reduction for the last segment if (num_remaining == TILE_ITEMS) { d_values_out[running_total.offset - 1] = running_total.value; } } } #else // Blocks may not be launched in increasing order, so work-steal tiles // Get first tile index if (threadIdx.x == 0) temp_storage.tile_idx = queue.Drain(1); __syncthreads(); int tile_idx = temp_storage.tile_idx; Offset block_offset = Offset(TILE_ITEMS) * tile_idx; // Global offset for the current tile Offset num_remaining = 
num_items - block_offset; // Remaining items (including this tile) while (num_remaining > TILE_ITEMS) { if (SYNC_AFTER_LOAD) __syncthreads(); // Consume full tile ConsumeTile<false>(num_items, num_remaining, tile_idx, block_offset, tile_status); // Get tile index if (threadIdx.x == 0) temp_storage.tile_idx = queue.Drain(1); __syncthreads(); tile_idx = temp_storage.tile_idx; block_offset = Offset(TILE_ITEMS) * tile_idx; num_remaining = num_items - block_offset; } if (num_remaining > 0) { // Consume last tile (treat as partially-full) ValueOffsetPair running_total = ConsumeTile<true>(num_items, num_remaining, tile_idx, block_offset, tile_status); if ((threadIdx.x == 0)) { // Output the total number of items selected *d_num_segments = running_total.offset; // If the last tile is a whole tile, the inclusive prefix contains accumulated value reduction for the last segment if (num_remaining == TILE_ITEMS) { d_values_out[running_total.offset - 1] = running_total.value; } } } #endif } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
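For readers unfamiliar with the reduce-value-by-key pattern this block abstraction implements: consecutive equal keys form a segment, and each segment's values are combined with the reduction operator. On the device this is done with head flags from BlockDiscontinuity and an exclusive scan over (value, flag) pairs; the sequential host sketch below (not CUB code; reduce_by_key is a name introduced here) only illustrates the input/output contract, using addition as the reduction.

// Sequential reference for reduce-value-by-key semantics.
#include <cstdio>
#include <vector>
#include <utility>

template <typename Key, typename Value>
std::vector<std::pair<Key, Value>> reduce_by_key(const std::vector<Key>& keys,
                                                 const std::vector<Value>& values) {
    std::vector<std::pair<Key, Value>> out;
    for (size_t i = 0; i < keys.size(); ++i) {
        if (i == 0 || !(keys[i] == keys[i - 1]))
            out.emplace_back(keys[i], values[i]);               // segment head: start a new aggregate
        else
            out.back().second = out.back().second + values[i];  // same segment: accumulate
    }
    return out;
}

int main() {
    std::vector<int> keys   = {0, 0, 1, 1, 1, 3};
    std::vector<int> values = {1, 2, 3, 4, 5, 6};
    for (auto& kv : reduce_by_key(keys, values))
        std::printf("key %d -> %d\n", kv.first, kv.second);     // (0,3) (1,12) (3,6)
    return 0;
}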
the_stack
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// template <typename T, typename Z> static __global__ void mergeMaxIndexCudaLauncher(void** inArrs, void** inShapes, const int numArrays, void* voutput, const Nd4jLong* outputShape, Nd4jLong length) { auto output = reinterpret_cast<Z*>(voutput); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T mVal = -DataTypeUtils::max<T>(); Z mIdx(0); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong*>(inShapes[i]); auto val = x[shape::getIndexOffset(e, xShape)];; if (mVal < val) { mIdx = static_cast<Z>(i); mVal = val; } } output[shape::getIndexOffset(e, outputShape)] = mIdx; } } template <typename T, typename Z> static void mergeMaxIndex_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { int nArrSize = static_cast<int>(inArrs.size()); std::vector<const void*> inBuffers(nArrSize), inShapes(nArrSize); for (int e = 0; e < nArrSize; e++) { inBuffers[e] = inArrs[e]->specialBuffer(); inShapes[e] = inArrs[e]->specialShapeInfo(); } PointersManager manager(context, "mergeMaxIndex"); auto pInBuffers = reinterpret_cast<void**>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void*))); auto pInShapes = reinterpret_cast<void**>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void*))); auto length = output.lengthOf(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (length + threadsPerBlock - 1) / threadsPerBlock; mergeMaxIndexCudaLauncher<T, Z><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, nArrSize, output.specialBuffer(), output.specialShapeInfo(), length); manager.synchronize(); } void mergeMaxIndex(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { NDArray::prepareSpecialUse({ &output }, inArrs); BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({ &output }, inArrs); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mergeMaxCudaLauncher(void** inArrs, void** inShapes, const int numArrays, void* voutput, const Nd4jLong* outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T mVal = -DataTypeUtils::max<T>(); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<const T*>(inArrs[i]); auto xShape = reinterpret_cast<const Nd4jLong*>(inShapes[i]); auto val = x[shape::getIndexOffset(e, xShape)];; if (mVal < val) mVal = val; } output[shape::getIndexOffset(e, outputShape)] = mVal; } } template<typename T> static void mergeMax_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { int 
nArrsSize = static_cast<int>(inArrs.size()); std::vector<const void*> inBuffers(nArrsSize), inShapes(nArrsSize); for (int e = 0; e < nArrsSize; e++) { inBuffers[e] = inArrs[e]->specialBuffer(); inShapes[e] = inArrs[e]->specialShapeInfo(); } PointersManager manager(context, "mergeMax"); auto pInBuffers = reinterpret_cast<void**>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void*))); auto pInShapes = reinterpret_cast<void**>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void*))); auto length = output.lengthOf(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (length + threadsPerBlock - 1) / threadsPerBlock; mergeMaxCudaLauncher<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, nArrsSize, output.specialBuffer(), output.specialShapeInfo(), length); manager.synchronize(); } void mergeMax(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { NDArray::prepareSpecialUse({ &output }, inArrs); BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES); NDArray::registerSpecialUse({ &output }, inArrs); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mergeMaxBpCudaLauncher( void** inArrs, void** inShapes, const void* vgradient, const Nd4jLong* gradientShape, const int numArrays, void** outArrs, void** outShapes, Nd4jLong length, bool bSameOrderAndEws1) { auto grad = reinterpret_cast<const T*>(vgradient); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; int coords[MAX_RANK]; for (Nd4jLong e = tid; e < length; e += step) { T mVal = -DataTypeUtils::max<T>(); int nMaxIndex = 0; auto xOffset = e, zOffset = e, gradOffset = e; if (!bSameOrderAndEws1) { shape::index2coords(e, gradientShape, coords); gradOffset = shape::getOffset(gradientShape, coords); } for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); if (!bSameOrderAndEws1) { auto xShape = reinterpret_cast<Nd4jLong*>(inShapes[i]); xOffset = shape::getOffset(xShape, coords); } auto val = x[xOffset]; if (mVal < val) { mVal = val; nMaxIndex = i; } } // outputs have to be pre-nullify if (!bSameOrderAndEws1) { auto outShape = reinterpret_cast<Nd4jLong*>(outShapes[nMaxIndex]); zOffset = shape::getOffset(outShape, coords); } auto output = reinterpret_cast<T*>(outArrs[nMaxIndex]); output[zOffset] = grad[gradOffset]; } } template<typename T> static void mergeMaxBp_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, std::vector<NDArray*>& outArrs, int nArrSize, bool bSameOrderAndEws1) { std::vector<const void*> inBuffers(nArrSize), inShapes(nArrSize), outBuffers(nArrSize), outShapes(nArrSize); for (int e = 0; e < nArrSize; e++) { inBuffers[e] = inArrs[e]->specialBuffer(); inShapes[e] = inArrs[e]->specialShapeInfo(); outBuffers[e] = outArrs[e]->specialBuffer(); outShapes[e] = outArrs[e]->specialShapeInfo(); } PointersManager manager(context, "mergeMaxBp"); auto pInBuffers = reinterpret_cast<void**>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void*))); auto pInShapes = reinterpret_cast<void**>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void*))); auto pOutBuffers = reinterpret_cast<void**>(manager.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void*))); auto pOutShapes = reinterpret_cast<void**>(manager.replicatePointer(outShapes.data(), 
outShapes.size() * sizeof(void*))); auto length = inArrs[nArrSize]->lengthOf(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (length + threadsPerBlock - 1) / threadsPerBlock; mergeMaxBpCudaLauncher<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, inArrs[nArrSize]->specialBuffer(), inArrs[nArrSize]->specialShapeInfo(), nArrSize, pOutBuffers, pOutShapes, length, bSameOrderAndEws1); manager.synchronize(); } void mergeMaxBp(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, std::vector<NDArray*>& outArrs) { // not use gradient int nArrSize = static_cast<int>(inArrs.size() - 1); const std::vector<const NDArray*>& out = reinterpret_cast<const std::vector<const NDArray*>&>(outArrs); NDArray::prepareSpecialUse(out, inArrs); bool bSameOrderAndEws1 = (1 == inArrs[nArrSize]->ews()); auto ordering = inArrs[nArrSize]->ordering(); for (int i = 0; i < nArrSize; ++i) { bSameOrderAndEws1 &= (ordering == inArrs[i]->ordering()); bSameOrderAndEws1 &= (1 == inArrs[i]->ews()); bSameOrderAndEws1 &= (ordering == outArrs[i]->ordering()); bSameOrderAndEws1 &= (1 == outArrs[i]->ews()); } BUILD_SINGLE_SELECTOR(inArrs[nArrSize]->dataType(), mergeMaxBp_, (context, inArrs, outArrs, nArrSize, bSameOrderAndEws1), LIBND4J_TYPES); NDArray::registerSpecialUse( out, inArrs ); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mergeAvgCudaLauncher(void** inArrs, void** inShapes, const int numArrays, void* voutput, const Nd4jLong* outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T sum(0.0f); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong*>(inShapes[i]); sum += x[shape::getIndexOffset(e, xShape)]; } output[shape::getIndexOffset(e, outputShape)] = sum / numArrays; } } template<typename T> static void mergeAvg_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { std::vector<const void*> inBuffers(inArrs.size()), inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->specialBuffer(); inShapes[e] = inArrs[e]->specialShapeInfo(); } PointersManager manager(context, "mergeAvg"); auto pInBuffers = reinterpret_cast<void**>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void*))); auto pInShapes = reinterpret_cast<void**>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void*))); auto length = output.lengthOf(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (length + threadsPerBlock - 1) / threadsPerBlock; mergeAvgCudaLauncher<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int)inArrs.size(), output.specialBuffer(), output.specialShapeInfo(), length); manager.synchronize(); } void mergeAvg(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { NDArray::prepareSpecialUse({ &output }, inArrs); BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), FLOAT_TYPES); NDArray::registerSpecialUse({ &output }, inArrs); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mergeAvgBpCudaLauncher( const void* vgradient, const 
Nd4jLong* gradientShape, void** outArrs, void** outShapes, const int numArrays, Nd4jLong length, bool bSameOrderAndEws1) { auto grad = reinterpret_cast<const T*>(vgradient); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; int coords[MAX_RANK]; for (Nd4jLong e = tid; e < length; e += step) { auto zOffset = e, gradOffset = e; if (!bSameOrderAndEws1) { shape::index2coords(e, gradientShape, coords); gradOffset = shape::getOffset(gradientShape, coords); } for (int i = 0; i < numArrays; i++) { if (!bSameOrderAndEws1) { auto outShape = reinterpret_cast<Nd4jLong*>(outShapes[i]); zOffset = shape::getOffset(outShape, coords); } auto output = reinterpret_cast<T*>(outArrs[i]); output[zOffset] = grad[gradOffset] / numArrays; } } } template<typename T> static void mergeAvgBp_(sd::LaunchContext* context, const NDArray& gradient, std::vector<NDArray*>& outArrs, bool bSameOrderAndEws1) { int nArrSize = static_cast<int>(outArrs.size()); std::vector<const void*> outBuffers(nArrSize), outShapes(nArrSize); for (int e = 0; e < nArrSize; e++) { outBuffers[e] = outArrs[e]->specialBuffer(); outShapes[e] = outArrs[e]->specialShapeInfo(); } PointersManager manager(context, "mergeAvgBp"); auto pOutBuffers = reinterpret_cast<void**>(manager.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void*))); auto pOutShapes = reinterpret_cast<void**>(manager.replicatePointer(outShapes.data(), outShapes.size() * sizeof(void*))); auto length = gradient.lengthOf(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (length + threadsPerBlock - 1) / threadsPerBlock; mergeAvgBpCudaLauncher<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(gradient.specialBuffer(), gradient.specialShapeInfo(), pOutBuffers, pOutShapes, nArrSize, length, bSameOrderAndEws1); manager.synchronize(); } void mergeAvgBp(sd::LaunchContext* context, const NDArray& gradient, std::vector<NDArray*>& outArrs) { const std::vector<const NDArray*>& out = reinterpret_cast<const std::vector<const NDArray*>&>(outArrs); NDArray::prepareSpecialUse( out, { &gradient }); bool bSameOrderAndEws1 = (1 == gradient.ews()); auto ordering = gradient.ordering(); for (const auto& v : outArrs) { bSameOrderAndEws1 &= (ordering == v->ordering()); bSameOrderAndEws1 &= (1 == v->ews()); } BUILD_SINGLE_SELECTOR(gradient.dataType(), mergeAvgBp_, (context, gradient, outArrs, bSameOrderAndEws1), LIBND4J_TYPES); NDArray::prepareSpecialUse(out, { &gradient }); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mergeAddCudaLauncher(void** inArrs, void** inShapes, const int numArrays, void* voutput, const Nd4jLong* outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T sum(0.0f); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong*>(inShapes[i]); sum += x[shape::getIndexOffset(e, xShape)]; } output[shape::getIndexOffset(e, outputShape)] = sum; } } template<typename T> static void mergeAdd_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { int nArrSize = static_cast<int>(inArrs.size()); std::vector<const void*> inBuffers(nArrSize), inShapes(nArrSize); for (int e = 0; e < nArrSize; e++) { inBuffers[e] = inArrs[e]->specialBuffer(); inShapes[e] = 
inArrs[e]->specialShapeInfo(); } PointersManager manager(context, "mergeAdd"); auto pInBuffers = reinterpret_cast<void**>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void*))); auto pInShapes = reinterpret_cast<void**>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void*))); auto length = output.lengthOf(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (length + threadsPerBlock - 1) / threadsPerBlock; mergeAddCudaLauncher<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, nArrSize, output.specialBuffer(), output.specialShapeInfo(), length); manager.synchronize(); } BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output), NUMERIC_TYPES); void mergeAdd(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output) { NDArray::prepareSpecialUse({ &output }, inArrs); BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), NUMERIC_TYPES); NDArray::registerSpecialUse({ &output }, inArrs); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mergeAddBpCudaLauncher(const void* vgradient, const Nd4jLong* gradientShape, void** outArrs, void** outShapes, const int numArrays, Nd4jLong length, bool bSameOrderAndEws1) { auto grad = reinterpret_cast<const T*>(vgradient); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; int coords[MAX_RANK]; for (Nd4jLong e = tid; e < length; e += step) { auto zOffset = e, gradOffset = e; if (!bSameOrderAndEws1) { shape::index2coords(e, gradientShape, coords); gradOffset = shape::getOffset(gradientShape, coords); } for (int i = 0; i < numArrays; i++) { if (!bSameOrderAndEws1) { auto outShape = reinterpret_cast<Nd4jLong*>(outShapes[i]); zOffset = shape::getOffset(outShape, coords); } auto output = reinterpret_cast<T*>(outArrs[i]); output[zOffset] = grad[gradOffset]; } } } template<typename T> static void mergeAddBp_(sd::LaunchContext* context, const NDArray& gradient, std::vector<NDArray*>& outArrs, bool bSameOrderAndEws1) { int nArrSize = static_cast<int>(outArrs.size()); std::vector<const void*> outBuffers(nArrSize), outShapes(nArrSize); for (int e = 0; e < nArrSize; e++) { outBuffers[e] = outArrs[e]->specialBuffer(); outShapes[e] = outArrs[e]->specialShapeInfo(); } PointersManager manager(context, "mergeAddBp"); auto pOutBuffers = reinterpret_cast<void**>(manager.replicatePointer(outBuffers.data(), outBuffers.size() * sizeof(void*))); auto pOutShapes = reinterpret_cast<void**>(manager.replicatePointer(outShapes.data(), outShapes.size() * sizeof(void*))); auto length = gradient.lengthOf(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (length + threadsPerBlock - 1) / threadsPerBlock; mergeAddBpCudaLauncher<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(gradient.specialBuffer(), gradient.specialShapeInfo(), pOutBuffers, pOutShapes, nArrSize, length, bSameOrderAndEws1); manager.synchronize(); } void mergeAddBp(sd::LaunchContext* context, const NDArray& gradient, std::vector<NDArray*>& outArrs) { const std::vector<const NDArray*>& out = reinterpret_cast<const std::vector<const NDArray*>& >(outArrs); NDArray::prepareSpecialUse( out, { &gradient }); bool bSameOrderAndEws1 = (1 == gradient.ews()); auto ordering = gradient.ordering(); for (const auto& v : outArrs) { 
        bSameOrderAndEws1 &= (ordering == v->ordering());
        bSameOrderAndEws1 &= (1 == v->ews());
    }

    BUILD_SINGLE_SELECTOR(gradient.dataType(), mergeAddBp_, (context, gradient, outArrs, bSameOrderAndEws1), LIBND4J_TYPES);

    // release the buffers prepared above
    NDArray::registerSpecialUse(out, { &gradient });
}

}
}
}
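// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the library above): when every array shares
// the gradient's ordering and has ews == 1 (bSameOrderAndEws1 == true), the
// merge backprop kernels degenerate to a plain flat-indexed broadcast. The
// standalone kernel below shows that fast path for the average case; the
// kernel name and launch parameters are hypothetical.
template <typename T>
__global__ void mergeAvgBpFlatSketch(const T* grad, T** outArrs, const int numArrays, const long long length) {
    const auto tid  = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;
    for (long long e = tid; e < length; e += step) {
        const T g = grad[e] / static_cast<T>(numArrays);   // each merged input receives an equal share
        for (int i = 0; i < numArrays; i++)
            outArrs[i][e] = g;                             // mergeAddBp would write grad[e] unscaled instead
    }
}
// ---------------------------------------------------------------------------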
#define threadsPerBlock 1024 #include "cuda_helper.h" __constant__ uint64_t c_PaddedMessage80[16]; // padded message (80 bytes + padding) __constant__ uint64_t c_tmp[8*9]; __constant__ uint64_t pTarget[4]; uint32_t *d_wxnonce[MAX_GPUS] = { 0 }; uint32_t *d_WXNonce[MAX_GPUS] = { 0 }; /** * Whirlpool CUDA kernel implementation. * * ==========================(LICENSE BEGIN)============================ * * Copyright (c) 2014 djm34 & tpruvot & SP & Provos Alexis * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * ===========================(LICENSE END)============================= * @author djm34 * @author tpruvot * @author SP * @author Provos Alexis */ __constant__ __align__(128) uint64_t mixTob0Tox[256]; const uint64_t plain_T0[256]= { 0xD83078C018601818,0x2646AF05238C2323,0xB891F97EC63FC6C6,0xFBCD6F13E887E8E8,0xCB13A14C87268787,0x116D62A9B8DAB8B8,0x0902050801040101,0x0D9E6E424F214F4F,0x9B6CEEAD36D83636, 0xFF510459A6A2A6A6,0x0CB9BDDED26FD2D2,0x0EF706FBF5F3F5F5,0x96F280EF79F97979,0x30DECE5F6FA16F6F,0x6D3FEFFC917E9191,0xF8A407AA52555252,0x47C0FD27609D6060,0x35657689BCCABCBC, 0x372BCDAC9B569B9B,0x8A018C048E028E8E,0xD25B1571A3B6A3A3,0x6C183C600C300C0C,0x84F68AFF7BF17B7B,0x806AE1B535D43535,0xF53A69E81D741D1D,0xB3DD4753E0A7E0E0,0x21B3ACF6D77BD7D7, 0x9C99ED5EC22FC2C2,0x435C966D2EB82E2E,0x29967A624B314B4B,0x5DE121A3FEDFFEFE,0xD5AE168257415757,0xBD2A41A815541515,0xE8EEB69F77C17777,0x926EEBA537DC3737,0x9ED7567BE5B3E5E5, 0x1323D98C9F469F9F,0x23FD17D3F0E7F0F0,0x20947F6A4A354A4A,0x44A9959EDA4FDADA,0xA2B025FA587D5858,0xCF8FCA06C903C9C9,0x7C528D5529A42929,0x5A1422500A280A0A,0x507F4FE1B1FEB1B1, 0xC95D1A69A0BAA0A0,0x14D6DA7F6BB16B6B,0xD917AB5C852E8585,0x3C677381BDCEBDBD,0x8FBA34D25D695D5D,0x9020508010401010,0x07F503F3F4F7F4F4,0xDD8BC016CB0BCBCB,0xD37CC6ED3EF83E3E, 0x2D0A112805140505,0x78CEE61F67816767,0x97D55373E4B7E4E4,0x024EBB25279C2727,0x7382583241194141,0xA70B9D2C8B168B8B,0xF6530151A7A6A7A7,0xB2FA94CF7DE97D7D,0x4937FBDC956E9595, 0x56AD9F8ED847D8D8,0x70EB308BFBCBFBFB,0xCDC17123EE9FEEEE,0xBBF891C77CED7C7C,0x71CCE31766856666,0x7BA78EA6DD53DDDD,0xAF2E4BB8175C1717,0x458E460247014747,0x1A21DC849E429E9E, 0xD489C51ECA0FCACA,0x585A99752DB42D2D,0x2E637991BFC6BFBF,0x3F0E1B38071C0707,0xAC472301AD8EADAD,0xB0B42FEA5A755A5A,0xEF1BB56C83368383,0xB666FF8533CC3333,0x5CC6F23F63916363, 0x12040A1002080202,0x93493839AA92AAAA,0xDEE2A8AF71D97171,0xC68DCF0EC807C8C8,0xD1327DC819641919,0x3B92707249394949,0x5FAF9A86D943D9D9,0x31F91DC3F2EFF2F2,0xA8DB484BE3ABE3E3, 
0xB9B62AE25B715B5B,0xBC0D9234881A8888,0x3E29C8A49A529A9A,0x0B4CBE2D26982626,0xBF64FA8D32C83232,0x597D4AE9B0FAB0B0,0xF2CF6A1BE983E9E9,0x771E33780F3C0F0F,0x33B7A6E6D573D5D5, 0xF41DBA74803A8080,0x27617C99BEC2BEBE,0xEB87DE26CD13CDCD,0x8968E4BD34D03434,0x3290757A483D4848,0x54E324ABFFDBFFFF,0x8DF48FF77AF57A7A,0x643DEAF4907A9090,0x9DBE3EC25F615F5F, 0x3D40A01D20802020,0x0FD0D56768BD6868,0xCA3472D01A681A1A,0xB7412C19AE82AEAE,0x7D755EC9B4EAB4B4,0xCEA8199A544D5454,0x7F3BE5EC93769393,0x2F44AA0D22882222,0x63C8E907648D6464, 0x2AFF12DBF1E3F1F1,0xCCE6A2BF73D17373,0x82245A9012481212,0x7A805D3A401D4040,0x4810284008200808,0x959BE856C32BC3C3,0xDFC57B33EC97ECEC,0x4DAB9096DB4BDBDB,0xC05F1F61A1BEA1A1, 0x9107831C8D0E8D8D,0xC87AC9F53DF43D3D,0x5B33F1CC97669797,0x0000000000000000,0xF983D436CF1BCFCF,0x6E5687452BAC2B2B,0xE1ECB39776C57676,0xE619B06482328282,0x28B1A9FED67FD6D6, 0xC33677D81B6C1B1B,0x74775BC1B5EEB5B5,0xBE432911AF86AFAF,0x1DD4DF776AB56A6A,0xEAA00DBA505D5050,0x578A4C1245094545,0x38FB18CBF3EBF3F3,0xAD60F09D30C03030,0xC4C3742BEF9BEFEF, 0xDA7EC3E53FFC3F3F,0xC7AA1C9255495555,0xDB591079A2B2A2A2,0xE9C96503EA8FEAEA,0x6ACAEC0F65896565,0x036968B9BAD2BABA,0x4A5E93652FBC2F2F,0x8E9DE74EC027C0C0,0x60A181BEDE5FDEDE, 0xFC386CE01C701C1C,0x46E72EBBFDD3FDFD,0x1F9A64524D294D4D,0x7639E0E492729292,0xFAEABC8F75C97575,0x360C1E3006180606,0xAE0998248A128A8A,0x4B7940F9B2F2B2B2,0x85D15963E6BFE6E6, 0x7E1C36700E380E0E,0xE73E63F81F7C1F1F,0x55C4F73762956262,0x3AB5A3EED477D4D4,0x814D3229A89AA8A8,0x5231F4C496629696,0x62EF3A9BF9C3F9F9,0xA397F666C533C5C5,0x104AB13525942525, 0xABB220F259795959,0xD015AE54842A8484,0xC5E4A7B772D57272,0xEC72DDD539E43939,0x1698615A4C2D4C4C,0x94BC3BCA5E655E5E,0x9FF085E778FD7878,0xE570D8DD38E03838,0x980586148C0A8C8C, 0x17BFB2C6D163D1D1,0xE4570B41A5AEA5A5,0xA1D94D43E2AFE2E2,0x4EC2F82F61996161,0x427B45F1B3F6B3B3,0x3442A51521842121,0x0825D6949C4A9C9C,0xEE3C66F01E781E1E,0x6186522243114343, 0xB193FC76C73BC7C7,0x4FE52BB3FCD7FCFC,0x2408142004100404,0xE3A208B251595151,0x252FC7BC995E9999,0x22DAC44F6DA96D6D,0x651A39680D340D0D,0x79E93583FACFFAFA,0x69A384B6DF5BDFDF, 0xA9FC9BD77EE57E7E,0x1948B43D24902424,0xFE76D7C53BEC3B3B,0x9A4B3D31AB96ABAB,0xF081D13ECE1FCECE,0x9922558811441111,0x8303890C8F068F8F,0x049C6B4A4E254E4E,0x667351D1B7E6B7B7, 0xE0CB600BEB8BEBEB,0xC178CCFD3CF03C3C,0xFD1FBF7C813E8181,0x4035FED4946A9494,0x1CF30CEBF7FBF7F7,0x186F67A1B9DEB9B9,0x8B265F98134C1313,0x51589C7D2CB02C2C,0x05BBB8D6D36BD3D3, 0x8CD35C6BE7BBE7E7,0x39DCCB576EA56E6E,0xAA95F36EC437C4C4,0x1B060F18030C0303,0xDCAC138A56455656,0x5E88491A440D4444,0xA0FE9EDF7FE17F7F,0x884F3721A99EA9A9,0x6754824D2AA82A2A, 0x0A6B6DB1BBD6BBBB,0x879FE246C123C1C1,0xF1A602A253515353,0x72A58BAEDC57DCDC,0x531627580B2C0B0B,0x0127D39C9D4E9D9D,0x2BD8C1476CAD6C6C,0xA462F59531C43131,0xF3E8B98774CD7474, 0x15F109E3F6FFF6F6,0x4C8C430A46054646,0xA5452609AC8AACAC,0xB50F973C891E8989,0xB42844A014501414,0xBADF425BE1A3E1E1,0xA62C4EB016581616,0xF774D2CD3AE83A3A,0x06D2D06F69B96969, 0x41122D4809240909,0xD7E0ADA770DD7070,0x6F7154D9B6E2B6B6,0x1EBDB7CED067D0D0,0xD6C77E3BED93EDED,0xE285DB2ECC17CCCC,0x6884572A42154242,0x2C2DC2B4985A9898,0xED550E49A4AAA4A4, 0x7550885D28A02828,0x86B831DA5C6D5C5C,0x6BED3F93F8C7F8F8,0xC211A44486228686 }; /** * Round constants. 
*/ __constant__ uint64_t InitVector_RC[10]; const uint64_t plain_RC[10] = { 0x4F01B887E8C62318,0x52916F79F5D2A636,0x357B0CA38E9BBC60,0x57FE4B2EC2D7E01D,0xDA4AF09FE5377715, 0x856BA0B10A29C958,0x67053ECBF4105DBD,0xD8957DA78B4127E4,0x9E4717DD667CEEFB,0x33835AAD07BF2DCA }; /* ====================================================================== */ __device__ __forceinline__ static uint64_t ROUND_ELT(const uint64_t* sharedMemory, const uint64_t* in, const int i0, const int i1, const int i2, const int i3, const int i4, const int i5, const int i6, const int i7) { uint32_t* in32 = (uint32_t*)in; return xor8( sharedMemory[in32[(i0 << 1)]&0xFF], sharedMemory[__byte_perm(in32[(i1 << 1)], 0, 0x4441) + 256], sharedMemory[__byte_perm(in32[(i2 << 1)], 0, 0x4442) + 512], sharedMemory[__byte_perm(in32[(i3 << 1)], 0, 0x4443) + 768], sharedMemory[(in32[(i4 << 1) + 1]&0xFF) + 1024], sharedMemory[__byte_perm(in32[(i5 << 1) + 1], 0, 0x4441) + 1280], sharedMemory[__byte_perm(in32[(i6 << 1) + 1], 0, 0x4442) + 1536], sharedMemory[__byte_perm(in32[(i7 << 1) + 1], 0, 0x4443) + 1792]); } #define TRANSFER(dst, src) { \ dst[0] = src ## 0; \ dst[1] = src ## 1; \ dst[2] = src ## 2; \ dst[3] = src ## 3; \ dst[4] = src ## 4; \ dst[5] = src ## 5; \ dst[6] = src ## 6; \ dst[7] = src ## 7; \ } #define ROUND(table, in, out, c0, c1, c2, c3, c4, c5, c6, c7) { \ out ## 0 = xor1(ROUND_ELT(table, in, 0, 7, 6, 5, 4, 3, 2, 1), c0); \ out ## 1 = xor1(ROUND_ELT(table, in, 1, 0, 7, 6, 5, 4, 3, 2), c1); \ out ## 2 = xor1(ROUND_ELT(table, in, 2, 1, 0, 7, 6, 5, 4, 3), c2); \ out ## 3 = xor1(ROUND_ELT(table, in, 3, 2, 1, 0, 7, 6, 5, 4), c3); \ out ## 4 = xor1(ROUND_ELT(table, in, 4, 3, 2, 1, 0, 7, 6, 5), c4); \ out ## 5 = xor1(ROUND_ELT(table, in, 5, 4, 3, 2, 1, 0, 7, 6), c5); \ out ## 6 = xor1(ROUND_ELT(table, in, 6, 5, 4, 3, 2, 1, 0, 7), c6); \ out ## 7 = xor1(ROUND_ELT(table, in, 7, 6, 5, 4, 3, 2, 1, 0), c7); \ } #define ROUND1(table, in, out, c) { \ out ## 0 = xor1(ROUND_ELT(table, in, 0, 7, 6, 5, 4, 3, 2, 1), c); \ out ## 1 = ROUND_ELT(table, in, 1, 0, 7, 6, 5, 4, 3, 2); \ out ## 2 = ROUND_ELT(table, in, 2, 1, 0, 7, 6, 5, 4, 3); \ out ## 3 = ROUND_ELT(table, in, 3, 2, 1, 0, 7, 6, 5, 4); \ out ## 4 = ROUND_ELT(table, in, 4, 3, 2, 1, 0, 7, 6, 5); \ out ## 5 = ROUND_ELT(table, in, 5, 4, 3, 2, 1, 0, 7, 6); \ out ## 6 = ROUND_ELT(table, in, 6, 5, 4, 3, 2, 1, 0, 7); \ out ## 7 = ROUND_ELT(table, in, 7, 6, 5, 4, 3, 2, 1, 0); \ } #define ROUND_KSCHED(table, in, out, c) \ ROUND1(table, in, out, c) \ TRANSFER(in, out) #define ROUND_WENC(table, in, key, out) \ ROUND(table, in, out, key[0], key[1], key[2],key[3], key[4], key[5], key[6], key[7]) \ TRANSFER(in, out) uint64_t* d_tmp[MAX_GPUS] = { 0 }; __device__ __forceinline__ static void getShared(uint64_t* sharedMemory){ if (threadIdx.x < 256) { sharedMemory[threadIdx.x] = mixTob0Tox[threadIdx.x]; sharedMemory[threadIdx.x+256] = ROTL64(sharedMemory[threadIdx.x], 8); sharedMemory[threadIdx.x+512] = ROTL64(sharedMemory[threadIdx.x],16); sharedMemory[threadIdx.x+768] = ROTL64(sharedMemory[threadIdx.x],24); sharedMemory[threadIdx.x+1024] = ROTL64(sharedMemory[threadIdx.x],32); sharedMemory[threadIdx.x+1280] = ROTR64(sharedMemory[threadIdx.x],24); sharedMemory[threadIdx.x+1536] = ROTR64(sharedMemory[threadIdx.x],16); sharedMemory[threadIdx.x+1792] = ROTR64(sharedMemory[threadIdx.x], 8); } __syncthreads(); } __global__ #if __CUDA_ARCH__ > 200 __launch_bounds__(256,8) #endif void precomputeX(int threads,uint64_t* d_tmp){ __shared__ uint64_t sharedMemory[2048]; getShared(sharedMemory); int thread = 
(blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint64_t n[8]; uint64_t h[8] = {0,0,0,0,0,0,0,0}; #pragma unroll 8 for (int i=0; i<8; i++) { n[i] = c_PaddedMessage80[i]; // read data } //#pragma unroll 10 for (unsigned int r=0; r < 10; r++) { uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; ROUND_KSCHED(sharedMemory, h, tmp, InitVector_RC[r]); ROUND_WENC(sharedMemory, n, h, tmp); } #pragma unroll 8 for (int i=0; i < 8; i++) { h[i] = xor1(n[i],c_PaddedMessage80[i]); } uint64_t atLastCalc=xor1(h[3],h[5]); ////////////////////////////////// n[0] = c_PaddedMessage80[8]; //read data n[1] = c_PaddedMessage80[9]; //whirlpool n[2] = 0x0000000000000080; //whirlpool n[3] = 0; n[4] = 0; n[5] = 0; n[6] = 0; n[7] = 0x8002000000000000; n[0] = xor1(n[0],h[0]); n[2] = xor1(n[2],h[2]); n[3] = h[3]; n[4] = h[4]; n[5] = h[5]; n[6] = h[6]; n[7] = xor1(n[7],h[7]); uint64_t tmp[16]; tmp[0] = xor1(ROUND_ELT(sharedMemory, h, 0, 7, 6, 5, 4, 3, 2, 1),InitVector_RC[0]); tmp[1] = ROUND_ELT(sharedMemory, h, 1, 0, 7, 6, 5, 4, 3, 2); tmp[2] = ROUND_ELT(sharedMemory, h, 2, 1, 0, 7, 6, 5, 4, 3); tmp[3] = ROUND_ELT(sharedMemory, h, 3, 2, 1, 0, 7, 6, 5, 4); tmp[4] = ROUND_ELT(sharedMemory, h, 4, 3, 2, 1, 0, 7, 6, 5); tmp[5] = ROUND_ELT(sharedMemory, h, 5, 4, 3, 2, 1, 0, 7, 6); tmp[6] = ROUND_ELT(sharedMemory, h, 6, 5, 4, 3, 2, 1, 0, 7); tmp[7] = ROUND_ELT(sharedMemory, h, 7, 6, 5, 4, 3, 2, 1, 0); uint32_t* n32 = (uint32_t*)n; tmp[8]=xor8( sharedMemory[__byte_perm(n32[ 0], 0, 0x4440)] ,sharedMemory[__byte_perm(n32[14], 0, 0x4441) + 256], sharedMemory[__byte_perm(n32[12], 0, 0x4442) + 512] ,sharedMemory[__byte_perm(n32[10], 0, 0x4443) + 768], sharedMemory[__byte_perm(n32[ 9], 0, 0x4440) + 1024] ,sharedMemory[__byte_perm(n32[ 7], 0, 0x4441) + 1280], sharedMemory[__byte_perm(n32[ 5], 0, 0x4442) + 1536] ,tmp[0]); tmp[9]=xor8( tmp[1] ,sharedMemory[__byte_perm(n32[ 0], 0, 0x4441) + 256], sharedMemory[__byte_perm(n32[14], 0, 0x4442) + 512] ,sharedMemory[__byte_perm(n32[12], 0, 0x4443) + 768], sharedMemory[__byte_perm(n32[11], 0, 0x4440) + 1024] ,sharedMemory[__byte_perm(n32[ 9], 0, 0x4441) + 1280], sharedMemory[__byte_perm(n32[ 7], 0, 0x4442) + 1536] ,sharedMemory[__byte_perm(n32[ 5], 0, 0x4443) + 1792]); tmp[10]=xor8( sharedMemory[__byte_perm(n32[ 4], 0, 0x4440)] ,tmp[2] , sharedMemory[__byte_perm(n32[ 0], 0, 0x4442) + 512] ,sharedMemory[__byte_perm(n32[14], 0, 0x4443) + 768], sharedMemory[__byte_perm(n32[13], 0, 0x4440) + 1024] ,sharedMemory[__byte_perm(n32[11], 0, 0x4441) + 1280], sharedMemory[__byte_perm(n32[ 9], 0, 0x4442) + 1536] ,sharedMemory[__byte_perm(n32[ 7], 0, 0x4443) + 1792]); tmp[11]=xor8( sharedMemory[__byte_perm(n32[ 6], 0, 0x4440)] ,sharedMemory[__byte_perm(n32[ 4], 0, 0x4441) + 256], tmp[3] ,sharedMemory[__byte_perm(n32[ 0], 0, 0x4443) + 768], sharedMemory[__byte_perm(n32[15], 0, 0x4440) + 1024] ,sharedMemory[__byte_perm(n32[13], 0, 0x4441) + 1280], sharedMemory[__byte_perm(n32[11], 0, 0x4442) + 1536] ,sharedMemory[__byte_perm(n32[ 9], 0, 0x4443) + 1792]); tmp[12]=xor8( sharedMemory[__byte_perm(n32[ 8], 0, 0x4440)] ,sharedMemory[__byte_perm(n32[ 6], 0, 0x4441) + 256] , sharedMemory[__byte_perm(n32[ 4], 0, 0x4442) + 512] ,tmp[4] , sharedMemory[__byte_perm(n32[ 1], 0, 0x4440) + 1024] ,sharedMemory[__byte_perm(n32[15], 0, 0x4441) + 1280] , sharedMemory[__byte_perm(n32[13], 0, 0x4442) + 1536] ,sharedMemory[__byte_perm(n32[11], 0, 0x4443) + 1792]); tmp[13]=xor8( sharedMemory[__byte_perm(n32[10], 0, 0x4440)] ,sharedMemory[__byte_perm(n32[ 8], 0, 0x4441) + 256], sharedMemory[__byte_perm(n32[ 6], 
0, 0x4442) + 512] ,sharedMemory[__byte_perm(n32[ 4], 0, 0x4443) + 768], tmp[5] ,sharedMemory[__byte_perm(n32[ 1], 0, 0x4441) + 1280], sharedMemory[__byte_perm(n32[15], 0, 0x4442) + 1536] ,sharedMemory[__byte_perm(n32[13], 0, 0x4443) + 1792]); tmp[14]=xor8( sharedMemory[__byte_perm(n32[12], 0, 0x4440)] ,sharedMemory[__byte_perm(n32[10], 0, 0x4441) + 256], sharedMemory[__byte_perm(n32[ 8], 0, 0x4442) + 512] ,sharedMemory[__byte_perm(n32[ 6], 0, 0x4443) + 768], sharedMemory[__byte_perm(n32[ 5], 0, 0x4440) + 1024] ,tmp[6], sharedMemory[__byte_perm(n32[ 1], 0, 0x4442) + 1536] ,sharedMemory[__byte_perm(n32[15], 0, 0x4443) + 1792]); tmp[15]=xor8( sharedMemory[__byte_perm(n32[14], 0, 0x4440)] ,sharedMemory[__byte_perm(n32[12], 0, 0x4441) + 256], sharedMemory[__byte_perm(n32[10], 0, 0x4442) + 512] ,sharedMemory[__byte_perm(n32[ 8], 0, 0x4443) + 768], sharedMemory[__byte_perm(n32[ 7], 0, 0x4440) + 1024] ,sharedMemory[__byte_perm(n32[ 5], 0, 0x4441) + 1280], tmp[7] ,sharedMemory[__byte_perm(n32[ 1], 0, 0x4443) + 1792]); n[1] =xor1(n[1],h[1]); tmp[9]=xor1(tmp[9],sharedMemory[__byte_perm(n32[2], 0, 0x4440)]); tmp[10]=xor1(tmp[10],sharedMemory[__byte_perm(n32[2], 0, 0x4441) + 256]); tmp[11]=xor1(tmp[11],sharedMemory[__byte_perm(n32[2], 0, 0x4442) + 512]); tmp[12]=xor1(tmp[12],sharedMemory[__byte_perm(n32[2], 0, 0x4443) + 768]); d_tmp[threadIdx.x]=tmp[8+threadIdx.x]; uint64_t tmp3[8]; tmp3[0] = xor1(ROUND_ELT(sharedMemory, tmp, 0, 7, 6, 5, 4, 3, 2, 1), InitVector_RC[1]); tmp3[1] = ROUND_ELT(sharedMemory, tmp, 1, 0, 7, 6, 5, 4, 3, 2); tmp3[2] = ROUND_ELT(sharedMemory, tmp, 2, 1, 0, 7, 6, 5, 4, 3); tmp3[3] = ROUND_ELT(sharedMemory, tmp, 3, 2, 1, 0, 7, 6, 5, 4); tmp3[4] = ROUND_ELT(sharedMemory, tmp, 4, 3, 2, 1, 0, 7, 6, 5); tmp3[5] = ROUND_ELT(sharedMemory, tmp, 5, 4, 3, 2, 1, 0, 7, 6); tmp3[6] = ROUND_ELT(sharedMemory, tmp, 6, 5, 4, 3, 2, 1, 0, 7); tmp3[7] = ROUND_ELT(sharedMemory, tmp, 7, 6, 5, 4, 3, 2, 1, 0); n32 = (uint32_t*)&tmp[8]; tmp[0]=xor1( xor3(sharedMemory[__byte_perm(n32[ 9], 0, 0x4440) + 1024],sharedMemory[__byte_perm(n32[ 7], 0, 0x4441) + 1280],sharedMemory[__byte_perm(n32[ 5], 0, 0x4442) + 1536]), xor1(sharedMemory[__byte_perm(n32[ 3], 0, 0x4443) + 1792],tmp3[0])); tmp[1]=xor1( xor3(sharedMemory[__byte_perm(n32[ 2], 0, 0x4440)],sharedMemory[__byte_perm(n32[ 9], 0, 0x4441) + 1280],sharedMemory[__byte_perm(n32[ 7], 0, 0x4442) + 1536]), xor1(sharedMemory[__byte_perm(n32[ 5], 0, 0x4443) + 1792],tmp3[1])); tmp[2]=xor1( xor3(sharedMemory[__byte_perm(n32[ 4], 0, 0x4440)],sharedMemory[__byte_perm(n32[ 2], 0, 0x4441) + 256],sharedMemory[__byte_perm(n32[ 9], 0, 0x4442) + 1536]), xor1(sharedMemory[__byte_perm(n32[ 7], 0, 0x4443) + 1792],tmp3[2])); tmp[3]=xor1( xor3(sharedMemory[__byte_perm(n32[ 6], 0, 0x4440)],sharedMemory[__byte_perm(n32[ 4], 0, 0x4441) + 256],sharedMemory[__byte_perm(n32[ 2], 0, 0x4442) + 512]), xor1(sharedMemory[__byte_perm(n32[ 9], 0, 0x4443) + 1792],tmp3[3])); tmp[4]=xor1( xor3(sharedMemory[__byte_perm(n32[ 8], 0, 0x4440)],sharedMemory[__byte_perm(n32[ 6], 0, 0x4441) + 256],sharedMemory[__byte_perm(n32[ 4], 0, 0x4442) + 512]), xor1(sharedMemory[__byte_perm(n32[ 2], 0, 0x4443) + 768],tmp3[4])); tmp[5]=xor1( xor3(sharedMemory[__byte_perm(n32[ 8], 0, 0x4441) + 256],sharedMemory[__byte_perm(n32[ 6], 0, 0x4442) + 512],sharedMemory[__byte_perm(n32[ 4], 0, 0x4443) + 768]), xor1(sharedMemory[__byte_perm(n32[ 3], 0, 0x4440) + 1024],tmp3[5])); tmp[6]=xor1( xor3(sharedMemory[__byte_perm(n32[ 8], 0, 0x4442) + 512],sharedMemory[__byte_perm(n32[ 6], 0, 0x4443) + 
768],sharedMemory[__byte_perm(n32[ 5], 0, 0x4440) + 1024]), xor1(sharedMemory[__byte_perm(n32[ 3], 0, 0x4441) + 1280],tmp3[6])); tmp[7]=xor1( xor3(sharedMemory[__byte_perm(n32[ 8], 0, 0x4443) + 768],sharedMemory[__byte_perm(n32[ 7], 0, 0x4440) + 1024],sharedMemory[__byte_perm(n32[ 5], 0, 0x4441) + 1280]), xor1(sharedMemory[__byte_perm(n32[ 3], 0, 0x4442) + 1536],tmp3[7])); d_tmp[threadIdx.x+8]=tmp[threadIdx.x]; tmp[0] = xor1(ROUND_ELT(sharedMemory, tmp3, 0, 7, 6, 5, 4, 3, 2, 1), InitVector_RC[2]); tmp[1] = ROUND_ELT(sharedMemory, tmp3, 1, 0, 7, 6, 5, 4, 3, 2); tmp[2] = ROUND_ELT(sharedMemory, tmp3, 2, 1, 0, 7, 6, 5, 4, 3); tmp[3] = ROUND_ELT(sharedMemory, tmp3, 3, 2, 1, 0, 7, 6, 5, 4); tmp[4] = ROUND_ELT(sharedMemory, tmp3, 4, 3, 2, 1, 0, 7, 6, 5); tmp[5] = ROUND_ELT(sharedMemory, tmp3, 5, 4, 3, 2, 1, 0, 7, 6); tmp[6] = ROUND_ELT(sharedMemory, tmp3, 6, 5, 4, 3, 2, 1, 0, 7); tmp[7] = ROUND_ELT(sharedMemory, tmp3, 7, 6, 5, 4, 3, 2, 1, 0); d_tmp[threadIdx.x+16]=tmp[threadIdx.x]; #pragma unroll 6 for(int i=0;i<6;i++){ tmp[0+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 0, 7, 6, 5, 4, 3, 2, 1), InitVector_RC[3+i]); tmp[1+8*((i+1)&1)] = ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 1, 0, 7, 6, 5, 4, 3, 2); tmp[2+8*((i+1)&1)] = ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 2, 1, 0, 7, 6, 5, 4, 3); tmp[3+8*((i+1)&1)] = ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 3, 2, 1, 0, 7, 6, 5, 4); tmp[4+8*((i+1)&1)] = ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 4, 3, 2, 1, 0, 7, 6, 5); tmp[5+8*((i+1)&1)] = ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 5, 4, 3, 2, 1, 0, 7, 6); tmp[6+8*((i+1)&1)] = ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 6, 5, 4, 3, 2, 1, 0, 7); tmp[7+8*((i+1)&1)] = ROUND_ELT(sharedMemory, &tmp[8*(i&1)], 7, 6, 5, 4, 3, 2, 1, 0); d_tmp[threadIdx.x+24+8*i]=tmp[threadIdx.x+8*((i+1)&1)]; } if(threadIdx.x==0){ d_tmp[1]=h[1]; tmp[8]=ROUND_ELT(sharedMemory,&tmp[0], 3, 2, 1, 0, 7, 6, 5, 4); tmp[9]=ROUND_ELT(sharedMemory,&tmp[0], 5, 4, 3, 2, 1, 0, 7, 6); tmp[10] = xor3(tmp[8],tmp[9],atLastCalc); d_tmp[2]=tmp[10]; } } } __global__ #if __CUDA_ARCH__ > 210 __launch_bounds__(threadsPerBlock,2) #endif void whirlpoolx(const uint32_t threads, const uint32_t startNounce,uint32_t *resNounce){ __shared__ uint64_t sharedMemory[2048]; getShared(sharedMemory); uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ uint64_t n[16]; uint32_t *n32 = (uint32_t*)n; const uint32_t nounce = startNounce + thread; n[1] = xor1(REPLACE_HIDWORD(c_PaddedMessage80[9], cuda_swab32(nounce)),c_tmp[1]); const uint64_t b=xor1(sharedMemory[__byte_perm(n32[3], 0, 0x4443) + 1792],c_tmp[0]); n[5]=xor1(sharedMemory[__byte_perm(n32[3], 0, 0x4440) + 1024],c_tmp[5]); n[6]=xor1(sharedMemory[__byte_perm(n32[3], 0, 0x4441) + 1280],c_tmp[6]); n[7]=xor1(sharedMemory[__byte_perm(n32[3], 0, 0x4442) + 1536],c_tmp[7]); n[8]=xor3(sharedMemory[__byte_perm(n32[10],0,0x4443)+768],sharedMemory[__byte_perm(n32[12],0,0x4442)+512],sharedMemory[__byte_perm(n32[14],0,0x4441)+256]); n[9]=xor3(sharedMemory[__byte_perm(n32[11],0,0x4440)+1024],sharedMemory[__byte_perm(n32[12],0,0x4443)+768],sharedMemory[__byte_perm(n32[14],0,0x4442)+512]); n[10]=xor3(sharedMemory[__byte_perm(n32[11],0,0x4441)+1280],sharedMemory[__byte_perm(n32[13],0,0x4440)+1024],sharedMemory[__byte_perm(n32[14],0,0x4443)+768]); n[11]=xor3(sharedMemory[__byte_perm(n32[11],0,0x4442)+1536],sharedMemory[__byte_perm(n32[13],0,0x4441)+1280],sharedMemory[__byte_perm(n32[15],0,0x4440)+1024]); 
n[12]=xor3(sharedMemory[__byte_perm(n32[11],0,0x4443)+1792],sharedMemory[__byte_perm(n32[13],0,0x4442)+1536],sharedMemory[__byte_perm(n32[15],0,0x4441)+1280]); n[13]=xor3(sharedMemory[__byte_perm(n32[10],0,0x4440)],sharedMemory[__byte_perm(n32[13],0,0x4443)+1792],sharedMemory[__byte_perm(n32[15],0,0x4442)+1536]); n[14]=xor3(sharedMemory[__byte_perm(n32[12],0,0x4440)],sharedMemory[__byte_perm(n32[10],0,0x4441)+256],sharedMemory[__byte_perm(n32[15],0,0x4443)+1792]); n[15]=xor3(sharedMemory[__byte_perm(n32[14],0,0x4440)],sharedMemory[__byte_perm(n32[12],0,0x4441)+256],sharedMemory[__byte_perm(n32[10],0,0x4442)+ 512]); n32 = (uint32_t* __restrict__)&b; n[0]=xor3(sharedMemory[__byte_perm(n32[ 0], 0, 0x4440) ], n[ 8],c_tmp[0+8]); n[1]=xor3(sharedMemory[__byte_perm(n32[ 0], 0, 0x4441) + 256], n[ 9],c_tmp[1+8]); n[2]=xor3(sharedMemory[__byte_perm(n32[ 0], 0, 0x4442) + 512], n[10],c_tmp[2+8]); n[3]=xor3(sharedMemory[__byte_perm(n32[ 0], 0, 0x4443) + 768], n[11],c_tmp[3+8]); n[4]=xor3(sharedMemory[__byte_perm(n32[ 1], 0, 0x4440) +1024], n[12],c_tmp[4+8]); n[5]=xor3(sharedMemory[__byte_perm(n32[ 1], 0, 0x4441) +1280], n[13],c_tmp[5+8]); n[6]=xor3(sharedMemory[__byte_perm(n32[ 1], 0, 0x4442) +1536], n[14],c_tmp[6+8]); n[7]=xor3(sharedMemory[__byte_perm(n32[ 1], 0, 0x4443) +1792], n[15],c_tmp[7+8]); #pragma unroll 7 for(int i=2;i<9;i++){ n[0+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 0, 7, 6, 5, 4, 3, 2, 1), c_tmp[0+(8*i)]); n[1+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 1, 0, 7, 6, 5, 4, 3, 2), c_tmp[1+(8*i)]); n[2+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 2, 1, 0, 7, 6, 5, 4, 3), c_tmp[2+(8*i)]); n[3+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 3, 2, 1, 0, 7, 6, 5, 4), c_tmp[3+(8*i)]); n[4+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 4, 3, 2, 1, 0, 7, 6, 5), c_tmp[4+(8*i)]); n[5+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 5, 4, 3, 2, 1, 0, 7, 6), c_tmp[5+(8*i)]); n[6+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 6, 5, 4, 3, 2, 1, 0, 7), c_tmp[6+(8*i)]); n[7+8*((i+1)&1)] = xor1(ROUND_ELT(sharedMemory, &n[8*(i&1)], 7, 6, 5, 4, 3, 2, 1, 0), c_tmp[7+(8*i)]); } if (xor3(c_tmp[2],ROUND_ELT(sharedMemory, &n[8],3,2,1,0,7,6,5,4),ROUND_ELT(sharedMemory, &n[8],5,4,3,2,1,0,7,6)) <= pTarget[3]) atomicMin(&resNounce[0],nounce); } // thread < threads } __host__ void whirlpoolx_cpu_init(int thr_id, int threads){ cudaSetDevice(device_map[thr_id]); cudaSetDeviceFlags(cudaDeviceMapHost); cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); cudaMemcpyToSymbol(InitVector_RC, plain_RC, sizeof(plain_RC), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(mixTob0Tox, plain_T0, sizeof(plain_T0), 0, cudaMemcpyHostToDevice); cudaHostAlloc((void**)&d_wxnonce[thr_id],sizeof(uint32_t),cudaHostAllocMapped); cudaHostGetDevicePointer((void**)&d_WXNonce[thr_id],(void*)d_wxnonce[thr_id],0); cudaMalloc(&d_tmp[thr_id],8*9*sizeof(uint64_t)); } __host__ void whirlpoolx_setBlock_precompute(void *pdata, const void *ptarget,int thr_id){ uint64_t PaddedMessage[16]; dim3 grid(1); dim3 block(256); memcpy(PaddedMessage, pdata, 80); memset((uint8_t*)&PaddedMessage+80, 0, 48); *(uint8_t*)(&PaddedMessage+80) = 0x80; /* ending */ cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, 16*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(pTarget, ptarget, 4*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); precomputeX<<<grid, block>>>(8,d_tmp[thr_id]); cudaThreadSynchronize(); 
    cudaMemcpyToSymbol(c_tmp, d_tmp[thr_id], 8*9*sizeof(uint64_t), 0, cudaMemcpyDeviceToDevice);
}

__host__ uint32_t cpu_whirlpoolx(int thr_id, uint32_t threads, uint32_t startNounce)
{
    dim3 grid((threads + threadsPerBlock - 1) / threadsPerBlock);
    dim3 block(threadsPerBlock);

    d_wxnonce[thr_id][0] = UINT32_MAX;
    whirlpoolx<<<grid, block>>>(threads, startNounce, d_WXNonce[thr_id]);
    cudaDeviceSynchronize();  // wait for the search kernel before reading the mapped nonce
    return *d_wxnonce[thr_id];
}

__host__ extern void whirlpoolx_cpu_free(int thr_id)
{
    // d_wxnonce was allocated with cudaHostAlloc (mapped), so release the host allocation
    cudaFreeHost(d_wxnonce[thr_id]);
    cudaFree(d_tmp[thr_id]);
}
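// ---------------------------------------------------------------------------
// Illustrative host-side usage sketch (an assumption, not part of the original
// miner code): how the entry points above are typically chained by a scanhash
// loop, assuming whirlpoolx_cpu_init(thr_id, throughput) was already called
// once at startup. 'pdata' (the 80-byte block header as 20 words) and
// 'ptarget' are hypothetical caller-supplied buffers.
static bool scan_whirlpoolx_sketch(int thr_id, uint32_t *pdata, const uint32_t *ptarget,
                                   uint32_t first_nonce, uint32_t throughput)
{
    whirlpoolx_setBlock_precompute(pdata, ptarget, thr_id);            // upload header, fill c_tmp
    uint32_t nonce = cpu_whirlpoolx(thr_id, throughput, first_nonce);
    if (nonce != UINT32_MAX) {                                         // UINT32_MAX means "no hit"
        pdata[19] = nonce;                                             // nonce occupies the last header word
        return true;
    }
    return false;
}
// ---------------------------------------------------------------------------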
struct LinearInitParams { DnnHandle handle; int batchSize, inputSize, outputSize; }; Tensor RnnModel::add_linear_node(Tensor x, int output_size, ParallelConfig pc, SharedVariable params) { assert(x.numDim == 3); assert(x.adim[2] == LSTM_PER_NODE_LENGTH); assert(x.pdim[2] == LSTM_PER_NODE_LENGTH); Linear* node = new Linear(config, x, output_size, pc, params, part_is); layers.push_back(node); return node->outputs[0]; } Linear::Linear(RnnConfig config, Tensor input, int _output_size, ParallelConfig pc, SharedVariable _params, IndexSpaceT<1> input_part_is) : RnnOp(input, pc, _params), input_size(input.adim[0]), output_size(_output_size) { Context ctx = config.lg_ctx; HighLevelRuntime* runtime = config.lg_hlr; assert(pc.nDims == 2); int num_par_n = pc.dim[1]; int num_par_c = pc.dim[0]; input_part_rect = runtime->get_index_space_domain(ctx, input_part_is); { Rect<2> rect(Point<2>(0, 0), Point<2>(num_par_c-1, num_par_n-1)); part_rect = rect; } IndexSpaceT<2> part_is = runtime->create_index_space(ctx, part_rect); int batch_size = input.adim[1]; FieldSpace fs = config.field_space; Rect<3, coord_t> y_rect(Point<3>(0, 0, 0), Point<3>(output_size-1, batch_size-1, LSTM_PER_NODE_LENGTH-1)); IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect); LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs); LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs); assert(output_size % num_par_c == 0); assert(batch_size % num_par_n == 0); int extent_c = output_size / num_par_c; int extent_n = batch_size / num_par_n; Rect<3, coord_t> extent(Point<3>(0, 0, 0), Point<3>(extent_c-1, extent_n-1, LSTM_PER_NODE_LENGTH-1)); Transform<3, 2, coord_t> trans; trans[0][0] = extent_c; trans[0][1] = 0; trans[1][0] = 0; trans[1][1] = extent_n; trans[2][0] = 0; trans[2][1] = 0; IndexPartition y_ip = runtime->create_partition_by_restriction(ctx, y_is, part_is, trans, extent); assert(runtime->is_index_partition_disjoint(ctx, y_ip)); assert(runtime->is_index_partition_complete(ctx, y_ip)); LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip); LogicalPartition y_grad_lp = runtime->get_logical_partition(ctx, y_grad_lr, y_ip); // Note: we only need replica's grad, so no need to create lr/lp for forward Rect<3, coord_t> replica_rect(Point<3>(0, 0, 0), Point<3>(input_size-1, batch_size-1, LSTM_PER_NODE_LENGTH*num_par_c-1)); IndexSpaceT<3> replica_is = runtime->create_index_space(ctx, replica_rect); replica.region_grad = runtime->create_logical_region(ctx, replica_is, fs); trans[0][0] = 0; trans[0][1] = 0; trans[1][0] = 0; trans[1][1] = extent_n; trans[2][0] = LSTM_PER_NODE_LENGTH; trans[2][1] = 0; Rect<3, coord_t> replica_ext(Point<3>(0, 0, 0), Point<3>(input_size-1, extent_n-1, LSTM_PER_NODE_LENGTH-1)); IndexPartition replica_ip = runtime->create_partition_by_restriction(ctx, replica_is, part_is, trans, replica_ext); assert(runtime->is_index_partition_disjoint(ctx, replica_ip)); assert(runtime->is_index_partition_complete(ctx, replica_ip)); replica.partition_grad = runtime->get_logical_partition(ctx, replica.region_grad, replica_ip); for (int i = 0; i < num_par_c; i++) { Transform<3, 1, coord_t> input_trans; input_trans[0][0] = 0; input_trans[1][0] = inputs[0].pdim[1]; input_trans[2][0] = 0; Rect<3, coord_t> ext(Point<3>(0, 0, LSTM_PER_NODE_LENGTH*i), Point<3>(inputs[0].pdim[0]-1, inputs[0].pdim[1]-1, LSTM_PER_NODE_LENGTH*(i+1)-1)); IndexPartition ip = runtime->create_partition_by_restriction(ctx, replica_is, input_part_is, input_trans, ext); 
assert(runtime->is_index_partition_disjoint(ctx, ip)); replica_sub_lps[i] = runtime->get_logical_partition(ctx, replica.region_grad, ip); } outputs[0].numDim = 3; outputs[0].adim[0] = output_size; outputs[0].adim[1] = batch_size; outputs[0].adim[2] = LSTM_PER_NODE_LENGTH; outputs[0].pdim[0] = extent_c; outputs[0].pdim[1] = extent_n; outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH; outputs[0].region = y_lr; outputs[0].partition = y_lp; outputs[0].region_grad = y_grad_lr; outputs[0].partition_grad = y_grad_lp; // Every partition reads all in_channels trans[0][0] = 0; trans[0][1] = 0; trans[1][0] = 0; trans[1][1] = extent_n; trans[2][0] = 0; trans[2][1] = 0; Rect<3, coord_t> input_ext(Point<3>(0, 0, 0), Point<3>(input_size-1, extent_n-1, LSTM_PER_NODE_LENGTH)); IndexSpaceT<3> input_is = IndexSpaceT<3>(inputs[0].region.get_index_space()); IndexPartition input_ip = runtime->create_partition_by_restriction(ctx, input_is, part_is, trans, input_ext); input_lp = runtime->get_logical_partition(ctx, inputs[0].region, input_ip); } /* regions[0](I): x regions[1](I): w regions[2](O): y */ OpMeta* Linear::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); const LinearInitParams* linear = (LinearInitParams*) task->args; Rect<3> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<1> rect_w = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); assert(rect_x.hi[0] - rect_x.lo[0] + 1 == linear->inputSize); assert(rect_x.hi[1] - rect_x.lo[1] + 1 == linear->batchSize); assert(rect_x.hi[2] - rect_x.lo[2] + 1 == LSTM_PER_NODE_LENGTH); assert(rect_y.hi[0] - rect_y.lo[0] + 1 == linear->outputSize); assert(rect_y.hi[1] - rect_y.lo[1] + 1 == linear->batchSize); assert(rect_y.hi[2] - rect_y.lo[2] + 1 == LSTM_PER_NODE_LENGTH); assert(rect_w.hi[0] - rect_w.lo[0] + 1 == linear->outputSize*(linear->inputSize+1)); LinearMeta* m = new LinearMeta(linear->handle); m->profiling_runtime = false; #ifndef DISABLE_COMPUTATION int batch_size = linear->batchSize * LSTM_PER_NODE_LENGTH; float* dram_one_ptr = (float*) malloc(sizeof(float) * batch_size); for (int i = 0; i < batch_size; i++) dram_one_ptr[i] = 1.0f; checkCUDA(cudaMalloc(&m->one_ptr, sizeof(float) * batch_size)); checkCUDA(cudaMemcpy(m->one_ptr, dram_one_ptr, sizeof(float) * batch_size, cudaMemcpyHostToDevice)); #endif return m; } void Linear::init(const RnnModel& model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; for (PointInRectIterator<2> it(part_rect); it(); it++, idx++) { LinearInitParams initParams; initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]]; initParams.batchSize = outputs[0].pdim[1]; initParams.inputSize = inputs[0].pdim[0]; initParams.outputSize = outputs[0].pdim[0]; TaskLauncher launcher(RNN_LINEAR_INIT_TASK_ID, TaskArgument(&initParams, sizeof(initParams)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); // Add input { LogicalRegion x = runtime->get_logical_subregion_by_color(input_lp, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); } launcher.add_region_requirement( RegionRequirement(params.subregions[num_par_c+dp[0]], 
READ_ONLY, EXCLUSIVE, params.region)); launcher.add_field(1, FID_DATA); // Add output { LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); } Future f = runtime->execute_task(ctx, launcher); meta[idx] = f.get_result<OpMeta*>(); } } /* regions[0] (I): x regions[1] (I): w regions[2] (O): y */ void Linear::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 3); assert(task->regions.size() == 3); float alpha = 1.0f, beta = 0.0f; const LinearMeta* m = *((LinearMeta**) task->args); const AccessorRO<float, 3> acc_x(regions[0], FID_DATA); const AccessorRO<float, 1> acc_w(regions[1], FID_DATA); const AccessorWO<float, 3> acc_y(regions[2], FID_DATA); Rect<3> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<1> rect_w = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); assert(acc_x.accessor.is_dense_arbitrary(rect_x)); assert(acc_w.accessor.is_dense_arbitrary(rect_w)); assert(acc_y.accessor.is_dense_arbitrary(rect_y)); int input_size = rect_x.hi[0] - rect_x.lo[0] + 1; int output_size = rect_y.hi[0] - rect_y.lo[0] + 1; int batch_size = (rect_x.hi[1] - rect_x.lo[1] + 1) * LSTM_PER_NODE_LENGTH; const float *x_ptr = acc_x.ptr(rect_x.lo); const float *w_ptr = acc_w.ptr(rect_w.lo); const float *bias_ptr = w_ptr + input_size; float *y_ptr = acc_y.ptr(rect_y.lo); cudaEvent_t t_start, t_end; if (m->profiling_runtime) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, output_size, batch_size, input_size, &alpha, w_ptr, input_size + 1, x_ptr, input_size, &beta, y_ptr, output_size)); checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, output_size, batch_size, 1, &alpha, bias_ptr, input_size + 1, m->one_ptr, 1, &alpha, y_ptr, output_size)); if (m->profiling_runtime) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Linear forward time = %.2lfms\n", elapsed); } #ifdef PRINT_INTERMEDIATE_RESULT print_tensor<3, float>(y_ptr, rect_y, "linear(fwd):y"); #endif #endif } void Linear::forward(const RnnModel &model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; for (PointInRectIterator<2> it(part_rect); it(); it++, idx++) { OpMeta* mp = meta[idx]; TaskLauncher launcher(RNN_LINEAR_FWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); // Add input { LogicalRegion x = runtime->get_logical_subregion_by_color(input_lp, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); } launcher.add_region_requirement( RegionRequirement(params.subregions[num_par_c+dp[0]], READ_ONLY, EXCLUSIVE, params.region)); launcher.add_field(1, FID_DATA); // Add output { 
LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); } runtime->execute_task(ctx, launcher); } } /* regions[0](I): x regions[1](I): w regions[2](I): y regions[3](O); replica_grad regions[4](I/O): w_grad regions[5](I): y_grad */ void Linear::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 6); assert(task->regions.size() == 6); float alpha = 1.0f, beta = 0.0f; const LinearMeta* m = *((LinearMeta**) task->args); const AccessorRO<float, 3> acc_x(regions[0], FID_DATA); const AccessorRO<float, 1> acc_w(regions[1], FID_DATA); const AccessorRO<float, 3> acc_y(regions[2], FID_DATA); const AccessorWO<float, 3> acc_replica_grad(regions[3], FID_DATA); const AccessorRW<float, 1> acc_w_grad(regions[4], FID_DATA); const AccessorRO<float, 3> acc_y_grad(regions[5], FID_DATA); Rect<3> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<1> rect_w = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); Rect<3> rect_replica_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); Rect<1> rect_w_grad = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space()); Rect<3> rect_y_grad = runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space()); assert(acc_x.accessor.is_dense_arbitrary(rect_x)); assert(acc_w.accessor.is_dense_arbitrary(rect_w)); assert(acc_y.accessor.is_dense_arbitrary(rect_y)); assert(acc_replica_grad.accessor.is_dense_arbitrary(rect_replica_grad)); assert(acc_w_grad.accessor.is_dense_arbitrary(rect_w_grad)); assert(acc_y_grad.accessor.is_dense_arbitrary(rect_y_grad)); int input_size = rect_x.hi[0] - rect_x.lo[0] + 1; int output_size = rect_y.hi[0] - rect_y.lo[0] + 1; int batch_size = (rect_x.hi[1] - rect_x.lo[1] + 1) * LSTM_PER_NODE_LENGTH; const float *x_ptr = acc_x.ptr(rect_x.lo); const float *w_ptr = acc_w.ptr(rect_w.lo); const float *y_ptr = acc_y.ptr(rect_y.lo); float* replica_grad_ptr = acc_replica_grad.ptr(rect_replica_grad.lo); // Note that w_grad might be bigger than w assert(rect_w_grad.contains(rect_w)); float* w_grad_ptr = acc_w_grad.ptr(rect_w_grad.lo); float* bias_grad_ptr = w_grad_ptr + input_size; const float* y_grad_ptr = acc_y_grad.ptr(rect_y_grad.lo); cudaEvent_t t_start, t_end; if (m->profiling_runtime) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); // Compute weight gradient checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_T, input_size, output_size, batch_size, &alpha, x_ptr, input_size, y_grad_ptr, output_size, &alpha, w_grad_ptr, input_size+1)); // Compute bias gradient checkCUDA(cublasSgemv(m->handle.blas, CUBLAS_OP_N, output_size, batch_size, &alpha, y_grad_ptr, output_size, m->one_ptr, 1, &alpha, bias_grad_ptr, input_size+1)); // Compute data gradient checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_N, input_size, batch_size, output_size, &alpha, w_ptr, input_size + 1, y_grad_ptr, output_size, &beta, replica_grad_ptr, input_size)); if (m->profiling_runtime) { cudaEventRecord(t_end); 
checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Linear backward time = %.2lfms\n", elapsed); } #ifdef PRINT_INTERMEDIATE_RESULT print_tensor<1, float>(w_grad_ptr, rect_w_grad, "linear(bwd):w_grad"); #endif #endif } /* regions[0](O): input regions[1..num_par_c](I): replicas */ void Linear::backward2_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION float alpha = 1.0f; const LinearMeta* m = *((LinearMeta**) task->args); const AccessorWO<float, 3> acc_input(regions[0], FID_DATA); Rect<3> rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); assert(acc_input.accessor.is_dense_arbitrary(rect_input)); float *input_ptr = acc_input.ptr(rect_input.lo); cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); for (int i = 1; i < task->regions.size(); i++) { const AccessorRO<float, 3> acc_replica(regions[i], FID_DATA); Rect<3> rect_replica = runtime->get_index_space_domain(ctx, task->regions[i].region.get_index_space()); assert(rect_replica.volume() == rect_input.volume()); assert(acc_replica.accessor.is_dense_arbitrary(rect_replica)); const float *replica_ptr = acc_replica.ptr(rect_replica.lo); if (i == 1) checkCUDA(cublasScopy(m->handle.blas, rect_input.volume(), replica_ptr, 1, input_ptr, 1)); else checkCUDA(cublasSaxpy(m->handle.blas, rect_input.volume(), &alpha, replica_ptr, 1, input_ptr, 1)); } #endif } void Linear::backward(const RnnModel& model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; for (PointInRectIterator<2> it(part_rect); it(); it++, idx++) { OpMeta* mp = meta[idx]; TaskLauncher launcher(RNN_LINEAR_BWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); // Add x { LogicalRegion x = runtime->get_logical_subregion_by_color(input_lp, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); } // Add w launcher.add_region_requirement( RegionRequirement(params.subregions[num_par_c+dp[0]], READ_ONLY, EXCLUSIVE, params.region)); launcher.add_field(1, FID_DATA); // Add y { LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); } // Add replica_grad { LogicalRegion replica_grad = runtime->get_logical_subregion_by_color(replica.partition_grad, dp); launcher.add_region_requirement( RegionRequirement(replica_grad, WRITE_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(3, FID_DATA); } // Add w_grad launcher.add_region_requirement( RegionRequirement(params.gradients[paraConfig.gpu[idx]], READ_WRITE, EXCLUSIVE, params.gradients[paraConfig.gpu[idx]])); launcher.add_field(4, FID_DATA); // Add y_grad { LogicalRegion y_grad = runtime->get_logical_subregion_by_color(outputs[0].partition_grad, dp); launcher.add_region_requirement( RegionRequirement(y_grad, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(5, FID_DATA); } runtime->execute_task(ctx, launcher); } // We aggregate data from replica tensor to input tensor idx = 0; for (PointInRectIterator<1> 
it(input_part_rect); it(); it++, idx++) { OpMeta* mp = meta[idx]; TaskLauncher launcher(RNN_LINEAR_BWD2_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); LogicalRegion input = runtime->get_logical_subregion_by_color(inputs[0].partition_grad, dp); launcher.add_region_requirement( RegionRequirement(input, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; for (int i = 0; i < num_par_c; i++) { LogicalRegion r = runtime->get_logical_subregion_by_color(replica_sub_lps[i], dp); launcher.add_region_requirement( RegionRequirement(r, READ_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(i+1, FID_DATA); } runtime->execute_task(ctx, launcher); } } void Linear::update_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) {} void Linear::update(const RnnModel& model) { }
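// ---------------------------------------------------------------------------
// Illustrative sketch (an assumption, not part of the original runtime): the
// forward_task above packs the weights and bias of each output unit into one
// column of length (input_size + 1), which is why both cublasSgemm calls use a
// leading dimension of input_size + 1. The plain CPU loop below reproduces the
// same math, y[:, s] = W^T * x[:, s] + b, and can serve as a reference when
// checking a GPU run; the function name and pointer layout are hypothetical.
static void linear_forward_reference(const float *w /* (input_size+1) x output_size, column-major */,
                                     const float *x /* input_size x batch, column-major */,
                                     float *y       /* output_size x batch, column-major */,
                                     int input_size, int output_size, int batch)
{
    for (int s = 0; s < batch; ++s)
        for (int o = 0; o < output_size; ++o) {
            const float *col = w + (size_t)o * (input_size + 1);  // weights of output unit o
            float acc = col[input_size];                          // bias is stored after the weights
            for (int i = 0; i < input_size; ++i)
                acc += col[i] * x[(size_t)s * input_size + i];
            y[(size_t)s * output_size + o] = acc;
        }
}
// ---------------------------------------------------------------------------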
#include <kfusion/cuda/device.hpp> using namespace kfusion::device; /* * VECTOR FIELD */ __device__ __forceinline__ float4* sobfu::device::VectorField::beg(int x, int y) const { return data + x + dims.x * y; } __device__ __forceinline__ float4* sobfu::device::VectorField::zstep(float4* const ptr) const { return ptr + dims.x * dims.y; } __device__ __forceinline__ float4* sobfu::device::VectorField::operator()(int x, int y, int z) const { return data + x + y * dims.x + z * dims.y * dims.x; } __device__ __forceinline__ float4 sobfu::device::VectorField::get_displacement(int x, int y, int z) const { return *(data + z * dims.y * dims.x + y * dims.x + x) - make_float4((float) x, (float) y, (float) z, 0.f); } void sobfu::device::clear(VectorField& field) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(field.dims.x, block.x), kfusion::device::divUp(field.dims.y, block.y)); clear_kernel<<<grid, block>>>(field); cudaSafeCall(cudaGetLastError()); } __global__ void sobfu::device::clear_kernel(sobfu::device::VectorField field) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > field.dims.x - 1 || y > field.dims.y - 1) { return; } float4* beg = field.beg(x, y); float4* end = beg + field.dims.x * field.dims.y * field.dims.z; for (float4* pos = beg; pos != end; pos = field.zstep(pos)) { *pos = make_float4(0.f, 0.f, 0.f, 0.f); } } /* * DEFORMATION FIELD */ void sobfu::device::init_identity(sobfu::device::DeformationField& psi) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(psi.dims.x, block.x), kfusion::device::divUp(psi.dims.y, block.y)); init_identity_kernel<<<grid, block>>>(psi); cudaSafeCall(cudaGetLastError()); } __global__ void sobfu::device::init_identity_kernel(sobfu::device::DeformationField psi) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > psi.dims.x - 1 || y > psi.dims.y - 1) { return; } float4 idx = make_float4((float) x, (float) y, 0.f, 0.f); float4 zstep = make_float4(0.f, 0.f, 1.f, 0.f); float4* pos = psi.beg(x, y); for (int i = 0; i <= psi.dims.z - 1; idx += zstep, pos = psi.zstep(pos), ++i) { *pos = idx; } } __global__ void sobfu::device::apply_kernel(const kfusion::device::TsdfVolume phi, kfusion::device::TsdfVolume phi_warped, const sobfu::device::DeformationField psi) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > phi_warped.dims.x - 1 || y > phi_warped.dims.y - 1) { return; } float4* psi_ptr = psi.beg(x, y); float2* phi_warped_ptr = phi_warped.beg(x, y); for (int i = 0; i <= phi_warped.dims.z - 1; psi_ptr = psi.zstep(psi_ptr), phi_warped_ptr = phi_warped.zstep(phi_warped_ptr), ++i) { float4 psi_val = *psi_ptr; float2 tsdf_deformed = interpolate_tsdf(phi, trunc(psi_val)); *phi_warped_ptr = tsdf_deformed; } } void sobfu::device::apply(const kfusion::device::TsdfVolume& phi, kfusion::device::TsdfVolume& phi_warped, const sobfu::device::DeformationField& psi) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(phi.dims.x, block.x), kfusion::device::divUp(phi.dims.y, block.y)); apply_kernel<<<grid, block>>>(phi, phi_warped, psi); cudaSafeCall(cudaGetLastError()); } __global__ void sobfu::device::estimate_inverse_kernel(sobfu::device::DeformationField psi, sobfu::device::DeformationField psi_inv) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > psi_inv.dims.x - 1 || y > psi_inv.dims.y - 1) { return; } float4* psi_inv_ptr = psi_inv.beg(x, y); for (int i = 
0; i <= psi_inv.dims.z - 1; psi_inv_ptr = psi_inv.zstep(psi_inv_ptr), ++i) { float4 psi_inv_val = *psi_inv_ptr; *psi_inv_ptr = make_float4((float) x, (float) y, (float) i, 0.f) - 1.f * interpolate_field_inv(psi, trunc(psi_inv_val)); } } void sobfu::device::estimate_inverse(sobfu::device::DeformationField& psi, sobfu::device::DeformationField& psi_inverse) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(psi_inverse.dims.x, block.x), kfusion::device::divUp(psi_inverse.dims.y, block.y)); /* estimate inverse */ for (int iter = 0; iter < 48; ++iter) { estimate_inverse_kernel<<<grid, block>>>(psi, psi_inverse); cudaSafeCall(cudaGetLastError()); } } /* * TSDF DIFFERENTIATOR METHODS */ __global__ void sobfu::device::estimate_gradient_kernel(const sobfu::device::TsdfDifferentiator diff, sobfu::device::TsdfGradient grad) { diff(grad); } void sobfu::device::TsdfDifferentiator::calculate(sobfu::device::TsdfGradient& grad) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(grad.dims.x, block.x), kfusion::device::divUp(grad.dims.y, block.y)); estimate_gradient_kernel<<<grid, block>>>(*this, grad); cudaSafeCall(cudaGetLastError()); } __device__ __forceinline__ void sobfu::device::TsdfDifferentiator::operator()(sobfu::device::TsdfGradient& grad) const { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > vol.dims.x - 1 || y > vol.dims.y - 1) { return; } int idx_x_1 = x + 1; int idx_x_2 = x - 1; if (x == 0) { idx_x_2 = x + 1; } else if (x == vol.dims.x - 1) { idx_x_1 = x - 1; } int idx_y_1 = y + 1; int idx_y_2 = y - 1; if (y == 0) { idx_y_2 = y + 1; } else if (y == vol.dims.y - 1) { idx_y_1 = y - 1; } float4* grad_ptr = grad.beg(x, y); #pragma unroll for (int i = 0; i <= vol.dims.z - 1; grad_ptr = grad.zstep(grad_ptr), ++i) { int idx_z_1 = i + 1; int idx_z_2 = i - 1; if (i == 0) { idx_z_2 = i + 1; } else if (i == vol.dims.z - 1) { idx_z_1 = i - 1; } float Fx1 = (*vol(idx_x_1, y, i)).x; float Fx2 = (*vol(idx_x_2, y, i)).x; float n_x = __fdividef(Fx1 - Fx2, 2.f); float Fy1 = (*vol(x, idx_y_1, i)).x; float Fy2 = (*vol(x, idx_y_2, i)).x; float n_y = __fdividef(Fy1 - Fy2, 2.f); float Fz1 = (*vol(x, y, idx_z_1)).x; float Fz2 = (*vol(x, y, idx_z_2)).x; float n_z = __fdividef(Fz1 - Fz2, 2.f); float4 n = make_float4(n_x, n_y, n_z, 0.f); *grad_ptr = n; } } __global__ void sobfu::device::interpolate_gradient_kernel(sobfu::device::TsdfGradient nabla_phi_n_psi, sobfu::device::TsdfGradient nabla_phi_n_psi_t, sobfu::device::DeformationField psi) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > psi.dims.x - 1 || y > psi.dims.y - 1) { return; } float3 idx = make_float3(x, y, 0.f); float3 zstep = make_float3(0.f, 0.f, 1.f); int global_idx = y * nabla_phi_n_psi.dims.x + x; float4* nabla_phi_n_psi_t_ptr = nabla_phi_n_psi_t.beg(x, y); for (int i = 0; i <= psi.dims.z - 1; nabla_phi_n_psi_t_ptr = nabla_phi_n_psi_t.zstep(nabla_phi_n_psi_t_ptr), global_idx += nabla_phi_n_psi.dims.x * nabla_phi_n_psi.dims.y, idx += zstep, ++i) { float4 psi_val = psi.data[global_idx]; *nabla_phi_n_psi_t_ptr = interpolate_field(nabla_phi_n_psi, trunc(psi_val)); } } void sobfu::device::interpolate_gradient(sobfu::device::TsdfGradient& nabla_phi_n_psi, sobfu::device::TsdfGradient& nabla_phi_n_psi_t, sobfu::device::DeformationField& psi) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(psi.dims.x, block.x), kfusion::device::divUp(psi.dims.y, block.y)); interpolate_gradient_kernel<<<grid, block>>>(nabla_phi_n_psi, nabla_phi_n_psi_t, 
psi); cudaSafeCall(cudaGetLastError()); } /* * LAPLACIAN */ __global__ void sobfu::device::interpolate_laplacian_kernel(sobfu::device::Laplacian L, sobfu::device::Laplacian L_o_psi, sobfu::device::DeformationField psi) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > psi.dims.x - 1 || y > psi.dims.y - 1) { return; } float4* psi_ptr = psi.beg(x, y); float4* L_o_psi_ptr = L_o_psi.beg(x, y); for (int i = 0; i <= psi.dims.z - 1; psi_ptr = psi.zstep(psi_ptr), L_o_psi_ptr = L_o_psi.zstep(L_o_psi_ptr), ++i) { float4 psi_val = *psi_ptr; *L_o_psi_ptr = interpolate_field(L, trunc(psi_val)); } } void sobfu::device::interpolate_laplacian(sobfu::device::Laplacian& L, sobfu::device::Laplacian& L_o_psi, sobfu::device::DeformationField& psi) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(psi.dims.x, block.x), kfusion::device::divUp(psi.dims.y, block.y)); interpolate_laplacian_kernel<<<grid, block>>>(L, L_o_psi, psi); cudaSafeCall(cudaGetLastError()); } /* * SECOND ORDER DIFFERENTIATOR METHODS */ void sobfu::device::SecondOrderDifferentiator::calculate(sobfu::device::Laplacian& L) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(L.dims.x, block.x), kfusion::device::divUp(L.dims.y, block.y)); estimate_laplacian_kernel<<<grid, block>>>(*this, L); cudaSafeCall(cudaGetLastError()); } __global__ void sobfu::device::estimate_laplacian_kernel(const sobfu::device::SecondOrderDifferentiator diff, sobfu::device::Laplacian L) { diff.laplacian(L); } __device__ __forceinline__ void sobfu::device::SecondOrderDifferentiator::laplacian(sobfu::device::Laplacian& L) const { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > L.dims.x - 1 || y > L.dims.y - 1) { return; } int idx_x_1 = x + 1; int idx_x_2 = x - 1; if (x == 0) { idx_x_1 = x; idx_x_2 = x; } else if (x == L.dims.x - 1) { idx_x_1 = x; idx_x_2 = x; } int idx_y_1 = y + 1; int idx_y_2 = y - 1; if (y == 0) { idx_y_1 = y; idx_y_2 = y; } else if (y == L.dims.y - 1) { idx_y_1 = y; idx_y_2 = y; } float4* L_ptr = L.beg(x, y); #pragma unroll for (int i = 0; i <= L.dims.z - 1; L_ptr = L.zstep(L_ptr), ++i) { int idx_z_1 = i + 1; int idx_z_2 = i - 1; if (i == 0) { idx_z_1 = i; idx_z_2 = i; } else if (i == L.dims.z - 1) { idx_z_1 = i; idx_z_2 = i; } float4 L_val = -6.f * *psi(x, y, i) + *psi(idx_x_1, y, i) + *psi(idx_x_2, y, i) + *psi(x, idx_y_1, i) + *psi(x, idx_y_2, i) + *psi(x, y, idx_z_1) + *psi(x, y, idx_z_2); *L_ptr = -1.f * L_val; } } /* * JACOBIAN */ __device__ __forceinline__ Mat4f* sobfu::device::Jacobian::beg(int x, int y) const { return data + x + dims.x * y; } __device__ __forceinline__ Mat4f* sobfu::device::Jacobian::zstep(Mat4f* const ptr) const { return ptr + dims.x * dims.y; } __device__ __forceinline__ Mat4f* sobfu::device::Jacobian::operator()(int x, int y, int z) const { return data + x + y * dims.x + z * dims.y * dims.x; } void sobfu::device::clear(Jacobian& J) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(J.dims.x, block.x), kfusion::device::divUp(J.dims.y, block.y)); clear_jacobian_kernel<<<grid, block>>>(J); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } __global__ void sobfu::device::clear_jacobian_kernel(sobfu::device::Jacobian J) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > J.dims.x - 1 || y > J.dims.y - 1) { return; } Mat4f* beg = J.beg(x, y); Mat4f* end = beg + J.dims.x * J.dims.y * J.dims.z; for (Mat4f* pos = beg; pos != end; pos = 
J.zstep(pos)) { float4 g = make_float4(0.f, 0.f, 0.f, 0.f); Mat4f val; val.data[0] = g; val.data[1] = g; val.data[2] = g; *pos = val; } } /* * DIFFERENTIATOR METHODS */ void sobfu::device::Differentiator::calculate(sobfu::device::Jacobian& J) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(J.dims.x, block.x), kfusion::device::divUp(J.dims.y, block.y)); estimate_jacobian_kernel<<<grid, block>>>(*this, J); cudaSafeCall(cudaGetLastError()); } void sobfu::device::Differentiator::calculate_deformation_jacobian(sobfu::device::Jacobian& J) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(J.dims.x, block.x), kfusion::device::divUp(J.dims.y, block.y)); estimate_deformation_jacobian_kernel<<<grid, block>>>(*this, J); cudaSafeCall(cudaGetLastError()); } __global__ void sobfu::device::estimate_jacobian_kernel(const sobfu::device::Differentiator diff, sobfu::device::Jacobian J) { diff(J, 0); } __global__ void sobfu::device::estimate_deformation_jacobian_kernel(const sobfu::device::Differentiator diff, sobfu::device::Jacobian J) { diff(J, 1); } __device__ __forceinline__ void sobfu::device::Differentiator::operator()(sobfu::device::Jacobian& J, int mode) const { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > psi.dims.x - 1 || y > psi.dims.y - 1) { return; } int idx_x_1 = x + 1; int idx_x_2 = x - 1; if (x == 0) { idx_x_2 = x + 1; } else if (x == psi.dims.x - 1) { idx_x_1 = x - 1; } int idx_y_1 = y + 1; int idx_y_2 = y - 1; if (y == 0) { idx_y_2 = y + 1; } else if (y == psi.dims.y - 1) { idx_y_1 = y - 1; } Mat4f* J_ptr = J.beg(x, y); #pragma unroll for (int i = 0; i <= psi.dims.z - 1; J_ptr = J.zstep(J_ptr), ++i) { int idx_z_1 = i + 1; int idx_z_2 = i - 1; if (i == 0) { idx_z_2 = i + 1; } else if (i == psi.dims.z - 1) { idx_z_1 = i - 1; } float4 J_x; float4 J_y; float4 J_z; if (mode == 0) { J_x = (*psi(idx_x_1, y, i) - *psi(idx_x_2, y, i)) / 2.f; J_y = (*psi(x, idx_y_1, i) - *psi(x, idx_y_2, i)) / 2.f; J_z = (*psi(x, y, idx_z_1) - *psi(x, y, idx_z_2)) / 2.f; } else if (mode == 1) { J_x = (psi.get_displacement(idx_x_1, y, i) - psi.get_displacement(idx_x_2, y, i)) / 2.f; J_y = (psi.get_displacement(x, idx_y_1, i) - psi.get_displacement(x, idx_y_2, i)) / 2.f; J_z = (psi.get_displacement(x, y, idx_z_1) - psi.get_displacement(x, y, idx_z_2)) / 2.f; } Mat4f val; val.data[0] = make_float4(J_x.x, J_y.x, J_z.x, 0.f); val.data[1] = make_float4(J_x.y, J_y.y, J_z.y, 0.f); val.data[2] = make_float4(J_x.z, J_y.z, J_z.z, 0.f); *J(x, y, i) = val; } }
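// ---------------------------------------------------------------------------
// Illustrative usage sketch (an assumption, not part of sobfu itself): typical
// order in which the host wrappers above are invoked for one warp of the
// reference TSDF. 'phi_global', 'phi_n_warped', 'psi' and 'psi_inv' are
// hypothetical, pre-allocated volumes/fields with matching dims.
static void warp_tsdf_sketch(const kfusion::device::TsdfVolume &phi_global,
                             kfusion::device::TsdfVolume &phi_n_warped,
                             sobfu::device::DeformationField &psi,
                             sobfu::device::DeformationField &psi_inv)
{
    sobfu::device::init_identity(psi);       // start from the identity deformation
    sobfu::device::init_identity(psi_inv);
    // ... the solver would update psi here ...
    sobfu::device::apply(phi_global, phi_n_warped, psi);  // phi_n_warped(x) = phi_global(psi(x))
    sobfu::device::estimate_inverse(psi, psi_inv);        // fixed-point inversion (48 iterations above)
}
// ---------------------------------------------------------------------------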
#include <iostream> #include <algorithm> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #include "Common.cuh" // ------------------------------------------------- // Forward // ------------------------------------------------- template <int N=6, int M=16, int MAX_NODE_UNIT> __global__ void kernal_fp32_MicroMlp_Forward ( float const *x_buf, float *y_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float const *output_W, float const *output_b, int node_size, int frame_size, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; // 係数読み込み __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float b1[MAX_NODE_UNIT]; __shared__ float const *x_ptr[N][MAX_NODE_UNIT]; float *y_ptr; if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } if ( id == 0 ) { b1[node_id] = output_b[node]; } // 読み込みアドレス for ( int i = 0; i < N; ++i ) { int in_idx = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[frame_stride * in_idx]; } // 書き込みアドレス y_ptr = &y_buf[frame_stride * node]; } __syncthreads(); // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { if ( node < node_size ) { // 入力データ読み込み float x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][frame]; } // 計算 float sig1 = b1[node_id]; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { sig0 += x[j] * W0[i][j][node_id]; } sig0 = fmaxf(sig0, 0); // ReLU sig1 += sig0 * W1[i][node_id]; } // 出力 y_ptr[frame] = sig1; } __syncthreads(); } } template <int N=6, int M=16> int bbcu_fp32_MicroMlp_Forward ( float const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 512; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); kernal_fp32_MicroMlp_Forward<N, M, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } int bbcu_fp32_MicroMlp6x16_Forward ( float const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int 
frame_stride, cudaStream_t streamId ) { return bbcu_fp32_MicroMlp_Forward<6, 16>( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, input_node_size, output_node_size, frame_size, frame_stride, streamId ); } ///////////////////// // bit入力版 template <int N=6, int M=16, int MAX_NODE_UNIT=16> __global__ void kernal_bit_fp32_MicroMlp_Forward( int const *x_buf, float *y_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float const *output_W, float const *output_b, int node_size, int frame_size, int input_frame_stride, int output_frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; // 係数読み込み __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float b1[MAX_NODE_UNIT]; __shared__ int const *x_ptr[N][MAX_NODE_UNIT]; if ( node < node_size) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } if ( id == 0 ) { b1[node_id] = output_b[node]; } // 読み込みアドレス for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[input_frame_stride * input_node]; } } __syncthreads(); if ( node < node_size) { // 書き込みアドレス float *y_ptr = &y_buf[output_frame_stride * node]; // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { int bit = (1 << (frame & 0x1f)); int unit = (frame >> 5); // 入力データ読み込み int x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][unit]; } // 計算 float sig1 = b1[node_id]; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { float xx = (x[j] & bit) ? 
BB_BINARY_HI : BB_BINARY_LO; sig0 += W0[i][j][node_id] * xx; // if ( x[j] & bit ) { // sig0 += W0[i][j][node_id]; // } } sig0 = fmaxf(sig0, 0); // ReLU sig1 += sig0 * W1[i][node_id]; } // 出力 y_ptr[frame] = sig1; } } } template <int N=6, int M=16> int bbcu_bit_fp32_MicroMlp_Forward ( int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int input_frame_stride, int output_frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 512; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); kernal_bit_fp32_MicroMlp_Forward<N, M, MAX_NODE_UNIT><<<grid, block, 0, streamId>>> ( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, output_node_size, frame_size, input_frame_stride, output_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } int bbcu_bit_fp32_MicroMlp6x16_Forward ( int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float const *dev_output_W, float const *dev_output_b, int input_node_size, int output_node_size, int frame_size, int input_frame_stride, int output_frame_stride, cudaStream_t streamId ) { return bbcu_bit_fp32_MicroMlp_Forward<6, 16> ( dev_x_buf, dev_y_buf, dev_input_index, dev_hidden_W, dev_hidden_b, dev_output_W, dev_output_b, input_node_size, output_node_size, frame_size, input_frame_stride, output_frame_stride, streamId ); } // ------------------------------------------------- // Backward // ------------------------------------------------- #if 0 __device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf) { buf[threadIdx.x] = v; __syncthreads(); // スレッド間集計 int comb = 1; while (comb < blockDim.x) { int next = comb * 2; int mask = next - 1; if ((threadIdx.x & mask) == 0) { buf[threadIdx.x] += buf[threadIdx.x + comb]; } comb = next; __syncthreads(); } float sum = buf[0]; __syncthreads(); return sum; } #endif // kernel template <int N=6, int M=16, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=8> __global__ void kernal_fp32_MicroMlp_Backward ( float const *x_buf, float const *dy_buf, float *dx_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float *hidden_dW, float *hidden_db, float const *output_W, float const *output_b, float *output_dW, float *output_db, int node_size, int frame_size, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; __shared__ float sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT]; __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float dW0_prev[M][N][MAX_NODE_UNIT]; __shared__ float db0_prev[M][MAX_NODE_UNIT]; 
__shared__ float dW1_prev[M][MAX_NODE_UNIT]; __shared__ float db1_prev[MAX_NODE_UNIT]; __shared__ float const *x_ptr[N][MAX_NODE_UNIT]; float const *dy_ptr; if ( node < node_size ) { // 係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } // 直前の係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { dW0_prev[i][j][node_id] = hidden_dW[(node * M + i) * N + j]; } db0_prev[i][node_id] = hidden_db[node * M + i]; dW1_prev[i][node_id] = output_dW[node * M + i]; } if ( id == 0 ) { db1_prev[node_id] = output_db[node]; } // ポインタ読み込み for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[frame_stride * input_node]; } dy_ptr = &dy_buf[frame_stride * node]; } __syncthreads(); // 勾配初期化 float dW0[M][N]; float db0[M]; float dW1[M]; float db1; for ( int i = 0; i < M; ++ i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = 0; } } for ( int i = 0; i < M; ++i ) { db0[i] = 0; dW1[i] = 0; } db1 = 0; if ( node < node_size ) { // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { // 入力データ読み込み float x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][frame]; } // 1段目再計算して2段目逆伝播 float grad1 = dy_ptr[frame]; float grad0[M]; db1 += grad1; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { sig0 += x[j] * W0[i][j][node_id]; } sig0 = fmaxf(sig0, 0); // ReLU dW1[i] += grad1 * sig0; if ( sig0 > 0 ) { // ReLU grad0[i] = grad1 * W1[i][node_id]; } else { grad0[i] = 0; } } // 1段目逆伝播 float *dx_ptr = &dx_buf[frame_stride * N * node]; float dx[N]; for ( int i = 0; i < N; ++i ) { dx[i] = 0; // dx_ptr[frame_stride * i + frame]; } for ( int i = 0; i < M; ++i ) { db0[i] += grad0[i]; for ( int j = 0; j < N; ++j ) { dW0[i][j] += grad0[i] * x[j]; dx[j] += grad0[i] * W0[i][j][node_id]; } } // 誤差書き込み for ( int i = 0; i < N; ++i ) { dx_ptr[frame_stride * i + frame] = dx[i]; } } } __syncthreads(); // 係数統合 for ( int i = 0; i < M; ++i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = device_fp32_LocalSum(dW0[i][j], sbuf[node_id]); } db0[i] = device_fp32_LocalSum(db0[i], sbuf[node_id]); dW1[i] = device_fp32_LocalSum(dW1[i], sbuf[node_id]); } db1 = device_fp32_LocalSum(db1, sbuf[node_id]); // 勾配出力 if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { hidden_dW[(node * M + i) * N + j] = dW0[i][j] + dW0_prev[i][j][node_id]; } hidden_db[node * M + i] = db0[i] + db0_prev[i][node_id]; output_dW[node * M + i] = dW1[i] + dW1_prev[i][node_id]; } if (id == 0) { output_db[node] = db1 + db1_prev[node_id]; } } __syncthreads(); } template <int N=6> __global__ void kernal_fp32_MicroMlp_BackwardMarge ( float const *src_buf, float *dst_buf, int const *input_index, int node_size, int frame_size, int frame_stride ) { int frame = blockDim.x * blockIdx.x + threadIdx.x; for ( int node = 0; node < node_size; ++node ) { if ( frame < frame_size ) { for ( int n = 0; n < N; ++n ) { int in_idx = input_index[node*N + n]; float* dst_buf_ptr = &dst_buf[frame_stride * in_idx]; float prev_data = dst_buf_ptr[frame]; const float* src_buf_ptr = &src_buf[(N * node + n) * frame_stride]; dst_buf_ptr[frame] = prev_data + src_buf_ptr[frame]; } } __syncthreads(); } } template <int N=6, int M=16> int bbcu_fp32_MicroMlp_Backward ( float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const 
*dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); { unsigned int const THREAD_SIZE = 256; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); kernal_fp32_MicroMlp_Backward<N, M, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>> ( dev_x_buf, dev_dy_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } { BB_CUDA_SAFE_CALL(cudaMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float))); int block_x = 1024; while ( block_x / 2 >= frame_size ) { block_x /= 2; } dim3 grid((frame_size + (block_x - 1)) / block_x); dim3 block(block_x); kernal_fp32_MicroMlp_BackwardMarge<N><<<grid, block>>> ( dev_dx_tmp, dev_dx_buf, dev_input_index, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } return 0; } BBCU_DLL_EXPORT int bbcu_fp32_MicroMlp6x16_Backward( float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { return bbcu_fp32_MicroMlp_Backward<6, 16>( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, input_node_size, output_node_size, frame_size, frame_stride, streamId ); } /////////////////////////////// // kernel template <int N=6, int M=16, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=8> __global__ void kernal_bit_fp32_MicroMlp_Backward ( int const *x_buf, float const *dy_buf, float *dx_buf, int const *input_index, float const *hidden_W, float const *hidden_b, float *hidden_dW, float *hidden_db, float const *output_W, float const *output_b, float *output_dW, float *output_db, int node_size, int frame_size, int x_frame_stride, int frame_stride ) { int const node_id = threadIdx.y; int const node = blockIdx.y * blockDim.y + threadIdx.y; int const id = threadIdx.x; int const id_step = blockDim.x; __shared__ float sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT]; __shared__ float W0[M][N][MAX_NODE_UNIT]; __shared__ float b0[M][MAX_NODE_UNIT]; __shared__ float W1[M][MAX_NODE_UNIT]; __shared__ float dW0_prev[M][N][MAX_NODE_UNIT]; __shared__ float db0_prev[M][MAX_NODE_UNIT]; __shared__ float dW1_prev[M][MAX_NODE_UNIT]; __shared__ float 
db1_prev[MAX_NODE_UNIT]; __shared__ int const *x_ptr[N][MAX_NODE_UNIT]; float const *dy_ptr; if ( node < node_size ) { // 係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { W0[i][j][node_id] = hidden_W[(node * M + i) * N + j]; } b0[i][node_id] = hidden_b[node * M + i]; W1[i][node_id] = output_W[node * M + i]; } // 直前の係数読み込み for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { dW0_prev[i][j][node_id] = hidden_dW[(node * M + i) * N + j]; } db0_prev[i][node_id] = hidden_db[node * M + i]; dW1_prev[i][node_id] = output_dW[node * M + i]; } if ( id == 0 ) { db1_prev[node_id] = output_db[node]; } // ポインタ読み込み for ( int i = 0; i < N; ++i ) { int input_node = input_index[node*N + i]; x_ptr[i][node_id] = &x_buf[x_frame_stride * input_node]; } dy_ptr = &dy_buf[frame_stride * node]; } __syncthreads(); // 勾配初期化 float dW0[M][N]; float db0[M]; float dW1[M]; float db1; for ( int i = 0; i < M; ++ i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = 0; } } for ( int i = 0; i < M; ++i ) { db0[i] = 0; dW1[i] = 0; } db1 = 0; if ( node < node_size ) { // 1つのSMで1nodeを全フレーム処理 for ( int frame = id; frame < frame_size; frame += id_step ) { int bit = (1 << (frame & 0x1f)); int unit = (frame >> 5); // 入力データ読み込み int x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr[i][node_id][unit]; } // 1段目再計算して2段目逆伝播 float grad1 = dy_ptr[frame]; float grad0[M]; db1 += grad1; for ( int i = 0; i < M; ++i ) { float sig0 = b0[i][node_id]; for ( int j = 0; j < N; ++j ) { float xx = (x[j] & bit) ? BB_BINARY_HI : BB_BINARY_LO; sig0 += W0[i][j][node_id] * xx; // if ( x[j] & bit ) { // sig0 += W0[i][j][node_id]; // } } sig0 = fmaxf(sig0, 0); // ReLU dW1[i] += grad1 * sig0; if ( sig0 > 0 ) { // ReLU grad0[i] = grad1 * W1[i][node_id]; } else { grad0[i] = 0; } } // 1段目逆伝播 float *dx_ptr = &dx_buf[frame_stride * N * node]; float dx[N]; for ( int i = 0; i < N; ++i ) { dx[i] = 0; // dx_ptr[frame_stride * i + frame]; } for ( int i = 0; i < M; ++i ) { db0[i] += grad0[i]; for ( int j = 0; j < N; ++j ) { // if ( x[j] & bit ) { dW0[i][j] += grad0[i]; } float xx = (x[j] & bit) ? 
BB_BINARY_HI : BB_BINARY_LO; dW0[i][j] += grad0[i] * xx; dx[j] += grad0[i] * W0[i][j][node_id]; } } // 誤差書き込み for ( int i = 0; i < N; ++i ) { dx_ptr[frame_stride * i + frame] = dx[i]; } } } __syncthreads(); // 係数統合 for ( int i = 0; i < M; ++i ) { for ( int j = 0; j < N; ++j ) { dW0[i][j] = device_fp32_LocalSum(dW0[i][j], sbuf[node_id]); } db0[i] = device_fp32_LocalSum(db0[i], sbuf[node_id]); dW1[i] = device_fp32_LocalSum(dW1[i], sbuf[node_id]); } db1 = device_fp32_LocalSum(db1, sbuf[node_id]); // 勾配出力 if ( node < node_size ) { for ( int i = id; i < M; i += id_step ) { for ( int j = 0; j < N; ++j ) { hidden_dW[(node * M + i) * N + j] = dW0[i][j] + dW0_prev[i][j][node_id]; } hidden_db[node * M + i] = db0[i] + db0_prev[i][node_id]; output_dW[node * M + i] = dW1[i] + dW1_prev[i][node_id]; } if (id == 0) { output_db[node] = db1 + db1_prev[node_id]; } } __syncthreads(); } template <int N=6, int M=16> int bbcu_bit_fp32_MicroMlp_Backward ( int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int x_frame_stride, int frame_stride, cudaStream_t streamId = 0 ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); { unsigned int const THREAD_SIZE = 256; unsigned int const MAX_FRAME_UNIT = 256; unsigned int const MAX_NODE_UNIT = 16; #if 0 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid(1, (output_node_size + (block.y - 1)) / block.y); kernal_bit_fp32_MicroMlp_Backward<N, M, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>> ( dev_x_buf, dev_dy_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, dev_output_W, dev_output_b, dev_output_dW, dev_output_db, output_node_size, frame_size, x_frame_stride, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } { BB_CUDA_SAFE_CALL(cudaMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float))); int block_x = 1024; while ( block_x / 2 >= frame_size ) { block_x /= 2; } dim3 grid((frame_size + (block_x - 1)) / block_x); dim3 block(block_x); kernal_fp32_MicroMlp_BackwardMarge<N><<<grid, block>>> ( dev_dx_tmp, dev_dx_buf, dev_input_index, output_node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); } return 0; } BBCU_DLL_EXPORT int bbcu_bit_fp32_MicroMlp6x16_Backward ( int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, float const *dev_hidden_W, float const *dev_hidden_b, float *dev_hidden_dW, float *dev_hidden_db, float const *dev_output_W, float const *dev_output_b, float *dev_output_dW, float *dev_output_db, int input_node_size, int output_node_size, int frame_size, int x_frame_stride, int frame_stride, cudaStream_t streamId ) { return bbcu_bit_fp32_MicroMlp_Backward<6, 16> ( dev_x_buf, dev_dy_buf, dev_dx_buf, dev_dx_tmp, dev_input_index, dev_hidden_W, dev_hidden_b, dev_hidden_dW, dev_hidden_db, 
dev_output_W, dev_output_b, dev_output_dW, dev_output_db, input_node_size, output_node_size, frame_size, x_frame_stride, frame_stride, streamId ); } // end of file
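// -------------------------------------------------
//  [Illustrative sketch, not part of bbcu]
// -------------------------------------------------
// Host-side reference of the fp32 MicroMlp forward pass above, handy for
// unit-testing a GPU result against the CPU. It follows the same buffer layouts
// as the kernel: hidden_W is indexed as [(node*M + i)*N + j], hidden_b and
// output_W as [node*M + i], and the x/y buffers as [frame_stride*node + frame].
// The function name is hypothetical.
static void reference_fp32_micro_mlp_forward(
        float const *x_buf, float *y_buf, int const *input_index,
        float const *hidden_W, float const *hidden_b,
        float const *output_W, float const *output_b,
        int N, int M, int output_node_size, int frame_size, int frame_stride)
{
    for ( int node = 0; node < output_node_size; ++node ) {
        for ( int frame = 0; frame < frame_size; ++frame ) {
            float sig1 = output_b[node];
            for ( int i = 0; i < M; ++i ) {
                float sig0 = hidden_b[node * M + i];
                for ( int j = 0; j < N; ++j ) {
                    int in_idx = input_index[node * N + j];
                    sig0 += x_buf[frame_stride * in_idx + frame] * hidden_W[(node * M + i) * N + j];
                }
                sig0 = sig0 > 0.0f ? sig0 : 0.0f;   // ReLU, as in the kernel
                sig1 += sig0 * output_W[node * M + i];
            }
            y_buf[frame_stride * node + frame] = sig1;
        }
    }
}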
#include "libvis/cuda/patch_match_stereo.cuh" #include <math_constants.h> #include "libvis/cuda/cuda_auto_tuner.h" #include "libvis/cuda/cuda_unprojection_lookup.cuh" #include "libvis/cuda/cuda_util.cuh" #include "libvis/cuda/cuda_util.h" #include "libvis/cuda/patch_match_stereo_cost.cuh" #include "libvis/cuda/patch_match_stereo_util.cuh" namespace vis { __global__ void PatchMatchFilterOutliersCUDAKernel( const StereoParametersSingleCUDA p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4 reference_tr_stereo, CUDABuffer_<float> inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float> second_best_costs, float second_best_min_cost_factor) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; if (x >= p.context_radius && y >= p.context_radius && x < p.inv_depth_map.width() - p.context_radius && y < p.inv_depth_map.height() - p.context_radius) { if (!(p.costs(y, x) <= cost_threshold) || // includes NaNs !(p.inv_depth_map(y, x) > min_inv_depth)) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { // If there is another depth value with similar cost, reject the depth // estimate as ambiguous. if (second_best_min_cost_factor > 1) { if (!(second_best_costs(y, x) >= second_best_min_cost_factor * p.costs(y, x))) { // includes NaNs inv_depth_map_out(y, x) = kInvalidInvDepth; return; } } // If at the maximum or minimum depth for this pixel the stereo frame // would not observe that point, discard the pixel (i.e., enforce that // this depth range is observed by both frames). // This is to protect against mistakes that often happen when the frames // overlap in only a small depth range and the actual depth is not within // that range. float2 center_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y); float3 range_min_point = make_float3(required_range_min_depth * center_nxy.x, required_range_min_depth * center_nxy.y, required_range_min_depth); float3 range_max_point = make_float3(required_range_max_depth * center_nxy.x, required_range_max_depth * center_nxy.y, required_range_max_depth); float3 rmin_stereo_point = p.stereo_tr_reference * range_min_point; if (rmin_stereo_point.z <= 0.f) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } const float2 rmin_pxy = p.stereo_camera.Project(rmin_stereo_point); if (rmin_pxy.x < p.context_radius || rmin_pxy.y < p.context_radius || rmin_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmin_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmin_pxy.y, rmin_pxy.x) == 0)) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } float3 rmax_stereo_point = p.stereo_tr_reference * range_max_point; if (rmax_stereo_point.z <= 0.f) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } const float2 rmax_pxy = p.stereo_camera.Project(rmax_stereo_point); if (rmax_pxy.x < p.context_radius || rmax_pxy.y < p.context_radius || rmax_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmax_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmax_pxy.y, rmax_pxy.x) == 0)) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } // Texture filtering: remove pixels with too small gradients along the epipolar line direction in the patch used for matching. // TODO: The code below is only valid for the current ZNCC implementation, not SSD or Census! 
float inv_depth = p.inv_depth_map(y, x); const char2 normal_char = p.normals(y, x); float2 normal_xy = make_float2( normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f)); const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f / inv_depth; const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; float total_gradient_magnitude = 0; for (int sample = 0; sample < kNumSamples; ++ sample) { float dx = p.context_radius * kSamplesCUDA[sample][0]; float dy = p.context_radius * kSamplesCUDA[sample][1]; int ix = ::max(0, ::min(static_cast<int>(p.inv_depth_map.width()) - 1, static_cast<int>(x + dx))); int iy = ::max(0, ::min(static_cast<int>(p.inv_depth_map.height()) - 1, static_cast<int>(y + dy))); if (p.mask.address() && p.mask(iy, ix) == 0) { total_gradient_magnitude = -1; break; } float2 nxy = p.reference_unprojection_lookup.UnprojectPoint(x + dx, y + dy); // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers). float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); float3 original_reference_point = make_float3(nxy.x * plane_depth, nxy.y * plane_depth, plane_depth); float3 original_stereo_point = p.stereo_tr_reference * original_reference_point; constexpr float kShiftZ = 0.01f; float3 shifted_stereo_point = make_float3(original_stereo_point.x, original_stereo_point.y, original_stereo_point.z + kShiftZ); float3 shifted_reference_point = reference_tr_stereo * shifted_stereo_point; const float2 shifted_projection = p.stereo_camera.Project(shifted_reference_point); float2 epipolar_direction = make_float2(shifted_projection.x - 0.5f - (x + dx), shifted_projection.y - 0.5f - (y + dy)); float length = sqrtf(epipolar_direction.x * epipolar_direction.x + epipolar_direction.y * epipolar_direction.y); epipolar_direction = make_float2(epipolar_direction.x / length, epipolar_direction.y / length); // Normalize to length of 1 pixel float reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f, y + dy + 0.5f); float shifted_reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f + epipolar_direction.x, y + dy + 0.5f + epipolar_direction.y); total_gradient_magnitude += fabs(shifted_reference_value - reference_value); } if (total_gradient_magnitude < epipolar_gradient_threshold) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } // Angle filtering. // Estimate the surface normal from the depth map. 
float center_depth = 1.f / p.inv_depth_map(y, x); float right_depth = 1.f / p.inv_depth_map(y, x + 1); float left_depth = 1.f / p.inv_depth_map(y, x - 1); float bottom_depth = 1.f / p.inv_depth_map(y + 1, x); float top_depth = 1.f / p.inv_depth_map(y - 1, x); float2 left_nxy = p.reference_unprojection_lookup.UnprojectPoint(x - 1, y); float3 left_point = make_float3(left_depth * left_nxy.x, left_depth * left_nxy.y, left_depth); float2 right_nxy = p.reference_unprojection_lookup.UnprojectPoint(x + 1, y); float3 right_point = make_float3(right_depth * right_nxy.x, right_depth * right_nxy.y, right_depth); float2 top_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y - 1); float3 top_point = make_float3(top_depth * top_nxy.x, top_depth * top_nxy.y, top_depth); float2 bottom_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y + 1); float3 bottom_point = make_float3(bottom_depth * bottom_nxy.x, bottom_depth * bottom_nxy.y, bottom_depth); float3 center_point = make_float3(center_depth * center_nxy.x, center_depth * center_nxy.y, center_depth); constexpr float kRatioThreshold = 2.f; constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold; float left_dist_squared = SquaredLength(left_point - center_point); float right_dist_squared = SquaredLength(right_point - center_point); float left_right_ratio = left_dist_squared / right_dist_squared; float3 left_to_right; if (left_right_ratio < kRatioThresholdSquared && left_right_ratio > 1.f / kRatioThresholdSquared) { left_to_right = right_point - left_point; } else if (left_dist_squared < right_dist_squared) { left_to_right = center_point - left_point; } else { // left_dist_squared >= right_dist_squared left_to_right = right_point - center_point; } float bottom_dist_squared = SquaredLength(bottom_point - center_point); float top_dist_squared = SquaredLength(top_point - center_point); float bottom_top_ratio = bottom_dist_squared / top_dist_squared; float3 bottom_to_top; if (bottom_top_ratio < kRatioThresholdSquared && bottom_top_ratio > 1.f / kRatioThresholdSquared) { bottom_to_top = top_point - bottom_point; } else if (bottom_dist_squared < top_dist_squared) { bottom_to_top = center_point - bottom_point; } else { // bottom_dist_squared >= top_dist_squared bottom_to_top = top_point - center_point; } float3 normal; CrossProduct(left_to_right, bottom_to_top, &normal); // Apply angle threshold. 
const float normal_length = Norm(normal); const float point_distance = Norm(center_point); const float view_cos_angle = Dot(normal, center_point) / (normal_length * point_distance); if (view_cos_angle > min_cos_angle) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { inv_depth_map_out(y, x) = p.inv_depth_map(y, x); } } } else if (x < p.inv_depth_map.width() && y < p.inv_depth_map.height()) { inv_depth_map_out(y, x) = kInvalidInvDepth; } } void PatchMatchFilterOutliersCUDA( const StereoParametersSingle& p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4& reference_tr_stereo, CUDABuffer_<float>* inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float>* second_best_costs, float second_best_min_cost_factor) { CHECK_CUDA_NO_ERROR(); CUDA_AUTO_TUNE_2D( PatchMatchFilterOutliersCUDAKernel, 16, 16, p.inv_depth_map.width(), p.inv_depth_map.height(), 0, p.stream, /* kernel parameters */ StereoParametersSingleCUDA(p), min_inv_depth, required_range_min_depth, required_range_max_depth, reference_tr_stereo, *inv_depth_map_out, cost_threshold, epipolar_gradient_threshold, min_cos_angle, *second_best_costs, second_best_min_cost_factor); CHECK_CUDA_NO_ERROR(); } template <bool kDebugFilterReasons> __global__ void PatchMatchFilterOutliersCUDAKernel( const StereoParametersMultiCUDA p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4* reference_tr_stereo, CUDABuffer_<float> inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float> second_best_costs, float second_best_min_cost_factor, CUDABuffer_<uchar3> filter_reasons) { // List of filter reasons with debug color: // dark red (127, 0, 0): The depth exceeds the maximum depth // dark green (0, 127, 0): The required depth range is not visible in any stereo image // red (255, 0, 0): The gradients in epipolar line directions are too small for all stereo images // (note: this only uses image-bounds visibility checking in the stereo images, // so it may incorrectly take images into account where the point is occluded) // dark yellow (140, 140, 0): Angle check failed // gray (127, 127, 127): Pixel is too close to the image borders (closer than context radius) // blue (0, 0, 255): Consistency check failed. // green (0, 255, 0): Connected component too small. // black (0, 0, 0): The pixel passed the filters. unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; if (x >= p.context_radius && y >= p.context_radius && x < p.inv_depth_map.width() - p.context_radius && y < p.inv_depth_map.height() - p.context_radius) { if (!(p.inv_depth_map(y, x) > min_inv_depth)) { // includes NaNs if (kDebugFilterReasons) { if (p.inv_depth_map(y, x) != kInvalidInvDepth) { filter_reasons(y, x) = make_uchar3(127, 0, 0); } } inv_depth_map_out(y, x) = kInvalidInvDepth; } else { // If there is another depth value with similar cost, reject the depth // estimate as ambiguous. // if (second_best_min_cost_factor > 1) { // if (!(second_best_costs(y, x) >= second_best_min_cost_factor * costs(y, x))) { // includes NaNs // inv_depth_map_out(y, x) = kInvalidInvDepth; // return; // } // } // If at the maximum or minimum depth for this pixel the stereo frame // would not observe that point, discard the pixel (i.e., enforce that // this depth range is observed by both frames). 
// This is to protect against mistakes that often happen when the frames // overlap in only a small depth range and the actual depth is not within // that range. float2 center_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y); float3 range_min_point = make_float3(required_range_min_depth * center_nxy.x, required_range_min_depth * center_nxy.y, required_range_min_depth); float3 range_max_point = make_float3(required_range_max_depth * center_nxy.x, required_range_max_depth * center_nxy.y, required_range_max_depth); bool valid = false; for (int s = 0; s < p.num_stereo_images; ++ s) { float3 rmin_stereo_point = p.stereo_tr_reference[s] * range_min_point; if (rmin_stereo_point.z <= 0.f) { continue; } const float2 rmin_pxy = p.stereo_camera.Project(rmin_stereo_point); if (rmin_pxy.x < p.context_radius || rmin_pxy.y < p.context_radius || rmin_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmin_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmin_pxy.y, rmin_pxy.x) == 0)) { continue; } float3 rmax_stereo_point = p.stereo_tr_reference[s] * range_max_point; if (rmax_stereo_point.z <= 0.f) { continue; } const float2 rmax_pxy = p.stereo_camera.Project(rmax_stereo_point); if (rmax_pxy.x < p.context_radius || rmax_pxy.y < p.context_radius || rmax_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmax_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmax_pxy.y, rmax_pxy.x) == 0)) { continue; } valid = true; break; } if (!valid) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(0, 127, 0); } return; } // TODO: Texture filtering is currently not implemented for the multi-image case // Texture filtering: remove pixels with too small gradients along the epipolar line direction in the patch used for matching. // TODO: The code below is only valid for the current ZNCC implementation, not SSD or Census! float inv_depth = p.inv_depth_map(y, x); const char2 normal_char = p.normals(y, x); float2 normal_xy = make_float2( normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f)); const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f / inv_depth; const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; valid = false; for (int s = 0; s < p.num_stereo_images; ++ s) { float total_gradient_magnitude = 0; for (int sample = 0; sample < kNumSamples; ++ sample) { float dx = p.context_radius * kSamplesCUDA[sample][0]; float dy = p.context_radius * kSamplesCUDA[sample][1]; if (s == 0) { int ix = ::max(0, ::min(static_cast<int>(p.inv_depth_map.width() - 1), static_cast<int>(x + dx))); int iy = ::max(0, ::min(static_cast<int>(p.inv_depth_map.height() - 1), static_cast<int>(y + dy))); if (p.mask.address() && p.mask(iy, ix) == 0) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(127, 127, 127); } return; } } float2 nxy = p.reference_unprojection_lookup.UnprojectPoint(x + dx, y + dy); // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers). 
float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); float3 original_reference_point = make_float3(nxy.x * plane_depth, nxy.y * plane_depth, plane_depth); float3 original_stereo_point = p.stereo_tr_reference[s] * original_reference_point; if (original_stereo_point.z <= 0) { continue; } const float2 stereo_projection = p.stereo_camera.Project(original_stereo_point); if (stereo_projection.x < p.context_radius || stereo_projection.y < p.context_radius || stereo_projection.x >= p.stereo_camera.width - 1 - p.context_radius || stereo_projection.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(stereo_projection.y, stereo_projection.x) == 0)) { continue; } constexpr float kShiftZ = 0.01f; float3 shifted_stereo_point = make_float3(original_stereo_point.x, original_stereo_point.y, original_stereo_point.z + kShiftZ); float3 shifted_reference_point = reference_tr_stereo[s] * shifted_stereo_point; const float2 shifted_projection = p.stereo_camera.Project(shifted_reference_point); float2 epipolar_direction = make_float2(shifted_projection.x - 0.5f - (x + dx), shifted_projection.y - 0.5f - (y + dy)); float length = sqrtf(epipolar_direction.x * epipolar_direction.x + epipolar_direction.y * epipolar_direction.y); epipolar_direction = make_float2(epipolar_direction.x / length, epipolar_direction.y / length); // Normalize to length of 1 pixel float reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f, y + dy + 0.5f); float shifted_reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f + epipolar_direction.x, y + dy + 0.5f + epipolar_direction.y); total_gradient_magnitude += fabs(shifted_reference_value - reference_value); } if (total_gradient_magnitude >= epipolar_gradient_threshold) { valid = true; break; } } if (!valid) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(255, 0, 0); } return; } // Angle filtering. // Estimate the surface normal from the depth map. 
float center_depth = 1.f / p.inv_depth_map(y, x); float right_depth = 1.f / p.inv_depth_map(y, x + 1); float left_depth = 1.f / p.inv_depth_map(y, x - 1); float bottom_depth = 1.f / p.inv_depth_map(y + 1, x); float top_depth = 1.f / p.inv_depth_map(y - 1, x); float2 left_nxy = p.reference_unprojection_lookup.UnprojectPoint(x - 1, y); float3 left_point = make_float3(left_depth * left_nxy.x, left_depth * left_nxy.y, left_depth); float2 right_nxy = p.reference_unprojection_lookup.UnprojectPoint(x + 1, y); float3 right_point = make_float3(right_depth * right_nxy.x, right_depth * right_nxy.y, right_depth); float2 top_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y - 1); float3 top_point = make_float3(top_depth * top_nxy.x, top_depth * top_nxy.y, top_depth); float2 bottom_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y + 1); float3 bottom_point = make_float3(bottom_depth * bottom_nxy.x, bottom_depth * bottom_nxy.y, bottom_depth); float3 center_point = make_float3(center_depth * center_nxy.x, center_depth * center_nxy.y, center_depth); constexpr float kRatioThreshold = 2.f; constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold; float left_dist_squared = SquaredLength(left_point - center_point); float right_dist_squared = SquaredLength(right_point - center_point); float left_right_ratio = left_dist_squared / right_dist_squared; float3 left_to_right; if (left_right_ratio < kRatioThresholdSquared && left_right_ratio > 1.f / kRatioThresholdSquared) { left_to_right = right_point - left_point; } else if (left_dist_squared < right_dist_squared) { left_to_right = center_point - left_point; } else { // left_dist_squared >= right_dist_squared left_to_right = right_point - center_point; } float bottom_dist_squared = SquaredLength(bottom_point - center_point); float top_dist_squared = SquaredLength(top_point - center_point); float bottom_top_ratio = bottom_dist_squared / top_dist_squared; float3 bottom_to_top; if (bottom_top_ratio < kRatioThresholdSquared && bottom_top_ratio > 1.f / kRatioThresholdSquared) { bottom_to_top = top_point - bottom_point; } else if (bottom_dist_squared < top_dist_squared) { bottom_to_top = center_point - bottom_point; } else { // bottom_dist_squared >= top_dist_squared bottom_to_top = top_point - center_point; } float3 normal; CrossProduct(left_to_right, bottom_to_top, &normal); // Apply angle threshold. 
const float normal_length = Norm(normal); const float point_distance = Norm(center_point); const float view_cos_angle = Dot(normal, center_point) / (normal_length * point_distance); if (view_cos_angle > min_cos_angle) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(140, 140, 0); } } else { inv_depth_map_out(y, x) = p.inv_depth_map(y, x); if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(0, 0, 0); } } } } else if (x < p.inv_depth_map.width() && y < p.inv_depth_map.height()) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(127, 127, 127); } } } void PatchMatchFilterOutliersCUDA( const StereoParametersMulti& p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4* reference_tr_stereo, CUDABuffer_<float>* inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float>* second_best_costs, float second_best_min_cost_factor, CUDABuffer_<uchar3>* filter_reasons) { CHECK_CUDA_NO_ERROR(); bool have_filter_reasons = filter_reasons != nullptr; COMPILE_OPTION(have_filter_reasons, CUDA_AUTO_TUNE_2D_TEMPLATED( PatchMatchFilterOutliersCUDAKernel, 16, 16, p.inv_depth_map.width(), p.inv_depth_map.height(), 0, p.stream, TEMPLATE_ARGUMENTS(_have_filter_reasons), /* kernel parameters */ StereoParametersMultiCUDA(p), min_inv_depth, required_range_min_depth, required_range_max_depth, reference_tr_stereo, *inv_depth_map_out, cost_threshold, epipolar_gradient_threshold, min_cos_angle, *second_best_costs, second_best_min_cost_factor, filter_reasons ? *filter_reasons : CUDABuffer_<uchar3>())); CHECK_CUDA_NO_ERROR(); } }
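// [Illustrative sketch, not part of libvis] The angle filter in the kernels above
// rejects a pixel when the cosine of the angle between the estimated surface
// normal and the viewing ray (the vector from the camera center to the 3D point)
// exceeds min_cos_angle. A self-contained version of that test, using plain
// float3 arithmetic instead of the libvis vector helpers; the function name is
// hypothetical.
__host__ __device__ inline bool ViewAngleRejectsPixel(
    const float3& normal, const float3& center_point, float min_cos_angle) {
  const float normal_length = sqrtf(normal.x * normal.x +
                                    normal.y * normal.y +
                                    normal.z * normal.z);
  const float point_distance = sqrtf(center_point.x * center_point.x +
                                     center_point.y * center_point.y +
                                     center_point.z * center_point.z);
  const float dot = normal.x * center_point.x +
                    normal.y * center_point.y +
                    normal.z * center_point.z;
  const float view_cos_angle = dot / (normal_length * point_distance);
  // Same comparison as in PatchMatchFilterOutliersCUDAKernel: true means the
  // pixel's inverse depth gets set to the invalid value.
  return view_cos_angle > min_cos_angle;
}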
#include "cuda_helper.h" #include "cuda_vectors.h" static uint32_t *d_gnounce[MAX_GPUS]; static uint32_t *d_GNonce[MAX_GPUS]; #define shl(x, n) (x << n) #define shr(x, n) (x >> n) #define ss0(x) (shr(x, 1)^ shl(x, 3) ^ ROTL32(x, 4) ^ ROTL32(x, 19)) #define ss1(x) (shr(x, 1)^ shl(x, 2) ^ ROL8(x) ^ ROTL32(x, 23)) #define ss2(x) (shr(x, 2)^ shl(x, 1) ^ ROTL32(x, 12) ^ ROTL32(x, 25)) #define ss3(x) (shr(x, 2)^ shl(x, 2) ^ ROTL32(x, 15) ^ ROTL32(x, 29)) #define ss4(x) (shr(x, 1) ^ x) #define ss5(x) (shr(x, 2) ^ x) #define rs1(x) ROTL32(x, 3) #define rs2(x) ROTL32(x, 7) #define rs3(x) ROTL32(x, 13) #define rs4(x) ROL16(x) #define rs5(x) ROTL32(x, 19) #define rs6(x) ROTL32(x, 23) #define rs7(x) ROTL32(x, 27) #define TPB 1024 #define NBN 2 __global__ __launch_bounds__(TPB,1) void bmw256_gpu_hash_32(uint32_t threads, uint2 *g_hash, uint32_t *const __restrict__ nonceVector, const uint2 target){ uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ uint32_t M32[16] = { 0 }; *(uint2*)&M32[ 0] = __ldg(&g_hash[thread]); *(uint2*)&M32[ 2] = __ldg(&g_hash[thread + 1 * threads]); *(uint2*)&M32[ 4] = __ldg(&g_hash[thread + 2 * threads]); *(uint2*)&M32[ 6] = __ldg(&g_hash[thread + 3 * threads]); M32[ 8]=0x80; M32[14]=0x100; // Compression256(message); uint32_t Q[32], XL32, XH32; const uint32_t H[16] = { 0x40414243, 0x44454647, 0x48494A4B, 0x4C4D4E4F, 0x50515253, 0x54555657, 0x58595A5B, 0x5C5D5E5F, 0x60616263, 0x64656667, 0x68696A6B, 0x6C6D6E6F, 0x70717273, 0x74757677, 0x78797A7B, 0x7C7D7E7F }; uint32_t tmp[16]; *(uint16*)&tmp[ 0] = *(uint16*)&M32[ 0] ^ *(uint16*)&H[ 0]; Q[ 0] = tmp[ 5] - tmp[ 7] + tmp[10] + tmp[13] + tmp[14]; Q[ 1] = tmp[ 6] - tmp[ 8] + tmp[11] + tmp[14] - tmp[15]; Q[ 2] = tmp[ 0] + tmp[ 7] + tmp[ 9] - tmp[12] + tmp[15]; Q[ 3] = tmp[ 0] - tmp[ 1] + tmp[ 8] - tmp[10] + tmp[13]; Q[ 4] = tmp[ 1] + tmp[ 2] + tmp[ 9] - tmp[11] - tmp[14]; Q[ 5] = tmp[ 3] - tmp[ 2] + tmp[10] - tmp[12] + tmp[15]; Q[ 6] = tmp[ 4] - tmp[ 0] - tmp[ 3] - tmp[11] + tmp[13]; Q[ 7] = tmp[ 1] - tmp[ 4] - tmp[ 5] - tmp[12] - tmp[14]; Q[ 8] = tmp[ 2] - tmp[ 5] - tmp[ 6] + tmp[13] - tmp[15]; Q[ 9] = tmp[ 0] - tmp[ 3] + tmp[ 6] - tmp[ 7] + tmp[14]; Q[10] = tmp[ 8] - tmp[ 1] - tmp[ 4] - tmp[ 7] + tmp[15]; Q[11] = tmp[ 8] - tmp[ 0] - tmp[ 2] - tmp[ 5] + tmp[ 9]; Q[12] = tmp[ 1] + tmp[ 3] - tmp[ 6] - tmp[ 9] + tmp[10]; Q[13] = tmp[ 2] + tmp[ 4] + tmp[ 7] + tmp[10] + tmp[11]; Q[14] = tmp[ 3] - tmp[ 5] + tmp[ 8] - tmp[11] - tmp[12]; Q[15] = tmp[12] - tmp[ 4] - tmp[ 6] - tmp[ 9] + tmp[13]; /* Diffuse the differences in every word in a bijective manner with ssi, and then add the values of the previous double pipe. */ Q[ 0] = ss0(Q[ 0]) + H[ 1]; Q[ 1] = ss1(Q[ 1]) + H[ 2]; Q[ 2] = ss2(Q[ 2]) + H[ 3]; Q[ 3] = ss3(Q[ 3]) + H[ 4]; Q[ 4] = ss4(Q[ 4]) + H[ 5]; Q[ 5] = ss0(Q[ 5]) + H[ 6]; Q[ 6] = ss1(Q[ 6]) + H[ 7]; Q[ 7] = ss2(Q[ 7]) + H[ 8]; Q[ 8] = ss3(Q[ 8]) + H[ 9]; Q[ 9] = ss4(Q[ 9]) + H[10]; Q[10] = ss0(Q[10]) + H[11]; Q[11] = ss1(Q[11]) + H[12]; Q[12] = ss2(Q[12]) + H[13]; Q[13] = ss3(Q[13]) + H[14]; Q[14] = ss4(Q[14]) + H[15]; Q[15] = ss0(Q[15]) + H[ 0]; /* This is the Message expansion or f_1 in the documentation. It has 16 rounds. Blue Midnight Wish has two tunable security parameters. */ /* The parameters are named EXPAND_1_ROUNDS and EXPAND_2_ROUNDS. 
The following relation for these parameters should is satisfied: */ /* EXPAND_1_ROUNDS + EXPAND_2_ROUNDS = 16 */ tmp[ 0] = ROTL32(M32[ 0], 1); tmp[ 1] = ROTL32(M32[ 1], 2); tmp[ 2] = ROTL32(M32[ 2], 3); tmp[ 3] = ROTL32(M32[ 3], 4); tmp[ 4] = ROTL32(M32[ 4], 5); tmp[ 5] = ROTL32(M32[ 5], 6); tmp[ 6] = ROTL32(M32[ 6], 7); tmp[ 7] = ROL8(M32[ 7]); tmp[ 8] = ROTL32(M32[ 8], 9); tmp[14] = ROTL32(M32[14],15); uint32_t tmp2[ 2]; Q[16] = ss1(Q[ 0]) + ss2(Q[ 1]) + ss3(Q[ 2]) + ss0(Q[ 3]) + ss1(Q[ 4]) + ss2(Q[ 5]) + ss3(Q[ 6]) + ss0(Q[ 7]) + ss1(Q[ 8]) + ss2(Q[ 9]) + ss3(Q[10]) + ss0(Q[11]) + ss1(Q[12]) + ss2(Q[13]) + ss3(Q[14]) + ss0(Q[15]) + ((shl(0x05555555,4) + tmp[ 0] + tmp[ 3]) ^ H[ 7]); Q[17] = ss1(Q[ 1]) + ss2(Q[ 2]) + ss3(Q[ 3]) + ss0(Q[ 4]) + ss1(Q[ 5]) + ss2(Q[ 6]) + ss3(Q[ 7]) + ss0(Q[ 8]) + ss1(Q[ 9]) + ss2(Q[10]) + ss3(Q[11]) + ss0(Q[12]) + ss1(Q[13]) + ss2(Q[14]) + ss3(Q[15]) + ss0(Q[16]) + ((17U*(0x05555555) + tmp[ 1] + tmp[ 4]) ^ H[ 8]); tmp2[ 0] = Q[ 2] + Q[ 4] + Q[ 6] + Q[ 8] + Q[10] + Q[12] + Q[14]; tmp2[ 1] = Q[ 3] + Q[ 5] + Q[ 7] + Q[ 9] + Q[11] + Q[13] + Q[15]; Q[18] = rs1(Q[ 3]) + rs2(Q[ 5]) + rs3(Q[ 7]) + rs4(Q[ 9]) + rs5(Q[11]) + rs6(Q[13]) + rs7(Q[15]) + ss4(Q[16]) + ss5(Q[17]) + tmp2[ 0] +((18U*(0x05555555) + tmp[ 2] + tmp[ 5]) ^ H[ 9]); Q[19] = rs1(Q[ 4]) + rs2(Q[ 6]) + rs3(Q[ 8]) + rs4(Q[10]) + rs5(Q[12]) + rs6(Q[14]) + rs7(Q[16]) + ss4(Q[17]) + ss5(Q[18]) + tmp2[ 1] +((19U*(0x05555555) + tmp[ 3] + tmp[ 6]) ^ H[10]); tmp2[ 0]+= Q[16] - Q[ 2]; tmp2[ 1]+= Q[17] - Q[ 3]; Q[20] = rs1(Q[ 5])+rs2(Q[ 7])+rs3(Q[ 9])+rs4(Q[11])+rs5(Q[13])+rs6(Q[15])+rs7(Q[17])+ss4(Q[18])+ss5(Q[19])+tmp2[ 0]+((20U*(0x05555555) + tmp[ 4] + tmp[ 7] - tmp[14]) ^ H[11]); Q[21] = rs1(Q[ 6])+rs2(Q[ 8])+rs3(Q[10])+rs4(Q[12])+rs5(Q[14])+rs6(Q[16])+rs7(Q[18])+ss4(Q[19])+ss5(Q[20])+tmp2[ 1]+((21U*(0x05555555) + tmp[ 5] + tmp[ 8]) ^ H[12]); tmp2[ 0]+= Q[18] - Q[ 4]; tmp2[ 1]+= Q[19] - Q[ 5]; Q[22] = rs1(Q[ 7])+rs2(Q[ 9])+rs3(Q[11])+rs4(Q[13])+rs5(Q[15])+rs6(Q[17])+rs7(Q[19])+ss4(Q[20])+ss5(Q[21])+tmp2[ 0]+((22U*(0x05555555) + tmp[ 6] - tmp[ 0]) ^ H[13]); Q[23] = rs1(Q[ 8])+rs2(Q[10])+rs3(Q[12])+rs4(Q[14])+rs5(Q[16])+rs6(Q[18])+rs7(Q[20])+ss4(Q[21])+ss5(Q[22])+tmp2[ 1]+((23U*(0x05555555) + tmp[ 7] - tmp[ 1]) ^ H[14]); tmp2[ 0]+= Q[20] - Q[ 6]; tmp2[ 1]+= Q[21] - Q[ 7]; Q[24] = rs1(Q[ 9])+rs2(Q[11])+rs3(Q[13])+rs4(Q[15])+rs5(Q[17])+rs6(Q[19])+rs7(Q[21])+ss4(Q[22])+ss5(Q[23])+tmp2[ 0]+((24U*(0x05555555) + tmp[ 8] - tmp[ 2]) ^ H[15]); Q[25] = rs1(Q[10])+rs2(Q[12])+rs3(Q[14])+rs4(Q[16])+rs5(Q[18])+rs6(Q[20])+rs7(Q[22])+ss4(Q[23])+ss5(Q[24])+tmp2[ 1]+((25U*(0x05555555) - tmp[ 3]) ^ H[ 0]); tmp2[ 0]+= Q[22] - Q[ 8]; tmp2[ 1]+= Q[23] - Q[ 9]; Q[26] = rs1(Q[11])+rs2(Q[13])+rs3(Q[15])+rs4(Q[17])+rs5(Q[19])+rs6(Q[21])+rs7(Q[23])+ss4(Q[24])+ss5(Q[25])+tmp2[ 0]+((26U*(0x05555555) - tmp[ 4]) ^ H[ 1]); Q[27] = rs1(Q[12])+rs2(Q[14])+rs3(Q[16])+rs4(Q[18])+rs5(Q[20])+rs6(Q[22])+rs7(Q[24])+ss4(Q[25])+ss5(Q[26])+tmp2[ 1]+((27U*(0x05555555) + tmp[14] - tmp[ 5]) ^ H[ 2]); tmp2[ 0]+= Q[24] - Q[10]; tmp2[ 1]+= Q[25] - Q[11]; Q[28] = rs1(Q[13])+rs2(Q[15])+rs3(Q[17])+rs4(Q[19])+rs5(Q[21])+rs6(Q[23])+rs7(Q[25])+ss4(Q[26])+ss5(Q[27])+tmp2[ 0]+((28U*(0x05555555) - tmp[ 6]) ^ H[ 3]); Q[29] = rs1(Q[14])+rs2(Q[16])+rs3(Q[18])+rs4(Q[20])+rs5(Q[22])+rs6(Q[24])+rs7(Q[26])+ss4(Q[27])+ss5(Q[28])+tmp2[ 1]+((29U*(0x05555555) + tmp[ 0] - tmp[ 7]) ^ H[ 4]); tmp2[ 0]+= Q[26] - Q[12]; tmp2[ 1]+= Q[27] - Q[13]; Q[30] = rs1(Q[15])+rs2(Q[17])+rs3(Q[19])+rs4(Q[21])+rs5(Q[23])+rs6(Q[25])+rs7(Q[27])+ss4(Q[28])+ss5(Q[29])+tmp2[ 0]+((30U*(0x05555555) + 
tmp[14] + tmp[ 1] - tmp[ 8]) ^ H[ 5]); Q[31] = rs1(Q[16])+rs2(Q[18])+rs3(Q[20])+rs4(Q[22])+rs5(Q[24])+rs6(Q[26])+rs7(Q[28])+ss4(Q[29])+ss5(Q[30])+tmp2[ 1]+((31U*(0x05555555) + tmp[ 2]) ^ H[ 6]); /* Blue Midnight Wish has two temporary cummulative variables that accumulate via XORing 16 new variables that are produced in the Message Expansion part. */ XL32 = Q[16] ^ xor3x(Q[17], Q[18], xor3x(Q[19], Q[20], xor3x(Q[21], Q[22], Q[23]))); XH32 = xor3x(XL32, Q[24], xor3x(Q[25], Q[26], xor3x(Q[27], Q[28], xor3x(Q[29], Q[30], Q[31])))); /* This part is the function f_2 - in the documentation */ /* Compute the double chaining pipe for the next message block. */ M32[0] = xor3x(shl(XH32, 5), shr(Q[16], 5), M32[ 0]) + xor3x(XL32, Q[24], Q[ 0]); M32[1] = xor3x(shr(XH32, 7), shl(Q[17], 8), M32[ 1]) + xor3x(XL32, Q[25], Q[ 1]); M32[2] = xor3x(shr(XH32, 5), shl(Q[18], 5), M32[ 2]) + xor3x(XL32, Q[26], Q[ 2]); M32[3] = xor3x(shr(XH32, 1), shl(Q[19], 5), M32[ 3]) + xor3x(XL32, Q[27], Q[ 3]); M32[4] = xor3x(shr(XH32, 3), Q[20] , M32[ 4]) + xor3x(XL32, Q[28], Q[ 4]); M32[5] = xor3x(shl(XH32, 6), shr(Q[21], 6), M32[ 5]) + xor3x(XL32, Q[29], Q[ 5]); M32[6] = xor3x(shr(XH32, 4), shl(Q[22], 6), M32[ 6]) + xor3x(XL32, Q[30], Q[ 6]); M32[7] = xor3x(shr(XH32,11), shl(Q[23], 2), M32[ 7]) + xor3x(XL32, Q[31], Q[ 7]); M32[ 8] = ROTL32(M32[ 4], 9) + xor3x(XH32, Q[24], M32[ 8]) + xor3x(shl(XL32, 8), Q[23], Q[ 8]); M32[ 9] = ROTL32(M32[ 5],10) + xor3x(XH32, Q[25], M32[ 9]) + xor3x(shr(XL32, 6), Q[16], Q[ 9]); M32[10] = ROTL32(M32[ 6],11) + xor3x(XH32, Q[26], M32[10]) + xor3x(shl(XL32, 6), Q[17], Q[10]); M32[11] = ROTL32(M32[ 7],12) + xor3x(XH32, Q[27], M32[11]) + xor3x(shl(XL32, 4), Q[18], Q[11]); M32[12] = ROTL32(M32[ 0],13) + xor3x(XH32, Q[28], M32[12]) + xor3x(shr(XL32, 3), Q[19], Q[12]); M32[13] = ROTL32(M32[ 1],14) + xor3x(XH32, Q[29], M32[13]) + xor3x(shr(XL32, 4), Q[20], Q[13]); M32[14] = ROTL32(M32[ 2],15) + xor3x(XH32, Q[30], M32[14]) + xor3x(shr(XL32, 7), Q[21], Q[14]); M32[15] = ROL16(M32[ 3]) + xor3x(XH32, Q[31], M32[15]) + xor3x(shr(XL32, 2), Q[22], Q[15]); // Compression256_2(M32); const uint32_t H2[16] = { 0xaaaaaaa0, 0xaaaaaaa1, 0xaaaaaaa2, 0xaaaaaaa3, 0xaaaaaaa4, 0xaaaaaaa5, 0xaaaaaaa6, 0xaaaaaaa7, 0xaaaaaaa8, 0xaaaaaaa9, 0xaaaaaaaa, 0xaaaaaaab, 0xaaaaaaac, 0xaaaaaaad, 0xaaaaaaae, 0xaaaaaaaf }; *(uint16*)&tmp[ 0] = *(uint16*)&M32[ 0] ^ *(uint16*)&H2[ 0]; Q[ 0] = tmp[ 5] - tmp[ 7] + tmp[10] + tmp[13] + tmp[14]; Q[ 1] = tmp[ 6] - tmp[ 8] + tmp[11] + tmp[14] - tmp[15]; Q[ 2] = tmp[ 0] + tmp[ 7] + tmp[ 9] - tmp[12] + tmp[15]; Q[ 3] = tmp[ 0] - tmp[ 1] + tmp[ 8] - tmp[10] + tmp[13]; Q[ 4] = tmp[ 1] + tmp[ 2] + tmp[ 9] - tmp[11] - tmp[14]; Q[ 5] = tmp[ 3] - tmp[ 2] + tmp[10] - tmp[12] + tmp[15]; Q[ 6] = tmp[ 4] - tmp[ 0] - tmp[ 3] - tmp[11] + tmp[13]; Q[ 7] = tmp[ 1] - tmp[ 4] - tmp[ 5] - tmp[12] - tmp[14]; Q[ 8] = tmp[ 2] - tmp[ 5] - tmp[ 6] + tmp[13] - tmp[15]; Q[ 9] = tmp[ 0] - tmp[ 3] + tmp[ 6] - tmp[ 7] + tmp[14]; Q[10] = tmp[ 8] - tmp[ 1] - tmp[ 4] - tmp[ 7] + tmp[15]; Q[11] = tmp[ 8] - tmp[ 0] - tmp[ 2] - tmp[ 5] + tmp[ 9]; Q[12] = tmp[ 1] + tmp[ 3] - tmp[ 6] - tmp[ 9] + tmp[10]; Q[13] = tmp[ 2] + tmp[ 4] + tmp[ 7] + tmp[10] + tmp[11]; Q[14] = tmp[ 3] - tmp[ 5] + tmp[ 8] - tmp[11] - tmp[12]; Q[15] = tmp[12] - tmp[ 4] - tmp[ 6] - tmp[ 9] + tmp[13]; /* Diffuse the differences in every word in a bijective manner with ssi, and then add the values of the previous double pipe. 
*/ Q[ 0] = ss0(Q[ 0]) + H2[ 1]; Q[ 1] = ss1(Q[ 1]) + H2[ 2]; Q[ 2] = ss2(Q[ 2]) + H2[ 3]; Q[ 3] = ss3(Q[ 3]) + H2[ 4]; Q[ 4] = ss4(Q[ 4]) + H2[ 5]; Q[ 5] = ss0(Q[ 5]) + H2[ 6]; Q[ 6] = ss1(Q[ 6]) + H2[ 7]; Q[ 7] = ss2(Q[ 7]) + H2[ 8]; Q[ 8] = ss3(Q[ 8]) + H2[ 9]; Q[ 9] = ss4(Q[ 9]) + H2[10]; Q[10] = ss0(Q[10]) + H2[11]; Q[11] = ss1(Q[11]) + H2[12]; Q[12] = ss2(Q[12]) + H2[13]; Q[13] = ss3(Q[13]) + H2[14]; Q[14] = ss4(Q[14]) + H2[15]; Q[15] = ss0(Q[15]) + H2[ 0]; /* This is the Message expansion or f_1 in the documentation. It has 16 rounds. Blue Midnight Wish has two tunable security parameters. */ /* The parameters are named EXPAND_1_ROUNDS and EXPAND_2_ROUNDS. The following relation for these parameters should is satisfied: */ /* EXPAND_1_ROUNDS + EXPAND_2_ROUNDS = 16 */ tmp[ 0] = ROTL32(M32[ 0], 1); tmp[ 1] = ROTL32(M32[ 1], 2); tmp[ 2] = ROTL32(M32[ 2], 3); tmp[ 3] = ROTL32(M32[ 3], 4); tmp[ 4] = ROTL32(M32[ 4], 5); tmp[ 5] = ROTL32(M32[ 5], 6); tmp[ 6] = ROTL32(M32[ 6], 7); tmp[ 7] = ROL8(M32[ 7]); tmp[ 8] = ROTL32(M32[ 8], 9); tmp[ 9] = ROTL32(M32[ 9],10); tmp[10] = ROTL32(M32[10],11); tmp[11] = ROTL32(M32[11],12); tmp[12] = ROTL32(M32[12],13); tmp[13] = ROTL32(M32[13],14); tmp[14] = ROTL32(M32[14],15); tmp[15] = ROL16(M32[15]); Q[16] = ss1(Q[ 0]) + ss2(Q[ 1]) + ss3(Q[ 2]) + ss0(Q[ 3]) + ss1(Q[ 4]) + ss2(Q[ 5]) + ss3(Q[ 6]) + ss0(Q[ 7]) + ss1(Q[ 8]) + ss2(Q[ 9]) + ss3(Q[10]) + ss0(Q[11]) + ss1(Q[12]) + ss2(Q[13]) + ss3(Q[14]) + ss0(Q[15]) + ((shl(0x05555555,4) + tmp[ 0] + tmp[ 3] - tmp[10]) ^ H2[ 7]); Q[17] = ss1(Q[ 1]) + ss2(Q[ 2]) + ss3(Q[ 3]) + ss0(Q[ 4]) + ss1(Q[ 5]) + ss2(Q[ 6]) + ss3(Q[ 7]) + ss0(Q[ 8]) + ss1(Q[ 9]) + ss2(Q[10]) + ss3(Q[11]) + ss0(Q[12]) + ss1(Q[13]) + ss2(Q[14]) + ss3(Q[15]) + ss0(Q[16]) + ((17U*(0x05555555) + tmp[ 1] + tmp[ 4] - tmp[11]) ^ H2[ 8]); tmp2[ 0] = Q[ 2] + Q[ 4] + Q[ 6] + Q[ 8] + Q[10] + Q[12] + Q[14]; tmp2[ 1] = Q[ 3] + Q[ 5] + Q[ 7] + Q[ 9] + Q[11] + Q[13] + Q[15]; Q[18] = rs1(Q[ 3])+rs2(Q[ 5])+rs3(Q[ 7])+rs4(Q[ 9])+rs5(Q[11])+rs6(Q[13])+rs7(Q[15])+ss4(Q[16])+ss5(Q[17])+tmp2[ 0]+((18U*(0x05555555) + tmp[ 2] + tmp[ 5] - tmp[12]) ^ H2[ 9]); Q[19] = rs1(Q[ 4])+rs2(Q[ 6])+rs3(Q[ 8])+rs4(Q[10])+rs5(Q[12])+rs6(Q[14])+rs7(Q[16])+ss4(Q[17])+ss5(Q[18])+tmp2[ 1]+((19U*(0x05555555) + tmp[ 3] + tmp[ 6] - tmp[13]) ^ H2[10]); tmp2[ 0]+= Q[16] - Q[ 2]; tmp2[ 1]+= Q[17] - Q[ 3]; Q[20] = rs1(Q[ 5])+rs2(Q[ 7])+rs3(Q[ 9])+rs4(Q[11])+rs5(Q[13])+rs6(Q[15])+rs7(Q[17])+ss4(Q[18])+ss5(Q[19])+tmp2[ 0]+((20U*(0x05555555) + tmp[ 4] + tmp[ 7] - tmp[14]) ^ H2[11]); Q[21] = rs1(Q[ 6])+rs2(Q[ 8])+rs3(Q[10])+rs4(Q[12])+rs5(Q[14])+rs6(Q[16])+rs7(Q[18])+ss4(Q[19])+ss5(Q[20])+tmp2[ 1]+((21U*(0x05555555) + tmp[ 5] + tmp[ 8] - tmp[15]) ^ H2[12]); tmp2[ 0]+= Q[18] - Q[ 4]; tmp2[ 1]+= Q[19] - Q[ 5]; Q[22] = rs1(Q[ 7])+rs2(Q[ 9])+rs3(Q[11])+rs4(Q[13])+rs5(Q[15])+rs6(Q[17])+rs7(Q[19])+ss4(Q[20])+ss5(Q[21])+tmp2[ 0]+((22U*(0x05555555) + tmp[ 6] + tmp[ 9] - tmp[ 0]) ^ H2[13]); Q[23] = rs1(Q[ 8])+rs2(Q[10])+rs3(Q[12])+rs4(Q[14])+rs5(Q[16])+rs6(Q[18])+rs7(Q[20])+ss4(Q[21])+ss5(Q[22])+tmp2[ 1]+((23U*(0x05555555) + tmp[ 7] + tmp[10] - tmp[ 1]) ^ H2[14]); tmp2[ 0]+= Q[20] - Q[ 6]; tmp2[ 1]+= Q[21] - Q[ 7]; Q[24] = rs1(Q[ 9])+rs2(Q[11])+rs3(Q[13])+rs4(Q[15])+rs5(Q[17])+rs6(Q[19])+rs7(Q[21])+ss4(Q[22])+ss5(Q[23])+tmp2[ 0]+((24U*(0x05555555) + tmp[ 8] + tmp[11] - tmp[ 2]) ^ H2[15]); Q[25] = rs1(Q[10])+rs2(Q[12])+rs3(Q[14])+rs4(Q[16])+rs5(Q[18])+rs6(Q[20])+rs7(Q[22])+ss4(Q[23])+ss5(Q[24])+tmp2[ 1]+((25U*(0x05555555) + tmp[ 9] + tmp[12] - tmp[ 3]) ^ H2[ 0]); tmp2[ 0]+= Q[22] - Q[ 8]; tmp2[ 1]+= 
Q[23] - Q[ 9]; Q[26] = rs1(Q[11])+rs2(Q[13])+rs3(Q[15])+rs4(Q[17])+rs5(Q[19])+rs6(Q[21])+rs7(Q[23])+ss4(Q[24])+ss5(Q[25])+tmp2[ 0]+((26U*(0x05555555) + tmp[10] + tmp[13] - tmp[ 4]) ^ H2[ 1]); Q[27] = rs1(Q[12])+rs2(Q[14])+rs3(Q[16])+rs4(Q[18])+rs5(Q[20])+rs6(Q[22])+rs7(Q[24])+ss4(Q[25])+ss5(Q[26])+tmp2[ 1]+((27U*(0x05555555) + tmp[11] + tmp[14] - tmp[ 5]) ^ H2[ 2]); tmp2[ 0]+= Q[24] - Q[10]; tmp2[ 1]+= Q[25] - Q[11]; Q[28] = rs1(Q[13])+rs2(Q[15])+rs3(Q[17])+rs4(Q[19])+rs5(Q[21])+rs6(Q[23])+rs7(Q[25])+ss4(Q[26])+ss5(Q[27])+tmp2[ 0]+((28U*(0x05555555) + tmp[12] + tmp[15] - tmp[ 6]) ^ H2[ 3]); Q[29] = rs1(Q[14])+rs2(Q[16])+rs3(Q[18])+rs4(Q[20])+rs5(Q[22])+rs6(Q[24])+rs7(Q[26])+ss4(Q[27])+ss5(Q[28])+tmp2[ 1]+((29U*(0x05555555) + tmp[13] + tmp[ 0] - tmp[ 7]) ^ H2[ 4]); tmp2[ 0]+= Q[26] - Q[12]; tmp2[ 1]+= Q[27] - Q[13]; Q[30] = rs1(Q[15])+rs2(Q[17])+rs3(Q[19])+rs4(Q[21])+rs5(Q[23])+rs6(Q[25])+rs7(Q[27])+ss4(Q[28])+ss5(Q[29])+tmp2[ 0]+((30U*(0x05555555) + tmp[14] + tmp[ 1] - tmp[ 8]) ^ H2[ 5]); Q[31] = rs1(Q[16])+rs2(Q[18])+rs3(Q[20])+rs4(Q[22])+rs5(Q[24])+rs6(Q[26])+rs7(Q[28])+ss4(Q[29])+ss5(Q[30])+tmp2[ 1]+((31U*(0x05555555) + tmp[15] + tmp[ 2] - tmp[ 9]) ^ H2[ 6]); /* Blue Midnight Wish has two temporary cummulative variables that accumulate via XORing */ /* 16 new variables that are produced in the Message Expansion part. */ XL32 = Q[16] ^ Q[17] ^ Q[18] ^ Q[19] ^ Q[20] ^ Q[21] ^ Q[22] ^ Q[23]; XH32 = XL32 ^ Q[24] ^ Q[25] ^ Q[26] ^ Q[27] ^ Q[28] ^ Q[29] ^ Q[30] ^ Q[31]; M32[ 3] = (M32[ 3] ^ shl(Q[19], 5) ^ shr(XH32, 1)) + (Q[27] ^ Q[ 3] ^ XL32); M32[15] = ROL16(M32[ 3]) + (Q[31] ^ M32[15] ^ XH32) + (Q[22] ^ Q[15] ^ shr(XL32, 2)); if (M32[15] <= target.y){ M32[ 2] = xor3x(shr(XH32, 5), shl(Q[18], 5), M32[ 2]) + xor3x(XL32, Q[26], Q[ 2]); M32[14] = ROTL32(M32[ 2], 15) + xor3x(XH32, Q[30], M32[14]) + xor3x(shr(XL32, 7), Q[21], Q[14]); if (M32[14] <= target.x){ uint32_t tmp = atomicExch(&nonceVector[0], thread); if (tmp != 0) nonceVector[1] = tmp; } } } } __host__ void bmw256_cpu_hash_32(int thr_id, uint32_t threads, uint2 *g_hash, uint32_t *resultnonces, const uint2 target) { const dim3 grid((threads + TPB - 1) / TPB); const dim3 block(TPB); bmw256_gpu_hash_32<<<grid, block>>>(threads, g_hash, d_GNonce[thr_id], target); // cudaThreadSynchronize(); cudaMemcpy(d_gnounce[thr_id], d_GNonce[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost); resultnonces[0] = *(d_gnounce[thr_id]); resultnonces[1] = *(d_gnounce[thr_id] + 1); } __host__ void bmw256_cpu_init(int thr_id) { cudaMalloc(&d_GNonce[thr_id], 2 * sizeof(uint32_t)); cudaMallocHost(&d_gnounce[thr_id], 2 * sizeof(uint32_t)); } __host__ void bmw_set_output(int thr_id) { cudaMemset(d_GNonce[thr_id], 0, 2 * sizeof(uint32_t)); } __host__ void bmw256_cpu_free(int thr_id) { cudaFree(d_GNonce[thr_id]); cudaFreeHost(d_gnounce[thr_id]); }
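// --- Usage sketch (not part of the original file) ------------------------------
// A minimal host-side sequence showing how the helpers above are meant to be
// chained: allocate the two-entry nonce buffers once, clear them before each
// batch, launch the search, then read back up to two candidate nonces.
// Assumptions (not from the original source): thr_id 0 is a valid GPU slot and
// d_hash was filled by an earlier pipeline stage with one uint2 hash per thread.
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>

static bool bmw256_scan_batch_sketch(uint2 *d_hash, uint32_t threads, uint2 target)
{
	const int thr_id = 0;
	uint32_t found[2] = { 0, 0 };

	bmw256_cpu_init(thr_id);       // allocate the device/host nonce buffers
	bmw_set_output(thr_id);        // reset any candidates from a previous batch
	bmw256_cpu_hash_32(thr_id, threads, d_hash, found, target);
	bmw256_cpu_free(thr_id);

	if (found[0] != 0)
		printf("candidate nonce(s): %u %u\n", found[0], found[1]);
	return found[0] != 0;
}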
the_stack
//#pragma once #include <gunrock/util/str_to_T.cuh> namespace gunrock { namespace util { template <> long strtoT_simple<long>(const char *str, char **str_end, int base) { return strtol(str, str_end, base); } template <> unsigned long strtoT_simple<unsigned long>(const char *str, char **str_end, int base) { return strtoul(str, str_end, base); } template <> long long strtoT_simple<long long>(const char *str, char **str_end, int base) { return strtoll(str, str_end, base); } template <> unsigned long long strtoT_simple<unsigned long long>(const char *str, char **str_end, int base) { return strtoull(str, str_end, base); } template <> char strtoT_simple<char>(const char *str, char **str_end, int base) { long val = strtoT_simple<long>(str, str_end, base); if (val < CHAR_MIN) val = CHAR_MIN; if (val > CHAR_MAX) val = CHAR_MAX; return (char)val; } template <> signed char strtoT_simple<signed char>(const char *str, char **str_end, int base) { signed long val = strtoT_simple<signed long>(str, str_end, base); if (val < SCHAR_MIN) val = SCHAR_MIN; if (val > SCHAR_MAX) val = SCHAR_MAX; return (signed char)val; } template <> unsigned char strtoT_simple<unsigned char>(const char *str, char **str_end, int base) { unsigned long val = strtoT_simple<unsigned long>(str, str_end, base); if (val > UCHAR_MAX) val = UCHAR_MAX; return (unsigned char)val; } template <> short strtoT_simple<short>(const char *str, char **str_end, int base) { long val = strtoT_simple<long>(str, str_end, base); if (val < SHRT_MIN) val = SHRT_MIN; if (val > SHRT_MAX) val = SHRT_MAX; return (short)val; } template <> unsigned short strtoT_simple<unsigned short>(const char *str, char **str_end, int base) { unsigned long val = strtoT_simple<unsigned long>(str, str_end, base); if (val > USHRT_MAX) val = USHRT_MAX; return (unsigned short)val; } template <> int strtoT_simple<int>(const char *str, char **str_end, int base) { long val = strtoT_simple<long>(str, str_end, base); if (val < INT_MIN) val = INT_MIN; if (val > INT_MAX) val = INT_MAX; return (int)val; } template <> unsigned int strtoT_simple<unsigned int>(const char *str, char **str_end, int base) { unsigned long val = strtoT_simple<unsigned long>(str, str_end, base); if (val > UINT_MAX) val = UINT_MAX; return (unsigned int)val; } template <> float strtoT_simple<float>(const char *str, char **str_end, int base) { return strtof(str, str_end); } template <> double strtoT_simple<double>(const char *str, char **str_end, int base) { return strtod(str, str_end); } template <> long double strtoT_simple<long double>(const char *str, char **str_end, int base) { return strtold(str, str_end); } template <> bool strtoT_simple<bool>(const char *str, char **str_end, int base) { unsigned int i = 0; unsigned int length = strlen(str); while (i < length) { if (isspace(str[i])) i++; else break; } if (i + 5 <= length) { // std::cout << "Cond 1" << std::endl; if (tolower(str[i]) == 'f' && tolower(str[i + 1]) == 'a' && tolower(str[i + 2]) == 'l' && tolower(str[i + 3]) == 's' && tolower(str[i + 4]) == 'e') { *str_end = const_cast<char *>(str) + i + 5; return false; } } if (i + 4 <= length) { // std::cout << "Cond 2" << std::endl; if (tolower(str[i]) == 't' && tolower(str[i + 1]) == 'r' && tolower(str[i + 2]) == 'u' && tolower(str[i + 3]) == 'e') { *str_end = const_cast<char *>(str) + i + 4; return true; } } if (i + 1 <= length) { // std::cout << "Cond 3" << std::endl; if (str[i] == '0' || tolower(str[i]) == 'f') { *str_end = const_cast<char *>(str) + i + 1; return false; } if (str[i] == '1' || tolower(str[i]) 
== 't') { *str_end = const_cast<char *>(str) + i + 1; return true; } } *str_end = const_cast<char *>(str) + i; return true; } template <> char *strtoT_simple<char *>(const char *str, char **str_end, int base) { *str_end = const_cast<char *>(str) + strlen(str); return const_cast<char *>(str); } template <> std::string strtoT_simple<std::string>(const char *str, char **str_end, int base) { *str_end = const_cast<char *>(str) + strlen(str); return std::string(str); } std::string TypeName(const std::type_info *t_info) { if (std::type_index(*t_info) == std::type_index(typeid(char))) return "char"; if (std::type_index(*t_info) == std::type_index(typeid(signed char))) return "signed char"; if (std::type_index(*t_info) == std::type_index(typeid(unsigned char))) return "unsigned char"; if (std::type_index(*t_info) == std::type_index(typeid(short))) return "short"; if (std::type_index(*t_info) == std::type_index(typeid(unsigned short))) return "unsigned short"; if (std::type_index(*t_info) == std::type_index(typeid(int))) return "int"; if (std::type_index(*t_info) == std::type_index(typeid(unsigned int))) return "unsigned int"; if (std::type_index(*t_info) == std::type_index(typeid(long))) return "long"; if (std::type_index(*t_info) == std::type_index(typeid(unsigned long))) return "unsigned long"; if (std::type_index(*t_info) == std::type_index(typeid(long long))) return "long long"; if (std::type_index(*t_info) == std::type_index(typeid(unsigned long))) return "unsigned long long"; if (std::type_index(*t_info) == std::type_index(typeid(bool))) return "bool"; if (std::type_index(*t_info) == std::type_index(typeid(float))) return "float"; if (std::type_index(*t_info) == std::type_index(typeid(double))) return "double"; if (std::type_index(*t_info) == std::type_index(typeid(long double))) return "long double"; if (std::type_index(*t_info) == std::type_index(typeid(std::string))) return "std::string"; if (std::type_index(*t_info) == std::type_index(typeid(char *))) return "char*"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<char>))) return "std::vector<char>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<signed char>))) return "std::vector<signed char>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned char>))) return "std::vector<unsigned char>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<short>))) return "std::vector<short>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned short>))) return "std::vector<unsigned short>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<int>))) return "std::vector<int>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned int>))) return "std::vector<unsigned int>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long>))) return "std::vector<long>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned long>))) return "std::vector<unsigned long>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long long>))) return "std::vector<long long>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned long>))) return "std::vector<unsigned long long>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<bool>))) return "std::vector<bool>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<float>))) return "std::vector<float>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<double>))) 
return "std::vector<double>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long double>))) return "std::vector<long double>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<std::string>))) return "std::vector<std::string>"; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<char *>))) return "std::vector<char*>"; return std::string(t_info->name()); } bool isVector(const std::type_info *t_info) { if (std::type_index(*t_info) == std::type_index(typeid(std::vector<char>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<signed char>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned char>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<short>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned short>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<int>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned int>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned long>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long long>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned long long>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<float>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<double>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long double>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<bool>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<std::string>))) return true; if (std::type_index(*t_info) == std::type_index(typeid(std::vector<char *>))) return true; return false; } const std::type_info *toVector(const std::type_info *t_info) { if (std::type_index(*t_info) == std::type_index(typeid(char))) return &typeid(std::vector<char>); if (std::type_index(*t_info) == std::type_index(typeid(signed char))) return &typeid(std::vector<signed char>); if (std::type_index(*t_info) == std::type_index(typeid(unsigned char))) return &typeid(std::vector<unsigned char>); if (std::type_index(*t_info) == std::type_index(typeid(short))) return &typeid(std::vector<short>); if (std::type_index(*t_info) == std::type_index(typeid(unsigned short))) return &typeid(std::vector<unsigned short>); if (std::type_index(*t_info) == std::type_index(typeid(int))) return &typeid(std::vector<int>); if (std::type_index(*t_info) == std::type_index(typeid(unsigned int))) return &typeid(std::vector<unsigned int>); if (std::type_index(*t_info) == std::type_index(typeid(long))) return &typeid(std::vector<long>); if (std::type_index(*t_info) == std::type_index(typeid(unsigned long))) return &typeid(std::vector<unsigned long>); if (std::type_index(*t_info) == std::type_index(typeid(long long))) return &typeid(std::vector<long long>); if (std::type_index(*t_info) == std::type_index(typeid(unsigned long long))) return &typeid(std::vector<unsigned long long>); if (std::type_index(*t_info) == std::type_index(typeid(float))) return &typeid(std::vector<float>); if (std::type_index(*t_info) == std::type_index(typeid(double))) return &typeid(std::vector<double>); if 
(std::type_index(*t_info) == std::type_index(typeid(long double))) return &typeid(std::vector<long double>); if (std::type_index(*t_info) == std::type_index(typeid(bool))) return &typeid(std::vector<bool>); if (std::type_index(*t_info) == std::type_index(typeid(std::string))) return &typeid(std::vector<std::string>); if (std::type_index(*t_info) == std::type_index(typeid(char *))) return &typeid(std::vector<char *>); return NULL; } bool isValidString(const char *str, const std::type_info *t_info, int base) { if (std::type_index(*t_info) == std::type_index(typeid(char))) return isValidString<char>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(signed char))) return isValidString<signed char>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(unsigned char))) return isValidString<unsigned char>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(short))) return isValidString<short>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(unsigned short))) return isValidString<unsigned short>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(int))) return isValidString<int>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(unsigned int))) return isValidString<unsigned int>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(long))) return isValidString<long>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(unsigned long))) return isValidString<unsigned long>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(long long))) return isValidString<long long>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(unsigned long long))) return isValidString<unsigned long long>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(float))) return isValidString<float>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(double))) return isValidString<double>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(long double))) return isValidString<long double>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(bool))) return isValidString<bool>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::string))) return isValidString<std::string>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(char *))) return isValidString<char *>(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<char>))) return isValidString<std::vector<char> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<signed char>))) return isValidString<std::vector<signed char> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned char>))) return isValidString<std::vector<unsigned char> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<short>))) return isValidString<std::vector<short> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned short>))) return isValidString<std::vector<unsigned short> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<int>))) return isValidString<std::vector<int> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned int>))) return isValidString<std::vector<unsigned int> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long>))) return isValidString<std::vector<long> >(str, base); if (std::type_index(*t_info) == 
std::type_index(typeid(std::vector<unsigned long>))) return isValidString<std::vector<unsigned long> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long long>))) return isValidString<std::vector<long long> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<unsigned long long>))) return isValidString<std::vector<unsigned long long> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<float>))) return isValidString<std::vector<float> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<double>))) return isValidString<std::vector<double> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<long double>))) return isValidString<std::vector<long double> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<bool>))) return isValidString<std::vector<bool> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<std::string>))) return isValidString<std::vector<std::string> >(str, base); if (std::type_index(*t_info) == std::type_index(typeid(std::vector<char *>))) return isValidString<std::vector<char *> >(str, base); return true; } bool isValidString(const std::string str, const std::type_info *t_info, int base) { return isValidString(str.c_str(), t_info, base); } } // namespace util } // namespace gunrock template <> std::ostream &operator<<(std::ostream &sout, const std::vector<bool> &vec) { bool first_element = true; for (auto item : vec) { sout << (first_element ? "" : ",") << (item ? "true" : "false"); first_element = false; } return sout; } template <> std::istream &operator>>(std::istream &s_in, std::vector<bool> &vec) { vec.clear(); std::string str = "", item = ""; char *str_end; s_in >> str; for (unsigned int i = 0; i < str.length(); i++) { if (str[i] == ',') { vec.push_back(gunrock::util::strtoT<bool>(item.c_str(), &str_end)); item = ""; } else item = item + str[i]; } vec.push_back(gunrock::util::strtoT<bool>(item.c_str(), &str_end)); item = ""; return s_in; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
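// --- Usage sketch (not part of the original file) ------------------------------
// Illustrates the two behaviours implemented above that differ from plain
// strtol(): out-of-range integers are clamped to the target type's limits, and
// the bool specialization accepts "true"/"false", 't'/'f' and '1'/'0'
// case-insensitively after skipping leading whitespace.
#include <climits>
#include <cassert>

inline void strtoT_simple_examples_sketch()
{
    using namespace gunrock::util;
    char *end = nullptr;

    // Values outside the int range are clamped rather than wrapped.
    assert(strtoT_simple<int>("99999999999999", &end, 10) == INT_MAX);
    assert(strtoT_simple<int>("-99999999999999", &end, 10) == INT_MIN);

    // Boolean parsing, as implemented in the specialization above.
    assert(strtoT_simple<bool>("  False", &end, 10) == false);
    assert(strtoT_simple<bool>("T", &end, 10) == true);
    assert(strtoT_simple<bool>("0", &end, 10) == false);
}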
the_stack
#include "dragon/core/context_cuda.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename T, typename AccT> __global__ void _PRelu(const int N, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { const AccT val = convert::To<AccT>(__ldg(x + i)); y[i] = val > AccT(0) ? __ldg(x + i) : convert::To<T>(val * convert::To<AccT>(__ldg(w))); } } template <typename T, typename AccT, StorageOrder kOrder> __global__ void _PRelu( const int NxCxS, const int S, const int C, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(i, NxCxS) { const int j = (kOrder == StorageOrder::NCHW ? (i / S) % C : i % C); const AccT val = convert::To<AccT>(__ldg(x + i)); y[i] = val > AccT(0) ? __ldg(x + i) : convert::To<T>(val * convert::To<AccT>(__ldg(w + j))); } } template <typename T, typename AccT> __global__ void _PReluGrad(const int N, const T* dy, const T* x, const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(i, N) { dx[i] = convert::To<T>( convert::To<AccT>(dy[i]) * (convert::To<AccT>(x[i]) > AccT(0) ? AccT(1) : convert::To<AccT>(__ldg(w)))); } } template <typename T, typename AccT, StorageOrder kOrder> __global__ void _PReluGrad( const int NxCxS, const int S, const int C, const T* dy, const T* x, const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(i, NxCxS) { const int j = (kOrder == StorageOrder::NCHW ? (i / S) % C : i % C); dx[i] = convert::To<T>( convert::To<AccT>(dy[i]) * (convert::To<AccT>(x[i]) > AccT(0) ? AccT(1) : convert::To<AccT>(__ldg(w + j)))); } } template <typename T, typename AccT> __global__ void _PReluWGrad(const int N, const T* dy, const T* x, T* dw) { __shared__ typename BlockReduce<AccT>::TempStorage storage; AccT val = AccT(0); CUDA_2D_KERNEL_LOOP2(i, N) { val += convert::To<AccT>(__ldg(x + i)) < AccT(0) ? convert::To<AccT>(dy[i]) * convert::To<AccT>(__ldg(x + i)) : AccT(0); } val = BlockReduce<AccT>(storage).Sum(val); if (threadIdx.x == 0) { dw[0] = convert::To<T>(val); } } template <typename T, typename AccT, StorageOrder kOrder> __global__ void _PReluWGrad( const int NxS, const int S, const int C, const T* dy, const T* x, T* dw) { __shared__ typename BlockReduce<AccT>::TempStorage storage; CUDA_2D_KERNEL_LOOP1(i, C) { AccT val = AccT(0); CUDA_2D_KERNEL_LOOP2(j, NxS) { const int index = (kOrder == StorageOrder::NCHW ? (j / S * C + i) * S + j % S : j * C + i); val += convert::To<AccT>(__ldg(x + index)) < AccT(0) ? convert::To<AccT>(dy[index]) * convert::To<AccT>(__ldg(x + index)) : AccT(0); } val = BlockReduce<AccT>(storage).Sum(val); if (threadIdx.x == 0) { dw[i] = convert::To<T>(val); } } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_CWISE_PRELU_KERNEL(name, T, AccT, kBlocks, kThreads, ...) 
\ if (data_format == "NCHW") { \ name<T, AccT, StorageOrder::NCHW> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (data_format == "NHWC") { \ name<T, AccT, StorageOrder::NHWC> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(T) \ template <> \ void PRelu<T, CUDAContext>( \ const int N, \ const int S, \ const int C, \ const string& data_format, \ const T* x, \ const T* w, \ T* y, \ CUDAContext* ctx) { \ const auto NxCxS = N * C * S; \ if (C > 1) { \ DISPATCH_CWISE_PRELU_KERNEL( \ _PRelu, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(NxCxS), \ CUDA_THREADS, \ NxCxS, \ S, \ C, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<const math::ScalarType<T>::type*>(w), \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } else { \ _PRelu<math::ScalarType<T>::type, math::AccmulatorType<T>::type> \ <<<CUDA_BLOCKS(NxCxS), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ NxCxS, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<const math::ScalarType<T>::type*>(w), \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T) \ template <> \ void PReluGrad<T, CUDAContext>( \ const int N, \ const int S, \ const int C, \ const string& data_format, \ const T* dy, \ const T* x, \ const T* w, \ T* dx, \ CUDAContext* ctx) { \ const auto NxCxS = N * C * S; \ if (C > 1) { \ DISPATCH_CWISE_PRELU_KERNEL( \ _PReluGrad, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(NxCxS), \ CUDA_THREADS, \ NxCxS, \ S, \ C, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<const math::ScalarType<T>::type*>(w), \ reinterpret_cast<math::ScalarType<T>::type*>(dx)); \ } else { \ _PReluGrad<math::ScalarType<T>::type, math::AccmulatorType<T>::type> \ <<<CUDA_BLOCKS(NxCxS), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ NxCxS, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<const math::ScalarType<T>::type*>(w), \ reinterpret_cast<math::ScalarType<T>::type*>(dx)); \ } \ } \ template <> \ void PReluWGrad<T, CUDAContext>( \ const int N, \ const int S, \ const int C, \ const string& data_format, \ const T* dy, \ const T* x, \ T* dw, \ CUDAContext* ctx) { \ const auto NxS = N * S; \ const auto NxCxS = NxS * C; \ if (C > 1) { \ DISPATCH_CWISE_PRELU_KERNEL( \ _PReluWGrad, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ C, \ CUDA_THREADS, \ NxS, \ S, \ C, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<math::ScalarType<T>::type*>(dw)); \ } else { \ _PReluWGrad<math::ScalarType<T>::type, math::AccmulatorType<T>::type> \ <<<1, CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ NxCxS, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<math::ScalarType<T>::type*>(dw)); \ } \ } DEFINE_KERNEL_LAUNCHER(float16); DEFINE_KERNEL_LAUNCHER(float); DEFINE_KERNEL_LAUNCHER(double); DEFINE_GRAD_KERNEL_LAUNCHER(float16); DEFINE_GRAD_KERNEL_LAUNCHER(float); DEFINE_GRAD_KERNEL_LAUNCHER(double); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER #undef DISPATCH_CWISE_PRELU_KERNEL } // namespace kernels } // namespace dragon #endif // 
USE_CUDA
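// --- CPU reference sketch (not part of the original file) ----------------------
// The kernels above implement channel-wise PRelu: y = x when x > 0, otherwise
// y = w[c] * x, where the channel c is recovered from the flat index according
// to the data format (NCHW or NHWC), and a single shared w is used when C == 1.
// The function below is an illustrative scalar reference with the same indexing.
#include <string>

inline void PReluReferenceCPU(int N, int S, int C, const std::string& data_format,
                              const float* x, const float* w, float* y) {
  const int total = N * C * S;
  for (int i = 0; i < total; ++i) {
    const int c = (C == 1) ? 0
                           : (data_format == "NCHW" ? (i / S) % C : i % C);
    y[i] = x[i] > 0.f ? x[i] : w[c] * x[i];
  }
}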
the_stack
#include <stdio.h> #include "cuda_math.cuh" //----------------------------------------- GVDB Data Structure #define CUDA_PATHWAY #include "cuda_gvdb_scene.cuh" // GVDB Scene #include "cuda_gvdb_nodes.cuh" // GVDB Node structure #include "cuda_gvdb_geom.cuh" // GVDB Geom helpers #include "cuda_gvdb_dda.cuh" // GVDB DDA #include "cuda_gvdb_raycast.cuh" // GVDB Raycasting //----------------------------------------- // Operator functions #include "cuda_gvdb_operators.cuh" // Particle functions #include "cuda_gvdb_particles.cuh" inline __device__ float4 performPhongShading( VDBInfo* gvdb, uchar chan, float3 shit, float3 snorm, float4 sclr, gvdbBrickFunc_t brickfunc ) { if ( shit.z == NOHIT) // no surface hit return SCN_BACKCLR; // phong float3 lightdir = normalize(scn.light_pos - shit); float diff = 0.9 * max(0.0f, dot(snorm, lightdir) ); float amb = 0.1f; // shadow ray if (SCN_SHADOWAMT > 0) { float3 hit2 = make_float3(0,0,NOHIT); float4 hclr2 = make_float4(0,0,0,1); float3 norm2; rayCast ( gvdb, chan, shit + snorm * SCN_SHADOWBIAS, lightdir, hit2, norm2, hclr2, brickfunc ); // shadow ray diff = (hit2.z==NOHIT ? diff : diff*(1.0-SCN_SHADOWAMT) ); } return make_float4( fxyz(sclr) * (diff + amb), 1.0 ); } // Raytracing functions extern "C" __global__ void gvdbRayDeep ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float4 clr = make_float4(0,0,0,1); float3 hit = make_float3(0,0,NOHIT); float3 norm; float3 rpos = getViewPos(); float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); // ray deep sampling rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayDeepBrick ); clr = make_float4( lerp3(SCN_BACKCLR, clr, 1.0-clr.w), 1.0-clr.w ); outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, clr.w*255 ); } // Render the volume data by raycasting extern "C" __global__ void gvdbRaySurfaceVoxel ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float3 hit = make_float3(NOHIT,NOHIT,NOHIT); float3 norm; float3 rpos = getViewPos(); float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); float4 clr = make_float4(1,1,1,1); // ray surface hit rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceVoxelBrick ); clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceVoxelBrick ); outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255); } // Render the volume data by raycasting extern "C" __global__ void gvdbRaySurfaceTrilinear ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float3 hit = make_float3(NOHIT,NOHIT,NOHIT); float3 norm; float3 rpos = getViewPos(); float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); float4 clr = make_float4(1,1,1,1); // ray surface hit rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTrilinearBrick ); clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick ); outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255); } // Render the volume data by raycasting extern "C" __global__ void gvdbRaySurfaceTricubic ( VDBInfo* 
gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float3 hit = make_float3(NOHIT,NOHIT,NOHIT); float3 norm; float3 rpos = getViewPos(); float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); float4 clr = make_float4(1,1,1,1); // ray surface hit rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTricubicBrick ); clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick ); outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255); } // Render the volume data by raycasting extern "C" __global__ void gvdbRaySurfaceDepth ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float3 hit = make_float3(NOHIT,NOHIT,NOHIT); float3 norm; float3 rpos = getViewPos(); float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); float4 clr = make_float4(1,1,1,1); // ray surface hit // *NOTE*: raySurfaceDepthBrick not yet implemented rayCast (gvdb, chan, rpos, rdir, hit, norm, clr, raySurfaceTrilinearBrick ); clr = performPhongShading ( gvdb, chan, hit, norm, clr, raySurfaceTrilinearBrick ); outBuf[y*scn.width + x] = make_uchar4(clr.x*255, clr.y*255, clr.z*255, clr.w*255 ); } // Render the volume data by raycasting extern "C" __global__ void gvdbRayLevelSet ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float3 hit = make_float3(0,0,NOHIT); float4 clr = make_float4(1,1,1,1); float3 norm; float3 rpos = getViewPos(); float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); // Raycast Level Set rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayLevelSetBrick ); clr = performPhongShading ( gvdb, chan, hit, norm, clr, rayLevelSetBrick ); outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, clr.w*255 ); } // Render the volume data by raycasting extern "C" __global__ void gvdbRayEmptySkip ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float4 clr = make_float4(1,1,1,1); float3 hit = make_float3(NOHIT,NOHIT,NOHIT); float3 norm; float3 rpos = getViewPos(); float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); // Empty skipping rayCast ( gvdb, chan, rpos, rdir, hit, norm, clr, rayEmptySkipBrick ); if ( hit.z != NOHIT) { clr = make_float4( hit * 0.01, 1 ); } else { clr = SCN_BACKCLR; } outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 ); } // Raytrace a bundle of rays extern "C" __global__ void gvdbRaytrace ( VDBInfo* gvdb, uchar chan, int num_rays, ScnRay* rays, float bias ) { int x = blockIdx.x * blockDim.x + threadIdx.x; if ( x >= num_rays ) return; // raytrace rays[x].hit = make_float3(NOHIT, NOHIT, NOHIT); float4 hclr = make_float4(1,1,1,1); rayCast ( gvdb, chan, rays[x].orig, rays[x].dir, rays[x].hit, rays[x].normal, hclr, raySurfaceTrilinearBrick ); if ( rays[x].hit.z != NOHIT ) rays[x].hit -= rays[x].dir * bias; } // Render a cross section of the volume data in 3D extern "C" __global__ void 
gvdbSection3D ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float4 clr = make_float4(1,1,1,0); float3 norm; float3 rdir = getViewRay(float(x + 0.5f) / float(scn.width), float(y + 0.5f) / float(scn.height)); // raytrace with cross-section plane float3 wpos = getViewPos(); float t = rayPlaneIntersect ( wpos, rdir, SCN_SLICE_NORM, SCN_SLICE_PNT ); // hit section plane if ( t > 0 ) { // yes.. wpos += t*rdir; // get point of surface float3 offs, vmin; uint64 nid; VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid ); // find vdb node at point if ( node != 0x0 ) { //---- debugging: show apron // float3 p = offs + (wpos-vmin)*(34.0/16.0) - make_float3(gvdb.atlas_apron); // clr = transfer ( tex3D ( volTexIn, p.x, p.y, p.z ) ); t = getTrilinear ( gvdb, chan, wpos, offs, vmin ); // t <= voxel value clr = transfer ( gvdb, t ); // clr at point on surface if ( gvdb->clr_chan != CHAN_UNDEF ) { float3 p = offs + (wpos - vmin); clr *= make_float4( make_float3( getColor(gvdb, gvdb->clr_chan, p) ), 1.0 ); } } else { t = 0; // set t=0, no voxel value found } } // 3D surface raytrace float3 hit = make_float3(NOHIT,NOHIT,NOHIT); float4 hclr = make_float4(1,1,1,1); // using previous wpos (set above) to start ray, trace beyond section plane to get 3D surface hit rayCast ( gvdb, chan, wpos, rdir, hit, norm, hclr, raySurfaceTrilinearBrick ); if ( hit.z != NOHIT) { // 3D surface hit.. float3 lightdir = normalize ( scn.light_pos - hit ); float ds = (t > SCN_THRESH) ? 1 : 0.8*max(0.0f, dot( norm, lightdir )); // if voxel value on section plane is inside surface, no diffuse shading clr = lerp4( hclr * ds, clr, clr.w ); // blend 3D surface with cross-section clr } else { clr = lerp4( SCN_BACKCLR, clr, clr.w ); // no 3D hit. blend background with cross-section } outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 ); } // Render a section of the volume data in 2D extern "C" __global__ void gvdbSection2D ( VDBInfo* gvdb, uchar chan, uchar4* outBuf ) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x >= scn.width || y >= scn.height ) return; float4 clr = make_float4(1,1,1,0); float3 bgclr = make_float3 ( 0, 0, 0 ); float3 wpos; float3 spnt = make_float3( float(x)*2.0/scn.width - 1.0, 0, float(y)*2.0/scn.height - 1.0); wpos = SCN_SLICE_PNT + spnt * SCN_SLICE_NORM; // get leaf node at hit point float3 offs, vmin; uint64 nid; VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid ); if ( node == 0x0 ) { outBuf [ y*scn.width + x ] = make_uchar4(bgclr.x*255, bgclr.y*255, bgclr.z*255, 255); return; } // get tricubic data value clr = transfer ( gvdb, getTrilinear ( gvdb, chan, wpos, offs, vmin ) ); bgclr = lerp3 ( bgclr, make_float3(clr.x,clr.y,clr.z), clr.w ); outBuf [ y*scn.width + x ] = make_uchar4( bgclr.x*255, bgclr.y*255, bgclr.z*255, 255 ); }
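// --- Shading model sketch (not part of the original file) ----------------------
// performPhongShading() above combines a 0.1 ambient term with a diffuse term
// 0.9 * max(0, dot(normal, light_dir)); when the shadow ray hits geometry the
// diffuse term is scaled by (1 - SCN_SHADOWAMT). The scalar functions below
// restate that intensity computation outside of GVDB (in the kernels the final
// colour is this intensity multiplied by the surface colour).
struct PhongVec3 { float x, y, z; };

inline float phong_dot(PhongVec3 a, PhongVec3 b) {
	return a.x * b.x + a.y * b.y + a.z * b.z;
}

// normal and light_dir are assumed to be normalized; shadow_amt plays the role
// of SCN_SHADOWAMT and shadow_hit the role of "the shadow ray hit something".
inline float phong_intensity(PhongVec3 normal, PhongVec3 light_dir,
                             bool shadow_hit, float shadow_amt)
{
	const float amb   = 0.1f;
	float       ndotl = phong_dot(normal, light_dir);
	float       diff  = 0.9f * (ndotl > 0.f ? ndotl : 0.f);
	if (shadow_hit && shadow_amt > 0.f)
		diff *= (1.0f - shadow_amt);
	return amb + diff;
}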
the_stack
#include "common.cuh" #include <kat/on_device/collaboration/warp.cuh> #include <type_traits> ///@cond #include <kat/detail/execution_space_specifiers.hpp> ///@endcond namespace kat { namespace collaborative { namespace warp { namespace detail { template<typename LHS, typename RHS = LHS, typename Result = LHS> struct plus { using first_argument_type = LHS; using second_argument_type = RHS; using result_type = Result; KAT_FHD Result operator() (const LHS& x, const RHS& y) const noexcept { return x + y; } struct accumulator { KAT_FHD Result operator()( typename std::enable_if<std::is_same<LHS, RHS>::value, Result>::type& x, const RHS& y) const noexcept { return x += y; } struct atomic { #ifdef __CUDA_ARCH__ KAT_FD Result operator()( typename std::enable_if<std::is_same<LHS,RHS>::value, Result>::type& x, const RHS& y) const noexcept { return kat::atomic::add(&x,y); } #endif // __CUDA_ARCH__ }; KAT_FHD static Result neutral_value() noexcept { return 0; }; }; KAT_FHD static Result neutral_value() noexcept { return 0; }; }; } // namespace detail /** * Performs a reduction (e.g. a summation or a multiplication) of all elements passed into * the function by the threads of a block - but with each thread ending up with the reduction * result for all threads upto itself. * * @note What about inclusivity? * * @todo offer both an inclusive and an exclusive versionn */ template<typename T, typename AccumulationOp> KAT_FD T reduce(T value, AccumulationOp op) { auto partial_result { value }; for (int shuffle_mask = warp_size/2; shuffle_mask > 0; shuffle_mask >>= 1) op(partial_result, shuffle_xor(partial_result, shuffle_mask)); return partial_result; } template <typename T> KAT_FD T sum(T value) { const auto plus = [](T& x, T y) { x += y; }; return reduce(value, plus); } template < typename T, typename AccumulationOp, inclusivity_t Inclusivity = inclusivity_t::Inclusive, T NeutralValue = T{} > KAT_FD T scan(T value, AccumulationOp op) { T x; if (Inclusivity == inclusivity_t::Exclusive) { T preshuffled = shuffle_up(value, 1); // ... and now we can pretend to be doing an inclusive shuffle x = lane::is_first() ? NeutralValue : preshuffled; } else { x = value; } // x of lane i holds the reduction of values of // the lanes i - 2*(offset) ... i - offset , and we've already // taken care of the iteration for offset = 0 , above. 
#pragma unroll for (int offset = 1; offset < warp_size; offset <<= 1) { T shuffled = shuffle_up(x, offset); if(lane::id() >= offset) { op(x, shuffled); } } return x; } // TODO: Need to implement a scan-and-reduce warp primitive template < typename T, inclusivity_t Inclusivity = inclusivity_t::Inclusive, T NeutralValue = T{} > KAT_FD T prefix_sum(T value) { const auto plus = [](T& x, T y) { x += y; }; return scan<T, decltype(plus), Inclusivity, NeutralValue>(value, plus); } template <typename T, T NeutralValue = T{}> KAT_FD T exclusive_prefix_sum(T value) { return prefix_sum<T, inclusivity_t::Exclusive, NeutralValue>(value); } //-------------------------------------------------- template <typename RandomAccessIterator, typename Size, typename T> KAT_FD void fill_n(RandomAccessIterator start, Size count, const T& value) { auto f = [=](promoted_size_t<Size> pos) { start[pos] = value; }; at_warp_stride(count, f); } template <typename RandomAccessIterator, typename T, typename Size = decltype(std::declval<RandomAccessIterator>() - std::declval<RandomAccessIterator>())> KAT_FD void fill(RandomAccessIterator start, RandomAccessIterator end, const T& value) { Size count = end - start; return fill_n(start, count, value); } template <typename RandomAccessIterator, typename Size> KAT_FD void memzero_n(RandomAccessIterator start, Size count) { using value_type = typename std::iterator_traits<RandomAccessIterator>::value_type; return fill_n(start, count, value_type{0}); } template <typename RandomAccessIterator, typename Size = decltype(std::declval<RandomAccessIterator>() - std::declval<RandomAccessIterator>())> KAT_FD void memzero(RandomAccessIterator start, RandomAccessIterator end) { auto count = end - start; return memzero_n(start, count); } /** * @brief apply a transformation to each element of an array, placing the results * in another array. * * @param source The (block-common) origin of the data * @param target The (block-common) destination into which to write the converted elements * @param length The (block-common) number of elements available (for reading?] at the * source */ template <typename T, typename S, typename UnaryOperation, typename Size> KAT_FD void transform_n( const S* __restrict__ source, Size length, T* __restrict__ target, UnaryOperation unary_op) { auto f = [&](promoted_size_t<Size> pos) { target[pos] = unary_op(source[pos]); }; at_warp_stride(length, f); } /** * @note Prefer `copy_n()`; this will force the size to `ptrdiff_t`, which unnecessarily large. */ template <typename S, typename T, typename UnaryOperation, typename Size = std::ptrdiff_t> KAT_FD void transform( const S* __restrict__ source_start, const S* __restrict__ source_end, T* __restrict__ target, UnaryOperation unary_op) { Size length = source_end - source_start; return transform_n(source_start, length, target, unary_op); } //----------------------------------- /** * Have all warp threads collaborate in copying * data between two memory locations (possibly not in the same memory * space), while also converting types. * * @param target The destination into which to write the converted elements * @param source The origin of the data * @param length The number of elements available (for reading?] 
at the * source */ template <typename S, typename T, typename Size> KAT_FD void cast_and_copy_n( const S* __restrict__ source, Size length, T* __restrict__ target) { auto op = [](S x) -> T { return T(x);} ; return transform_n(source, length, target, op); } template <typename T, typename U, typename Size = std::ptrdiff_t> KAT_FD void cast_and_copy( const U* __restrict__ source_start, const U* __restrict__ source_end, T* __restrict__ target) { Size length = source_end - source_start; return cast_and_copy_n(source_start, length, target); } namespace detail { /** * A version of `kat::copy()` which ignores pointer alignment, * and the memory transaction size, simply making coalesced writes * of warp_size elements at a time (except for the last range) * @param target * @param source * @param length */ template <typename T, typename Size> KAT_FD void naive_copy( const T* __restrict__ source, Size length, T* __restrict__ target) { auto f = [&](promoted_size_t<Size> pos) { target[pos] = source[pos]; }; at_warp_stride(length, f); } template <typename T> constexpr KAT_FHD T clear_lower_bits(T x, unsigned k) { return x & ~((1 << k) - 1); } } // namespace detail /** * Has the warp copy data from one place to another * * @note if the input is not 32-byte (sometimes 128-byte )-aligned, * and more importantly, the output is not 128-byte-aligned, * performance will likely degrade due to the need to execute a pair * of memory transactions for every single 32 x 4 byte write. * * @tparam T type of the elements being copied * @tparam Size type of the length parameter * @tparam MayHaveSlack * we "like" data whose size is a multiple of 4 bytes, * and can copy it faster. When this is true, we assume * the overall size of data to copy is a multiple of 4, * without taking the time to check. In the future the * semantics of this parameter will change to involve * alignment of the start and end addresses. * @param[out] target starting address of the region of memory to copy into * @param[in] source starting address of the region of memory to copy from * @param[in] length number of elements (of type T) to copy */ template <typename T, typename Size, bool MayHaveSlack = true> KAT_FD void copy_n( const T* __restrict__ source, Size length, T* __restrict__ target) { using namespace linear_grid::grid_info; enum { elements_per_lane_in_full_warp_write = collaborative::detail::elements_per_lane_in_full_warp_write<T>::value }; if ((elements_per_lane_in_full_warp_write == 1) or not (sizeof(T) == 1 or sizeof(T) == 2 or sizeof(T) == 4 or sizeof(T) == 8) or not std::is_trivially_copy_constructible<T>::value) { detail::naive_copy<T, Size>(source, length, target); } else { // elements_per_lane_in_full_warp_write is either 1, 2... kat::array<T, elements_per_lane_in_full_warp_write> buffer; // ... so this has either 1 or 2 elements and its overall size is 4 promoted_size_t<Size> truncated_length = MayHaveSlack ? detail::clear_lower_bits(length, constexpr_::log2(elements_per_lane_in_full_warp_write)) : length; // TODO: Should I pragma-unroll this by a fixed amount? Should // I not specify an unroll at all? 
#pragma unroll for(promoted_size_t<Size> pos = lane::index() * elements_per_lane_in_full_warp_write; pos < truncated_length; pos += warp_size * elements_per_lane_in_full_warp_write) { * (reinterpret_cast<decltype(buffer) *>(target + pos)) = *( reinterpret_cast<const decltype(buffer) *>(source + pos) ); } if (MayHaveSlack) { if (elements_per_lane_in_full_warp_write == 2) { // the slack must be exactly 1 // Multiple writes to the same place are safe according to // the CUDA C Programming Guide v8 section G.3.2 Global Memory target[truncated_length] = source[truncated_length]; } else { auto num_slack_elements = length - truncated_length; if (lane::index() < num_slack_elements) { auto pos = truncated_length + lane::index(); target[pos] = source[pos]; } } } } } template <typename T, bool MayHaveSlack = true, typename Size = std::ptrdiff_t> KAT_FD void copy( const T* __restrict__ source_start, const T* __restrict__ source_end, T* __restrict__ target_start) { Size length = source_end - source_start; return copy_n(source_start, length, target_start); } /** * Use a lookup table to convert numeric indices to a sequence * of values of any type */ template <typename T, typename I, typename Size, typename U = T> KAT_FD void lookup( T* __restrict__ target, const U* __restrict__ lookup_table, const I* __restrict__ indices, Size num_indices) { auto f = [=](promoted_size_t<Size> pos) { target[pos] = lookup_table[indices[pos]]; }; at_warp_stride(num_indices, f); } /** * Perform an accumulation operation (e.g. addition) between equal-sized arrays - * with either regular or atomic semantics. Usable with memory locations which * the entire block has the same view of and accessibility to (mostly shared * and global, but not just those). * * @note * 1. Assumes a linear block. * 2. The operation is supposed to have the signature: * WhateverWeDontCare operation(D& accumulator_element, S value) * otherwise it might be a no-op here. * 3. If you're having multiple blocks calling this function with the same * destination, it will have to be atomic (as you cannot guarantee these blocks will * not execute simultaneously, either on different multiprocessors or on the same * multiprocessor). Also, if you want to use a global-mem source, you will * need to pass this function block-specific offsets; remember it is not * a kernel! * * @tparam D Destination data type * @tparam S Source data type * @tparam AccumulatingOperation Typically, one of the 'accumulator' substructures of * the functors in liftedfunctions.hpp ; but it may very well be an accumulator::atomic * substructure * @tparam Size ... so that you don't have to decide whether you want to specify your * number of elements as an int, uint, long long int, ulong long etc. * @param[inout] destination The array into which we accumulate; holds existing data * and is not simply overwritten. * @param[in] source The array of partial data to integrate via accumulation. * @param[in] length the length in elements of @p destination and @p source * * @todo consider taking a GSL-span-like parameter isntead of a ptr+length * * @todo Some inclusions in the block-primitives might only be relevant to the * functions here; double-check. * * @todo consider using elementwise_apply for this. 
* */ template <typename D, typename RandomAccessIterator, typename AccumulatingOperation, typename Size> KAT_FD void elementwise_accumulate_n( AccumulatingOperation op, D* __restrict__ destination, RandomAccessIterator __restrict__ source, Size length) { auto accumulate_in_element = [&](promoted_size_t<Size> pos) { op(destination[pos], source[pos]); }; at_warp_stride(length, accumulate_in_element); } template <typename D, typename RandomAccessIterator, typename AccumulatingOperation, typename Size = std::ptrdiff_t> KAT_FD void elementwise_accumulate( AccumulatingOperation op, D* __restrict__ destination, RandomAccessIterator __restrict__ source_start, RandomAccessIterator __restrict__ source_end) { elementwise_accumulate_n(op, destination, source_start, // kat::distance(source_start, source_end) source_end - source_start ); } } // namespace warp } // namespace collaborative } // namespace kat #endif // CUDA_KAT_WARP_COLLABORATIVE_SEQUENCE_OPS_CUH_
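// --- Usage sketch (not part of the original file) ------------------------------
// A small kernel showing the warp-level primitives defined above. Every lane
// contributes 1, so kat::collaborative::warp::sum() yields the warp size and
// exclusive_prefix_sum() yields the lane's position within its warp.
// Assumption: the kernel is launched with a block size that is a multiple of 32.
namespace kcw_demo {

__global__ void warp_scan_demo(int* warp_totals, int* lane_offsets)
{
	namespace kcw = kat::collaborative::warp;

	int value       = 1;
	int warp_total  = kcw::sum(value);                   // warp-wide reduction
	int lane_offset = kcw::exclusive_prefix_sum(value);  // exclusive scan

	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	lane_offsets[tid] = lane_offset;
	if ((threadIdx.x & 31) == 0)
		warp_totals[tid / 32] = warp_total;
}

} // namespace kcw_demo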
the_stack
#include "octnet/gpu/combine.h" #include "octnet/gpu/gpu.h" #include <cstdio> #include <cstdlib> __global__ void kernel_concat(ot_data_t* out, int n_leafs, const ot_data_t* in1, const ot_data_t* in2, const ot_size_t feature_size_in1, const ot_size_t feature_size_in2, const ot_size_t feature_size_out) { CUDA_KERNEL_LOOP(vx_idx, n_leafs) { octree_cpy_leaf(in1 + vx_idx * feature_size_in1, feature_size_in1, out + vx_idx * feature_size_out); octree_cpy_leaf(in2 + vx_idx * feature_size_in2, feature_size_in2, out + vx_idx * feature_size_out + feature_size_in1); } } void octree_concat_gpu(const octree* in1, const octree* in2, bool check, octree* out) { if(DEBUG) { printf("[DEBUG] octree_concat_gpu\n"); } if(check && (!octree_equal_trees_gpu(in1, in2))) { printf("ERROR: tree structure of inputs do not match\n"); exit(-1); } ot_size_t feature_size_in1 = in1->feature_size; ot_size_t feature_size_in2 = in2->feature_size; ot_size_t feature_size_out = feature_size_in1 + feature_size_in2; octree_resize_gpu(in1->n, in1->grid_depth, in1->grid_height, in1->grid_width, feature_size_out, in1->n_leafs, out); octree_cpy_trees_gpu_gpu(in1, out); octree_cpy_prefix_leafs_gpu_gpu(in1, out); kernel_concat<<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( out->data, in1->n_leafs, in1->data, in2->data, feature_size_in1, feature_size_in2, feature_size_out ); CUDA_POST_KERNEL_CHECK; } template <bool do_grad_in2> __global__ void kernel_concat_bwd(ot_data_t* grad_in1, ot_data_t* grad_in2, int n_leafs, const ot_data_t* grad_out, const ot_size_t feature_size_in1, const ot_size_t feature_size_in2, const ot_size_t feature_size_out) { CUDA_KERNEL_LOOP(vx_idx, n_leafs) { octree_cpy_leaf(grad_out + vx_idx * feature_size_out, feature_size_in1, grad_in1 + vx_idx * feature_size_in1); if(do_grad_in2) { octree_cpy_leaf(grad_out + vx_idx * feature_size_out + feature_size_in1, feature_size_in2, grad_in2 + vx_idx * feature_size_in2); } } } void octree_concat_bwd_gpu(const octree* in1, const octree* in2, const octree* grad_out, bool do_grad_in2, octree* grad_in1, octree* grad_in2) { if(DEBUG) { printf("[DEBUG] octree_concat_bwd_gpu\n"); } octree_resize_as_gpu(in1, grad_in1); octree_cpy_trees_gpu_gpu(in1, grad_in1); octree_cpy_prefix_leafs_gpu_gpu(in1, grad_in1); ot_size_t feature_size_in1 = in1->feature_size; ot_size_t feature_size_in2 = in2->feature_size; ot_size_t feature_size_out = feature_size_in1 + feature_size_in2; if(do_grad_in2) { octree_resize_as_gpu(in2, grad_in2); octree_cpy_trees_gpu_gpu(in2, grad_in2); octree_cpy_prefix_leafs_gpu_gpu(in2, grad_in2); kernel_concat_bwd<true><<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( grad_in1->data, grad_in2->data, in1->n_leafs, grad_out->data, feature_size_in1, feature_size_in2, feature_size_out ); } else { kernel_concat_bwd<false><<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( grad_in1->data, grad_in2->data, in1->n_leafs, grad_out->data, feature_size_in1, feature_size_in2, feature_size_out ); } CUDA_POST_KERNEL_CHECK; } __global__ void kernel_concat_ds(octree out, int n_leafs, const octree in1, const octree in2) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { int out_idx = leaf_idx * out.feature_size; octree_cpy_leaf(in1.data + leaf_idx * in1.feature_size, in1.feature_size, out.data + out_idx); const int in1_grid_idx = leaf_idx_to_grid_idx(&in1, leaf_idx); const ot_tree_t* in1_tree = octree_get_tree(&in1, in1_grid_idx); int in1_data_idx = leaf_idx - in1.prefix_leafs[in1_grid_idx]; int in1_bit_idx = data_idx_to_bit_idx(in1_tree, in1_data_idx); int n,ds,hs,ws; int cell_depth = 
octree_ind_to_dense_ind(&in1, in1_grid_idx, in1_bit_idx, &n, &ds,&hs,&ws); int cell_width = width_from_depth(cell_depth); for(int f = 0; f < in2.feature_size; ++f) { out.data[out_idx + in1.feature_size + f] = 0; } for(int d = ds; d < (ds+cell_width); ++d) { for(int h = hs; h < (hs+cell_width); ++h) { for(int w = ws; w < (ws+cell_width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int in2_grid_idx = octree_grid_idx(&in2, n, gd,gh,gw); const ot_tree_t* in2_tree = octree_get_tree(&in2, in2_grid_idx); int in2_bit_idx = tree_bit_idx(in2_tree, bd,bh,bw); const ot_data_t* in2_data = octree_get_data(&in2, in2_grid_idx); int in2_data_idx = tree_data_idx(in2_tree, in2_bit_idx, in2.feature_size); for(int f = 0; f < in2.feature_size; ++f) { out.data[out_idx + in1.feature_size + f] += in2_data[in2_data_idx + f]; } } } } for(int f = 0; f < in2.feature_size; ++f) { out.data[out_idx + in1.feature_size + f] /= (cell_width*cell_width*cell_width); } } } void octree_concat_ds_gpu(const octree* in1, const octree* in2, octree* out) { if(DEBUG) { printf("[DEBUG] octree_concat_ds_gpu\n"); } ot_size_t feature_size_in1 = in1->feature_size; ot_size_t feature_size_in2 = in2->feature_size; ot_size_t feature_size_out = feature_size_in1 + feature_size_in2; octree_resize_gpu(in1->n, in1->grid_depth, in1->grid_height, in1->grid_width, feature_size_out, in1->n_leafs, out); octree_cpy_trees_gpu_gpu(in1, out); octree_cpy_prefix_leafs_gpu_gpu(in1, out); kernel_concat_ds<<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( *out, in1->n_leafs, *in1, *in2 ); CUDA_POST_KERNEL_CHECK; } __global__ void kernel_concat_ds_bwd1(octree grad_in1, int n_leafs, const octree grad_out) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { int out_idx = leaf_idx * grad_out.feature_size; octree_cpy_leaf(grad_out.data + out_idx, grad_in1.feature_size, grad_in1.data + leaf_idx * grad_in1.feature_size); } } __global__ void kernel_concat_ds_bwd2(octree grad_in2, int n_leafs, const octree grad_out, int in1_feature_size) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int in2_grid_idx = leaf_idx_to_grid_idx(&grad_in2, leaf_idx); const ot_tree_t* in2_tree = octree_get_tree(&grad_in2, in2_grid_idx); int in2_data_idx = leaf_idx - grad_in2.prefix_leafs[in2_grid_idx]; int in2_bit_idx = data_idx_to_bit_idx(in2_tree, in2_data_idx); int n,ds,hs,ws; int cell_depth = octree_ind_to_dense_ind(&grad_in2, in2_grid_idx, in2_bit_idx, &n, &ds,&hs,&ws); int cell_width = width_from_depth(cell_depth); int cell_width3 = cell_width * cell_width * cell_width; for(int f = 0; f < grad_in2.feature_size; ++f) { grad_in2.data[leaf_idx * grad_in2.feature_size + f] = 0; } for(int d = ds; d < (ds+cell_width); ++d) { for(int h = hs; h < (hs+cell_width); ++h) { for(int w = ws; w < (ws+cell_width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int out_grid_idx = octree_grid_idx(&grad_out, n, gd,gh,gw); const ot_tree_t* out_tree = octree_get_tree(&grad_out, out_grid_idx); int out_bit_idx = tree_bit_idx(out_tree, bd,bh,bw); ot_data_t* out_data = octree_get_data(&grad_out, out_grid_idx); int out_data_idx = tree_data_idx(out_tree, out_bit_idx, grad_out.feature_size); // int out_cell_width = width_from_bit_idx(out_bit_idx); // int out_cell_width3 = out_cell_width*out_cell_width*out_cell_width; for(int f = 0; f < grad_in2.feature_size; ++f) { float val = out_data[out_data_idx + in1_feature_size + f]; grad_in2.data[leaf_idx * grad_in2.feature_size + f] += val / cell_width3; } } } } } } void 
octree_concat_ds_bwd_gpu(const octree* in1, const octree* in2, const octree* grad_out, bool do_grad_in2, octree* grad_in1, octree* grad_in2) { if(DEBUG) { printf("[DEBUG] octree_concat_bwd_gpu\n"); } octree_resize_as_gpu(in1, grad_in1); octree_cpy_trees_gpu_gpu(in1, grad_in1); octree_cpy_prefix_leafs_gpu_gpu(in1, grad_in1); kernel_concat_ds_bwd1<<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( *grad_in1, in1->n_leafs, *grad_out ); CUDA_POST_KERNEL_CHECK; if(do_grad_in2) { octree_resize_as_gpu(in2, grad_in2); octree_cpy_trees_gpu_gpu(in2, grad_in2); octree_cpy_prefix_leafs_gpu_gpu(in2, grad_in2); kernel_concat_ds_bwd2<<<GET_BLOCKS(in2->n_leafs), CUDA_NUM_THREADS>>>( *grad_in2, in2->n_leafs, *grad_out, in1->feature_size ); CUDA_POST_KERNEL_CHECK; } } __global__ void kernel_concat_dense(ot_data_t* out, int n_leafs, const octree in1, const ot_data_t* in2, const ot_size_t feature_size1, const ot_size_t feature_size2, const ot_size_t feature_size_out) { const int dense_depth = 8 * in1.grid_depth; const int dense_height = 8 * in1.grid_height; const int dense_width = 8 * in1.grid_width; CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { octree_cpy_leaf(in1.data + leaf_idx * feature_size1, feature_size1, out + leaf_idx * feature_size_out); int grid_idx = leaf_idx_to_grid_idx(&in1, leaf_idx); const ot_tree_t* tree = octree_get_tree(&in1, grid_idx); int cum_n_leafs = in1.prefix_leafs[grid_idx]; int data_idx = leaf_idx - cum_n_leafs; int bit_idx = data_idx_to_bit_idx(tree, data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&in1, grid_idx, bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); for(int f = 0; f < feature_size2; ++f) { ot_data_t val = 0; for(int d = ds; d < ds+width; ++d) { for(int h = hs; h < hs+width; ++h) { for(int w = ws; w < ws+width; ++w) { int dense_idx = (((n * feature_size2 + f) * dense_depth + d) * dense_height + h) * dense_width + w; float add = in2[dense_idx]; // float add = in2[dense_idx] / (width*width*width); val += add; } } } out[leaf_idx * feature_size_out + feature_size1 + f] = val / (width*width*width); // out[leaf_idx * feature_size_out + feature_size1 + f] = val; } } } void octree_concat_dense_gpu(const octree* in1, const ot_data_t* in2, ot_size_t feature_size2, octree* out) { if(DEBUG) { printf("[DEBUG] octree_concat_dense_gpu\n"); } ot_size_t feature_size1 = in1->feature_size; ot_size_t feature_size_out = feature_size1 + feature_size2; octree_resize_gpu(in1->n, in1->grid_depth, in1->grid_height, in1->grid_width, feature_size_out, in1->n_leafs, out); octree_cpy_trees_gpu_gpu(in1, out); octree_cpy_prefix_leafs_gpu_gpu(in1, out); kernel_concat_dense<<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( out->data, in1->n_leafs, *in1, in2, feature_size1, feature_size2, feature_size_out ); CUDA_POST_KERNEL_CHECK; } template <bool do_grad_in2> __global__ void kernel_concat_dense_bwd(ot_data_t* grad_in1, ot_data_t* grad_in2, int n_leafs, const octree grad_out, const ot_size_t feature_size1, const ot_size_t feature_size2, const ot_size_t feature_size_out) { const int dense_depth = 8 * grad_out.grid_depth; const int dense_height = 8 * grad_out.grid_height; const int dense_width = 8 * grad_out.grid_width; CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { octree_cpy_leaf(grad_out.data + leaf_idx * feature_size_out, feature_size1, grad_in1 + leaf_idx * feature_size1); if(do_grad_in2) { int grid_idx = leaf_idx_to_grid_idx(&grad_out, leaf_idx); const ot_tree_t* tree = octree_get_tree(&grad_out, grid_idx); int cum_n_leafs = grad_out.prefix_leafs[grid_idx]; int data_idx = leaf_idx - 
cum_n_leafs; int bit_idx = data_idx_to_bit_idx(tree, data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&grad_out, grid_idx, bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); for(int f = 0; f < feature_size2; ++f) { ot_data_t val = grad_out.data[leaf_idx * grad_out.feature_size + feature_size1 + f]; for(int d = ds; d < ds+width; ++d) { for(int h = hs; h < hs+width; ++h) { for(int w = ws; w < ws+width; ++w) { grad_in2[(((n * feature_size2 + f) * dense_depth + d) * dense_height + h) * dense_width + w] = val; } } } } } } } void octree_concat_dense_bwd_gpu(const octree* in1, const ot_data_t* in2, ot_size_t feature_size2, const octree* grad_out, bool do_grad_in2, octree* grad_in1, ot_data_t* grad_in2) { if(DEBUG) { printf("[DEBUG] octree_concat_dense_bwd_gpu\n"); } octree_resize_as_gpu(in1, grad_in1); octree_cpy_trees_gpu_gpu(in1, grad_in1); octree_cpy_prefix_leafs_gpu_gpu(in1, grad_in1); ot_size_t feature_size1 = in1->feature_size; ot_size_t feature_size_out = feature_size1 + feature_size2; if(do_grad_in2) { kernel_concat_dense_bwd<true><<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( grad_in1->data, grad_in2, in1->n_leafs, *grad_out, feature_size1, feature_size2, feature_size_out ); } else { kernel_concat_dense_bwd<false><<<GET_BLOCKS(in1->n_leafs), CUDA_NUM_THREADS>>>( grad_in1->data, grad_in2, in1->n_leafs, *grad_out, feature_size1, feature_size2, feature_size_out ); } CUDA_POST_KERNEL_CHECK; }
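
// Editor's sketch (not part of the library above): a minimal host-side reference,
// under the assumption that the dense tensor uses the same linearization as
// kernel_concat_dense, i.e. (((n * F + f) * D + d) * H + h) * W + w with
// D/H/W = 8 * grid_depth/height/width. It illustrates the per-cell averaging the
// kernel performs when pooling dense features into an octree cell of side `width`.
// All names here are illustrative, not APIs of this file.
#include <cstddef>

static float dense_cell_mean(const float* dense, int F, int D, int H, int W,
                             int n, int f, int ds, int hs, int ws, int width) {
  double sum = 0.0;
  for (int d = ds; d < ds + width; ++d) {
    for (int h = hs; h < hs + width; ++h) {
      for (int w = ws; w < ws + width; ++w) {
        // Same linearization as in kernel_concat_dense above.
        const size_t idx =
            (((static_cast<size_t>(n) * F + f) * D + d) * H + h) * W + w;
        sum += dense[idx];
      }
    }
  }
  // The kernels divide the accumulated value by width^3 once per feature.
  return static_cast<float>(sum / (static_cast<double>(width) * width * width));
}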
#include <cuda.h> #include <float.h> #include <stdint.h> // NanoVDB #include <nanovdb/NanoVDB.h> #include <nanovdb/util/HDDA.h> #include <nanovdb/util/Ray.h> #include <nanovdb/util/SampleFromVoxels.h> #define CUDA_PATHWAY #include <cuda_math.cuh> #include <cuda_gvdb_scene.cuh> #include <cuda_gvdb_nodes.cuh> // Shorter type name for a NanoVDB grid with a given value type, log base 2 (branching) dimension // for level-2 InternalNodes, log base 2 dimension for level-1 InternalNodes, and log base 2 // dimension for leaves. (These are the same values as returned by gvdb.getLD(level).) template<class ValueT, int Node2LogDim, int Node1LogDim, int LeafLogDim> using NanoGridCustom = nanovdb::Grid<nanovdb::Tree<nanovdb::RootNode< nanovdb::InternalNode<nanovdb::InternalNode< nanovdb::LeafNode<ValueT, nanovdb::Coord, nanovdb::Mask, LeafLogDim>, Node1LogDim>, Node2LogDim>>>>; // Overloaded function to get the maximum value of a ValueT, since nanovdb::Maximum // doesn't handle Vec3f at the moment template<class T> __device__ T ExportToNanoVDB_MaximumValue() { return FLT_MAX; } template<> __device__ int ExportToNanoVDB_MaximumValue<int>() { return INT_MAX; } template<> __device__ nanovdb::Vec3f ExportToNanoVDB_MaximumValue<nanovdb::Vec3f>() { return { FLT_MAX, FLT_MAX, FLT_MAX }; } // Implementation of min and max for nanovdb::Vec3f types. __device__ nanovdb::Vec3f min(const nanovdb::Vec3f& a, const nanovdb::Vec3f& b) { return { min(a[0], b[0]), min(a[1], b[1]), min(a[2], b[2]) }; } __device__ nanovdb::Vec3f max(const nanovdb::Vec3f& a, const nanovdb::Vec3f& b) { return { max(a[0], b[0]), max(a[1], b[1]), max(a[2], b[2]) }; } // Reads a value from the atlas. template<class ValueT> __device__ ValueT ReadValue(const cudaSurfaceObject_t& atlas, const int x, const int y, const int z) { return surf3Dread<ValueT>(atlas, x * sizeof(ValueT), y, z); } // Partial specialization for Vec3f, where we have to switch to using float4s: template<> __device__ nanovdb::Vec3f ReadValue<nanovdb::Vec3f>(const cudaSurfaceObject_t& atlas, const int x, const int y, const int z) { float4 value = surf3Dread<float4>(atlas, x * sizeof(float3), y, z); // Note float4/float3 change! return { value.x, value.y, value.z }; } // A ProcessLeafFunc is a function that takes a VDBInfo*, void*, cudaSurfaceObject_t, and int, and returns nothing. using ProcessLeafFunc = void(*)(VDBInfo*, void*, cudaSurfaceObject_t, int); template<class ValueT, int LOG2DIM> __device__ void ProcessLeaf(VDBInfo* gvdb, void* nanoVDBLeafNodes, cudaSurfaceObject_t atlas, int numLeaves) { const int leafIdx = blockIdx.x * blockDim.x + threadIdx.x; if (leafIdx >= numLeaves) return; VDBNode* gvdbNode = getNode(gvdb, 0, leafIdx); // Sometimes, the GVDB node list can contain nodes that were previously part of trees. // These will be ignored, although there will still be space for them in the NanoVDB volume. // (This isn't a requirement of exporting - we could prune these nodes as well.) 
if (gvdbNode->mChildList != ID_UNDEFL) return; // The NanoVDB node and data using LeafT = nanovdb::LeafNode<ValueT, nanovdb::Coord, nanovdb::Mask, LOG2DIM>; const int brickres = 1 << LOG2DIM; LeafT* leafNodes = reinterpret_cast<LeafT*>(nanoVDBLeafNodes); LeafT& node = leafNodes[leafIdx]; LeafT::DataType& nodeData = *reinterpret_cast<LeafT::DataType*>(&node); // All values in a brick are active in GVDB nodeData.mValueMask.set(true); // Minimum and maximum value of all elements ValueT minValue = ExportToNanoVDB_MaximumValue<ValueT>(); ValueT maxValue = -minValue; const int3 brickValue = gvdbNode->mValue; // Copy the brick in the order ((x * T) + y) * T + z. for (int x = 0; x < brickres; x++) { for (int y = 0; y < brickres; y++) { for (int z = 0; z < brickres; z++) { const ValueT value = ReadValue<ValueT>(atlas, x + brickValue.x, y + brickValue.y, z + brickValue.z); nodeData.mValues[(x * brickres + y) * brickres + z] = value; minValue = min(minValue, value); maxValue = max(maxValue, value); } } } nodeData.mMinimum = minValue; nodeData.mMaximum = maxValue; // Note that mAverage and mStdDevi currently aren't set. // Since all voxels are active, the bounding box of active values of this node is the // bounding box of the node itself: nodeData.mBBoxMin = { gvdbNode->mPos.x, gvdbNode->mPos.y, gvdbNode->mPos.z }; nodeData.mBBoxDif[0] = brickres; nodeData.mBBoxDif[1] = brickres; nodeData.mBBoxDif[2] = brickres; } // Autogenerated list of ProcessLeaf instantiation function pointers. You can generate this list // using the following Python code: /* print('static const __device__ ProcessLeafFunc processLeafFuncs[3][6] = {') for type in ['float', 'nanovdb::Vec3f', 'int']: print('{', end='') for ld in range(2, 8): print(f'ProcessLeaf<{type}, {ld}>', end='') if ld != 7: print(', ', end='') if(type == 'int'): print('}') else: print('},') print('};') */ static const __device__ ProcessLeafFunc processLeafFuncs[3][6] = { {ProcessLeaf<float, 2>, ProcessLeaf<float, 3>, ProcessLeaf<float, 4>, ProcessLeaf<float, 5>, ProcessLeaf<float, 6>, ProcessLeaf<float, 7>}, {ProcessLeaf<nanovdb::Vec3f, 2>, ProcessLeaf<nanovdb::Vec3f, 3>, ProcessLeaf<nanovdb::Vec3f, 4>, ProcessLeaf<nanovdb::Vec3f, 5>, ProcessLeaf<nanovdb::Vec3f, 6>, ProcessLeaf<nanovdb::Vec3f, 7>}, {ProcessLeaf<int, 2>, ProcessLeaf<int, 3>, ProcessLeaf<int, 4>, ProcessLeaf<int, 5>, ProcessLeaf<int, 6>, ProcessLeaf<int, 7>} }; extern "C" __global__ void gvdbToNanoVDBProcessLeaves( VDBInfo * gvdb, void* nanoVDBLeafNodes, int typeTableIndex, cudaSurfaceObject_t atlas, int numLeaves) { // Redirect to the appropriate function instantiation processLeafFuncs[typeTableIndex][gvdb->dim[0] - 2](gvdb, nanoVDBLeafNodes, atlas, numLeaves); } //------------------------------------------------------------------------------------------------- // The code to process internal nodes from PTX involves probably the most complex function // indirection in this file. If we wanted to template this over a single function, we would need to // know four things: // - the level of the node (if level 1, then its child is a LeafNode, while if level 2, then its // child is another InternalNode) // - the value type // - the log2dim of this node // - the log2dim of the child node // // If we expressed this all in a 4D array of templates, we would have 2 * 3 * 6 * 6 = 216 cases. // Which could be reasonable! 
Instead, we reduce this to two smaller tables of cases: // gvdbToNanoVDBProcessInternalNodes starts out by switching over a 2D table of 3 * 6 = 18 cases // (value type and log2dim of this node). // When getting the extents of the child node, we switch over a 3D table of 2 * 3 * 6 = 36 cases. // These functions return their extent data in a common format. // A union type for the possible values of a ValueT. union ValueUnion { float f; nanovdb::Vec3f f3; int i; }; template<class T> __device__ T* getValueUnion(ValueUnion& value) { return reinterpret_cast<T*>(&value.f3); } // Common structure for the extents of a node. struct NodeRangeData { ValueUnion valueMin; ValueUnion valueMax; nanovdb::CoordBBox aabb; }; // Gets information about the range of the given node in a C-like format. template<class NodeT> __device__ NodeRangeData GetNodeRange(uint8_t* nodeStart, int nodeIdx) { using ValueT = NodeT::ValueType; NodeT* node = reinterpret_cast<NodeT*>(nodeStart) + nodeIdx; NodeRangeData result; *getValueUnion<ValueT>(result.valueMin) = node->valueMin(); *getValueUnion<ValueT>(result.valueMax) = node->valueMax(); result.aabb = node->bbox(); return result; } // A NodeRangeFunc is a function that takes a uint8_t* and an int and returns a NodeRangeData. using NodeRangeFunc = NodeRangeData(*)(uint8_t*, int); // Short versions of leaf node and internal node template<class ValueT, int LOG2DIM> using LeafNodeSmpl = nanovdb::LeafNode<ValueT, nanovdb::Coord, nanovdb::Mask, LOG2DIM>; // Using 3 for the leaf node's LOG2DIM suffices here template<class ValueT, int LOG2DIM> using INodeSmpl = nanovdb::InternalNode<nanovdb::LeafNode<ValueT>, LOG2DIM>; // 3D table of [child node is leaf or level-1][value type][child node log2dim - 2]. // This was autogenerated using the following Python code: /* print('static const __device__ NodeRangeFunc rangeFunctions[2][3][6] = {{') for type in ['float', 'nanovdb::Vec3f', 'int']: print('{', end='') for ld in range(2, 8): print(f'GetNodeRange<LeafNodeSmpl<{type}, {ld}>>', end='') if ld != 7: print(', ', end='') if(type == 'int'): print('}') else: print('},') print('},{',) for type in ['float', 'nanovdb::Vec3f', 'int']: print('{', end='') for ld in range(2, 8): # Using 3 for the leaf node's LOG2DIM suffices here print(f'GetNodeRange<INodeSmpl<{type}, {ld}>>', end='') if ld != 7: print(', ', end='') if(type == 'int'): print('}') else: print('},') print('}};') */ static const __device__ NodeRangeFunc rangeFunctions[2][3][6] = { { {GetNodeRange<LeafNodeSmpl<float, 2>>, GetNodeRange<LeafNodeSmpl<float, 3>>, GetNodeRange<LeafNodeSmpl<float, 4>>, GetNodeRange<LeafNodeSmpl<float, 5>>, GetNodeRange<LeafNodeSmpl<float, 6>>, GetNodeRange<LeafNodeSmpl<float, 7>>}, {GetNodeRange<LeafNodeSmpl<nanovdb::Vec3f, 2>>, GetNodeRange<LeafNodeSmpl<nanovdb::Vec3f, 3>>, GetNodeRange<LeafNodeSmpl<nanovdb::Vec3f, 4>>, GetNodeRange<LeafNodeSmpl<nanovdb::Vec3f, 5>>, GetNodeRange<LeafNodeSmpl<nanovdb::Vec3f, 6>>, GetNodeRange<LeafNodeSmpl<nanovdb::Vec3f, 7>>}, {GetNodeRange<LeafNodeSmpl<int, 2>>, GetNodeRange<LeafNodeSmpl<int, 3>>, GetNodeRange<LeafNodeSmpl<int, 4>>, GetNodeRange<LeafNodeSmpl<int, 5>>, GetNodeRange<LeafNodeSmpl<int, 6>>, GetNodeRange<LeafNodeSmpl<int, 7>>} },{ {GetNodeRange<INodeSmpl<float, 2>>, GetNodeRange<INodeSmpl<float, 3>>, GetNodeRange<INodeSmpl<float, 4>>, GetNodeRange<INodeSmpl<float, 5>>, GetNodeRange<INodeSmpl<float, 6>>, GetNodeRange<INodeSmpl<float, 7>>}, {GetNodeRange<INodeSmpl<nanovdb::Vec3f, 2>>, GetNodeRange<INodeSmpl<nanovdb::Vec3f, 3>>, 
GetNodeRange<INodeSmpl<nanovdb::Vec3f, 4>>, GetNodeRange<INodeSmpl<nanovdb::Vec3f, 5>>, GetNodeRange<INodeSmpl<nanovdb::Vec3f, 6>>, GetNodeRange<INodeSmpl<nanovdb::Vec3f, 7>>}, {GetNodeRange<INodeSmpl<int, 2>>, GetNodeRange<INodeSmpl<int, 3>>, GetNodeRange<INodeSmpl<int, 4>>, GetNodeRange<INodeSmpl<int, 5>>, GetNodeRange<INodeSmpl<int, 6>>, GetNodeRange<INodeSmpl<int, 7>>} } }; using ProcessInternalNodeFunc = void(*)(VDBInfo*, VDBNode*, uint8_t*, uint8_t*, int, int, int, int, ValueUnion); // Here, LOG2DIM is the log2dim of the internal node (not the child node). // childLevel is this node's level minus 1. template<class ValueT, int LOG2DIM> __device__ void ProcessInternalNode( VDBInfo* gvdb, VDBNode* gvdbNode, uint8_t* nanoVDBNodes, uint8_t* nanoVDBChildNodes, int numNodes, int nodeIdx, int level, int childLog2Dim, ValueUnion backgroundUnion) { using NodeT = INodeSmpl<ValueT, LOG2DIM>; using DataT = NodeT::DataType; const uint32_t res = 1 << LOG2DIM; const uint32_t numChildren = res * res * res; DataT* nodeData = reinterpret_cast<DataT*>(nanoVDBNodes) + nodeIdx; // We never have any values to fill in: nodeData->mValueMask.setOff(); // We'll turn on the child mask as needed: nodeData->mChildMask.setOff(); // There are different ways that we could combine mOffset and childID, but for now we // simply make mOffset give the number of InternalNodes until the start of the next node // section: nodeData->mOffset = numNodes - nodeIdx; // Initialize counters for the min and max value and bounding box ValueT valueMin = ExportToNanoVDB_MaximumValue<ValueT>(); ValueT valueMax = -valueMin; nanovdb::Coord aabbMin = { INT_MAX, INT_MAX, INT_MAX }; nanovdb::Coord aabbMax = { -INT_MAX, -INT_MAX, -INT_MAX }; // Get a pointer to the function instantiation to use for getting ndoe extents. NodeRangeFunc rangeFunc; const int childLevel = level - 1; if (std::is_same<ValueT, float>::value) { rangeFunc = rangeFunctions[childLevel][0][childLog2Dim - 2]; } else if (std::is_same<ValueT, nanovdb::Vec3f>::value) { rangeFunc = rangeFunctions[childLevel][1][childLog2Dim - 2]; } else { rangeFunc = rangeFunctions[childLevel][2][childLog2Dim - 2]; } // The child list contains either ID_UNDEF64 or a 64-bit pool reference (grp, lev, index) // for each child. This translates well into NanoVDB's mask + indices! // (and also means that yes, you could implement a DAG with GVDB or NanoVDB) // At the moment, NanoVDB's internal nodes store their children in (x*T+y)*T+z order, while // GVDB stores its children in (z*T+y)*T+x order. (See the implementations of CoordToOffset // inside NanoVDB.) const uint32_t resMask = res - 1; const uint32_t resSquared = res * res; const uint32_t middleMask = res * resMask; for (uint32_t gvdbChildIdx = 0; gvdbChildIdx < numChildren; gvdbChildIdx++) { // Swap the order of the x, y, and z components, by interpreting gvdbChildIdx // as a base-res number. const uint32_t nanoVDBChildIdx = (gvdbChildIdx / resSquared) + (gvdbChildIdx & middleMask) + (gvdbChildIdx & resMask) * resSquared; const int childID = getChild(gvdb, gvdbNode, gvdbChildIdx); // Skip children that don't exist, filling them with the background value. // These aren't active, but still have values. 
if (childID == (int)ID_UNDEF64) { nodeData->mTable[nanoVDBChildIdx].value = *getValueUnion<ValueT>(backgroundUnion); continue; } nodeData->mChildMask.setOn(nanoVDBChildIdx); nodeData->mTable[nanoVDBChildIdx].childID = static_cast<uint32_t>(childID); // Now, since we've already filled in the lower levels of the tree, update this node's // min and max values and bounding box: NodeRangeData rangeData = rangeFunc(nanoVDBChildNodes, childID); valueMin = min(valueMin, *getValueUnion<ValueT>(rangeData.valueMin)); valueMax = max(valueMax, *getValueUnion<ValueT>(rangeData.valueMax)); const nanovdb::BBox<nanovdb::Coord> childAABB = rangeData.aabb; for (int c = 0; c < 3; c++) { aabbMin[c] = min(aabbMin[c], childAABB.min()[c]); aabbMax[c] = max(aabbMax[c], childAABB.max()[c]); } } nodeData->mMinimum = valueMin; nodeData->mMaximum = valueMax; nodeData->mBBox.min() = aabbMin; nodeData->mBBox.max() = aabbMax; } // Table of instantiations of ProcessInternalNode, [value type][node log2dim - 2]. This was // autogenerated using the following Python script: /* print('static const __device__ ProcessInternalNodeFunc processInternalNodeFuncs[3][6] = {') for type in ['float', 'nanovdb::Vec3f', 'int']: print('{', end='') for ld in range(2, 8): print(f'ProcessInternalNode<{type}, {ld}>', end='') if ld != 7: print(', ', end='') if(type == 'int'): print('}') else: print('},') print('};') */ static const __device__ ProcessInternalNodeFunc processInternalNodeFuncs[3][6] = { {ProcessInternalNode<float, 2>, ProcessInternalNode<float, 3>, ProcessInternalNode<float, 4>, ProcessInternalNode<float, 5>, ProcessInternalNode<float, 6>, ProcessInternalNode<float, 7>}, {ProcessInternalNode<nanovdb::Vec3f, 2>, ProcessInternalNode<nanovdb::Vec3f, 3>, ProcessInternalNode<nanovdb::Vec3f, 4>, ProcessInternalNode<nanovdb::Vec3f, 5>, ProcessInternalNode<nanovdb::Vec3f, 6>, ProcessInternalNode<nanovdb::Vec3f, 7>}, {ProcessInternalNode<int, 2>, ProcessInternalNode<int, 3>, ProcessInternalNode<int, 4>, ProcessInternalNode<int, 5>, ProcessInternalNode<int, 6>, ProcessInternalNode<int, 7>} }; extern "C" __global__ void gvdbToNanoVDBProcessInternalNodes( VDBInfo * gvdb, uint8_t* nanoVDBNodes, uint8_t* nanoVDBChildNodes, int numNodes, int level, int nodeLog2Dim, int childLog2Dim, ValueUnion backgroundUnion, int typeTableIndex) { const int nodeIdx = blockIdx.x * blockDim.x + threadIdx.x; if (nodeIdx >= numNodes) return; VDBNode* gvdbNode = getNode(gvdb, level, nodeIdx); // Get the child list of the node, skipping nodes with no children. (Note: These will still // take up space, though! We could prune these if we wanted to.) if (gvdbNode->mChildList == ID_UNDEFL) { // All fields will be 0 return; } // Redirect to the correct instantiation of ProcessInternalNode. processInternalNodeFuncs[typeTableIndex][nodeLog2Dim - 2]( gvdb, gvdbNode, // Volume and GVDB node nanoVDBNodes, nanoVDBChildNodes, // Pointers to NanoVDB data numNodes, // Number of level-`level` nodes nodeIdx, // Index of the current node level, // The node level childLog2Dim, // The child log2dim backgroundUnion // The background ); } //------------------------------------------------------------------------------------------------- // Here's an implementation of a simple transfer function accessible from NanoVDB. You could also // look at GVDB's memory used for its transfer function directly. 
__device__ float4 transferFunction(float value) {
    // Emulating the transfer function in cuda_gvdb_dda.cuh:
    const float scnThresh = .1f;
    const float scnVmax = .1f;
    const float scnVmin = 0.0f;
    const float t = max(0.0f, min(1.0f, (value - scnThresh) / (scnVmax - scnVmin)));
    // Applying a linear transfer function:
    const float4 keys[5] = {
        make_float4(0.0f, 0.0f, 0.0f, 0.0f),
        make_float4(1.5f, 1.5f, 0.0f, 0.1f),
        make_float4(1.5f, 0.0f, 0.0f, 0.3f),
        make_float4(0.3f, 0.3f, 0.3f, 0.1f),
        make_float4(0.0f, 0.0f, 0.0f, 0.0f)
    };
    const float t4 = t * 4;
    const int baseKey = int(min(3.0f, floorf(t4)));
    const float ft4 = t4 - float(baseKey);
    // Linearly interpolate between the two neighboring keys.
    return keys[baseKey] * (1.0f - ft4) + keys[baseKey + 1] * ft4;
}

__device__ unsigned char floatToUchar(float v) {
    return static_cast<unsigned char>(255.0f * fminf(1.0f, v));
}

extern "C" __global__ void gvdbExportNanoVDBRender(void* vPtrGrid, nanovdb::Vec3f eye,
    float4 camTopLeftWS, float4 camRightWS, float4 camDownWS,
    unsigned char* image, uint32_t width, uint32_t height)
{
    using GridT = NanoGridCustom<float, 5, 4, 3>;
    using AccT = GridT::AccessorType;
    GridT* grid = reinterpret_cast<GridT*>(vPtrGrid);

    const uint32_t xi = blockIdx.x * blockDim.x + threadIdx.x;
    const uint32_t yi = blockIdx.y * blockDim.y + threadIdx.y;
    if (xi >= width || yi >= height) return;

    AccT acc = grid->getAccessor();

    // 0----> u
    // |    |
    // V ---*
    // v
    float u = static_cast<float>(xi) / static_cast<float>(width);
    float v = static_cast<float>(yi) / static_cast<float>(height);
    const nanovdb::Vec3f direction(
        camTopLeftWS.x + u * camRightWS.x + v * camDownWS.x,
        camTopLeftWS.y + u * camRightWS.y + v * camDownWS.y,
        camTopLeftWS.z + u * camRightWS.z + v * camDownWS.z
    );
    nanovdb::Ray<float> rayWS(eye, direction);

    nanovdb::SampleFromVoxels<AccT, 1, true> sampler(acc);

    // Convert to index-space
    nanovdb::Ray<float> iRay = rayWS.worldToIndexF(*grid);

    float3 color = make_float3(0.0f, 0.0f, 0.0f);
    float transmittance = 1.0f;
    // Compute transmittance if the ray intersects the bounding box
    if (iRay.clip(grid->tree().bbox())) {
        // Integrate color and transmittance.
        // This is done in a brute-force manner for this sample, but it's possible to do better
        // (e.g. using delta tracking)
        const float dt = 0.25f; // Roughly, 1/(number of samples per voxel).
        for (float t = iRay.t0(); t < iRay.t1(); t += dt) {
            float sigma = sampler(iRay(t));
            float4 value = transferFunction(sigma); // Color and opacity
            value.w = exp(-dt * value.w);
            color.x += value.x * transmittance * (1 - value.w);
            color.y += value.y * transmittance * (1 - value.w);
            color.z += value.z * transmittance * (1 - value.w);
            transmittance *= value.w;
        }
    }

    // Composite against the background
    const int checkerboard = 1 << 7;
    const float checkerboardValue = (((xi & checkerboard) == (yi & checkerboard)) ? 1.0f : 0.0f);
    float3 finalColor = color * (1.0f - transmittance) + checkerboardValue * transmittance;

    // Note that this function doesn't do any linear->sRGB conversion! In a production setting,
    // it's more physically accurate to do rendering in linear-space, then convert to sRGB.
    image[4 * (yi * width + xi) + 0] = floatToUchar(finalColor.x);
    image[4 * (yi * width + xi) + 1] = floatToUchar(finalColor.y);
    image[4 * (yi * width + xi) + 2] = floatToUchar(finalColor.z);
    image[4 * (yi * width + xi) + 3] = 255;
}
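
// Editor's sketch (not part of the sample above): a standalone restatement of the
// child-index reordering done in ProcessInternalNode. GVDB enumerates children in
// (z*T + y)*T + x order while NanoVDB uses (x*T + y)*T + z, so the x and z digits
// of the base-`res` index are swapped while y stays in place. The kernel's
// bit-mask form relies on `res` being a power of two; the division/modulo form
// below is the general equivalent. The function name is illustrative.
static __host__ __device__ unsigned swapChildIndexOrder(unsigned gvdbChildIdx, unsigned res) {
  const unsigned x = gvdbChildIdx % res;          // fastest-varying digit in GVDB order
  const unsigned y = (gvdbChildIdx / res) % res;  // middle digit (unchanged)
  const unsigned z = gvdbChildIdx / (res * res);  // slowest-varying digit in GVDB order
  return (x * res + y) * res + z;                 // NanoVDB (x*T + y)*T + z order
}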
#include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <curand.h> #include <stdio.h> #include <math.h> #include <float.h> #include "cudaCG_all.h" //#define NUM_THREADS 512 #define BLOCK 512 #define CEIL_DIV(num, denum) (num+denum-1)/denum #define IDX(b,l,t,m,i,cum,L) (i+2*(m+t*(2*l+1)+cum[l]+b*cum[L+1])) //cum[l] remembers the start of the middle channel for l (in (?, tm, 2)) #define PLUSMINUS(k) ((k%2==1) ? -1 : 1) #define LLL2L(lll, L) (lll / (L+1) / (L+1)) #define LLL2L1(lll, L) (lll / (L+1) % (L+1)) #define LLL2L2(lll, L) (lll % (L+1)) #define MAX_LMAX 512-1 #define MAX_LOGFACT_FROM_L(L) (5*(L+1) + 20) #define MAX_(a,b) ((a<b)?b:a) #define MIN_(a,b) ((a>b)?b:a) __constant__ double LogFact_CONST[MAX_LOGFACT_FROM_L(MAX_LMAX)]; // #define LOGFACT(n,mem) ((n < 2) ? 0. : LogFact_CONST[n]) namespace { __device__ __forceinline__ float _naiveCG( int l1, int l2, int l, int m1, int m2, int m, const double* mem){ int m3=-m; int t1=l2-m1-l; int t2=l1+m2-l; int t3=l1+l2-l; int t4=l1-m1; int t5=l2+m2; int tmin=max(0,max(t1,t2)); int tmax=min(t3,min(t4,t5)); double wigner=0; double logA=(log((double)2*l+1)+LOGFACT(l+l1-l2,mem)+LOGFACT(l-l1+l2,mem)+LOGFACT(l1+l2-l,mem)-LOGFACT(l1+l2+l+1,mem))/2; logA+=(LOGFACT(l-m3,mem)+LOGFACT(l+m3,mem)+LOGFACT(l1-m1,mem)+LOGFACT(l1+m1,mem)+LOGFACT(l2-m2,mem)+LOGFACT(l2+m2,mem))/2; for(int t=tmin; t<=tmax; t++){ double logB = LOGFACT(t,mem)+LOGFACT(t3-t,mem)+LOGFACT(t4-t,mem)+LOGFACT(t5-t,mem)+LOGFACT(-t1+t,mem)+LOGFACT(-t2+t,mem); wigner += PLUSMINUS(t)*exp(logA-logB); } return (float) PLUSMINUS(l1-l2-m3)*PLUSMINUS(l1-l2+m)*wigner; } __device__ __forceinline__ float naiveCG_cal_m( int l1, int l2, int l, int m1, int m2){ return _naiveCG(l1, l2, l, m1, m2, m1+m2, NULL); } __device__ float naiveCG_cal_m1( int l1, int l2, int l, int m, int m2){ return _naiveCG(l1, l2, l, m - m2, m2, m, NULL); } __global__ void new_precomputeCG_kernel(float* __restrict__ CG, int Lmax, int* __restrict__ llls, int nllls, int* CG_offsets) { const int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int lllidx = blockIdx.x * blockDim.x + threadIdx.x; int l, l1, l2, lll; if (lllidx < nllls){ //compute the l, l1, l2 indices lll = llls[lllidx]; l = LLL2L(lll, Lmax); l1 = LLL2L1(lll, Lmax); l2 = LLL2L2(lll, Lmax); int start = CG_offsets[lllidx]; for (int m = 0; m < 2 * l +1 ; m++){ for (int m2 = 0; m2 < 2 * l2 + 1; m2++){ int m1 = (m-l) - (m2-l2); if (-l1 <= m1 && m1 <= l1){ CG[start + m * (2*l2+1) + m2] = naiveCG_cal_m1(l1,l2,l,m-l,m2-l2); } //start += 1; } } } } //================================================================================================================== __global__ void cudaCG_forward_kernel( const float* F, float* FF, const int* t_F, const int* cumu_tm_F, const int* cumu_tm_FF, int Lmax, const int* llls, const int* ll1_to_lllidx_offsets, //new int nthreads, const int* t_offsets, const float* CG, const int* CG_offsets) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int b = blockIdx.z; int lllidx; int l1, l2, t1, t2, CG_offset;//need to be computed from threadid int l, t, m; int t_offset; if (global_threadId < nthreads){ //compute l, t, m l = 0; while (cumu_tm_FF[l] <= global_threadId){ l++; } l--; //cumu_tm_FF[l] <= global_threadId < cumu_tm_FF[l] t = (global_threadId - cumu_tm_FF[l]) / (2 * l + 1); m = (global_threadId - cumu_tm_FF[l]) % (2 * l + 1) - l; //[-l, l] //compute lllidx #define LL_to_LLLIDX(l_, l1_) ll1_to_lllidx_offsets[l_ * (Lmax+2) + l1_] t_offset = 0; l1 = 0; while (l1 <= Lmax){ t_offset = t_offsets[LL_to_LLLIDX(l, 
l1)]; if (t_offset > t){ l1 -= 1; break; } else if (l1 == Lmax) { break; } l1++; } //now, t_offsets[LL_to_LLLIDX(l, l1)] <= t < t_offsets[LL_to_LLLIDX(l, l1+1)]) lllidx = LL_to_LLLIDX(l, l1); while (lllidx < LL_to_LLLIDX(l, l1+1)){ t_offset = t_offsets[lllidx]; if (t_offset > t){ lllidx -= 1; break; } else if (lllidx == LL_to_LLLIDX(l, l1+1) - 1){ break; } lllidx ++; } l2 = LLL2L2(llls[lllidx], Lmax); #undef LL_to_LLLIDX int t_offset = t_offsets[lllidx]; //now, t_offsets[lllidx] <= t < t_offsets[lllidx+1], so lllidx maps to the l1&l2 that generates t t2 = (t - t_offset) % t_F[l2]; t1 = (t - t_offset) / t_F[l2]; //compute CG_offset as well int CG_offset = CG_offsets[lllidx]; float real0 = 0., imag0 = 0.; for (int m2 = -l2; m2 <= l2; m2++){ int m1 = m - m2; if (-l1 <= m1 && m1 <= l1){ float CGcoef = CG[CG_offset + (m+l) * (2*l2+1) + (m2+l2)]; //This is cached (read from global ram) //float CGcoef = naiveCG_cal_m1(l1,l2,l,m,m2); float real1 = F[IDX(b,l1,t1,m1+l1,0,cumu_tm_F,Lmax)]; float imag1 = F[IDX(b,l1,t1,m1+l1,1,cumu_tm_F,Lmax)]; float real2 = F[IDX(b,l2,t2,m2+l2,0,cumu_tm_F,Lmax)]; float imag2 = F[IDX(b,l2,t2,m2+l2,1,cumu_tm_F,Lmax)]; real0 += (real1 * real2 - imag1 * imag2) * CGcoef; imag0 += (real1 * imag2 + real2 * imag1) * CGcoef; } } //FF[IDX(b,l,t,m+l,0,cumu_tm_FF,Lmax)] = 1000 * b + 100 * l1 + 10 * t1 + 1 * (m + l); //FF[IDX(b,l,t,m+l,1,cumu_tm_FF,Lmax)] = 1000 * b + 100 * l2 + 10 * t2 + 1 * (m + l); //FF[IDX(b,l,t,m+l,0,cumu_tm_FF,Lmax)] = 1000 * b + 100 * l + 10 * t + 1 * (m + l); //good //FF[IDX(b,l,t,m+l,1,cumu_tm_FF,Lmax)] = l2 + l1 * (Lmax + 1) + l * (Lmax + 1) * (Lmax + 1); //good FF[IDX(b,l,t,m+l,0,cumu_tm_FF,Lmax)] = real0; FF[IDX(b,l,t,m+l,1,cumu_tm_FF,Lmax)] = imag0; } } __global__ void cudaCG_backward_kernel( const float* F, float* grad_F, const float* grad_FF, const int* t_F, const int* cumu_tm_F, const int* cumu_tm_FF, int Lmax, const int* llls, int nllls, const int* t_offsets, const float* CG, const int* CG_offsets) { int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int b = blockIdx.z; int l1, l2, l, m1, m2, m, t1, t2, t; int lll; int thread_l, thread_t, thread_m; float real1, imag1, real2, imag2, real, imag; //Here we paralellized over l1, t1 and m1. 
if (global_threadId < cumu_tm_F[Lmax+1]){ //Compute the l1, m1, t1 for this thread thread_l = 0; while (cumu_tm_F[thread_l]<=global_threadId) { thread_l++; } thread_l -= 1; thread_t = (global_threadId - cumu_tm_F[thread_l]) / (2*thread_l+1); thread_m = (global_threadId - cumu_tm_F[thread_l]) % (2*thread_l+1); //init the gradients to 0 real1=imag1=0; int old_lllidx1 = -1, old_lllidx2 = -1, lllidx1 = -1, lllidx2 = -1; while (old_lllidx1 < nllls || old_lllidx2 < nllls){ for (int lllidx = MIN_(old_lllidx1, old_lllidx2) + 1; lllidx < nllls; lllidx++){ lll = llls[lllidx]; if (lllidx1 <= old_lllidx1 && lllidx > old_lllidx1 && LLL2L1(lll, Lmax) == thread_l){ lllidx1 = lllidx; } if (lllidx2 <= old_lllidx2 && lllidx > old_lllidx2 && LLL2L2(lll, Lmax) == thread_l){ lllidx2 = lllidx; } } if (lllidx1 > old_lllidx1){ lll = llls[lllidx1]; l2 = LLL2L2(lll, Lmax); l = LLL2L(lll, Lmax); l1 = thread_l; int CG_offset = CG_offsets[lllidx1]; //This is the first case //iterate over l2 and l to compute the gradient int t_offset = t_offsets[lllidx1]; for (m2 = 0; m2 < 2*l2+1; m2++){ m = thread_m-thread_l + m2-l2 + l; if (0 <= m && m <= 2*l){ float CGcoef = CG[CG_offset + (m) * (2*l2+1) + (m2)]; //float CGcoef = naiveCG_cal_m1(l1,l2,l,m-l,m2-l2); for (t2 = 0; t2 < t_F[l2]; t2++){ t = t_F[l2] * thread_t + t2 + t_offset; real = grad_FF[IDX(b,l,t,m,0,cumu_tm_FF,Lmax)]; imag = grad_FF[IDX(b,l,t,m,1,cumu_tm_FF,Lmax)]; real2 = F[IDX(b,l2,t2,m2,0,cumu_tm_F,Lmax)]; imag2 = F[IDX(b,l2,t2,m2,1,cumu_tm_F,Lmax)]; real1 += (real * real2 + imag * imag2) * CGcoef; imag1 += (real2 * imag - real * imag2) * CGcoef; //grad_FF[IDX(b,l,t,m,0,cumu_tm_FF,Lmax)] = 1000 * lllidx1 + 000 * b + 100 * thread_l + 10 * thread_t + (thread_m); } } } old_lllidx1 = lllidx1; } else { //stop old_lllidx1 = nllls; lllidx1 = nllls; } if (lllidx2 > old_lllidx2){ //l2 == thread_l lll = llls[lllidx2]; l1 = LLL2L1(lll, Lmax); l = LLL2L(lll, Lmax); l2 = thread_l; int CG_offset = CG_offsets[lllidx2]; int t_offset = t_offsets[lllidx2]; for (m1 = 0; m1 < 2*l1+1; m1++){ m = m1-l1 + thread_m-thread_l + l; if (0 <= m && m <= 2 * l){ float CGcoef = CG[CG_offset + (m) * (2*thread_l+1) + (thread_m)]; //float CGcoef = naiveCG_cal_m1(l1,l2,l,m-l,thread_m-thread_l); for (t1 = 0; t1 < t_F[l1]; t1++){ t = t_F[thread_l] * t1 + thread_t + t_offset; real = grad_FF[IDX(b,l,t,m,0,cumu_tm_FF,Lmax)]; imag = grad_FF[IDX(b,l,t,m,1,cumu_tm_FF,Lmax)]; //This time we need to access l1 t1 and m1 real2 = F[IDX(b,l1,t1,m1,0,cumu_tm_F,Lmax)]; imag2 = F[IDX(b,l1,t1,m1,1,cumu_tm_F,Lmax)]; real1 += (real * real2 + imag * imag2) * CGcoef; imag1 += (real2 * imag - real * imag2) * CGcoef; //grad_FF[IDX(b,l,t,m,1,cumu_tm_FF,Lmax)] = 1000 * lllidx2 + 000 * b + 100 * thread_l + 10 * thread_t + (thread_m); } } } old_lllidx2 = lllidx2; } else { //stop lllidx2 = nllls; old_lllidx2 = nllls; } } grad_F[(global_threadId + cumu_tm_F[Lmax+1] * b)*2] = real1; grad_F[(global_threadId + cumu_tm_F[Lmax+1] * b)*2+1] = imag1; } } } // namespace void CG_sparse_precompute_cuda(float* CGspace, int L, int* llls, int nllls, int* CG_offsets){ double *logfact; //device ptrs int logfact_size = MAX_LOGFACT_FROM_L(L); logfact_size = init_logfactorials_new(&logfact, logfact_size); cudaMemcpyToSymbol(LogFact_CONST, logfact, logfact_size * sizeof(double )); //len(CG_offsets) == nllls+1 dim3 DimBlock(BLOCK, 1, 1); dim3 DimGrid0(CEIL_DIV(nllls, BLOCK), 1, 1); new_precomputeCG_kernel<<<DimGrid0, DimBlock>>>(CGspace, L, llls, nllls, CG_offsets); cudaDeviceSynchronize(); free(logfact); } void CG_sparse_cuda_forward( 
torch::Tensor input, torch::Tensor output, int L, int B, int* d_t_F, int* d_cumu_tm_F, int* d_t_FF, int* d_cumu_tm_FF, int* llls, int* d_ll1_to_lllidx_offsets, int nthreads, int* t_offsets, float* CG, int* CG_offsets){ //auto output = torch::zeros_like(old_cell); float* F = input.data<float>(); float* FF = output.data<float>(); double *logfact; int logfact_size = MAX_LOGFACT_FROM_L(L); logfact_size = init_logfactorials_new(&logfact, logfact_size); cudaMemcpyToSymbol(LogFact_CONST, logfact, logfact_size * sizeof(double )); dim3 DimBlock(BLOCK, 1, 1); dim3 DimGrid(CEIL_DIV(nthreads, BLOCK), 1, B); cudaCG_forward_kernel<<<DimGrid, DimBlock>>>(F, FF, d_t_F, d_cumu_tm_F, d_cumu_tm_FF, L, llls, d_ll1_to_lllidx_offsets, nthreads, t_offsets, CG, CG_offsets); cudaDeviceSynchronize(); free(logfact); } void CG_sparse_cuda_backward( torch::Tensor input, torch::Tensor grad_in, torch::Tensor grad_out, //torch::Tensor CG_tensor, int L, int B, int* d_t_F, int* d_cumu_tm_F, int* d_t_FF, int* d_cumu_tm_FF, int* llls, int nllls, int size, int* t_offsets, float* CG, int* CG_offsets){ float* F = input.data<float>(); float* grad_F = grad_in.data<float>(); float* grad_O = grad_out.data<float>(); double *logfact; int logfact_size = MAX_LOGFACT_FROM_L(L); logfact_size = init_logfactorials_new(&logfact, logfact_size); cudaMemcpyToSymbol(LogFact_CONST, logfact, logfact_size * sizeof(double )); dim3 DimBlock(BLOCK, 1, 1); dim3 DimGrid(CEIL_DIV(size, BLOCK), 1, B); cudaCG_backward_kernel<<<DimGrid, DimBlock>>>(F, grad_F, grad_O, d_t_F, d_cumu_tm_F, d_cumu_tm_FF, L, llls, nllls, t_offsets, CG, CG_offsets); cudaDeviceSynchronize(); free(logfact); }
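
// Editor's sketch (not part of this file): one consistent way to build the
// CG_offsets array consumed above on the host. new_precomputeCG_kernel writes a
// block of (2l+1)*(2l2+1) coefficients per (l,l1,l2) triple starting at
// CG_offsets[lllidx], so an exclusive prefix sum over those block sizes matches
// that indexing (and gives the required length of nllls+1). The lll decoding
// mirrors the LLL2L / LLL2L2 macros. Function and variable names are illustrative.
#include <vector>

static std::vector<int> build_CG_offsets(const std::vector<int>& llls, int Lmax) {
  std::vector<int> offsets(llls.size() + 1, 0);  // length nllls + 1
  for (size_t i = 0; i < llls.size(); ++i) {
    const int lll = llls[i];
    const int l  = lll / ((Lmax + 1) * (Lmax + 1));  // LLL2L(lll, Lmax)
    const int l2 = lll % (Lmax + 1);                 // LLL2L2(lll, Lmax)
    // l1 = LLL2L1(lll, Lmax) is not needed for the block size.
    offsets[i + 1] = offsets[i] + (2 * l + 1) * (2 * l2 + 1);
  }
  return offsets;
}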
__device__ float square(const float a) { return a*a; } /* * Block size 1x128 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread * blockIdx.y determines pixel.y * * So each block does one output for some number of images and all the fliters. * * threadIdx.x determines img idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int imgsPerThread, int numFilters, bool checkCaseBounds> __global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y; const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += pxIdx * numImages + imgIdx; denoms += pxIdx * numImages + imgIdx; meanDiffs += imgIdx; target += pxIdx * numImages + imgIdx; float prod[numFilters][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * 128] = prod[f][i]; target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. 
* Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < 
imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } void convContrastNorm(cudamat* images, cudamat* meanDiffs, cudamat* denoms, cudamat* target, int numFilters, int sizeX, float addScale, float powScale) { int numImages = images->size[0]; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); //assert(meanDiffs.isSameDims(images)); //assert(!meanDiffs.isTrans()); //assert(!images.isTrans()); //assert(images.isContiguous()); //assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); //target.resize(images); //denoms.resize(images); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, addScale, powScale); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 3) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, true><<<blocks, 
threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 5) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 6) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 7) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 8) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numImages, sizeX, addScale, powScale); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 
4, 2, true>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, addScale, powScale); } } } getLastCudaError("convResponseNorm: kernel execution failed"); }
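
// Editor's sketch (not part of the library above): a single-(filter, pixel, image)
// CPU reference for what the kCNorm_* kernels compute, assuming the same
// (numFilters, imgPixels, numImages) layout with the image index fastest:
//   denom  = 1 + addScale * sum of meanDiffs^2 over a sizeX x sizeX window
//   target = img * denom^(-powScale)
// Names are illustrative; this is only meant as a readable restatement of the math.
#include <algorithm>
#include <cmath>

static void cnorm_reference_pixel(const float* imgs, const float* meanDiffs,
                                  float* denoms, float* target,
                                  int imgSize, int numImages,
                                  int sizeX, float addScale, float powScale,
                                  int f, int px, int py, int img) {
  const int imgPixels = imgSize * imgSize;
  const int startX = std::max(0, px - sizeX / 2);
  const int startY = std::max(0, py - sizeX / 2);
  const int endX = std::min(imgSize, px - sizeX / 2 + sizeX);
  const int endY = std::min(imgSize, py - sizeX / 2 + sizeX);
  float prod = 0.0f;
  for (int y = startY; y < endY; ++y) {
    for (int x = startX; x < endX; ++x) {
      const float d = meanDiffs[(f * imgPixels + y * imgSize + x) * numImages + img];
      prod += d * d;  // same as square(meanDiffs[...]) in the kernels
    }
  }
  const int outIdx = (f * imgPixels + py * imgSize + px) * numImages + img;
  const float denom = 1.0f + addScale * prod;
  denoms[outIdx] = denom;
  target[outIdx] = imgs[outIdx] * std::pow(denom, -powScale);
}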
#define MAX_K 4 #define MAX_BATCH_SIZE 32 #define MAX_N 12 void FFModel::group_by(const Tensor& input, const Tensor& assign, Tensor* outputs, int n, float alpha, const char* name) { Group_by* group_by = new Group_by(*this, input, assign, n, alpha, name); layers.push_back(group_by); for (int i = 0; i < n; i++) outputs[i] = group_by->outputs[i]; } Group_by::Group_by(FFModel& model, const Tensor& _input, const Tensor& _assign, int _n, float _alpha, const char* name) : Op(model, OP_GROUP_BY, name, _input, _assign), n(_n), alpha(_alpha), profiling(model.config.profiling) { // FIXME: For now, set upper limits Better: Do as follows, but memory is // assigned per block, so requires to check that // https://stackoverflow.com/questions/5531247/allocating-shared-memory/5531640#5531640 assert(n <= MAX_N && "Increase MAX_N in #define"); assert(inputs[1].adim[0] <= MAX_K && "Increase MAX_K in #define"); assert(inputs[0].adim[1] <= MAX_BATCH_SIZE && "Increase MAX_BATCH_SIZE in #define"); assert(_input.numDim == 2); // TODO: support dims > 2 assert(_input.numDim == 2); assert(_input.adim[1] == _assign.adim[1]); assert(n > 0); // List of outputs int k = _assign.adim[0]; for(int i = 0; i < n; i++) { outputs[i].numDim = 2; outputs[i].adim[0] = inputs[0].adim[0]; outputs[i].adim[1] = (int)ceil(alpha*k/n*inputs[0].adim[1]); } numWeights = 0; } void Group_by::create_weights(FFModel& model) { // Do nothing } void Group_by::create_output_and_partition(FFModel& model) { // Retrieve the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); // Can only partition over the sample dim assert(part_rect.hi[0] == part_rect.lo[0]); int k = inputs[1].adim[0]; const int dims[2] = {(int)ceil(alpha*k/n*inputs[0].adim[1]), inputs[0].adim[0]}; for(int i = 0; i < n; i++) { outputs[i] = model.create_tensor<2>(dims, DT_FLOAT, this); outputs[i].owner_op = this; outputs[i].owner_idx = i; } // Compute partition bound for input Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { model.create_disjoint_partition<2>( inputs[0], (IndexSpaceT<2>)task_is, input_lps[0], input_grad_lps[0]); } input_rect = runtime->get_index_partition_color_space( ctx, inputs[1].part.get_index_partition()); if (input_rect == part_rect) { input_lps[1] = inputs[1].part; input_grad_lps[1] = inputs[1].part_grad; } else { model.create_disjoint_partition<2>( inputs[1], (IndexSpaceT<2>)task_is, input_lps[1], input_grad_lps[1]); } } OpMeta* Group_by::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { Group_by* gb = (Group_by*) task->args; FFHandler handle = *((FFHandler*)task->local_args); GroupByMeta* m = new GroupByMeta(handle, gb->n); m->profiling = gb->profiling; return m; } void Group_by::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = 
ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(GROUP_BY_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Group_by)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } __global__ void gb_forward_kernel(const float* input, const int* exp_assign, float** outputs, int n, // num experts int k, // chosen experts float alpha, // factor additional memory assigned int batch_size, int data_dim) { __shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if(threadIdx.x == 0) { int exp_tensor_rows = ceil(alpha*k/n*batch_size); int expert_idx[MAX_N] = {0}; for(int i = 0; i < k*batch_size; i++) { // Get pointer to chosen expert predictions int expert = exp_assign[i]; if(expert_idx[expert] >= exp_tensor_rows) { // dropped sample chosen_exp_preds[i] = 0; continue; } chosen_exp_preds[i] = outputs[expert] + expert_idx[expert]*data_dim; expert_idx[expert]++; } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k*batch_size*data_dim) { if(chosen_exp_preds[i/data_dim] != 0) { float a = input[(i/(k*data_dim))*data_dim + i%data_dim]; chosen_exp_preds[i/data_dim][i%data_dim] = a; } } } __global__ void gb_backward_kernel(float* input_grad, const int* exp_assign, float** output_grads, int n, // num experts int k, // chosen experts float alpha, // factor additional memory assigned int batch_size, int data_dim) { __shared__ float* chosen_exp_grads[MAX_K*MAX_BATCH_SIZE]; // Get pred pointers, single thread if(blockIdx.x * blockDim.x + threadIdx.x == 0) { int exp_tensor_rows = ceil(alpha*k/n*batch_size); int expert_idx[MAX_N] = {0}; for(int i = 0; i < k*batch_size; i++) { // Get pointer to chosen expert predictions int expert = exp_assign[i]; if(expert_idx[expert] >= exp_tensor_rows) { // dropped sample chosen_exp_grads[i] = 0; continue; } chosen_exp_grads[i] = output_grads[expert] + expert_idx[expert]*data_dim; expert_idx[expert]++; } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k*batch_size*data_dim) { if(chosen_exp_grads[i/data_dim] != 0) { input_grad[(i/(k*data_dim))*data_dim + i%data_dim] = chosen_exp_grads[i/data_dim][i%data_dim]; } } } void Group_by::forward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { // Get n, alpha const Group_by* gb = (Group_by*) task->args; int n = gb->n; float alpha = gb->alpha; assert((int)regions.size() == n+2); assert((int)task->regions.size() == n+2); const GroupByMeta* m = *((GroupByMeta**)task->local_args); // get input and assign regions const AccessorRO<float, 2> acc_input(regions[0], FID_DATA); const AccessorRO<int, 2> acc_assign(regions[1], FID_DATA); Rect<2> rect_input = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<2> rect_assign = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); coord_t input_rows = rect_input.hi[1] - rect_input.lo[1] + 1; coord_t input_cols = rect_input.hi[0] - rect_input.lo[0] + 1; 
  assert(input_rows == rect_assign.hi[1] - rect_assign.lo[1] + 1);

  int k = rect_assign.hi[0] - rect_assign.lo[0] + 1;
  int batch_size = input_rows;
  int data_dim = input_cols;

  // get output
  float* outputs[n];
  //int exp_output_rows = (int)ceil(alpha*k/n*batch_size);
  for(int i = 0; i < n; i++) {
    Domain out_domain = runtime->get_index_space_domain(
        ctx, task->regions[i+2].region.get_index_space());
    outputs[i] = helperGetTensorPointerWO<float>(
        regions[i+2], task->regions[i+2], FID_DATA, ctx, runtime);
    //coord_t output_rows = out_domain.hi()[1] - out_domain.lo()[1] + 1;
    coord_t output_cols = out_domain.hi()[0] - out_domain.lo()[0] + 1;
    //assert((int)output_rows == exp_output_rows);
    assert(output_cols == input_cols);
  }

  // TODO: why is a cublas/cudnn stream needed here?
  cudaStream_t stream;
  checkCUDA(get_legion_stream(&stream));

  // call forward kernel
  cudaMemcpy(m->dev_region_ptrs, outputs, n*sizeof(float*), cudaMemcpyHostToDevice);

  gb_forward_kernel<<<GET_BLOCKS(batch_size*k*data_dim),
    min(CUDA_NUM_THREADS,(int)(batch_size*k*data_dim)), 0, stream>>>(
      acc_input.ptr(rect_input), acc_assign.ptr(rect_assign), m->dev_region_ptrs,
      n, k, alpha, batch_size, data_dim);
}


void Group_by::backward_task(const Task *task,
                             const std::vector<PhysicalRegion>& regions,
                             Context ctx, Runtime* runtime)
{
  // Get n, alpha
  const GroupByMeta* m = *((GroupByMeta**)task->local_args);
  const Group_by* gb = (Group_by*) task->args;
  int n = gb->n;
  float alpha = gb->alpha;

  assert((int)regions.size() == n+2);
  assert((int)task->regions.size() == n+2);

  // get input and assign regions
  const AccessorWO<float, 2> acc_input_grad(regions[0], FID_DATA);
  const AccessorRO<int, 2> acc_assign(regions[1], FID_DATA);

  Rect<2> rect_input_grad = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  Rect<2> rect_assign = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());

  coord_t input_rows = rect_input_grad.hi[1] - rect_input_grad.lo[1] + 1;
  coord_t input_cols = rect_input_grad.hi[0] - rect_input_grad.lo[0] + 1;
  assert(input_rows == rect_assign.hi[1] - rect_assign.lo[1] + 1);

  int k = rect_assign.hi[0] - rect_assign.lo[0] + 1;
  int batch_size = input_rows;
  int data_dim = input_cols;

  // get output gradients
  float* output_grads[n];
  //int exp_output_rows = (int)ceil(alpha*k/n*batch_size);
  for(int i = 0; i < n; i++) {
    Domain out_domain = runtime->get_index_space_domain(
        ctx, task->regions[i+2].region.get_index_space());
    output_grads[i] = helperGetTensorPointerRW<float>(
        regions[i+2], task->regions[i+2], FID_DATA, ctx, runtime);
    //coord_t output_rows = out_domain.hi()[1] - out_domain.lo()[1] + 1;
    coord_t output_cols = out_domain.hi()[0] - out_domain.lo()[0] + 1;
    //assert((int)output_rows == exp_output_rows);
    assert(output_cols == input_cols);
  }

  // TODO: why is a cublas/cudnn stream needed here?
  cudaStream_t stream;
  checkCUDA(get_legion_stream(&stream));

  // call backward kernel
  cudaMemcpy(m->dev_region_ptrs, output_grads, n*sizeof(float*), cudaMemcpyHostToDevice);

  gb_backward_kernel<<<GET_BLOCKS(batch_size*k*data_dim),
    min(CUDA_NUM_THREADS,(int)(batch_size*k*data_dim)), 0, stream>>>(
      acc_input_grad.ptr(rect_input_grad), acc_assign.ptr(rect_assign), m->dev_region_ptrs,
      n, k, alpha, batch_size, data_dim);
}


void Group_by::forward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Domain domain = runtime->get_index_space_domain(ctx, task_is);
  switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      Rect<DIM> rect = domain; \
      int idx = 0; \
      for (PointInRectIterator<DIM> it(rect); it(); it++) { \
        OpMeta* mp = meta[idx++]; \
        argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
      } \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      assert(false);
  }

  IndexLauncher launcher(GROUP_BY_FWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Group_by)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));

  // data
  launcher.add_region_requirement(
    RegionRequirement(input_lps[0], 0/*projection id*/,
      READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);

  // assign
  launcher.add_region_requirement(
    RegionRequirement(input_lps[1], 0/*projection id*/,
      READ_ONLY, EXCLUSIVE, inputs[1].region));
  launcher.add_field(1, FID_DATA);

  // output
  for(int i = 0; i < n; i++) {
    launcher.add_region_requirement(
      RegionRequirement(outputs[i].part, 0/*projection id*/,
        WRITE_ONLY, EXCLUSIVE, outputs[i].region));
    launcher.add_field(i+2, FID_DATA);
  }

  runtime->execute_index_space(ctx, launcher);
}


void Group_by::backward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Domain domain = runtime->get_index_space_domain(ctx, task_is);
  switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      Rect<DIM> rect = domain; \
      int idx = 0; \
      for (PointInRectIterator<DIM> it(rect); it(); it++) { \
        OpMeta* mp = meta[idx++]; \
        argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
      } \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      assert(false);
  }

  IndexLauncher launcher(GROUP_BY_BWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Group_by)), argmap,
                         Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
                         FFConfig::get_hash_id(std::string(name)));

  // input_grad
  launcher.add_region_requirement(
    RegionRequirement(input_grad_lps[0], 0/*projection id*/,
      WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
  launcher.add_field(0, FID_DATA);

  // assign
  launcher.add_region_requirement(
    RegionRequirement(input_lps[1], 0/*projection id*/,
      READ_ONLY, EXCLUSIVE, inputs[1].region));
  launcher.add_field(1, FID_DATA);

  // output grad
  for(int i = 0; i < n; i++) {
    launcher.add_region_requirement(
      RegionRequirement(outputs[i].part_grad, 0/*projection id*/,
        WRITE_ONLY, EXCLUSIVE, outputs[i].region_grad));
    launcher.add_field(i+2, FID_DATA);
  }

  runtime->execute_index_space(ctx, launcher);
}


GroupByMeta::GroupByMeta(FFHandler handler, int n)
: OpMeta(handler)
{
  checkCUDA(cudaMalloc(&dev_region_ptrs, n*sizeof(float*)));
}

GroupByMeta::~GroupByMeta(void)
{
  // free the device buffer itself, not the address of the member pointer
  checkCUDA(cudaFree(dev_region_ptrs));
}


bool Group_by::measure_operator_cost(Simulator* sim, const ParallelConfig& pc,
                                     CostMetrics& cost_metrics)
{
  //TODO: implement
  cost_metrics.forward_time = 0.0f;
  cost_metrics.backward_time = 0.0f;
  cost_metrics.memory_requirement = 0;
  return false;
}
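// Illustration: the kernels above give each expert a fixed per-batch capacity of
// ceil(alpha*k/n*batch_size) rows and silently drop assignments beyond that capacity
// (chosen_exp_preds[i] = 0 in gb_forward_kernel). The following standalone host-side
// sketch (hypothetical helper names, not part of the FlexFlow API) reproduces that
// bookkeeping so the sizing and dropping rule can be checked on the CPU.
#include <cmath>
#include <cstdio>
#include <vector>

// Returns, for each of the k*batch_size assignments, the row index inside the chosen
// expert's output tensor, or -1 if the sample is dropped because the expert is full.
static std::vector<int> group_by_rows_sketch(const std::vector<int>& exp_assign,
                                             int n, int k, float alpha, int batch_size)
{
  const int capacity = (int)std::ceil(alpha * k / n * batch_size); // rows per expert tensor
  std::vector<int> next_row(n, 0);                                 // per-expert fill counter
  std::vector<int> row(exp_assign.size(), -1);
  for (size_t i = 0; i < exp_assign.size(); i++) {
    const int expert = exp_assign[i];
    if (next_row[expert] < capacity)     // room left: claim the next row of this expert
      row[i] = next_row[expert]++;
    // else: dropped, mirrors the "dropped sample" branch in gb_forward_kernel
  }
  return row;
}

int main()
{
  // 4 samples, k = 2 experts chosen per sample, n = 4 experts, alpha = 1.0f
  // -> capacity = ceil(1.0 * 2/4 * 4) = 2 rows per expert; expert 0 is over-subscribed
  const std::vector<int> assign = {0, 1, 0, 2, 0, 3, 1, 2};
  const std::vector<int> rows =
      group_by_rows_sketch(assign, /*n=*/4, /*k=*/2, /*alpha=*/1.0f, /*batch_size=*/4);
  for (size_t i = 0; i < rows.size(); i++)
    printf("assignment %zu -> expert %d, row %d\n", i, assign[i], rows[i]);
  return 0;
}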
#include "cudpp_mergesort.h"
#include <cudpp_globals.h>
#include <cudpp_util.h>
#include "sharedmem.h"
#include "cta/mergesort_cta.cuh"

/**
 * @file
 * mergesort_kernel.cu
 *
 * @brief CUDPP kernel-level merge sorting routines
 */

/** \addtogroup cudpp_kernel
 * @{
 */

/** @name MergeSort Functions
 * @{
 */

typedef unsigned int uint;

/** @brief Copies unused portions of arrays in our ping-pong strategy
 * @param[in] A_keys_dev, A_vals_dev The keys and values we will be copying
 * @param[out] A_keys_out_dev, A_vals_out_dev The keys and values arrays we will copy to
 * @param[in] offset The offset we are starting to copy from
 * @param[in] numElementsToCopy The number of elements we copy starting from the offset
 **/
template <class T>
__global__ void simpleCopy(T* A_keys_dev, unsigned int* A_vals_dev,
                           T* A_keys_out_dev, unsigned int* A_vals_out_dev,
                           int offset, int numElementsToCopy)
{
  int myId = blockIdx.x*blockDim.x + threadIdx.x;
  if(myId >= numElementsToCopy)
    return;
  A_keys_out_dev[offset+myId] = A_keys_dev[offset+myId];
  A_vals_out_dev[offset+myId] = A_vals_dev[offset+myId];
}

/** @brief Sorts blocks of data of size blockSize
 * @param[in,out] A_keys keys to be sorted
 * @param[in,out] A_values values associated with the keys
 * @param[in] blockSize Size of the chunks being sorted
 * @param[in] totalSize Size of the entire array
 **/
template<class T, int depth>
__global__ void blockWiseSort(T *A_keys, unsigned int* A_values,
                              int blockSize, size_t totalSize)
{
  //load into registers
  T myKey[depth];
  unsigned int myValue[depth];
  unsigned int myAddress[depth];

#if (__CUDA_ARCH__ >= 200)
  extern __shared__ char shared[];
#else
  extern __shared__ unsigned int shared[];
#endif

  //scratchPad is for stuffing keys
  T* scratchPad = (T*) shared;
  unsigned int* addressPad = (unsigned int*) &scratchPad[BLOCKSORT_SIZE];

  int bid = blockIdx.x;
  int tid = threadIdx.x;
  T MAX_VAL = getMax<T>();

  //Grab values in coalesced fashion
  //out of order, but since no sorting has been done, doesn't matter
  for(int i = 0; i < depth; i++) {
    myKey[i]   = ((bid*blockSize+i*blockDim.x + tid) < totalSize ?
                  A_keys [bid*blockSize+i*blockDim.x + tid] : MAX_VAL);
    myValue[i] = ((bid*blockSize+i*blockDim.x + tid) < totalSize ?
A_values[bid*blockSize+i*blockDim.x + tid] : 0); } //Register Sort - Begin compareSwapVal<T>(myKey[0], myKey[1], myValue[0], myValue[1]); compareSwapVal<T>(myKey[1], myKey[2], myValue[1], myValue[2]); compareSwapVal<T>(myKey[2], myKey[3], myValue[2], myValue[3]); compareSwapVal<T>(myKey[3], myKey[4], myValue[3], myValue[4]); compareSwapVal<T>(myKey[4], myKey[5], myValue[4], myValue[5]); compareSwapVal<T>(myKey[5], myKey[6], myValue[5], myValue[6]); compareSwapVal<T>(myKey[6], myKey[7], myValue[6], myValue[7]); compareSwapVal<T>(myKey[0], myKey[1], myValue[0], myValue[1]); compareSwapVal<T>(myKey[1], myKey[2], myValue[1], myValue[2]); compareSwapVal<T>(myKey[2], myKey[3], myValue[2], myValue[3]); compareSwapVal<T>(myKey[3], myKey[4], myValue[3], myValue[4]); compareSwapVal<T>(myKey[4], myKey[5], myValue[4], myValue[5]); compareSwapVal<T>(myKey[5], myKey[6], myValue[5], myValue[6]); compareSwapVal<T>(myKey[0], myKey[1], myValue[0], myValue[1]); compareSwapVal<T>(myKey[1], myKey[2], myValue[1], myValue[2]); compareSwapVal<T>(myKey[2], myKey[3], myValue[2], myValue[3]); compareSwapVal<T>(myKey[3], myKey[4], myValue[3], myValue[4]); compareSwapVal<T>(myKey[4], myKey[5], myValue[4], myValue[5]); compareSwapVal<T>(myKey[0], myKey[1], myValue[0], myValue[1]); compareSwapVal<T>(myKey[1], myKey[2], myValue[1], myValue[2]); compareSwapVal<T>(myKey[2], myKey[3], myValue[2], myValue[3]); compareSwapVal<T>(myKey[3], myKey[4], myValue[3], myValue[4]); compareSwapVal<T>(myKey[0], myKey[1], myValue[0], myValue[1]); compareSwapVal<T>(myKey[1], myKey[2], myValue[1], myValue[2]); compareSwapVal<T>(myKey[2], myKey[3], myValue[2], myValue[3]); compareSwapVal<T>(myKey[0], myKey[1], myValue[0], myValue[1]); compareSwapVal<T>(myKey[1], myKey[2], myValue[1], myValue[2]); compareSwapVal<T>(myKey[0], myKey[1], myValue[0], myValue[1]); //Register Sort - End //Manually unroll save for performance //TODO: Use template unrolling? scratchPad[tid*depth ] = myKey[0]; scratchPad[tid*depth+1] = myKey[1]; scratchPad[tid*depth+2] = myKey[2]; scratchPad[tid*depth+3] = myKey[3]; scratchPad[tid*depth+4] = myKey[4]; scratchPad[tid*depth+5] = myKey[5]; scratchPad[tid*depth+6] = myKey[6]; scratchPad[tid*depth+7] = myKey[7]; __syncthreads(); //now we merge unsigned int j; unsigned int mult = 1; unsigned int steps = 128; //Seven Merge steps (2^7) while (mult < steps) { unsigned int first, last; //Determine the search space for each thread first = (tid/(mult*2))*depth*2*mult; unsigned int midPoint = first+mult*depth; //If you are the "right" block or "left" block unsigned int addPart = threadIdx.x%(mult<<1) >= mult ? 
1 : 0; //if "right" block search in "left", otherwise search in "right" if(addPart == 0) first += depth*mult; last = first+depth*mult-1; j = (first+last)/2; unsigned int startAddress = threadIdx.x*depth-midPoint; unsigned int range = last-first; T cmpValue; __syncthreads(); //Binary Search switch(range) { case 1023: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 256, addPart); case 511: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 128, addPart); case 255: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 64, addPart); case 127: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 32, addPart); case 63: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 16, addPart); case 31: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 8, addPart); case 15: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 4, addPart); case 7: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 2, addPart); case 3: bin_search_block<T, depth>(cmpValue, myKey[0], scratchPad, j, 1, addPart); } cmpValue = scratchPad[j]; //Binary search done, some post search correction if(cmpValue < myKey[0] || (cmpValue == myKey[0] && addPart == 1)) cmpValue = scratchPad[++j]; if((cmpValue < myKey[0] || (cmpValue == myKey[0] && addPart == 1)) && j == last) j++; //Save first address, then perform linear searches __syncthreads(); myAddress[0] = j + startAddress; addressPad[myAddress[0]] = myValue[0]; //Save address in new slot, unless we want to ping-pong in shared memory need extra registers lin_search_block<T, depth>(cmpValue, myKey[1], myAddress[1], scratchPad, addressPad, j, 1, last, startAddress, addPart); addressPad[myAddress[1]] = myValue[1]; lin_search_block<T, depth>(cmpValue, myKey[2], myAddress[2], scratchPad, addressPad, j, 2, last, startAddress, addPart); addressPad[myAddress[2]] = myValue[2]; lin_search_block<T, depth>(cmpValue, myKey[3], myAddress[3], scratchPad, addressPad, j, 3, last, startAddress, addPart); addressPad[myAddress[3]] = myValue[3]; lin_search_block<T, depth>(cmpValue, myKey[4], myAddress[4], scratchPad, addressPad, j, 4, last, startAddress, addPart); addressPad[myAddress[4]] = myValue[4]; lin_search_block<T, depth>(cmpValue, myKey[5], myAddress[5], scratchPad, addressPad, j, 5, last, startAddress, addPart); addressPad[myAddress[5]] = myValue[5]; lin_search_block<T, depth>(cmpValue, myKey[6], myAddress[6], scratchPad, addressPad, j, 6, last, startAddress, addPart); addressPad[myAddress[6]] = myValue[6]; lin_search_block<T, depth>(cmpValue, myKey[7], myAddress[7], scratchPad, addressPad, j, 7, last, startAddress, addPart); addressPad[myAddress[7]] = myValue[7]; //Save Key values in correct addresses -- Unrolled for performance __syncthreads(); scratchPad[myAddress[0]] = myKey[0]; scratchPad[myAddress[1]] = myKey[1]; scratchPad[myAddress[2]] = myKey[2]; scratchPad[myAddress[3]] = myKey[3]; scratchPad[myAddress[4]] = myKey[4]; scratchPad[myAddress[5]] = myKey[5]; scratchPad[myAddress[6]] = myKey[6]; scratchPad[myAddress[7]] = myKey[7]; __syncthreads(); if(mult < steps) { __syncthreads(); //Grab new key values -- Unrolled for performance myKey[0] = scratchPad[tid*depth]; myKey[1] = scratchPad[tid*depth+1]; myKey[2] = scratchPad[tid*depth+2]; myKey[3] = scratchPad[tid*depth+3]; myKey[4] = scratchPad[tid*depth+4]; myKey[5] = scratchPad[tid*depth+5]; myKey[6] = scratchPad[tid*depth+6]; myKey[7] = scratchPad[tid*depth+7]; myValue[0] = addressPad[tid*depth]; myValue[1] = addressPad[tid*depth+1]; myValue[2] = 
addressPad[tid*depth+2]; myValue[3] = addressPad[tid*depth+3]; myValue[4] = addressPad[tid*depth+4]; myValue[5] = addressPad[tid*depth+5]; myValue[6] = addressPad[tid*depth+6]; myValue[7] = addressPad[tid*depth+7]; } __syncthreads(); mult*=2; } __syncthreads(); //Coalesced Write back to Memory #pragma unroll for(int i=tid;i<blockSize && bid*blockSize+i < totalSize ;i+= CTA_BLOCK) { A_keys[bid*blockSize+i] = scratchPad[i]; A_values[bid*blockSize+i] = addressPad[i]; } } /** @brief Merges the indices for the "lower" block (left block) * * Utilizes a "ping-pong" strategy * @param[in] A_keys Global array of keys to be merged * @param[in] A_values Global array of values to be merged * @param[out] A_keys_out Resulting array of keys merged * @param[out] A_values_out Resulting array of values merged * @param[in] sizePerPartition Size of each partition being merged * @param[in] size Size of total Array being sorted **/ template<class T, int depth> __global__ void simpleMerge_lower(T *A_keys, unsigned int* A_values, T *A_keys_out, unsigned int *A_values_out, int sizePerPartition, int size) { //each block will be responsible for a submerge int myId = blockIdx.x; int tid = threadIdx.x; int myStartIdxA = 2*myId*sizePerPartition; int myStartIdxB = (2*myId+1)*sizePerPartition; int myStartIdxC = myStartIdxA; int partitionSizeB = sizePerPartition < (size - myStartIdxB) ? sizePerPartition : size - myStartIdxB; T MAX_VAL = getMax<T>(); T MIN_VAL = getMin<T>(); unsigned int UMAX_VAL = getMax<unsigned int>(); //__shared__ T BKeys[INTERSECT_B_BLOCK_SIZE_simple+2]; #if (__CUDA_ARCH__ >= 200) extern __shared__ char shared[]; #else extern __shared__ unsigned int shared[]; #endif T* BKeys = (T*) shared; T* BMax = (T*) &BKeys[INTERSECT_B_BLOCK_SIZE_simple]; T localMaxB, localMaxA, localMinB; int globalCAddress; int index, bIndex = 0, aIndex = 0; bool breakout = false; T myKey[depth]; unsigned int myValue[depth]; //Load Registers if(aIndex + INTERSECT_A_BLOCK_SIZE_simple < sizePerPartition) { #pragma unroll for(int i = 0;i < depth; i++) { myKey[i] = A_keys [myStartIdxA + aIndex + depth*tid + i]; myValue[i] = A_values[myStartIdxA + aIndex + depth*tid + i]; } } else { #pragma unroll for(int i = 0;i < depth; i++) { myKey[i] = (aIndex+depth*tid + i < sizePerPartition ? A_keys [myStartIdxA + aIndex+ depth*tid + i] : MAX_VAL); myValue[i] = (aIndex+depth*tid + i < sizePerPartition ? A_values[myStartIdxA + aIndex+ depth*tid + i] : UMAX_VAL); } } //load smem values if(bIndex + INTERSECT_B_BLOCK_SIZE_simple < partitionSizeB) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = (bIndex + bi < partitionSizeB ? A_keys[myStartIdxB + bIndex + bi] : MAX_VAL);} } //Save localMaxA and localMaxB if(tid == CTASIZE_simple-1) BMax[1] = myKey[depth-1]; if(tid == 0) BMax[0] = (bIndex + INTERSECT_B_BLOCK_SIZE_simple - 1 < partitionSizeB ? 
A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_simple - 1] : MAX_VAL); __syncthreads(); //Maximum values for B and A in this stream localMinB = MIN_VAL; localMaxB = BMax[0]; localMaxA = BMax[1]; do { __syncthreads(); globalCAddress = myStartIdxC + bIndex + aIndex + tid*depth; index = 0; if((!(myKey[depth-1] < localMinB || myKey[0] > localMaxB) || (bIndex+INTERSECT_B_BLOCK_SIZE_simple) >= (partitionSizeB-1)) && (aIndex + tid*depth) < sizePerPartition) { binSearch_whole_lower<T>(BKeys, index, myKey[0]); T cmpValue = BKeys[index]; if(cmpValue < myKey[0] && index < INTERSECT_B_BLOCK_SIZE_simple) cmpValue = BKeys[++index]; index = (cmpValue < myKey[0] ? index+1 : index); //Save Key-Value Pair if((myKey[0] < localMaxB && myKey[0] > localMinB) || (bIndex+index) >= (partitionSizeB) || (index > 0 && index <INTERSECT_B_BLOCK_SIZE_simple)) { A_keys_out [globalCAddress + index] = myKey[0]; A_values_out[globalCAddress + index] = myValue[0]; } while(BKeys[index] < myKey[1] && index < INTERSECT_B_BLOCK_SIZE_simple) index++; //save Key-Value Pair if(((myKey[1] <= localMaxB && myKey[1] > localMinB) || bIndex+index >= (partitionSizeB)) && (aIndex+tid*depth+1< sizePerPartition)) { A_keys_out[globalCAddress+index+1] = myKey[1]; A_values_out[globalCAddress+index+1] = myValue[1]; } } __syncthreads(); if((localMaxA <= localMaxB || (bIndex+INTERSECT_B_BLOCK_SIZE_simple) >= partitionSizeB) && (aIndex+INTERSECT_A_BLOCK_SIZE_simple) < sizePerPartition) { aIndex += INTERSECT_A_BLOCK_SIZE_simple; if(aIndex + INTERSECT_A_BLOCK_SIZE_simple < sizePerPartition) { for(int i = 0;i < depth; i++) { myKey[i] = A_keys [myStartIdxA + aIndex+ depth*tid + i]; myValue[i] = A_values[myStartIdxA + aIndex + depth*tid + i]; } } else { for(int i = 0;i < depth; i++) { myKey[i] = (aIndex+depth*tid + i < sizePerPartition ? A_keys[myStartIdxA + aIndex+ depth*tid + i] : MAX_VAL); myValue[i] = (aIndex+depth*tid + i < sizePerPartition ? A_values[myStartIdxA + aIndex+ depth*tid + i] : UMAX_VAL); } } if(tid == CTASIZE_simple-1) BMax[1] = myKey[depth-1]; //localMaxA for all threads } else if(localMaxB < localMaxA && (bIndex+INTERSECT_B_BLOCK_SIZE_simple) < partitionSizeB) { localMinB = localMaxB; //Use INT_MAX as an "invalid/no-value" type in case the streaming window cannot be filled bIndex += INTERSECT_B_BLOCK_SIZE_simple; if(bIndex + INTERSECT_B_BLOCK_SIZE_simple < partitionSizeB) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = (bIndex + bi < partitionSizeB ? A_keys[myStartIdxB + bIndex + bi] : MAX_VAL); } } if(tid == 0) BMax[0] = (bIndex + INTERSECT_B_BLOCK_SIZE_simple < partitionSizeB ? 
A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_simple] : MAX_VAL); } else breakout = true; __syncthreads(); localMaxB = BMax[0]; localMaxA = BMax[1]; } while(!breakout); } /** @brief Merges the indices for the "upper" block (right block) * * Utilizes a "ping-pong" strategy * @param[in] A_keys Global array of keys to be merged * @param[in] A_values Global array of values to be merged * @param[out] A_keys_out Resulting array of keys merged * @param[out] A_values_out Resulting array of values merged * @param[in] sizePerPartition Size of each partition being merged * @param[in] size Size of total Array being sorted **/ template<class T, int depth> __global__ void simpleMerge_higher(T *A_keys, unsigned int* A_values, T* A_keys_out, unsigned int *A_values_out, int sizePerPartition, int size) { T MAX_VAL = getMax<T>(); T MIN_VAL = getMin<T>(); unsigned int UMAX_VAL = getMax<unsigned int>(); int myId = blockIdx.x; int myStartIdxB = 2*myId*sizePerPartition; int myStartIdxA = (2*myId+1)*sizePerPartition; int myStartIdxC = myStartIdxB; T nextMaxB, nextMaxA, localMaxB, localMinB; int partitionSizeA = (sizePerPartition < (size - myStartIdxA) ? sizePerPartition : size - myStartIdxA); int index, bIndex = 0, aIndex = 0; #if (__CUDA_ARCH__ >= 200) extern __shared__ char shared[]; #else extern __shared__ unsigned int shared[]; #endif T* BKeys = (T*) shared; //__shared__ T BKeys[INTERSECT_B_BLOCK_SIZE_simple+3]; T* BMax = (T*) &BKeys[INTERSECT_B_BLOCK_SIZE_simple]; bool breakout = false; int tid = threadIdx.x; T myKey[depth]; unsigned int myValue[depth]; #pragma unroll for(int i =0; i <depth; i++) { myKey[i] = (aIndex+depth*tid + i < partitionSizeA ? A_keys [myStartIdxA + aIndex+depth*tid+i] : MAX_VAL); myValue[i] = (aIndex+depth*tid + i < partitionSizeA ? A_values[myStartIdxA + aIndex+depth*tid+i] : UMAX_VAL); } if(bIndex + INTERSECT_B_BLOCK_SIZE_simple < sizePerPartition) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = (bIndex + bi < sizePerPartition ? A_keys[myStartIdxB + bIndex + bi] : MAX_VAL); } } if(tid == CTASIZE_simple-1) { BMax[0] = (bIndex + INTERSECT_B_BLOCK_SIZE_simple < sizePerPartition ? A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_simple] : MAX_VAL); BMax[1] = (aIndex + INTERSECT_A_BLOCK_SIZE_simple < partitionSizeA ? A_keys[myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_simple] : A_keys[myStartIdxA + partitionSizeA-1]+1); } __syncthreads(); localMinB = MIN_VAL; localMaxB = BKeys[INTERSECT_B_BLOCK_SIZE_simple-1]; nextMaxB = BMax[0]; nextMaxA = BMax[1]; int globalCAddress; do { __syncthreads(); index = 0; globalCAddress = myStartIdxC + bIndex + aIndex + depth*tid; //if(myKey[0] >= DVAL1 && myKey[0] <= DVAL2 && sizePerPartition == 2048) // printf("higher myKey0 %u %d %d\n", myKey[0], globalCAddress + index, aIndex+tid*depth); if((myKey[0] < nextMaxB && myKey[depth-1] >= localMinB || (bIndex+INTERSECT_B_BLOCK_SIZE_simple) >= sizePerPartition) && (aIndex+depth*tid < partitionSizeA)) { binSearch_whole_higher(BKeys, index, myKey[0]); T cmpValue = BKeys[index]; if(cmpValue <= myKey[0] && index < INTERSECT_B_BLOCK_SIZE_simple) cmpValue = BKeys[++index]; index = (cmpValue <= myKey[0] ? 
index + 1 : index); //End Binary Search //binary search done for first element in our set (A_0) if(myKey[0] >= localMinB) { A_keys_out[globalCAddress+index] = myKey[0]; A_values_out[globalCAddress+index] = myValue[0]; } while(BKeys[index] <= myKey[1] && index < INTERSECT_B_BLOCK_SIZE_simple ) index++; //Save Key-Value Pair if(((myKey[1] < nextMaxB && myKey[1] >= localMinB) || bIndex+index >=sizePerPartition) && (aIndex+depth*tid+1 < partitionSizeA)) { A_keys_out[globalCAddress + index + 1] = myKey[1]; A_values_out[globalCAddress + index + 1] = myValue[1]; } } // if(threadIdx.x == blockDim.x - 1) { *lastAIndex = index; } __syncthreads(); if((nextMaxA <= nextMaxB || (bIndex+INTERSECT_B_BLOCK_SIZE_simple) >= sizePerPartition ) && (aIndex+INTERSECT_A_BLOCK_SIZE_simple)< partitionSizeA) { aIndex += INTERSECT_A_BLOCK_SIZE_simple; //Use INT_MAX-1 as an "invalid/no-value" type in case we are out of values to check #pragma unroll for(int i=0;i <depth;i++) { myKey[i] = (aIndex+depth*tid+i < partitionSizeA ? A_keys[myStartIdxA + aIndex + depth * tid + i] : MAX_VAL); myValue[i] = (aIndex+depth*tid+i < partitionSizeA ? A_values[myStartIdxA + aIndex + depth * tid + i] : UMAX_VAL); } if(tid == CTASIZE_simple-1) { BMax[1] = (aIndex + INTERSECT_A_BLOCK_SIZE_simple < partitionSizeA ? A_keys[myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_simple] : A_keys[myStartIdxA + partitionSizeA - 1] + 1); BMax[2] = myKey[depth-1]; } } else if(nextMaxB <= nextMaxA && (bIndex+INTERSECT_B_BLOCK_SIZE_simple) < sizePerPartition) { localMinB = localMaxB; //Use INT_MAX as an "invalid/no-value" type in case the streaming window cannot be filled bIndex += INTERSECT_B_BLOCK_SIZE_simple; if(bIndex + INTERSECT_B_BLOCK_SIZE_simple < sizePerPartition) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_simple/CTASIZE_simple; i++, bi+=CTASIZE_simple) { BKeys[bi] = (bIndex + bi < sizePerPartition ? A_keys[myStartIdxB + bIndex + bi] : MAX_VAL); } } if(tid == 0) BMax[0] = (bIndex + INTERSECT_B_BLOCK_SIZE_simple < sizePerPartition ? 
                   A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_simple] : MAX_VAL);
    }
    else
      breakout = true;

    __syncthreads();

    //For each thread grab your value ranges for B and A
    //These will look at the end of our window, and the beginning of the next window for A and B
    //We make decisions on whether to advance a window, or save our merged value based on these
    nextMaxB = BMax[0];
    nextMaxA = BMax[1];
    localMaxB = BKeys[INTERSECT_B_BLOCK_SIZE_simple-1];
  } while(!breakout);
}

/** @brief Finds the partition boundaries used by the multi-block merge
 *
 * Each thread locates the beginning and size of one sub-partition in A as well as
 * the matching range in the neighboring partition B
 * @param[in] A Global array of keys
 * @param[in] splitsPP Number of sub-partitions (splits) per partition
 * @param[in] numPartitions number of partitions being considered
 * @param[in] partitionSize Size of each partition being considered
 * @param[out] partitionBeginA Where each partition/subpartition will begin in A
 * @param[out] partitionSizesA Size of each partition/subpartition in A
 * @param[in] sizeA Size of the entire array
 **/
template<class T>
__global__ void findMultiPartitions(T *A, int splitsPP, int numPartitions, int partitionSize,
                                    int* partitionBeginA, int* partitionSizesA, int sizeA)
{
  T MY_INVALID = getMax<T>();

  int myId = threadIdx.x + blockIdx.x*blockDim.x;
  if (myId >= (numPartitions*splitsPP)/2)
    return;

  int myStartA, myEndA;
  int testIdx;
  int subPartitionSize = partitionSize/splitsPP;
  int myPartitionId = myId/splitsPP;
  int mySubPartitionId = myId%splitsPP;
  int saveId = 2*(myPartitionId*splitsPP) + mySubPartitionId;

  // we are at the beginning of a partition
  myStartA = 2*(myPartitionId)*partitionSize + (mySubPartitionId)*subPartitionSize;
  myEndA = myStartA + (subPartitionSize)-1;

  T mySample = A[myStartA];
  T nextSample, testSample, myStartSample, myEndSample;

  if(mySubPartitionId != 0)
  {
    //need to ensure that we don't start in between duplicates
    testSample = (myId == 0 ? MY_INVALID : A[myStartA-1]);
    int count = 1;
    testIdx = myStartA;

    // we have sampled in the middle of a repeated sequence; search until we are at a new sequence
    while(testSample == mySample && (testIdx-count) >= 2*(myPartitionId)*partitionSize)
      testSample = A[testIdx - (count++)];
    myStartA = (testIdx - count+1);
  }
  partitionBeginA[saveId] = myStartA; //partitionBegin found for first set
  myStartSample = mySample;

  mySample = A[myEndA];
  int count;
  if(mySubPartitionId != splitsPP-1 )
  {
    //need to ensure that we don't end in between duplicates
    testSample = (A[myEndA+1]);
    count = 1;
    testIdx = myEndA;
    while(testSample == mySample && (testIdx+count) < (2*myPartitionId+1)*partitionSize )
      testSample = A[testIdx + (count++)];
    myEndA = myEndA+count-1;
    myEndSample = A[(myEndA < (2*myPartitionId+1)*partitionSize && myEndA < sizeA) ? myEndA+1 : sizeA-1];
  }
  else
  {
    myEndA = (2*myPartitionId)*partitionSize + partitionSize-1;
    myEndSample = A[myEndA]; //<---Start Sample
  }
  partitionSizesA[saveId] = myEndA-myStartA + 1;

  //Now that we have found our range for "A" we search our neighboring partition "B" for its corresponding range
  //Therefore partitions [0] will match with partitions [splitsPP] [1] with [splitsPP + 1] ....
[splitsPP-1] [2*splitsPP-1] int myStartRange = (2*myPartitionId)*partitionSize + partitionSize; int myEndRange = min(myStartRange + partitionSize, sizeA); //search for myStartSample in between range int first = myStartRange; int last = myEndRange; int mid = (first + last)/2; testSample = A[mid]; while(testSample != myStartSample) { if(testSample < myStartSample) first = mid; else last = mid; if(testSample > myEndSample) myEndRange = mid; mid = (first+last)/2; testSample = A[mid]; if(mid == last || mid == first ) break; } while (testSample > myStartSample && mid > myStartRange) testSample = A[--mid]; nextSample = (mid > myStartRange ? A[mid] : MY_INVALID); if(testSample == nextSample) { while(testSample == nextSample && mid > myStartRange) testSample = A[--mid]; } myStartA = mid; first = myStartA; last = myEndRange; mid = (first + last)/2; testSample = A[mid]; while(testSample != myEndSample) { if(testSample <= myEndSample) first = mid; else last = mid; mid = (first+last)/2; testSample = A[mid]; if(mid == last || mid == first ) break; } if(testSample <= myEndSample) mid++; nextSample = (mid < myEndRange ? A[mid] : MY_INVALID); while (myEndSample >= nextSample && mid < myEndRange) nextSample = A[++mid]; myEndA = mid; if(mySubPartitionId % splitsPP== 0) myStartA = (2*myPartitionId)*partitionSize + partitionSize; if(mySubPartitionId % splitsPP == splitsPP-1) myEndA = (2*myPartitionId)*partitionSize + 2*partitionSize; if(myEndA > sizeA) myEndA = sizeA; partitionBeginA[saveId + splitsPP] = myStartA; partitionSizesA[saveId + splitsPP] = myEndA-myStartA; } /** @brief Blocks cooperatively Merge two partitions for the indices in the "lower" block (left block) * * Utilizes a "ping-pong" strategy * @param[out] A_keys_out Resulting array of keys merged * @param[out] A_vals_out Resulting array of values merged * @param[in] A_keys Global array of keys to be merged * @param[in] A_vals Global array of values to be merged * @param[in] subPartitions Number of blocks working on a partition (number of sub-partitions) * @param[in] numBlocks * @param[in] partitionBeginA Partition starting points decided by function findMultiPartitions * @param[in] partitionSizeA Partition sizes decided by function findMultiPartitions * @param[in] entirePartitionSize The size of an entire partition (before it is split up) * @param[in] sizeA The total size of our array **/ template<class T, int depth> __global__ void mergeMulti_lower(T *A_keys_out, unsigned int* A_vals_out, T *A_keys, unsigned int *A_vals, int subPartitions, int numBlocks, int *partitionBeginA, int *partitionSizeA, int entirePartitionSize, int sizeA) { T MAX_VAL = getMax<T>(); T MIN_VAL = getMin<T>(); unsigned int UMAX_VAL = getMax<unsigned int>(); int myId = blockIdx.x; int tid = threadIdx.x; int myStartId = (myId%subPartitions) + 2*(myId/subPartitions)*subPartitions; int myStartIdxA = partitionBeginA[myStartId]; int myStartIdxB = partitionBeginA[myStartId+subPartitions]; int localAPartSize = partitionSizeA[myStartId]; int localBPartSize = partitionSizeA[myStartId+subPartitions]; int myStartIdxC; myStartIdxC = myStartIdxA + myStartIdxB - ((myStartId+subPartitions)/subPartitions)*entirePartitionSize; int partitionEndA = 2*(myId/subPartitions)*entirePartitionSize + entirePartitionSize < sizeA ? (myId/subPartitions)*entirePartitionSize*2 + entirePartitionSize : sizeA; int partitionEndB = partitionEndA + entirePartitionSize < sizeA ? 
partitionEndA + entirePartitionSize : sizeA; if(localAPartSize == 0) return; //Now we have the beginning and end points of our subpartitions, merge the two together T nextMaxB, nextMaxA, localMinB, localMaxB, cmpValue; int index, bIndex = 0; int aIndex = 0; int localAIndex = aIndex+depth*tid; #if (__CUDA_ARCH__ >= 200) extern __shared__ char shared[]; #else extern __shared__ unsigned int shared[]; #endif T* BKeys = (T*) shared; //__shared__ T BKeys[INTERSECT_B_BLOCK_SIZE_multi+3]; T* BMax = (T*) &BKeys[INTERSECT_B_BLOCK_SIZE_multi]; bool breakout = false; //bool endPartition = myStartIdxA+localAPartSize >= partitionEndA; if(myStartIdxB == 0 || myId%subPartitions == 0 || myStartIdxB == partitionEndA ) localMinB = MIN_VAL; else localMinB = A_keys[myStartIdxB-1]; T myKey[depth]; unsigned int myVal[depth]; #pragma unroll for(int i =0; i <depth; i++) { myKey[i] = (localAIndex + i < localAPartSize ? A_keys[myStartIdxA + aIndex+depth*tid+i] : MAX_VAL); myVal[i] = (localAIndex + i < localAPartSize ? A_vals[myStartIdxA + aIndex+depth*tid+i] : UMAX_VAL); } if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = (bIndex + bi < localBPartSize ? A_keys[myStartIdxB + bIndex + bi] : MAX_VAL); } } __syncthreads(); if(tid == CTASIZE_multi-1) { if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) BMax[0] = A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_multi]; else BMax[0] = (myStartIdxB + bIndex + localBPartSize < partitionEndB ? A_keys[myStartIdxB + bIndex + localBPartSize] : MAX_VAL); BMax[1] = (myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_multi < partitionEndA ? A_keys[myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_multi] : MAX_VAL); } __syncthreads(); if(localBPartSize == 0) { localMaxB = MAX_VAL; } else { localMaxB = (localBPartSize < INTERSECT_B_BLOCK_SIZE_multi ? 
BKeys[localBPartSize-1] : BKeys[INTERSECT_B_BLOCK_SIZE_multi-1]); } nextMaxB = BMax[0]; nextMaxA = BMax[1]; do{ __syncthreads(); index = 0; if(myKey[0] <= nextMaxB && myKey[depth-1] >= localMinB && localAIndex < localAPartSize) { index = (INTERSECT_B_BLOCK_SIZE_multi/2)-1; binSearch_fragment_lower<T> (BKeys, 256, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 128, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 64, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 32, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 16, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 8, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 4, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 2, index, myKey[0]); binSearch_fragment_lower<T> (BKeys, 1, index, myKey[0]); cmpValue = BKeys[index]; if(cmpValue < myKey[0] && index < (localBPartSize-bIndex) && index < INTERSECT_B_BLOCK_SIZE_multi) cmpValue = BKeys[++index]; if(cmpValue < myKey[0]) index++; int globalCAddress = (myStartIdxC + bIndex + aIndex + tid*depth); //Save Key-Value Pair (after bin search) if(((myKey[0] <= nextMaxB || myKey[0] <= localMaxB) && myKey[0] >= localMinB) && localAIndex < localAPartSize) { A_keys_out[globalCAddress+index] = myKey[0]; A_vals_out[globalCAddress+index] = myVal[0]; } if(localAIndex + 1 < localAPartSize) linearMerge_lower<T, depth>(BKeys, myKey[1], myVal[1], index, A_keys_out, A_vals_out, myStartIdxC, nextMaxB, localAPartSize, localBPartSize, localMaxB, localMinB, aIndex, bIndex, 1); if(localAIndex + 2 < localAPartSize) linearMerge_lower<T, depth>(BKeys, myKey[2], myVal[2], index, A_keys_out, A_vals_out, myStartIdxC, nextMaxB, localAPartSize, localBPartSize, localMaxB, localMinB, aIndex, bIndex, 2); if(localAIndex + 3 < localAPartSize) { linearMerge_lower<T, depth>(BKeys, myKey[3], myVal[3], index, A_keys_out, A_vals_out, myStartIdxC, nextMaxB, localAPartSize, localBPartSize, localMaxB, localMinB, aIndex, bIndex, 3); } } //We try to cleverly move the memory window ahead to get more overlap between our register window and smem window __syncthreads(); if((nextMaxA <= nextMaxB || (bIndex+INTERSECT_B_BLOCK_SIZE_multi) >= localBPartSize) && (aIndex+INTERSECT_A_BLOCK_SIZE_multi)< localAPartSize) { aIndex += INTERSECT_A_BLOCK_SIZE_multi; //Use INT_MAX-1 as an "invalid/no-value" type in case we are out of values to check #pragma unroll for(int i=0;i <depth;i++) { myKey[i] = (aIndex+depth*tid + i < localAPartSize ? A_keys[myStartIdxA + aIndex+depth*tid+i] : MAX_VAL); myVal[i] = (aIndex+depth*tid + i < localAPartSize ? A_vals[myStartIdxA + aIndex+depth*tid+i] : UMAX_VAL); } if(tid == CTASIZE_multi-1) { BKeys[INTERSECT_B_BLOCK_SIZE_multi+1] = (myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_multi < partitionEndA ? A_keys[myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_multi] : MAX_VAL); BKeys[INTERSECT_B_BLOCK_SIZE_multi+2] = myKey[depth-1]; } } else if(nextMaxB <= nextMaxA && (bIndex+INTERSECT_B_BLOCK_SIZE_multi) < localBPartSize) { //Use INT_MAX as an "invalid/no-value" type in case the streaming window cannot be filled localMinB = nextMaxB; bIndex += INTERSECT_B_BLOCK_SIZE_multi; if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = (bIndex + bi < localBPartSize ? 
A_keys[myStartIdxB + bIndex + bi] : MAX_VAL); } } if(tid == CTASIZE_multi-1) { if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) BMax[0] = A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_multi]; else BMax[0] = (myStartIdxB + bIndex + localBPartSize < partitionEndB ? A_keys[myStartIdxB + bIndex + localBPartSize] : MAX_VAL); } } else breakout = true; __syncthreads(); localMaxB = ( (localBPartSize-bIndex) < INTERSECT_B_BLOCK_SIZE_multi && (localBPartSize - bIndex) > 0 ? BKeys[localBPartSize-bIndex-1] : BKeys[INTERSECT_B_BLOCK_SIZE_multi-1]); nextMaxB = BMax[0]; nextMaxA = BMax[1]; __syncthreads(); } while(!breakout); } /** @brief Blocks cooperatively Merge two partitions for the indices in the "upper" block (right block) * * Utilizes a "ping-pong" strategy * @param[out] A_keys_out Resulting array of keys merged * @param[out] A_vals_out Resulting array of values merged * @param[in] A_keys Global array of keys to be merged * @param[in] A_vals Global array of values to be merged * @param[in] subPartitions Number of blocks working on a partition (number of sub-partitions) * @param[in] numBlocks * @param[in] partitionBeginA Partition starting points decided by function findMultiPartitions * @param[in] partitionSizeA Partition sizes decided by function findMultiPartitions * @param[in] entirePartitionSize The size of an entire partition (before it is split up) * @param[in] sizeA The total size of our array **/ template<class T, int depth> __global__ void mergeMulti_higher(T *A_keys_out, unsigned int* A_vals_out, T *A_keys, unsigned int*A_vals, int subPartitions, int numBlocks, int *partitionBeginA, int *partitionSizeA, int entirePartitionSize, int sizeA) { T MAX_VAL = getMax<T>(); T MIN_VAL = getMin<T>(); unsigned int UMAX_VAL = getMax<unsigned int>(); int myId = blockIdx.x; int myStartId = (myId%subPartitions) + 2*(myId/subPartitions)*subPartitions; int myStartIdxB = partitionBeginA[myStartId]; int myStartIdxA = partitionBeginA[myStartId+subPartitions]; int myStartIdxC; int localBPartSize = partitionSizeA[myStartId]; int localAPartSize = partitionSizeA[myStartId+subPartitions]; if(localAPartSize == 0) return; myStartIdxC = myStartIdxA + myStartIdxB - ((myStartId+subPartitions)/subPartitions)*entirePartitionSize; //Now we have the beginning and end points of our subpartitions, merge the two together T nextMaxB, nextMaxA, cmpValue, localMaxB, localMinB; int index, bIndex = 0, aIndex = 0; T myKey[depth]; unsigned int myVal[depth]; #if (__CUDA_ARCH__ >= 200) extern __shared__ char shared[]; #else extern __shared__ unsigned int shared[]; #endif T* BKeys = (T*) shared; //__shared__ T BKeys[INTERSECT_B_BLOCK_SIZE_multi+3]; T* BMax = (T*) &BKeys[INTERSECT_B_BLOCK_SIZE_multi]; bool breakout = false; int tid = threadIdx.x; int localAIndex = aIndex+depth*tid; //bool endPartition = (myId%subPartitions == subPartitions-1); int partitionEndB = (myId/subPartitions)*(entirePartitionSize*2) + entirePartitionSize < sizeA ? (myId/subPartitions)*entirePartitionSize*2 + entirePartitionSize : sizeA; int partitionEndA = partitionEndB + entirePartitionSize < sizeA ? partitionEndB + entirePartitionSize : sizeA; #pragma unroll for(int i =0; i <depth; i++) { myKey[i] = (localAIndex+ i < localAPartSize ? A_keys[myStartIdxA + localAIndex+i] : MAX_VAL); myVal[i] = (localAIndex + i < localAPartSize ? 
A_vals[myStartIdxA + localAIndex+i] : UMAX_VAL); } if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = (myStartIdxB + bIndex + bi < partitionEndB ? A_keys[myStartIdxB + bIndex + bi] : MAX_VAL); } } if(tid == 0) { if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) BMax[0] = A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_multi]; else BMax[0] = (myStartIdxB + bIndex + localBPartSize < partitionEndB ? A_keys[myStartIdxB + bIndex + localBPartSize] : MAX_VAL); BMax[1] = (myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_multi < partitionEndA ? A_keys[myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_multi] : MAX_VAL); } __syncthreads(); if(myStartIdxB == 0 || myId%subPartitions == 0 ) localMinB = MIN_VAL; else localMinB = A_keys[myStartIdxB-1]; if(localBPartSize == 0) localMaxB = MAX_VAL; else localMaxB = ( localBPartSize < INTERSECT_B_BLOCK_SIZE_multi ? BKeys[localBPartSize-1] : BKeys[INTERSECT_B_BLOCK_SIZE_multi-1]); nextMaxB = BMax[0]; nextMaxA = BMax[1]; do { __syncthreads(); index = 0; if((myKey[0] <= nextMaxB) && myKey[depth-1] >= localMinB && localAIndex < localAPartSize) { index = (INTERSECT_B_BLOCK_SIZE_multi/2)-1; binSearch_fragment_higher<T> (BKeys, 256, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 128, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 64, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 32, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 16, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 8, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 4, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 2, index, myKey[0]); binSearch_fragment_higher<T> (BKeys, 1, index, myKey[0]); cmpValue = BKeys[index]; if(cmpValue <= myKey[0] && index < INTERSECT_B_BLOCK_SIZE_multi) cmpValue = BKeys[++index]; if(cmpValue <= myKey[0]) index++; index = index >= (localBPartSize-bIndex) ? localBPartSize-bIndex : index; if((myKey[0] >= localMinB && myKey[0] < nextMaxB /*|| bIndex+index >= localBPartSize*/) && aIndex+depth*tid < localAPartSize) { A_keys_out[myStartIdxC + bIndex + aIndex+depth*tid+index] = myKey[0]; A_vals_out[myStartIdxC + bIndex + aIndex+depth*tid+index] = myVal[0]; } if(localAIndex + 1 < localAPartSize) linearMerge_higher<T, depth>(BKeys, myKey[1], myVal[1], index, A_keys_out, A_vals_out, myStartIdxC, localMinB, nextMaxB, aIndex, bIndex, 1, localAPartSize, localBPartSize); if(localAIndex+2 < localAPartSize) linearMerge_higher<T, depth>(BKeys, myKey[2], myVal[2], index, A_keys_out, A_vals_out, myStartIdxC, localMinB, nextMaxB, aIndex, bIndex, 2, localAPartSize, localBPartSize); if(localAIndex+3 < localAPartSize) linearMerge_higher<T, depth>(BKeys, myKey[3], myVal[3], index, A_keys_out, A_vals_out, myStartIdxC, localMinB, nextMaxB, aIndex, bIndex, 3, localAPartSize, localBPartSize); } __syncthreads(); __threadfence(); if((nextMaxA <= nextMaxB /*&& localMaxA != nextMaxB*/ || (bIndex+INTERSECT_B_BLOCK_SIZE_multi) >= localBPartSize) && (aIndex+INTERSECT_A_BLOCK_SIZE_multi)< localAPartSize) { aIndex += INTERSECT_A_BLOCK_SIZE_multi; //Use INT_MAX-1 as an "invalid/no-value" type in case we are out of values to check #pragma unroll for(int i=0;i <depth;i++) { myKey[i] = (aIndex+depth*tid+i < localAPartSize ? 
A_keys[myStartIdxA + aIndex+depth*tid+i] : MAX_VAL); myVal[i] = (aIndex+depth*tid+i < localAPartSize ? A_vals[myStartIdxA + aIndex+depth*tid+i] : UMAX_VAL); } if(tid == CTASIZE_multi-1) { BMax[1] = (myStartIdxA+aIndex + INTERSECT_A_BLOCK_SIZE_multi < partitionEndA ? A_keys[myStartIdxA + aIndex + INTERSECT_A_BLOCK_SIZE_multi] : MAX_VAL); } } else if(nextMaxB <= nextMaxA && (bIndex+INTERSECT_B_BLOCK_SIZE_multi) < localBPartSize) { localMinB = localMaxB; //Use INT_MAX as an "invalid/no-value" type in case the streaming window cannot be filled bIndex += INTERSECT_B_BLOCK_SIZE_multi; if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = A_keys[myStartIdxB + bIndex + bi]; } } else { int bi = tid; #pragma unroll for(int i = 0;i < INTERSECT_B_BLOCK_SIZE_multi/CTASIZE_multi; i++, bi+=CTASIZE_multi) { BKeys[bi] = (myStartIdxB+bIndex + bi < partitionEndB ? A_keys[myStartIdxB + bIndex + bi] : MAX_VAL); } } if(tid == 0) { if(bIndex + INTERSECT_B_BLOCK_SIZE_multi < localBPartSize) BMax[0] = A_keys[myStartIdxB + bIndex + INTERSECT_B_BLOCK_SIZE_multi]; else BMax[0] = (myStartIdxB + bIndex + localBPartSize < partitionEndB ? A_keys[myStartIdxB + bIndex + localBPartSize] : MAX_VAL); } } else breakout = true; __syncthreads(); //For each thread grab your value ranges for B and A //These will look at the end of our window, and the beginning of the next window for A and B //We make decisions on whether to advance a window, or save our merged value based on these nextMaxB = BMax[0]; nextMaxA = BMax[1]; localMaxB = ( (localBPartSize-bIndex) < INTERSECT_B_BLOCK_SIZE_multi && (localBPartSize - bIndex) > 0 ? BKeys[localBPartSize-bIndex-1] : BKeys[INTERSECT_B_BLOCK_SIZE_multi-1]); __syncthreads(); } while(!breakout); } /** @} */ // end MergeSort functions /** @} */ // end cudpp_kernel
#if ( MODEL == HYDRO && FLU_SCHEME == CTU ) // external functions #ifdef __CUDACC__ #include "CUFLU_Shared_FluUtility.cu" #include "CUFLU_Shared_DataReconstruction.cu" #include "CUFLU_Shared_ComputeFlux.cu" #include "CUFLU_Shared_FullStepUpdate.cu" #ifdef MHD #include "CUFLU_Shared_ConstrainedTransport.cu" #endif #include "CUDA_ConstMemory.h" #else // #ifdef __CUDACC__ void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset ); void Hydro_DataReconstruction( const real g_ConVar [][ CUBE(FLU_NXT) ], const real g_FC_B [][ SQR(FLU_NXT)*FLU_NXT_P1 ], real g_PriVar [][ CUBE(FLU_NXT) ], real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_Slope_PPM[][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], const bool Con2Pri, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const real dt, const real dh, const real MinDens, const real MinPres, const real MinEint, const bool FracPassive, const int NFrac, const int FracIdx[], const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t *EoS ); void Hydro_ComputeFlux( const real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], const int NFlux, const int NSkip_N, const int NSkip_T, const bool CorrHalfVel, const real g_Pot_USG[], const double g_Corner[], const real dt, const real dh, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_AuxArray[], const real MinDens, const real MinPres, const bool DumpIntFlux, real g_IntFlux[][NCOMP_TOTAL][ SQR(PS2) ], const EoS_t *EoS ); void Hydro_FullStepUpdate( const real g_Input[][ CUBE(FLU_NXT) ], real g_Output[][ CUBE(PS2) ], char g_DE_Status[], const real g_FC_B[][ PS2P1*SQR(PS2) ], const real g_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], const real dt, const real dh, const real MinDens, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const int NormIdx[], const EoS_t *EoS ); #ifdef MHD void MHD_ComputeElectric( real g_EC_Ele[][ CUBE(N_EC_ELE) ], const real g_FC_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], const real g_PriVar[][ CUBE(FLU_NXT) ], const int NEle, const int NFlux, const int NPri, const int OffsetPri, const real dt, const real dh, const bool DumpIntEle, real g_IntEle[][NCOMP_ELE][ PS2P1*PS2 ], const bool CorrHalfVel, const real g_Pot_USG[], const double g_Corner[], const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_AuxArray[] ); void MHD_UpdateMagnetic( real *g_FC_Bx_Out, real *g_FC_By_Out, real *g_FC_Bz_Out, const real g_FC_B_In[][ FLU_NXT_P1*SQR(FLU_NXT) ], const real g_EC_Ele[][ CUBE(N_EC_ELE) ], const real dt, const real dh, const int NOut, const int NEle, const int Offset_B_In ); void MHD_HalfStepPrimitive( const real g_Flu_In[][ CUBE(FLU_NXT) ], const real g_FC_B_Half[][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_PriVar_Out[][ CUBE(FLU_NXT) ], const real g_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], const real dt, const real dh, const real MinDens ); #endif // #ifdef MHD #endif // #ifdef __CUDACC__ ... else ... 
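// Illustrative note (a hedged sketch, not taken from the GAMER source): the
// transverse-flux-gradient correction declared below as Hydro_TGradientCorrection()
// implements the standard CTU step. With a uniform cell size dh, each face-centered
// state returned by the data reconstruction is advanced for half a time-step by the
// flux differences in the two transverse directions; e.g. for an x-face state with
// half-step y-flux G and z-flux H:
//
//    U_{x-face}^{n+1/2} = U_{x-face}^{*} - dt/(2*dh) * ( G_{j+1/2} - G_{j-1/2} )
//                                        - dt/(2*dh) * ( H_{k+1/2} - H_{k-1/2} )
//
// (cf. the references cited in the function header below: Stone et al. 2008;
// Gardiner & Stone 2008)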
// internal functions GPU_DEVICE void Hydro_TGradientCorrection( real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], const real g_FC_Flux [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], const real g_FC_B_In [][ FLU_NXT_P1*SQR(FLU_NXT) ], const real g_FC_B_Half[][ FLU_NXT_P1*SQR(FLU_NXT) ], const real g_EC_Ele [][ CUBE(N_EC_ELE) ], const real g_PriVar [][ CUBE(FLU_NXT) ], const real dt, const real dh, const real MinDens, const real MinEint ); //------------------------------------------------------------------------------------------------------- // Function : CPU/CUFLU_FluidSolver_CTU // Description : CPU/GPU fluid solver based on the Corner-Transport-Upwind (CTU) scheme // // Note : 1. Ref: (a) Stone et al., ApJS, 178, 137 (2008) // (b) Gardiner & Stone, J. Comput. Phys., 227, 4123 (2008) // 2. See include/CUFLU.h for the values and description of different symbolic constants // such as N_FC_VAR, N_FC_FLUX, N_SLOPE_PPM, N_FL_FLUX, N_HF_VAR // 3. Arrays with a prefix "g_" are stored in the global memory of GPU // // Parameter : g_Flu_Array_In : Array storing the input fluid variables // g_Flu_Array_Out : Array to store the output fluid variables // g_Mag_Array_In : Array storing the input B field (for MHD only) // g_Mag_Array_Out : Array to store the output B field (for MHD only) // g_DE_Array_Out : Array to store the dual-energy status // g_Flux_Array : Array to store the output fluxes // g_Ele_Array : Array to store the output electric field (for MHD only) // g_Corner_Array : Array storing the physical corner coordinates of each patch group (for UNSPLIT_GRAVITY) // g_Pot_Array_USG : Array storing the input potential for UNSPLIT_GRAVITY // g_PriVar : Array to store the primitive variables // g_Slope_PPM : Array to store the slope for the PPM reconstruction // g_FC_Var : Array to store the half-step variables // g_FC_Flux : Array to store the face-centered fluxes // g_FC_Mag_Half : Array to store the half-step B field (for MHD only) // g_EC_Ele : Array to store the edge-centered electric field (for MHD only) // NPatchGroup : Number of patch groups to be evaluated // dt : Time interval to advance solution // dh : Cell size // StoreFlux : true --> store the coarse-fine fluxes // StoreElectric : true --> store the coarse-fine electric field // LR_Limiter : Slope limiter for the data reconstruction in the MHM/MHM_RP/CTU schemes // (0/1/2/3/4) = (vanLeer/generalized MinMod/vanAlbada/ // vanLeer + generalized MinMod/extrema-preserving) limiter // MinMod_Coeff : Coefficient of the generalized MinMod limiter // Time : Current physical time (for UNSPLIT_GRAVITY only) // UsePot : Add self-gravity and/or external potential (for UNSPLIT_GRAVITY only) // ExtAcc : Add external acceleration (for UNSPLIT_GRAVITY only) // ExtAcc_Func : Function pointer to the external acceleration routine (for UNSPLIT_GRAVITY only) // c_ExtAcc_AuxArray : Auxiliary array for adding external acceleration (for UNSPLIT_GRAVITY and CPU only) // --> When using GPU, this array is stored in the constant memory header // CUDA_ConstMemory.h and does not need to be passed as a function argument // MinDens/Pres/Eint : Density, pressure, and internal energy floors // DualEnergySwitch : Use the dual-energy formalism if E_int/E_kin < DualEnergySwitch // NormPassive : true --> normalize passive scalars so that the sum of their mass density // is equal to the gas mass density // NNorm : Number of passive scalars to be normalized // --> Should be set to the global variable "PassiveNorm_NVar" // c_NormIdx : Target variable indices to be 
normalized // --> Should be set to the global variable "PassiveNorm_VarIdx" // --> When using GPU, this array is stored in the constant memory and does // not need to be passed as a function argument // --> Declared in CUDA_ConstMemory.h with the prefix "c_" to // highlight that this is a constant variable on GPU // FracPassive : true --> convert passive scalars to mass fraction during data reconstruction // NFrac : Number of passive scalars for the option "FracPassive" // --> Should be set to the global variable "PassiveIntFrac_NVar" // c_FracIdx : Target variable indices for the option "FracPassive" // --> Should be set to the global variable "PassiveIntFrac_VarIdx" // --> When using GPU, this array is stored in the constant memory and does // not need to be passed as a function argument // --> Declared in CUDA_ConstMemory.h with the prefix "c_" to // highlight that this is a constant variable on GPU // JeansMinPres : Apply minimum pressure estimated from the Jeans length // JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi); // EoS : EoS object //------------------------------------------------------------------------------------------------------- #ifdef __CUDACC__ __global__ void CUFLU_FluidSolver_CTU( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ) #else void CPU_FluidSolver_CTU( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const int NPatchGroup, const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const 
OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double c_ExtAcc_AuxArray[], const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const int c_NormIdx[], const bool FracPassive, const int NFrac, const int c_FracIdx[], const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ) #endif // #ifdef __CUDACC__ ... else ... { # ifdef UNSPLIT_GRAVITY const bool CorrHalfVel = true; # else const bool CorrHalfVel = false; # endif const bool CorrHalfVel_No = false; const bool StoreFlux_No = false; const bool Con2Pri_Yes = true; # ifdef MHD const bool StoreElectric_No = false; # endif # if ( defined __CUDACC__ && !defined GRAVITY ) const double *c_ExtAcc_AuxArray = NULL; # endif // openmp pragma for the CPU solver # ifndef __CUDACC__ # pragma omp parallel # endif { // point to the arrays associated with different OpenMP threads (for CPU) or CUDA thread blocks (for GPU) # ifdef __CUDACC__ const int array_idx = blockIdx.x; # else # ifdef OPENMP const int array_idx = omp_get_thread_num(); # else const int array_idx = 0; # endif # endif // #ifdef __CUDACC__ ... else ... real (*const g_FC_Var_1PG )[NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ] = g_FC_Var [array_idx]; real (*const g_FC_Flux_1PG )[NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ] = g_FC_Flux [array_idx]; real (*const g_PriVar_1PG ) [ CUBE(FLU_NXT) ] = g_PriVar [array_idx]; real (*const g_Slope_PPM_1PG)[NCOMP_LR ][ CUBE(N_SLOPE_PPM) ] = g_Slope_PPM[array_idx]; # ifdef MHD real (*const g_FC_Mag_Half_1PG)[ FLU_NXT_P1*SQR(FLU_NXT) ] = g_FC_Mag_Half[array_idx]; real (*const g_EC_Ele_1PG )[ CUBE(N_EC_ELE) ] = g_EC_Ele [array_idx]; # else real (*const g_FC_Mag_Half_1PG)[ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL; real (*const g_EC_Ele_1PG )[ CUBE(N_EC_ELE) ] = NULL; # endif # ifdef MHD real (*const g_PriVar_Half_1PG)[ CUBE(FLU_NXT) ] = g_PriVar_1PG; # endif // loop over all patch groups // --> CPU/GPU solver: use different (OpenMP threads) / (CUDA thread blocks) // to work on different patch groups # ifdef __CUDACC__ const int P = blockIdx.x; # else # pragma omp for schedule( runtime ) for (int P=0; P<NPatchGroup; P++) # endif { // 1. evaluate the face-centered values at the half time-step Hydro_DataReconstruction( g_Flu_Array_In[P], g_Mag_Array_In[P], g_PriVar_1PG, g_FC_Var_1PG, g_Slope_PPM_1PG, Con2Pri_Yes, LR_Limiter, MinMod_Coeff, dt, dh, MinDens, MinPres, MinEint, FracPassive, NFrac, c_FracIdx, JeansMinPres, JeansMinPres_Coeff, &EoS ); // 2. evaluate the face-centered half-step fluxes by solving the Riemann problem Hydro_ComputeFlux( g_FC_Var_1PG, g_FC_Flux_1PG, N_HF_FLUX, 0, 0, CorrHalfVel_No, NULL, NULL, NULL_REAL, NULL_REAL, NULL_REAL, EXT_POT_NONE, EXT_ACC_NONE, NULL, NULL, MinDens, MinPres, StoreFlux_No, NULL, &EoS ); // 3. evaluate electric field and update B field at the half time-step # ifdef MHD MHD_ComputeElectric( g_EC_Ele_1PG, g_FC_Flux_1PG, g_PriVar_1PG, N_HF_ELE, N_HF_FLUX, FLU_NXT, LR_GHOST_SIZE, dt, dh, StoreElectric_No, NULL, CorrHalfVel_No, NULL, NULL, NULL_REAL, EXT_POT_NONE, EXT_ACC_NONE, NULL, NULL ); MHD_UpdateMagnetic( g_FC_Mag_Half_1PG[0], g_FC_Mag_Half_1PG[1], g_FC_Mag_Half_1PG[2], g_Mag_Array_In[P], g_EC_Ele_1PG, (real)0.5*dt, dh, N_HF_VAR, N_HF_ELE, FLU_GHOST_SIZE-1 ); # endif // 4. correct the face-centered variables by the transverse flux gradients Hydro_TGradientCorrection( g_FC_Var_1PG, g_FC_Flux_1PG, g_Mag_Array_In[P], g_FC_Mag_Half_1PG, g_EC_Ele_1PG, g_PriVar_1PG, dt, dh, MinDens, MinEint ); // 5. 
evaluate the cell-centered primitive variables at the half time-step // --> for computing CT electric field later # ifdef MHD MHD_HalfStepPrimitive( g_Flu_Array_In[P], g_FC_Mag_Half_1PG, g_PriVar_Half_1PG, g_FC_Flux_1PG, dt, dh, MinDens ); # endif // 6. evaluate the face-centered full-step fluxes by solving the Riemann problem with the corrected data # ifdef MHD const int NSkip_N = 1; const int NSkip_T = 1; # else const int NSkip_N = 0; const int NSkip_T = 1; # endif Hydro_ComputeFlux( g_FC_Var_1PG, g_FC_Flux_1PG, N_FL_FLUX, NSkip_N, NSkip_T, CorrHalfVel, g_Pot_Array_USG[P], g_Corner_Array[P], dt, dh, Time, UsePot, ExtAcc, ExtAcc_Func, c_ExtAcc_AuxArray, MinDens, MinPres, StoreFlux, g_Flux_Array[P], &EoS ); // 7. evaluate electric field and update B field at the full time-step // --> must update B field before Hydro_FullStepUpdate() since the latter requires // the updated magnetic energy when adopting the dual-energy formalism # ifdef MHD MHD_ComputeElectric( g_EC_Ele_1PG, g_FC_Flux_1PG, g_PriVar_Half_1PG, N_FL_ELE, N_FL_FLUX, N_HF_VAR, 0, dt, dh, StoreElectric, g_Ele_Array[P], CorrHalfVel, g_Pot_Array_USG[P], g_Corner_Array[P], Time, UsePot, ExtAcc, ExtAcc_Func, c_ExtAcc_AuxArray ); MHD_UpdateMagnetic( g_Mag_Array_Out[P][0], g_Mag_Array_Out[P][1], g_Mag_Array_Out[P][2], g_Mag_Array_In[P], g_EC_Ele_1PG, dt, dh, PS2, N_FL_ELE, FLU_GHOST_SIZE ); # endif // 8. full-step evolution of the fluid data Hydro_FullStepUpdate( g_Flu_Array_In[P], g_Flu_Array_Out[P], g_DE_Array_Out[P], g_Mag_Array_Out[P], g_FC_Flux_1PG, dt, dh, MinDens, MinEint, DualEnergySwitch, NormPassive, NNorm, c_NormIdx, &EoS ); } // loop over all patch groups } // OpenMP parallel region } // FUNCTION : CPU_FluidSolver_CTU //------------------------------------------------------------------------------------------------------- // Function : Hydro_TGradientCorrection // Description : Correct the face-centered variables by the transverse flux gradients // // Note : 1. Ref: (a) Stone et al., ApJS, 178, 137 (2008) // (b) Gardiner & Stone, J. Comput. Phys., 227, 4123 (2008) // 2. 
Assuming "N_FC_VAR == N_HF_FLUX" // // Parameter : g_FC_Var : Array to store the input and output face-centered conserved variables // --> Accessed with the stride N_FC_VAR // g_FC_Flux : Array storing the input face-centered fluxes // --> Accessed with the stride N_HF_FLUX // g_FC_B_In : Array storing the input initial face-centered B field // g_FC_B_Half : Array storing the input half-step face-centered B field // g_EC_Ele : Array storing the input edge-centered electric field // g_PriVar : Array storing the input cell-centered primitive variables // dt : Time interval to advance solution // dh : Cell size // MinDens/Eint : Density and internal energy floors //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_TGradientCorrection( real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], const real g_FC_Flux [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], const real g_FC_B_In [][ FLU_NXT_P1*SQR(FLU_NXT) ], const real g_FC_B_Half[][ FLU_NXT_P1*SQR(FLU_NXT) ], const real g_EC_Ele [][ CUBE(N_EC_ELE) ], const real g_PriVar [][ CUBE(FLU_NXT) ], const real dt, const real dh, const real MinDens, const real MinEint ) { const int didx_flux[3] = { 1, N_HF_FLUX, SQR(N_HF_FLUX) }; const real dt_dh2 = (real)0.5*dt/dh; # ifdef MHD const int didx_b_in [3] = { 1, FLU_NXT, SQR(FLU_NXT) }; const int didx_b_half[3] = { 1, N_HF_VAR, SQR(N_HF_VAR) }; const int didx_ele [3] = { 1, N_HF_ELE, SQR(N_HF_ELE) }; const real _dh = (real)1.0/dh; const real dt_dh4 = (real)0.25*dt*_dh; const real dt_2 = (real)0.5*dt; real PriVar_1Cell[NCOMP_FLUID+NCOMP_MAG], B_Face[NCOMP_MAG][2]; // [2]=left/right faces # endif // loop over different spatial directions for (int d=0; d<3; d++) { const int faceL = 2*d; const int faceR = faceL+1; const int TDir1 = (d+1)%3; // transverse direction 1 const int TDir2 = (d+2)%3; // transverse direction 2 real fc_var[2][NCOMP_TOTAL_PLUS_MAG]; // [2]=left/right faces # ifdef MHD const int nskip[3] = { 1, 1, 1 }; # else int nskip[3]; switch ( d ) { case 0 : nskip[0] = 0; nskip[1] = 1; nskip[2] = 1; break; case 1 : nskip[0] = 1; nskip[1] = 0; nskip[2] = 1; break; case 2 : nskip[0] = 1; nskip[1] = 1; nskip[2] = 0; break; } # endif const int size_i = ( N_FC_VAR - 2*nskip[0] ); const int size_j = ( N_FC_VAR - 2*nskip[1] ); const int size_k = ( N_FC_VAR - 2*nskip[2] ); const int size_ij = size_i*size_j; CGPU_LOOP( idx0, size_i*size_j*size_k ) { // i/j/k0 start from zero const int i0 = idx0 % size_i; const int j0 = idx0 % size_ij / size_i; const int k0 = idx0 / size_ij; const int i_fc_var = i0 + nskip[0]; const int j_fc_var = j0 + nskip[1]; const int k_fc_var = k0 + nskip[2]; const int idx_fc_var = IDX321( i_fc_var, j_fc_var, k_fc_var, N_FC_VAR, N_FC_VAR ); const int idx_fluxR = idx_fc_var; // assuming N_FC_VAR == N_HF_FLUX const int idx_fluxL1 = idx_fluxR - didx_flux[TDir1]; const int idx_fluxL2 = idx_fluxR - didx_flux[TDir2]; // 0. load g_FC_Var[] to the local variable fc[] to reduce the GPU global memory access for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) { fc_var[0][v] = g_FC_Var[faceL][v][idx_fc_var]; fc_var[1][v] = g_FC_Var[faceR][v][idx_fc_var]; } // 1. 
calculate the transverse fluid flux gradients and update the corresponding face-centered fluid variables for (int v=0; v<NCOMP_TOTAL; v++) { real Correct, TGrad1, TGrad2; TGrad1 = g_FC_Flux[TDir1][v][idx_fluxR] - g_FC_Flux[TDir1][v][idx_fluxL1]; TGrad2 = g_FC_Flux[TDir2][v][idx_fluxR] - g_FC_Flux[TDir2][v][idx_fluxL2]; Correct = -dt_dh2*( TGrad1 + TGrad2 ); fc_var[0][v] += Correct; fc_var[1][v] += Correct; } # ifdef MHD // 2. correct the transverse B field const int idx_ele = IDX321( i0, j0, k0, N_HF_ELE, N_HF_ELE ); for (int v=1; v<NCOMP_MAG; v++) { real Correct, TGrad1, TGrad2, Sign; const int TD1 = (d+v)%3; // transverse direction 1 const int TD2 = (d+2*v)%3; // transverse direction 2 const int TB = TD1 + MAG_OFFSET; // target transverse B field TGrad1 = g_EC_Ele[d][ idx_ele + didx_ele[TD2] ] - g_EC_Ele[d][ idx_ele ]; TGrad2 = g_EC_Ele[d][ idx_ele + didx_ele[TD2] + didx_ele[TD1] ] - g_EC_Ele[d][ idx_ele + didx_ele[TD1] ]; Sign = (real)2.0*v - (real)3.0; // v=1/2 --> sign=-1/+1 Correct = Sign*dt_dh4*( TGrad2 + TGrad1 ); fc_var[0][TB] += Correct; fc_var[1][TB] += Correct; } // for (int v=1; v<NCOMP_MAG; v++) // 3. add the divergence(B) source terms Hydro_Rotate3D( fc_var[0], d, true, MAG_OFFSET ); Hydro_Rotate3D( fc_var[1], d, true, MAG_OFFSET ); // 3-1. get the initial cell-centered primitive variables const int i_pri = i_fc_var + LR_GHOST_SIZE; const int j_pri = j_fc_var + LR_GHOST_SIZE; const int k_pri = k_fc_var + LR_GHOST_SIZE; const int idx_pri = IDX321( i_pri, j_pri, k_pri, FLU_NXT, FLU_NXT ); // skip passive scalars for (int v=0; v<NCOMP_FLUID; v++) PriVar_1Cell[ v ] = g_PriVar[ v ][idx_pri]; for (int v=0; v<NCOMP_MAG; v++) PriVar_1Cell[ v + NCOMP_FLUID ] = g_PriVar[ v + MAG_OFFSET ][idx_pri]; Hydro_Rotate3D( PriVar_1Cell, d, true, NCOMP_FLUID ); // 3-2. get the initial face-centered B field const int idx_b_in[3] = { IDX321( i_pri, j_pri, k_pri, FLU_NXT_P1, FLU_NXT ), IDX321( i_pri, j_pri, k_pri, FLU_NXT, FLU_NXT_P1 ), IDX321( i_pri, j_pri, k_pri, FLU_NXT, FLU_NXT ) }; B_Face[0][0] = g_FC_B_In[ d ][ idx_b_in[ d ] ]; B_Face[0][1] = g_FC_B_In[ d ][ idx_b_in[ d ] + didx_b_in[ d ] ]; B_Face[1][0] = g_FC_B_In[TDir1][ idx_b_in[TDir1] ]; B_Face[1][1] = g_FC_B_In[TDir1][ idx_b_in[TDir1] + didx_b_in[TDir1] ]; B_Face[2][0] = g_FC_B_In[TDir2][ idx_b_in[TDir2] ]; B_Face[2][1] = g_FC_B_In[TDir2][ idx_b_in[TDir2] + didx_b_in[TDir2] ]; // 3-3. add the divergence(B) source term const real Vy = PriVar_1Cell[ 2 ]; const real Vz = PriVar_1Cell[ 3 ]; const real Bx = PriVar_1Cell[ 0 + NCOMP_FLUID ]; const real By = PriVar_1Cell[ 1 + NCOMP_FLUID ]; const real Bz = PriVar_1Cell[ 2 + NCOMP_FLUID ]; real dB[NCOMP_MAG], SrcFlu[NCOMP_FLUID-1], SrcMag[2], Vy_MinModBxz, Vz_MinModBxy; for (int v=0; v<NCOMP_MAG; v++) dB[v] = ( B_Face[v][1] - B_Face[v][0] )*_dh; # define MINMOD( a , b ) ( ( (a)*(b)>(real)0.0 ) ? ( SIGN(a)*FMIN(FABS(a),FABS(b)) ) : (real)0.0 ) Vy_MinModBxz = Vy*MINMOD( -dB[2], dB[0] ); Vz_MinModBxy = Vz*MINMOD( -dB[1], dB[0] ); # undef MINMOD SrcFlu[0] = dt_2*Bx*dB[0]; SrcFlu[1] = dt_2*By*dB[0]; SrcFlu[2] = dt_2*Bz*dB[0]; SrcFlu[3] = dt_2*( By*Vy_MinModBxz + Bz*Vz_MinModBxy ); SrcMag[0] = dt_2*Vy_MinModBxz; SrcMag[1] = dt_2*Vz_MinModBxy; for (int f=0; f<2; f++) { fc_var[f][ 1 ] += SrcFlu[0]; fc_var[f][ 2 ] += SrcFlu[1]; fc_var[f][ 3 ] += SrcFlu[2]; fc_var[f][ 4 ] += SrcFlu[3]; fc_var[f][ 1 + MAG_OFFSET ] += SrcMag[0]; fc_var[f][ 2 + MAG_OFFSET ] += SrcMag[1]; } // 4. 
set the longitudinal B field to the half-step values updated by MHD_UpdateMagnetic() int idx_b_half; switch ( d ) { case 0 : idx_b_half = IDX321( i0, j0, k0, N_HF_VAR+1, N_HF_VAR ); break; case 1 : idx_b_half = IDX321( i0, j0, k0, N_HF_VAR, N_HF_VAR+1 ); break; case 2 : idx_b_half = IDX321( i0, j0, k0, N_HF_VAR, N_HF_VAR ); break; } fc_var[0][MAG_OFFSET] = g_FC_B_Half[d][ idx_b_half ]; fc_var[1][MAG_OFFSET] = g_FC_B_Half[d][ idx_b_half + didx_b_half[d] ]; Hydro_Rotate3D( fc_var[0], d, false, MAG_OFFSET ); Hydro_Rotate3D( fc_var[1], d, false, MAG_OFFSET ); # endif // #ifdef MHD // 5. apply density and internal energy floors for (int f=0; f<2; f++) { # ifdef MHD const real Bx = fc_var[f][ MAG_OFFSET + 0 ]; const real By = fc_var[f][ MAG_OFFSET + 1 ]; const real Bz = fc_var[f][ MAG_OFFSET + 2 ]; const real Emag= (real)0.5*( SQR(Bx) + SQR(By) + SQR(Bz) ); # else const real Emag = NULL_REAL; # endif fc_var[f][0] = FMAX( fc_var[f][0], MinDens ); fc_var[f][4] = Hydro_CheckMinEintInEngy( fc_var[f][0], fc_var[f][1], fc_var[f][2], fc_var[f][3], fc_var[f][4], MinEint, Emag ); # if ( NCOMP_PASSIVE > 0 ) for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) fc_var[f][v] = FMAX( fc_var[f][v], TINY_NUMBER ); # endif } // store the results to g_FC_Var[] for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) { g_FC_Var[faceL][v][idx_fc_var] = fc_var[0][v]; g_FC_Var[faceR][v][idx_fc_var] = fc_var[1][v]; } } // CGPU_LOOP( idx0, size_i*size_j*size_k ) } // for (int d=0; d<3; d++) # ifdef __CUDACC__ __syncthreads(); # endif } // FUNCTION : Hydro_TGradientCorrection #endif // #if ( MODEL == HYDRO && FLU_SCHEME == CTU )
// This CUDA kernel produces the semi-ring product of two // sparse matrices of types T_A and T_B and common index space size n, to a // output matrix of type T_C. The matrices are sparse, with different numbers // of non-zeros and different sparsity patterns. // ie. we want to produce C = A'*B in the sense of the given semi-ring. // This version uses a merge-path algorithm, when the sizes nnzA and nnzB are // relatively close in size, neither is very spare nor dense, for any size of N. // Handles arbitrary sparsity patterns with guaranteed load balance. // Both the grid and block are 1D, so blockDim.x is the # threads in a // threadblock, and the # of threadblocks is grid.x // Let b = blockIdx.x, and let s be blockDim.x. s= 32 with a variable number // of active threads = min( min(g_xnz, g_ynz), 32) // Thus, threadblock b owns a part of the index set spanned by g_xi and g_yi. Its job // is to find the intersection of the index sets g_xi and g_yi, perform the semi-ring dot // product on those items in the intersection, and finally reduce this data to a scalar, // on exit write it to g_odata [b]. // int64_t start <- start of vector pairs for this kernel // int64_t end <- end of vector pairs for this kernel // int64_t *Bucket <- array of pair indices for all kernels // matrix<T_C> *C <- result matrix // matrix<T_M> *M <- mask matrix // matrix<T_A> *A <- input matrix A // matrix<T_B> *B <- input matrix B #define GB_CUDA_KERNEL #include <limits> #include <cstdint> #include "matrix.h" #include <cooperative_groups.h> // Using tile size fixed at compile time, we don't need shared memory #define tile_sz 32 using namespace cooperative_groups; template< typename T, int warp_sz> __device__ __inline__ T GB_reduce_sum(thread_block_tile<warp_sz> g, T val) { // Each iteration halves the number of active threads // Each thread adds its partial sum[i] to sum[lane+i] for (int i = g.size() / 2; i > 0; i /= 2) { T next = g.shfl_down( val, i); val = GB_ADD( val, next ) ; } return val; } template< typename T, int warp_sz> __device__ __inline__ T reduce_plus(thread_block_tile<warp_sz> g, T val) { // Each iteration halves the number of active threads // Each thread adds its partial sum[i] to sum[lane+i] for (int i = g.size() / 2; i > 0; i /= 2) { val += g.shfl_down( val, i) ; } return val; // note: only thread 0 will return full sum and flag value } #define intersects_per_thread 8 template< typename T_C, typename T_A, typename T_B, typename T_X, typename T_Y, typename T_Z> __global__ void AxB_dot3_phase3_warpix ( int64_t start, int64_t end, int64_t *__restrict__ Bucket, GrB_Matrix C, GrB_Matrix M, GrB_Matrix A, GrB_Matrix B, int sz ) { T_A *__restrict__ Ax = (T_A*)A->x; T_B *__restrict__ Bx = (T_B*)B->x; T_C *__restrict__ Cx = (T_C*)C->x; int64_t *__restrict__ Ci = C->i; int64_t *__restrict__ Mi = M->i; int64_t *__restrict__ Mp = M->p; int64_t *__restrict__ Ai = A->i; int64_t *__restrict__ Bi = B->i; int64_t *__restrict__ Ap = A->p; int64_t *__restrict__ Bp = B->p; int64_t mnvec = M->nvec; // zombie count int zc; int64_t pair_id; // set thread ID int tid_global = threadIdx.x+ blockDim.x* blockIdx.x; int tid = threadIdx.x; int b = blockIdx.x ; // total items to be inspected int64_t nnzA = 0; int64_t nnzB = 0; thread_block_tile<tile_sz> tile = tiled_partition<tile_sz>( this_thread_block()); //int parts = gridDim.x; //Each warp is a part //Find our part of the work bucket int64_t pfirst, plast, kfirst, klast ; GB_PARTITION (pfirst, plast, end-start, b, gridDim.x ) ; /* if( tid ==0 ) { printf("block%d is alive, 
pf,pl=%ld,%ld \n", b, pfirst, plast); } __syncthreads(); */ __shared__ int64_t As[256]; __shared__ int64_t Bs[256]; __shared__ T_A Axs[256]; __shared__ T_B Bxs[256]; /* int Bpl[9]; // local offsets into shared for multiple vectors of B int shr_vec[8] ; //columns of B we see in this task pair_id = Bucket[pfirst]; int64_t i = Mi[pair_id] ; int vecs = 1 ; int last_vec = i; shr_vec[0] = i; for (int id =1; id< plast-pfirst; id++) { pair_id = Bucket[pfirst+id]; i = Mi[pair_id]; if (i == last_vec) continue; vecs++; shr_vec[vecs] = i; last_vec = i; } int all_loaded = 0; Bpl[0] = 0; for ( int k = 0; k < vecs; k++) { int64_t pA = Ap[ shr_vec[k] ]; int64_t pA_end = Ap[ shr_vec[k] +1]; nnzA = pA_end - pA; Bpl[k+1] = Bpl[k] + nnzA; for (int i = tid ; i < nnzA; i+= blockDim.x) { As[ Bpl[k] +i ] = Ai[ pA + i ] ; } __syncthreads(); } //pre-load columns of B, which will be reused, to shared memory //Due to loading a contigious block with stride 1 this is fast all_loaded = (Bpl[vecs] < 256 ); if( tid == 0 ) { printf("block%d loaded %d vals from B, vecs=%d, all_loaded=%d\n", b, Bpl[vecs], vecs, all_loaded ); } __syncthreads(); // reset counter */ // Main loop over pairs for (int id = start + pfirst; // loop on pairs id < start+ plast; id ++ ) { int64_t pair_id = Bucket[id]; int64_t i = Mi[pair_id]; int64_t j = Ci[pair_id] >> 4; int64_t pA = Ap[i]; int64_t pA_end = Ap[i+1]; nnzA = pA_end - pA; int64_t pB = Bp[j]; int64_t pB_end = Bp[j+1]; nnzB = pB_end - pB; zc = 0 ; int j_last = -1 ; // No search, this warp does all the work int tx_start = pA; int tx_end = pA_end; int ty_start = pB; int ty_end = pB_end; for ( int i = tid; i < nnzA ; i+= blockDim.x) { As [i] = Ai[ pA + i]; Axs[i] = Ax[ pA + i]; } __syncthreads(); if ( j != j_last) { for ( int i = tid; i < nnzB ; i+= blockDim.x) { Bs [i] = Bi[ pB + i]; Bxs[i] = Bx[ pB + i]; } __syncthreads(); j_last = j; } /* if ( tid==0 ) { //printf("block %d dot %lld i,j= %lld,%lld\n", blockIdx.x, pair_id, i, j); printf("block%d dot %ld(i,j)=(%ld,%ld) xs,xe= %d,%d ys,ye = %d,%d \n", b, pair_id, i, j, tx_start,tx_end, ty_start, ty_end); //for(int a = 0; a < nnzA; a++) printf(" As[%d]:%ld ",a, As[j]); } tile.sync(); */ // Warp intersection: balanced by design, no idle threads. // Each 32 thread warp will handle 32 comparisons per loop. // Either A or B takes stride 4, other takes stride 8 // For this version A strides 4, B strides 8 T_A aki; T_B bkj; T_Z cij = GB_IDENTITY ; int Astride = nnzA > nnzB ? 8 : 4; int Ashift = nnzA > nnzB ? 3 : 2; int Amask = nnzA > nnzB ? 7 : 3; int Bstride = nnzB >= nnzA ? 8 : 4; //printf(" Astride = %d, Bstride = %d\n", Astride, Bstride); // TODO PLUS_PAIR_INT64, FP32, FP64: no need for cij_exists. 
// just check if cij > 0 int cij_exists = 0 ; //Warp intersection dot product int bitty_row = tid & Amask ; int bitty_col = tid >> Ashift ; int k = tx_start + bitty_row ; int l = ty_start + bitty_col ; //Ai[k] = As[ k -pA ]; for lookup //Bi[l] = Bs[ l -pB ]; int inc_k,inc_l; int active = ( ( k < tx_end) && (l < ty_end ) ); /* printf("block%d tid%d Ai,As=%ld,%ld Bi,Bs=%ld,%ld k,l =%d,%d active:%d\n", b,tid, Ai[k], As[k -pA], Bi[l], Bs[l -pB], k, l, active ); */ while ( tile.any(active) ) { inc_k = 0; inc_l = 0; int kp = k-pA; int lp = l-pB; if ( active ) { coalesced_group g = coalesced_threads(); if ( g.thread_rank() == g.size()-1) { inc_k = ( As[kp] <= Bs[lp] ) ; inc_l = ( Bs[lp] <= As[kp] ) ; // printf("block%d tid%d inc_k= %d inc_l = %d\n",b, tid, inc_k, inc_l ); } //tile.sync(); if ( As [kp] == Bs [lp] ) { //Axs[kp] = Ax[k]; //Bxs[lp] = Bx[l]; GB_GETA ( aki=(T_Z)Axs[kp] ) ; GB_GETB ( bkj=(T_Z)Bxs[lp] ) ; if (cij_exists) { T_Z t = GB_MULT( (T_Z) aki, (T_Z) bkj); GB_ADD_F( cij, t ) ; //printf("block%d thd%d ix at %ld(%ld) cij += %d * %d\n",b, tid, Ai[k], As[kp], aki, bkj); } else { cij_exists = 1 ; cij = GB_MULT ( (T_Z) aki, (T_Z) bkj) ; //printf(" thd%d ix at %ld(%ld) cij = %d * %d \n", tid, Ai[k], Ais[kp], aki, bkj); } } // TODO check terminal condition //printf(" block%u work value = %d, exists = %d\n", b, cij, cij_exists); //printf("block%d tid%d k,l = %d,%d Ai,Bi = %ld,%ld \n", b, tid, k, l, Ai[k], Bi[l] ); } //tile.sync(); //inc_k = tile.shfl_down( inc_k, 31-tid); if( tile.any(inc_k) ) { k =1+ tile.shfl_down(k,31-tid) + bitty_row ; // tid%Astride; //Ais [k-pA] = As[k-pA]; //Axs [bitty_row] = Ax[k]; } if( tile.any(inc_l) ) { l =1+ tile.shfl_down(l,31-tid) + bitty_col ; // tid/Astride; //Bis [l-pB] = Bs[l-pB]; //Bxs [bitty_col] = Bx[l]; } active = ( ( k < tx_end) && (l < ty_end ) ); //printf("block%d tid = %d k = %d l= %d active=%d\n", b, tid, k, l,active); } tile.sync(); //-------------------------------------------------------------------------- // reduce sum per-thread values to a single scalar, get OR of flag //-------------------------------------------------------------------------- // Do vote here for control. cij_exists = tile.any( cij_exists); tile.sync(); if (cij_exists) { cij = GB_reduce_sum<T_Z, tile_sz>( tile, cij ); } tile.sync(); // Atomic write result for this block to global mem if (tid == 0) { //printf ("final %d : %d exists = %d\n", b, cij, cij_exists) ; if (cij_exists) { //printf("block%d i,j =%ld,%ld cij = %d\n",b, i, j, cij); GB_PUTC( Cx[pair_id] = (T_C) cij ) ; GB_PUTC ( Ci[pair_id] = i ) ; } else { //printf(" dot %d is a zombie\n", pair_id); zc++; GB_PUTC ( Ci[pair_id] = GB_FLIP (i) ) ; } //__syncthreads(); if( zc > 0) { //printf("warp %d zombie count = %d\n", blockIdx.x, zc); atomicAdd( (unsigned long long int*)&(C->nzombies), (unsigned long long int)zc); //printf("blk:%d Czombie = %lld\n",blockIdx.x,C->zombies); } } tile.sync(); /* */ } }
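//------------------------------------------------------------------------------
// Editor's sketch (not part of GraphBLAS): a self-contained version of the
// tile-based reduction pattern used by GB_reduce_sum()/reduce_plus() above,
// specialized to the PLUS monoid on float. The kernel name warp_sum_kernel and
// the launch sizes are made up for illustration; the real code folds semiring
// partial products (GB_ADD/GB_MULT) per 32-thread tile instead of plain +.
//------------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__device__ __inline__ float tile_reduce_plus (cg::thread_block_tile<32> g, float val)
{
    // each iteration halves the number of lanes still holding a partial sum
    for (int i = g.size() / 2; i > 0; i /= 2)
    {
        val += g.shfl_down (val, i) ;
    }
    return val ;    // only lane 0 of the tile returns the full sum
}

__global__ void warp_sum_kernel (const float *in, float *out, int n)
{
    cg::thread_block_tile<32> tile =
        cg::tiled_partition<32> (cg::this_thread_block ()) ;
    int i = blockIdx.x * blockDim.x + threadIdx.x ;
    float v = (i < n) ? in [i] : 0.0f ;
    v = tile_reduce_plus (tile, v) ;
    if (tile.thread_rank () == 0)
    {
        atomicAdd (out, v) ;    // one atomic per 32-thread tile
    }
}

int main ()
{
    const int n = 128 ;
    float *in, *out ;
    cudaMallocManaged (&in,  n * sizeof (float)) ;
    cudaMallocManaged (&out, sizeof (float)) ;
    for (int i = 0 ; i < n ; i++) in [i] = 1.0f ;
    out [0] = 0.0f ;
    warp_sum_kernel <<<1, n>>> (in, out, n) ;
    cudaDeviceSynchronize () ;
    printf ("sum = %g (expected %d)\n", out [0], n) ;
    return 0 ;
}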
#include "PPPMForceComputeGPU.cuh" #include "hoomd/TextureTools.h" // __scalar2int_rd is __float2int_rd in single, __double2int_rd in double #ifdef SINGLE_PRECISION #define __scalar2int_rd __float2int_rd #else #define __scalar2int_rd __double2int_rd #endif #define GPU_PPPM_MAX_ORDER 7 // workaround for HIP bug #ifdef __HIP_PLATFORM_HCC__ inline __device__ float myAtomicAdd(float* address, float val) { unsigned int* address_as_uint = (unsigned int*)address; unsigned int old = *address_as_uint, assumed; do { assumed = old; old = atomicCAS(address_as_uint, assumed, __float_as_uint(val + __uint_as_float(assumed))); } while (assumed != old); return __uint_as_float(old); } #else inline __device__ float myAtomicAdd(float* address, float val) { return atomicAdd(address, val); } #endif //! GPU implementation of sinc(x)==sin(x)/x __device__ Scalar gpu_sinc(Scalar x) { Scalar sinc = 0; //! Coefficients of a power expansion of sin(x)/x const Scalar sinc_coeff[] = {Scalar(1.0), Scalar(-1.0 / 6.0), Scalar(1.0 / 120.0), Scalar(-1.0 / 5040.0), Scalar(1.0 / 362880.0), Scalar(-1.0 / 39916800.0)}; if (x * x <= Scalar(1.0)) { Scalar term = Scalar(1.0); for (unsigned int i = 0; i < 6; ++i) { sinc += sinc_coeff[i] * term; term *= x * x; } } else { sinc = fast::sin(x) / x; } return sinc; } __device__ int3 find_cell(const Scalar3& pos, const unsigned int& inner_nx, const unsigned int& inner_ny, const unsigned int& inner_nz, const uint3& n_ghost_cells, const BoxDim& box, int order, Scalar3& dr) { // compute coordinates in units of the mesh size Scalar3 f = box.makeFraction(pos); uchar3 periodic = box.getPeriodic(); Scalar3 reduced_pos = make_scalar3(f.x * (Scalar)inner_nx, f.y * (Scalar)inner_ny, f.z * (Scalar)inner_nz); reduced_pos += make_scalar3(n_ghost_cells.x, n_ghost_cells.y, n_ghost_cells.z); Scalar shift, shiftone; if (order % 2) { shift = Scalar(0.5); shiftone = Scalar(0.0); } else { shift = Scalar(0.0); shiftone = Scalar(0.5); } int ix = __scalar2int_rd(reduced_pos.x + shift); int iy = __scalar2int_rd(reduced_pos.y + shift); int iz = __scalar2int_rd(reduced_pos.z + shift); // set distance to cell center dr.x = shiftone + (Scalar)ix - reduced_pos.x; dr.y = shiftone + (Scalar)iy - reduced_pos.y; dr.z = shiftone + (Scalar)iz - reduced_pos.z; // handle particles on the boundary if (periodic.x && ix == (int)inner_nx) ix = 0; if (periodic.y && iy == (int)inner_ny) iy = 0; if (periodic.z && iz == (int)inner_nz) iz = 0; return make_int3(ix, iy, iz); } __global__ void gpu_assign_particles_kernel(const uint3 mesh_dim, const uint3 n_ghost_bins, unsigned int work_size, const unsigned int* d_index_array, const Scalar4* d_postype, const Scalar* d_charge, hipfftComplex* d_mesh, Scalar V_cell, int order, unsigned int offset, BoxDim box, const Scalar* d_rho_coeff) { extern __shared__ Scalar s_coeff[]; // load in interpolation coefficients unsigned int ncoeffs = order * (2 * order + 1); for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ncoeffs) { s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x]; } } __syncthreads(); unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx >= work_size) return; unsigned int group_idx = work_idx + offset; int3 bin_dim = make_int3(mesh_dim.x + 2 * n_ghost_bins.x, mesh_dim.y + 2 * n_ghost_bins.y, mesh_dim.z + 2 * n_ghost_bins.z); // grid coordinates of bin (column-major) unsigned int idx = d_index_array[group_idx]; Scalar4 postype = d_postype[idx]; Scalar3 pos = make_scalar3(postype.x, 
postype.y, postype.z); Scalar qi = d_charge[idx]; // compute coordinates in units of the cell size Scalar3 dr = make_scalar3(0, 0, 0); int3 bin_coord = find_cell(pos, mesh_dim.x, mesh_dim.y, mesh_dim.z, n_ghost_bins, box, order, dr); // ignore particles that are not within our domain (the error should be caught by HOOMD's cell // list) if (bin_coord.x < 0 || bin_coord.x >= bin_dim.x || bin_coord.y < 0 || bin_coord.y >= bin_dim.y || bin_coord.z < 0 || bin_coord.z >= bin_dim.z) { return; } int i = bin_coord.x; int j = bin_coord.y; int k = bin_coord.z; int nlower = -(order - 1) / 2; int nupper = order / 2; Scalar result; int mult_fact = 2 * order + 1; Scalar x0 = qi; bool ignore_x = false; bool ignore_y = false; bool ignore_z = false; // loop over neighboring bins for (int l = nlower; l <= nupper; ++l) { // precalculate assignment factor result = Scalar(0.0); for (int iorder = order - 1; iorder >= 0; iorder--) { result = s_coeff[l - nlower + iorder * mult_fact] + result * dr.x; } Scalar y0 = x0 * result; int neighi = i + l; if (neighi >= (int)bin_dim.x) { if (!n_ghost_bins.x) neighi -= (int)bin_dim.x; else ignore_x = true; } else if (neighi < 0) { if (!n_ghost_bins.x) neighi += (int)bin_dim.x; else ignore_x = true; } for (int m = nlower; m <= nupper; ++m) { result = Scalar(0.0); for (int iorder = order - 1; iorder >= 0; iorder--) { result = s_coeff[m - nlower + iorder * mult_fact] + result * dr.y; } Scalar z0 = y0 * result; int neighj = j + m; if (neighj >= (int)bin_dim.y) { if (!n_ghost_bins.y) neighj -= (int)bin_dim.y; else ignore_y = true; } else if (neighj < 0) { if (!n_ghost_bins.y) neighj += (int)bin_dim.y; else ignore_y = true; } for (int n = nlower; n <= nupper; ++n) { result = Scalar(0.0); for (int iorder = order - 1; iorder >= 0; iorder--) { result = s_coeff[n - nlower + iorder * mult_fact] + result * dr.z; } int neighk = k + n; if (neighk >= (int)bin_dim.z) { if (!n_ghost_bins.z) neighk -= (int)bin_dim.z; else ignore_z = true; } else if (neighk < 0) { if (!n_ghost_bins.z) neighk += (int)bin_dim.z; else ignore_z = true; } if (!ignore_x && !ignore_y && !ignore_z) { // write out to global memory using row-major unsigned int cell_idx = neighi + bin_dim.x * (neighj + bin_dim.y * neighk); // compute fraction of particle density assigned to cell // from particles in this bin myAtomicAdd(&d_mesh[cell_idx].x, z0 * result / V_cell); } ignore_z = false; } ignore_y = false; } ignore_x = false; } // end of loop over neighboring bins } __global__ void gpu_reduce_meshes(const unsigned int mesh_elements, const hipfftComplex* d_mesh_scratch, hipfftComplex* d_mesh, unsigned int ngpu) { unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= mesh_elements) return; hipfftComplex res; res.x = 0; res.y = 0; // reduce over all temporary meshes for (unsigned int igpu = 0; igpu < ngpu; ++igpu) { hipfftComplex m = d_mesh_scratch[idx + igpu * mesh_elements]; res.x += m.x; res.y += m.y; } d_mesh[idx] = res; } void gpu_assign_particles(const uint3 mesh_dim, const uint3 n_ghost_bins, const uint3 grid_dim, unsigned int group_size, const unsigned int* d_index_array, const Scalar4* d_postype, const Scalar* d_charge, hipfftComplex* d_mesh, hipfftComplex* d_mesh_scratch, const unsigned int mesh_elements, int order, const BoxDim& box, unsigned int block_size, const Scalar* d_rho_coeff, const hipDeviceProp_t& dev_prop, const GPUPartition& gpu_partition) { hipMemsetAsync(d_mesh, 0, sizeof(hipfftComplex) * grid_dim.x * grid_dim.y * grid_dim.z); Scalar V_cell = box.getVolume() / (Scalar)(mesh_dim.x * mesh_dim.y 
* mesh_dim.z); unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_assign_particles_kernel); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(max_block_size, block_size); while (attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock) { run_block_size -= dev_prop.warpSize; } // iterate over active GPUs in reverse, to end up on first GPU when returning from this function unsigned int ngpu = gpu_partition.getNumActiveGPUs(); for (int idev = ngpu - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); if (ngpu > 1) { // zero the temporary mesh array hipMemsetAsync(d_mesh_scratch + idev * mesh_elements, 0, sizeof(hipfftComplex) * mesh_elements); } unsigned int nwork = range.second - range.first; unsigned int n_blocks = nwork / run_block_size + 1; const size_t shared_bytes = order * (2 * order + 1) * sizeof(Scalar); hipLaunchKernelGGL((gpu_assign_particles_kernel), dim3(n_blocks), dim3(run_block_size), shared_bytes, 0, mesh_dim, n_ghost_bins, nwork, d_index_array, d_postype, d_charge, ngpu > 1 ? d_mesh_scratch + idev * mesh_elements : d_mesh, V_cell, order, range.first, box, d_rho_coeff); } } //! Reduce temporary arrays for every GPU void gpu_reduce_meshes(const unsigned int mesh_elements, const hipfftComplex* d_mesh_scratch, hipfftComplex* d_mesh, const unsigned int ngpu, const unsigned int block_size) { // reduce meshes on GPU 0 hipLaunchKernelGGL((gpu_reduce_meshes), dim3(mesh_elements / block_size + 1), dim3(block_size), 0, 0, mesh_elements, d_mesh_scratch, d_mesh, ngpu); } __global__ void gpu_compute_mesh_virial_kernel(const unsigned int n_wave_vectors, hipfftComplex* d_fourier_mesh, Scalar* d_inf_f, Scalar* d_virial_mesh, const Scalar3* d_k, const bool exclude_dc, Scalar kappa) { unsigned int idx; idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= n_wave_vectors) return; if (!exclude_dc || idx != 0) { // non-zero wave vector hipfftComplex fourier = d_fourier_mesh[idx]; Scalar3 k = d_k[idx]; Scalar rhog = (fourier.x * fourier.x + fourier.y * fourier.y) * d_inf_f[idx]; Scalar vterm = -Scalar(2.0) * (Scalar(1.0) / dot(k, k) + Scalar(0.25) / (kappa * kappa)); d_virial_mesh[0 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.x * k.x); // xx d_virial_mesh[1 * n_wave_vectors + idx] = rhog * (vterm * k.x * k.y); // xy d_virial_mesh[2 * n_wave_vectors + idx] = rhog * (vterm * k.x * k.z); // xz d_virial_mesh[3 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.y * k.y); // yy d_virial_mesh[4 * n_wave_vectors + idx] = rhog * (vterm * k.y * k.z); // yz d_virial_mesh[5 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.z * k.z); // zz } else { d_virial_mesh[0 * n_wave_vectors + idx] = Scalar(0.0); d_virial_mesh[1 * n_wave_vectors + idx] = Scalar(0.0); d_virial_mesh[2 * n_wave_vectors + idx] = Scalar(0.0); d_virial_mesh[3 * n_wave_vectors + idx] = Scalar(0.0); d_virial_mesh[4 * n_wave_vectors + idx] = Scalar(0.0); d_virial_mesh[5 * n_wave_vectors + idx] = Scalar(0.0); } } void gpu_compute_mesh_virial(const unsigned int n_wave_vectors, hipfftComplex* d_fourier_mesh, Scalar* d_inf_f, Scalar* d_virial_mesh, const Scalar3* d_k, const bool exclude_dc, Scalar kappa) { const unsigned int block_size = 256; dim3 grid(n_wave_vectors / block_size + 1, 1, 1); hipLaunchKernelGGL((gpu_compute_mesh_virial_kernel), dim3(grid), dim3(block_size), 0, 0, n_wave_vectors, d_fourier_mesh, d_inf_f, d_virial_mesh, d_k, exclude_dc, kappa); } __global__ void gpu_update_meshes_kernel(const unsigned int 
n_wave_vectors, hipfftComplex* d_fourier_mesh, hipfftComplex* d_fourier_mesh_G_x, hipfftComplex* d_fourier_mesh_G_y, hipfftComplex* d_fourier_mesh_G_z, const Scalar* d_inf_f, const Scalar3* d_k, unsigned int NNN) { unsigned int k; k = blockDim.x * blockIdx.x + threadIdx.x; if (k >= n_wave_vectors) return; hipfftComplex f = d_fourier_mesh[k]; Scalar scaled_inf_f = d_inf_f[k] / ((Scalar)NNN); Scalar3 kvec = d_k[k]; // Normalization hipfftComplex fourier_G_x; fourier_G_x.x = f.y * kvec.x * scaled_inf_f; fourier_G_x.y = -f.x * kvec.x * scaled_inf_f; hipfftComplex fourier_G_y; fourier_G_y.x = f.y * kvec.y * scaled_inf_f; fourier_G_y.y = -f.x * kvec.y * scaled_inf_f; hipfftComplex fourier_G_z; fourier_G_z.x = f.y * kvec.z * scaled_inf_f; fourier_G_z.y = -f.x * kvec.z * scaled_inf_f; // store in global memory d_fourier_mesh_G_x[k] = fourier_G_x; d_fourier_mesh_G_y[k] = fourier_G_y; d_fourier_mesh_G_z[k] = fourier_G_z; } void gpu_update_meshes(const unsigned int n_wave_vectors, hipfftComplex* d_fourier_mesh, hipfftComplex* d_fourier_mesh_G_x, hipfftComplex* d_fourier_mesh_G_y, hipfftComplex* d_fourier_mesh_G_z, const Scalar* d_inf_f, const Scalar3* d_k, unsigned int NNN, unsigned int block_size) { unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_update_meshes_kernel); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(max_block_size, block_size); dim3 grid(n_wave_vectors / run_block_size + 1, 1, 1); hipLaunchKernelGGL((gpu_update_meshes_kernel), dim3(grid), dim3(run_block_size), 0, 0, n_wave_vectors, d_fourier_mesh, d_fourier_mesh_G_x, d_fourier_mesh_G_y, d_fourier_mesh_G_z, d_inf_f, d_k, NNN); } __global__ void gpu_compute_forces_kernel(const unsigned int work_size, const Scalar4* d_postype, Scalar4* d_force, const uint3 grid_dim, const uint3 n_ghost_cells, const Scalar* d_charge, const BoxDim box, int order, const unsigned int* d_index_array, const hipfftComplex* inv_fourier_mesh_x, const hipfftComplex* inv_fourier_mesh_y, const hipfftComplex* inv_fourier_mesh_z, const Scalar* d_rho_coeff, const unsigned int offset) { extern __shared__ Scalar s_coeff[]; // load in interpolation coefficients unsigned int ncoeffs = order * (2 * order + 1); for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < ncoeffs) { s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x]; } } __syncthreads(); unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x; if (work_idx >= work_size) return; unsigned int group_idx = work_idx + offset; unsigned int idx = d_index_array[group_idx]; int3 inner_dim = make_int3(grid_dim.x - 2 * n_ghost_cells.x, grid_dim.y - 2 * n_ghost_cells.y, grid_dim.z - 2 * n_ghost_cells.z); Scalar4 postype = d_postype[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); unsigned int type = __scalar_as_int(postype.w); Scalar qi = d_charge[idx]; Scalar3 dr = make_scalar3(0, 0, 0); // find cell the particle is in int3 cell_coord = find_cell(pos, inner_dim.x, inner_dim.y, inner_dim.z, n_ghost_cells, box, order, dr); // ignore particles that are not within our domain (the error should be caught by HOOMD's cell // list) if (cell_coord.x < 0 || cell_coord.x >= (int)grid_dim.x || cell_coord.y < 0 || cell_coord.y >= (int)grid_dim.y || cell_coord.z < 0 || cell_coord.z >= (int)grid_dim.z) { return; } Scalar3 force = make_scalar3(0.0, 0.0, 0.0); int nlower = -(order - 1) / 2; int nupper = order / 2; Scalar result; int mult_fact = 2 * order + 1; 
// back-interpolate forces from neighboring mesh points for (int l = nlower; l <= nupper; ++l) { result = Scalar(0.0); for (int k = order - 1; k >= 0; k--) { result = s_coeff[l - nlower + k * mult_fact] + result * dr.x; } Scalar x0 = result; for (int m = nlower; m <= nupper; ++m) { result = Scalar(0.0); for (int k = order - 1; k >= 0; k--) { result = s_coeff[m - nlower + k * mult_fact] + result * dr.y; } Scalar y0 = x0 * result; for (int n = nlower; n <= nupper; ++n) { result = Scalar(0.0); for (int k = order - 1; k >= 0; k--) { result = s_coeff[n - nlower + k * mult_fact] + result * dr.z; } Scalar z0 = y0 * result; int neighl = (int)cell_coord.x + l; int neighm = (int)cell_coord.y + m; int neighn = (int)cell_coord.z + n; if (!n_ghost_cells.x) { if (neighl >= (int)grid_dim.x) neighl -= grid_dim.x; else if (neighl < 0) neighl += grid_dim.x; } if (!n_ghost_cells.y) { if (neighm >= (int)grid_dim.y) neighm -= grid_dim.y; else if (neighm < 0) neighm += grid_dim.y; } if (!n_ghost_cells.z) { if (neighn >= (int)grid_dim.z) neighn -= grid_dim.z; else if (neighn < 0) neighn += grid_dim.z; } // use column-major layout unsigned int cell_idx = neighl + grid_dim.x * (neighm + grid_dim.y * neighn); hipfftComplex inv_mesh_x = inv_fourier_mesh_x[cell_idx]; hipfftComplex inv_mesh_y = inv_fourier_mesh_y[cell_idx]; hipfftComplex inv_mesh_z = inv_fourier_mesh_z[cell_idx]; force.x += qi * z0 * inv_mesh_x.x; force.y += qi * z0 * inv_mesh_y.x; force.z += qi * z0 * inv_mesh_z.x; } } } // end neighbor cells loop d_force[idx] = make_scalar4(force.x, force.y, force.z, 0.0); } void gpu_compute_forces(const unsigned int N, const Scalar4* d_postype, Scalar4* d_force, const hipfftComplex* d_inv_fourier_mesh_x, const hipfftComplex* d_inv_fourier_mesh_y, const hipfftComplex* d_inv_fourier_mesh_z, const uint3 grid_dim, const uint3 n_ghost_cells, const Scalar* d_charge, const BoxDim& box, int order, const unsigned int* d_index_array, const GPUPartition& gpu_partition, const GPUPartition& all_gpu_partition, const Scalar* d_rho_coeff, unsigned int block_size, bool local_fft, unsigned int inv_mesh_elements) { unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_forces_kernel); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(max_block_size, block_size); // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = all_gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = all_gpu_partition.getRangeAndSetGPU(idev); // reset force array for ALL particles hipMemsetAsync(d_force + range.first, 0, sizeof(Scalar4) * (range.second - range.first)); } // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; unsigned int n_blocks = nwork / run_block_size + 1; const size_t shared_bytes = order * (2 * order + 1) * sizeof(Scalar); hipLaunchKernelGGL( (gpu_compute_forces_kernel), dim3(n_blocks), dim3(run_block_size), shared_bytes, 0, nwork, d_postype, d_force, grid_dim, n_ghost_cells, d_charge, box, order, d_index_array, local_fft ? d_inv_fourier_mesh_x + idev * inv_mesh_elements : d_inv_fourier_mesh_x, local_fft ? d_inv_fourier_mesh_y + idev * inv_mesh_elements : d_inv_fourier_mesh_y, local_fft ? 
d_inv_fourier_mesh_z + idev * inv_mesh_elements : d_inv_fourier_mesh_z, d_rho_coeff, range.first); } } __global__ void kernel_calculate_pe_partial(int n_wave_vectors, Scalar* sum_partial, const hipfftComplex* d_fourier_mesh, const Scalar* d_inf_f, const bool exclude_dc) { HIP_DYNAMIC_SHARED(Scalar, sdata) unsigned int tidx = threadIdx.x; unsigned int j; j = blockDim.x * blockIdx.x + threadIdx.x; Scalar mySum = Scalar(0.0); if (j < n_wave_vectors) { if (!exclude_dc || j != 0) { mySum = d_fourier_mesh[j].x * d_fourier_mesh[j].x + d_fourier_mesh[j].y * d_fourier_mesh[j].y; mySum *= d_inf_f[j]; } } sdata[tidx] = mySum; __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (tidx < offs) { sdata[tidx] += sdata[tidx + offs]; } offs >>= 1; __syncthreads(); } // write result to global memory if (tidx == 0) sum_partial[blockIdx.x] = sdata[0]; } __global__ void kernel_final_reduce_pe(Scalar* sum_partial, unsigned int nblocks, Scalar* sum) { HIP_DYNAMIC_SHARED(Scalar, smem) if (threadIdx.x == 0) *sum = Scalar(0.0); for (int start = 0; start < nblocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < nblocks) smem[threadIdx.x] = sum_partial[start + threadIdx.x]; else smem[threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) smem[threadIdx.x] += smem[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { *sum += smem[0]; } } } void gpu_compute_pe(unsigned int n_wave_vectors, Scalar* d_sum_partial, Scalar* d_sum, const hipfftComplex* d_fourier_mesh, const Scalar* d_inf_f, const unsigned int block_size, const uint3 mesh_dim, const bool exclude_dc) { unsigned int n_blocks = n_wave_vectors / block_size + 1; unsigned int shared_size = (unsigned int)(block_size * sizeof(Scalar)); dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((kernel_calculate_pe_partial), dim3(grid), dim3(block_size), shared_size, 0, n_wave_vectors, d_sum_partial, d_fourier_mesh, d_inf_f, exclude_dc); // calculate final sum of mesh values const unsigned int final_block_size = 256; shared_size = final_block_size * sizeof(Scalar); hipLaunchKernelGGL((kernel_final_reduce_pe), dim3(1), dim3(final_block_size), shared_size, 0, d_sum_partial, n_blocks, d_sum); } __global__ void kernel_calculate_virial_partial(int n_wave_vectors, Scalar* sum_virial_partial, const Scalar* d_mesh_virial) { HIP_DYNAMIC_SHARED(Scalar, sdata) unsigned int j; j = blockDim.x * blockIdx.x + threadIdx.x; unsigned int tidx = threadIdx.x; Scalar mySum_xx = Scalar(0.0); Scalar mySum_xy = Scalar(0.0); Scalar mySum_xz = Scalar(0.0); Scalar mySum_yy = Scalar(0.0); Scalar mySum_yz = Scalar(0.0); Scalar mySum_zz = Scalar(0.0); if (j < n_wave_vectors) { mySum_xx = d_mesh_virial[0 * n_wave_vectors + j]; mySum_xy = d_mesh_virial[1 * n_wave_vectors + j]; mySum_xz = d_mesh_virial[2 * n_wave_vectors + j]; mySum_yy = d_mesh_virial[3 * n_wave_vectors + j]; mySum_yz = d_mesh_virial[4 * n_wave_vectors + j]; mySum_zz = d_mesh_virial[5 * n_wave_vectors + j]; } sdata[0 * blockDim.x + tidx] = mySum_xx; sdata[1 * blockDim.x + tidx] = mySum_xy; sdata[2 * blockDim.x + tidx] = mySum_xz; sdata[3 * blockDim.x + tidx] = mySum_yy; sdata[4 * blockDim.x + tidx] = mySum_yz; sdata[5 * blockDim.x + tidx] = mySum_zz; __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (tidx < offs) { sdata[0 * blockDim.x + tidx] += sdata[0 * blockDim.x + tidx + offs]; sdata[1 * blockDim.x + tidx] += sdata[1 * blockDim.x + tidx + offs]; sdata[2 * 
blockDim.x + tidx] += sdata[2 * blockDim.x + tidx + offs]; sdata[3 * blockDim.x + tidx] += sdata[3 * blockDim.x + tidx + offs]; sdata[4 * blockDim.x + tidx] += sdata[4 * blockDim.x + tidx + offs]; sdata[5 * blockDim.x + tidx] += sdata[5 * blockDim.x + tidx + offs]; } offs >>= 1; __syncthreads(); } // write result to global memory if (tidx == 0) { sum_virial_partial[0 * gridDim.x + blockIdx.x] = sdata[0 * blockDim.x]; sum_virial_partial[1 * gridDim.x + blockIdx.x] = sdata[1 * blockDim.x]; sum_virial_partial[2 * gridDim.x + blockIdx.x] = sdata[2 * blockDim.x]; sum_virial_partial[3 * gridDim.x + blockIdx.x] = sdata[3 * blockDim.x]; sum_virial_partial[4 * gridDim.x + blockIdx.x] = sdata[4 * blockDim.x]; sum_virial_partial[5 * gridDim.x + blockIdx.x] = sdata[5 * blockDim.x]; } } __global__ void kernel_final_reduce_virial(Scalar* sum_virial_partial, unsigned int nblocks, Scalar* sum_virial) { HIP_DYNAMIC_SHARED(Scalar, smem) if (threadIdx.x == 0) { sum_virial[0] = Scalar(0.0); sum_virial[1] = Scalar(0.0); sum_virial[2] = Scalar(0.0); sum_virial[3] = Scalar(0.0); sum_virial[4] = Scalar(0.0); sum_virial[5] = Scalar(0.0); } for (int start = 0; start < nblocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < nblocks) { smem[0 * blockDim.x + threadIdx.x] = sum_virial_partial[0 * nblocks + start + threadIdx.x]; smem[1 * blockDim.x + threadIdx.x] = sum_virial_partial[1 * nblocks + start + threadIdx.x]; smem[2 * blockDim.x + threadIdx.x] = sum_virial_partial[2 * nblocks + start + threadIdx.x]; smem[3 * blockDim.x + threadIdx.x] = sum_virial_partial[3 * nblocks + start + threadIdx.x]; smem[4 * blockDim.x + threadIdx.x] = sum_virial_partial[4 * nblocks + start + threadIdx.x]; smem[5 * blockDim.x + threadIdx.x] = sum_virial_partial[5 * nblocks + start + threadIdx.x]; } else { smem[0 * blockDim.x + threadIdx.x] = Scalar(0.0); smem[1 * blockDim.x + threadIdx.x] = Scalar(0.0); smem[2 * blockDim.x + threadIdx.x] = Scalar(0.0); smem[3 * blockDim.x + threadIdx.x] = Scalar(0.0); smem[4 * blockDim.x + threadIdx.x] = Scalar(0.0); smem[5 * blockDim.x + threadIdx.x] = Scalar(0.0); } __syncthreads(); // reduce the sum int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { smem[0 * blockDim.x + threadIdx.x] += smem[0 * blockDim.x + threadIdx.x + offs]; smem[1 * blockDim.x + threadIdx.x] += smem[1 * blockDim.x + threadIdx.x + offs]; smem[2 * blockDim.x + threadIdx.x] += smem[2 * blockDim.x + threadIdx.x + offs]; smem[3 * blockDim.x + threadIdx.x] += smem[3 * blockDim.x + threadIdx.x + offs]; smem[4 * blockDim.x + threadIdx.x] += smem[4 * blockDim.x + threadIdx.x + offs]; smem[5 * blockDim.x + threadIdx.x] += smem[5 * blockDim.x + threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { sum_virial[0] += smem[0 * blockDim.x]; sum_virial[1] += smem[1 * blockDim.x]; sum_virial[2] += smem[2 * blockDim.x]; sum_virial[3] += smem[3 * blockDim.x]; sum_virial[4] += smem[4 * blockDim.x]; sum_virial[5] += smem[5 * blockDim.x]; } } } void gpu_compute_virial(unsigned int n_wave_vectors, Scalar* d_sum_virial_partial, Scalar* d_sum_virial, const Scalar* d_mesh_virial, const unsigned int block_size) { unsigned int n_blocks = n_wave_vectors / block_size + 1; unsigned int shared_size = (unsigned int)(6 * block_size * sizeof(Scalar)); dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((kernel_calculate_virial_partial), dim3(grid), dim3(block_size), shared_size, 0, n_wave_vectors, d_sum_virial_partial, d_mesh_virial); // calculate final virial values const unsigned int final_block_size = 
256; shared_size = 6 * final_block_size * sizeof(Scalar); hipLaunchKernelGGL((kernel_final_reduce_virial), dim3(1), dim3(final_block_size), shared_size, 0, d_sum_virial_partial, n_blocks, d_sum_virial); } template<bool local_fft> __global__ void gpu_compute_influence_function_kernel(const uint3 mesh_dim, const unsigned int n_wave_vectors, const uint3 global_dim, Scalar* d_inf_f, Scalar3* d_k, const Scalar3 b1, const Scalar3 b2, const Scalar3 b3, const uint3 pidx, const uint3 pdim, int nbx, int nby, int nbz, const Scalar* gf_b, int order, Scalar kappa, Scalar alpha) { unsigned int kidx; kidx = blockDim.x * blockIdx.x + threadIdx.x; if (kidx >= n_wave_vectors) return; int l, m, n; if (local_fft) { // use row-major layout int ny = mesh_dim.y; int nx = mesh_dim.x; n = kidx / ny / nx; m = (kidx - n * ny * nx) / nx; l = kidx % nx; } #ifdef ENABLE_MPI else { // local layout: row-major int ny = mesh_dim.y; int nx = mesh_dim.x; int n_local = kidx / ny / nx; int m_local = (kidx - n_local * ny * nx) / nx; int l_local = kidx % nx; // cyclic distribution l = l_local * pdim.x + pidx.x; m = m_local * pdim.y + pidx.y; n = n_local * pdim.z + pidx.z; } #endif // compute Miller indices if (l >= (int)(global_dim.x / 2 + global_dim.x % 2)) l -= (int)global_dim.x; if (m >= (int)(global_dim.y / 2 + global_dim.y % 2)) m -= (int)global_dim.y; if (n >= (int)(global_dim.z / 2 + global_dim.z % 2)) n -= (int)global_dim.z; Scalar val; Scalar3 kval = (Scalar)l * b1 + (Scalar)m * b2 + (Scalar)n * b3; Scalar3 kH = Scalar(2.0 * M_PI) * make_scalar3(Scalar(1.0) / (Scalar)global_dim.x, Scalar(1.0) / (Scalar)global_dim.y, Scalar(1.0) / (Scalar)global_dim.z); Scalar snx = fast::sin(Scalar(0.5) * l * kH.x); Scalar snx2 = snx * snx; Scalar sny = fast::sin(Scalar(0.5) * m * kH.y); Scalar sny2 = sny * sny; Scalar snz = fast::sin(Scalar(0.5) * n * kH.z); Scalar snz2 = snz * snz; Scalar sx(0.0), sy(0.0), sz(0.0); for (int iorder = order - 1; iorder >= 0; iorder--) { sx = gf_b[iorder] + sx * snx2; sy = gf_b[iorder] + sy * sny2; sz = gf_b[iorder] + sz * snz2; } Scalar denominator = sx * sy * sz; denominator *= denominator; if (l != 0 || m != 0 || n != 0) { Scalar sum1(0.0); Scalar numerator = Scalar(4.0 * M_PI) / dot(kval, kval); for (int ix = -nbx; ix <= nbx; ix++) { Scalar qx = ((Scalar)l + (Scalar)ix * global_dim.x); Scalar3 knx = qx * b1; Scalar argx = Scalar(0.5) * qx * kH.x; Scalar wxs = gpu_sinc(argx); Scalar wx(1.0); for (int iorder = 0; iorder < order; ++iorder) { wx *= wxs; } for (int iy = -nby; iy <= nby; iy++) { Scalar qy = ((Scalar)m + (Scalar)iy * global_dim.y); Scalar3 kny = qy * b2; Scalar argy = Scalar(0.5) * qy * kH.y; Scalar wys = gpu_sinc(argy); Scalar wy(1.0); for (int iorder = 0; iorder < order; ++iorder) { wy *= wys; } for (int iz = -nbz; iz <= nbz; iz++) { Scalar qz = ((Scalar)n + (Scalar)iz * global_dim.z); Scalar3 knz = qz * b3; Scalar argz = Scalar(0.5) * qz * kH.z; Scalar wzs = gpu_sinc(argz); Scalar wz(1.0); for (int iorder = 0; iorder < order; ++iorder) { wz *= wzs; } Scalar3 kn = knx + kny + knz; Scalar dot1 = dot(kn, kval); Scalar dot2 = dot(kn, kn) + alpha * alpha; Scalar arg_gauss = Scalar(0.25) * dot2 / kappa / kappa; Scalar gauss = exp(-arg_gauss); sum1 += (dot1 / dot2) * gauss * wx * wx * wy * wy * wz * wz; } } } val = numerator * sum1 / denominator; } else { val = Scalar(0.0); } // write out result d_inf_f[kidx] = val; d_k[kidx] = kval; } void gpu_compute_influence_function(const uint3 mesh_dim, const uint3 global_dim, Scalar* d_inf_f, Scalar3* d_k, const BoxDim& global_box, const bool local_fft, 
const uint3 pidx, const uint3 pdim, const Scalar EPS_HOC, Scalar kappa, Scalar alpha, const Scalar* d_gf_b, int order, unsigned int block_size) { // compute reciprocal lattice vectors Scalar3 a1 = global_box.getLatticeVector(0); Scalar3 a2 = global_box.getLatticeVector(1); Scalar3 a3 = global_box.getLatticeVector(2); Scalar V_box = global_box.getVolume(); Scalar3 b1 = Scalar(2.0 * M_PI) * make_scalar3(a2.y * a3.z - a2.z * a3.y, a2.z * a3.x - a2.x * a3.z, a2.x * a3.y - a2.y * a3.x) / V_box; Scalar3 b2 = Scalar(2.0 * M_PI) * make_scalar3(a3.y * a1.z - a3.z * a1.y, a3.z * a1.x - a3.x * a1.z, a3.x * a1.y - a3.y * a1.x) / V_box; Scalar3 b3 = Scalar(2.0 * M_PI) * make_scalar3(a1.y * a2.z - a1.z * a2.y, a1.z * a2.x - a1.x * a2.z, a1.x * a2.y - a1.y * a2.x) / V_box; unsigned int num_wave_vectors = mesh_dim.x * mesh_dim.y * mesh_dim.z; Scalar3 L = global_box.getL(); Scalar temp = floor(((kappa * L.x / (M_PI * global_dim.x)) * pow(-log(EPS_HOC), 0.25))); int nbx = (int)temp; temp = floor(((kappa * L.y / (M_PI * global_dim.y)) * pow(-log(EPS_HOC), 0.25))); int nby = (int)temp; temp = floor(((kappa * L.z / (M_PI * global_dim.z)) * pow(-log(EPS_HOC), 0.25))); int nbz = (int)temp; if (local_fft) { unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<true>); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(max_block_size, block_size); unsigned int n_blocks = num_wave_vectors / run_block_size; if (num_wave_vectors % run_block_size) n_blocks += 1; dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((gpu_compute_influence_function_kernel<true>), dim3(grid), dim3(run_block_size), 0, 0, mesh_dim, num_wave_vectors, global_dim, d_inf_f, d_k, b1, b2, b3, pidx, pdim, nbx, nby, nbz, d_gf_b, order, kappa, alpha); } #ifdef ENABLE_MPI else { unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<false>); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(max_block_size, block_size); unsigned int n_blocks = num_wave_vectors / run_block_size; if (num_wave_vectors % run_block_size) n_blocks += 1; dim3 grid(n_blocks, 1, 1); hipLaunchKernelGGL((gpu_compute_influence_function_kernel<false>), dim3(grid), dim3(run_block_size), 0, 0, mesh_dim, num_wave_vectors, global_dim, d_inf_f, d_k, b1, b2, b3, pidx, pdim, nbx, nby, nbz, d_gf_b, order, kappa, alpha); } #endif } //! 
The developer has chosen not to document this function __global__ void gpu_fix_exclusions_kernel(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const Scalar4* d_pos, const Scalar* d_charge, const BoxDim box, const unsigned int* d_n_neigh, const unsigned int* d_nlist, const Index2D nli, Scalar kappa, Scalar alpha, unsigned int* d_group_members, unsigned int group_size) { // start by identifying which particle we are to handle int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; const Scalar sqrtpi = sqrtf(M_PI); unsigned int n_neigh = d_n_neigh[idx]; Scalar4 postypei = __ldg(d_pos + idx); Scalar3 posi = make_scalar3(postypei.x, postypei.y, postypei.z); Scalar qi = __ldg(d_charge + idx); // initialize the force to 0 Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); Scalar virial[6]; for (unsigned int i = 0; i < 6; i++) virial[i] = Scalar(0.0); unsigned int cur_j = 0; // prefetch neighbor index unsigned int next_j = d_nlist[nli(idx, 0)]; for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++) { { // read the current neighbor index (MEM TRANSFER: 4 bytes) // prefetch the next value and set the current one cur_j = next_j; if (neigh_idx + 1 < n_neigh) next_j = d_nlist[nli(idx, neigh_idx + 1)]; // get the neighbor's position (MEM TRANSFER: 16 bytes) Scalar4 postypej = __ldg(d_pos + cur_j); Scalar3 posj = make_scalar3(postypej.x, postypej.y, postypej.z); Scalar qj = __ldg(d_charge + cur_j); // calculate dr (with periodic boundary conditions) (FLOPS: 3) Scalar3 dx = posi - posj; // apply periodic boundary conditions: (FLOPS 12) dx = box.minImage(dx); // calculate r squared (FLOPS: 5) Scalar rsq = dot(dx, dx); Scalar r = sqrtf(rsq); Scalar qiqj = qi * qj; Scalar expfac = fast::exp(-alpha * r); Scalar arg1 = kappa * r - alpha / Scalar(2.0) / kappa; Scalar arg2 = kappa * r + alpha / Scalar(2.0) / kappa; Scalar erffac = (::erf(arg1) * expfac + expfac - fast::erfc(arg2) * exp(alpha * r)) / (Scalar(2.0) * r); Scalar force_divr = qiqj * (expfac * Scalar(2.0) * kappa / sqrtpi * fast::exp(-arg1 * arg1) - Scalar(0.5) * alpha * (expfac * ::erfc(arg1) + fast::exp(alpha * r) * fast::erfc(arg2)) - erffac) / rsq; // subtract long-range part of pair-interaction Scalar pair_eng = -qiqj * erffac; Scalar force_div2r = Scalar(0.5) * force_divr; virial[0] += dx.x * dx.x * force_div2r; virial[1] += dx.x * dx.y * force_div2r; virial[2] += dx.x * dx.z * force_div2r; virial[3] += dx.y * dx.y * force_div2r; virial[4] += dx.y * dx.z * force_div2r; virial[5] += dx.z * dx.z * force_div2r; force.x += dx.x * force_divr; force.y += dx.y * force_divr; force.z += dx.z * force_divr; force.w += pair_eng; } } force.w *= Scalar(0.5); d_force[idx].x += force.x; d_force[idx].y += force.y; d_force[idx].z += force.z; d_force[idx].w += force.w; for (unsigned int i = 0; i < 6; i++) d_virial[i * virial_pitch + idx] += virial[i]; } } //! 
The developer has chosen not to document this function hipError_t gpu_fix_exclusions(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const unsigned int Nmax, const Scalar4* d_pos, const Scalar* d_charge, const BoxDim& box, const unsigned int* d_n_ex, const unsigned int* d_exlist, const Index2D nex, Scalar kappa, Scalar alpha, unsigned int* d_group_members, unsigned int group_size, int block_size) { dim3 grid(group_size / block_size + 1, 1, 1); dim3 threads(block_size, 1, 1); hipLaunchKernelGGL((gpu_fix_exclusions_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, d_pos, d_charge, box, d_n_ex, d_exlist, nex, kappa, alpha, d_group_members, group_size); return hipSuccess; }
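//! Editor's sketch (not part of HOOMD): the Horner-scheme evaluation used by
//! gpu_assign_particles_kernel() and gpu_compute_forces_kernel() above to turn
//! the coefficient table d_rho_coeff into a per-axis charge-assignment weight.
//! The helper name assignment_weight is hypothetical; Scalar is the float/double
//! alias provided by the surrounding HOOMD headers. The table is stored with
//! stride mult_fact = 2*order+1, so coefficient k of mesh offset l lives at
//! s_coeff[(l - nlower) + k*mult_fact]. The charge fraction deposited on a
//! neighboring cell is then qi * Wx * Wy * Wz / V_cell, matching the
//! myAtomicAdd(&d_mesh[cell_idx].x, z0 * result / V_cell) call in the kernel.
__device__ inline Scalar assignment_weight(const Scalar* s_coeff,
                                           int l,
                                           int nlower,
                                           int order,
                                           Scalar dr)
    {
    const int mult_fact = 2 * order + 1;
    Scalar w = Scalar(0.0);
    // evaluate the degree-(order-1) polynomial in dr via Horner's rule,
    // exactly as the inner loops "result = s_coeff[...] + result * dr" do above
    for (int k = order - 1; k >= 0; k--)
        {
        w = s_coeff[(l - nlower) + k * mult_fact] + w * dr;
        }
    return w;
    }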
#include <kfusion/cuda/device.hpp> using namespace kfusion; using namespace kfusion::device; /* * POTENTIAL GRADIENT */ __global__ void sobfu::device::calculate_potential_gradient_kernel(float2* phi_n_psi, float2* phi_global, float4* nabla_phi_n_o_psi, float4* L, float4* nabla_U, float w_reg, int dim_x, int dim_y, int dim_z) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > dim_x - 1 || y > dim_y - 1) { return; } int idx = y * dim_x + x; for (int i = 0; i <= dim_z - 1; idx += dim_y * dim_x, ++i) { float tsdf_n_psi = phi_n_psi[idx].x; float tsdf_global = phi_global[idx].x; nabla_U[idx] = (tsdf_n_psi - tsdf_global) * nabla_phi_n_o_psi[idx] + w_reg * L[idx]; } } void sobfu::device::calculate_potential_gradient(kfusion::device::TsdfVolume& phi_n_psi, kfusion::device::TsdfVolume& phi_global, sobfu::device::TsdfGradient& nabla_phi_n_o_psi, sobfu::device::Laplacian& L, sobfu::device::PotentialGradient& nabla_U, float w_reg) { dim3 block(64, 16); dim3 grid(kfusion::device::divUp(phi_n_psi.dims.x, block.x), kfusion::device::divUp(phi_n_psi.dims.y, block.y)); calculate_potential_gradient_kernel<<<grid, block>>>(phi_n_psi.data, phi_global.data, nabla_phi_n_o_psi.data, L.data, nabla_U.data, w_reg, phi_n_psi.dims.x, phi_n_psi.dims.y, phi_n_psi.dims.z); cudaSafeCall(cudaGetLastError()); } /* * DEFORMATION FIELD */ __global__ void sobfu::device::update_psi_kernel(float4* psi, float4* nabla_U_S, float4* updates, float alpha, int dim_x, int dim_y, int dim_z) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x > dim_x - 1 || y > dim_y - 1) { return; } int global_idx = y * dim_x + x; for (int i = 0; i <= dim_z - 1; global_idx += dim_y * dim_x, ++i) { float4 update = alpha * nabla_U_S[global_idx]; updates[global_idx] = update; psi[global_idx] -= update; } } void sobfu::device::update_psi(sobfu::device::DeformationField& psi, sobfu::device::PotentialGradient& nabla_U_S, float4* updates, float alpha) { /* integrate in time */ dim3 block(64, 16); dim3 grid(kfusion::device::divUp(psi.dims.x, block.x), kfusion::device::divUp(psi.dims.y, block.y)); update_psi_kernel<<<grid, block>>>(psi.data, nabla_U_S.data, updates, alpha, psi.dims.x, psi.dims.y, psi.dims.z); cudaSafeCall(cudaGetLastError()); } /* * PIPELINE */ void sobfu::device::estimate_psi(SDFs& sdfs, sobfu::device::DeformationField& psi, sobfu::device::DeformationField& psi_inv, sobfu::device::SpatialGradients* spatial_grads, Differentiators& differentiators, float* d_S_i, sobfu::device::Reductor* r, SolverParams& params) { /* copy sobolev filter to constant memory */ set_convolution_kernel(d_S_i); /* create cuda streams */ int no_streams = 3; cudaStream_t streams[no_streams]; for (int i = 0; i < no_streams; i++) { cudaSafeCall(cudaStreamCreate(&streams[i])); } /* calculate no. of blocks and no. of threads per block */ int3 dims = psi.dims; dim3 block(64, 16); dim3 grid(kfusion::device::divUp(dims.x, block.x), kfusion::device::divUp(dims.y, block.y)); /* apply psi to phi_n */ apply_kernel<<<grid, block, 0, streams[0]>>>(sdfs.phi_n, sdfs.phi_n_psi, psi); cudaSafeCall(cudaGetLastError()); /* run gradient descent */ float2 curr_max_update_norm; float e_curr = std::numeric_limits<float>::infinity(); int iter = 1; while (iter <= params.max_iter) { if (iter == 1 || iter % 50 == 0) { std::cout << "iter. no. 
" << iter << std::endl; } /* calculate the gradient of phi_n */ estimate_gradient_kernel<<<grid, block, 0, streams[0]>>>(differentiators.tsdf_diff, *(spatial_grads->nabla_phi_n_o_psi)); cudaSafeCall(cudaGetLastError()); /* calculate the jacobian of psi */ estimate_deformation_jacobian_kernel<<<grid, block, 0, streams[1]>>>(differentiators.diff, *(spatial_grads->J)); cudaSafeCall(cudaGetLastError()); /* calculate the laplacian of psi */ estimate_laplacian_kernel<<<grid, block, 0, streams[2]>>>(differentiators.second_order_diff, *(spatial_grads->L)); cudaSafeCall(cudaGetLastError()); /* calculate current value of the energy functional */ if ((params.verbosity == 1 && (iter == 1 || iter % 50 == 0 || iter == params.max_iter) || params.verbosity == 2)) { /* data term */ float e_data = r->data_energy(sdfs.phi_global.data, sdfs.phi_n_psi.data); /* regularisation term */ float e_reg = r->reg_energy_sobolev(spatial_grads->J->data); e_curr = e_data + params.w_reg * e_reg; std::cout << "data energy + w_reg * reg energy = " << e_data << " + " << params.w_reg << " * " << e_reg << " = " << e_curr << std::endl; } /* * PDE'S */ /* calculate gradient of the potential */ sobfu::device::calculate_potential_gradient(sdfs.phi_n_psi, sdfs.phi_global, *(spatial_grads->nabla_phi_n_o_psi), *(spatial_grads->L), *(spatial_grads->nabla_U), params.w_reg); cudaSafeCall(cudaGetLastError()); /* convolve gradient of the potential with a sobolev kernel */ sobfu::device::convolution_rows((*(spatial_grads->nabla_U_S)).data, (*(spatial_grads->nabla_U)).data, dims.x, dims.y, dims.z); sobfu::device::convolution_columns((*(spatial_grads->nabla_U_S)).data, (*(spatial_grads->nabla_U)).data, dims.x, dims.y, dims.z); sobfu::device::convolution_depth((*(spatial_grads->nabla_U_S)).data, (*(spatial_grads->nabla_U)).data, dims.x, dims.y, dims.z); /* update psi */ update_psi_kernel<<<grid, block, 0, streams[0]>>>(psi.data, (*(spatial_grads->nabla_U_S)).data, r->updates, params.alpha, dims.x, dims.y, dims.z); cudaSafeCall(cudaGetLastError()); /* apply psi to phi_n */ apply_kernel<<<grid, block, 0, streams[0]>>>(sdfs.phi_n, sdfs.phi_n_psi, psi); cudaSafeCall(cudaGetLastError()); /* get value of the max. update norm at the current iteration of the solver */ curr_max_update_norm = r->max_update_norm(); if ((params.verbosity == 1 && (iter == 1 || iter % 50 == 0 || iter == params.max_iter) || params.verbosity == 2)) { int idx_x = curr_max_update_norm.y / (psi.dims.x * psi.dims.y); int idx_y = (curr_max_update_norm.y - idx_x * psi.dims.x * psi.dims.y) / psi.dims.x; int idx_z = curr_max_update_norm.y - psi.dims.x * (idx_y + psi.dims.y * idx_x); std::cout << "max. update norm " << curr_max_update_norm.x << " at voxel (" << idx_z << ", " << idx_y << ", " << idx_x << ")" << std::endl; } if (curr_max_update_norm.x <= params.max_update_norm) { std::cout << "SOLVER CONVERGED AFTER " << iter << " ITERATIONS" << std::endl; break; } if (iter == params.max_iter) { std::cout << "SOLVER REACHED MAX. NO. 
OF ITERATIONS WITHOUT CONVERGING" << std::endl; } iter++; } /* iteratively estimate the inverse deformation field */ sobfu::device::init_identity(psi_inv); sobfu::device::estimate_inverse(psi, psi_inv); /* apply psi_inv to phi_global */ apply_kernel<<<grid, block>>>(sdfs.phi_global, sdfs.phi_global_psi_inv, psi_inv); cudaSafeCall(cudaGetLastError()); for (int i = 0; i < no_streams; i++) { cudaSafeCall(cudaStreamDestroy(streams[i])); } } /* * CONVOLUTIONS */ #define KERNEL_RADIUS 3 #define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1) #define ROWS_BLOCKDIM_X 4 #define ROWS_BLOCKDIM_Y 64 #define ROWS_RESULT_STEPS 4 #define ROWS_HALO_STEPS 1 #define COLUMNS_BLOCKDIM_X 64 #define COLUMNS_BLOCKDIM_Y 4 #define COLUMNS_RESULT_STEPS 4 #define COLUMNS_HALO_STEPS 1 #define DEPTH_BLOCKDIM_X 64 #define DEPTH_BLOCKDIM_Z 4 #define DEPTH_RESULT_STEPS 4 #define DEPTH_HALO_STEPS 1 __constant__ float S[KERNEL_LENGTH]; void sobfu::device::set_convolution_kernel(float* d_kernel) { cudaSafeCall(cudaMemcpyToSymbol(S, d_kernel, KERNEL_LENGTH * sizeof(float), 0, cudaMemcpyDeviceToDevice)); cudaSafeCall(cudaGetLastError()); } /*** ROW CONVOLUTION ***/ __global__ void sobfu::device::convolution_rows_kernel(float4* d_dst, float4* d_src, int image_w, int image_h, int image_d) { __shared__ float4 s_data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; /* offset to the left halo edge */ const int base_x = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int base_y = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int base_z = blockIdx.z; const int first_pixel_in_line = ROWS_BLOCKDIM_X * ROWS_HALO_STEPS - threadIdx.x; const int last_pixel_in_line = image_w - base_x - 1; d_dst += base_z * image_h * image_w + base_y * image_w + base_x; d_src += base_z * image_h * image_w + base_y * image_w + base_x; /* load main data */ #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (image_w - base_x > i * ROWS_BLOCKDIM_X) ? d_src[i * ROWS_BLOCKDIM_X] : d_src[last_pixel_in_line]; } /* load left halo */ #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (base_x >= -i * ROWS_BLOCKDIM_X) ? d_src[i * ROWS_BLOCKDIM_X] : d_src[first_pixel_in_line]; } /* load right halo */ #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (image_w - base_x > i * ROWS_BLOCKDIM_X) ? 
d_src[i * ROWS_BLOCKDIM_X] : d_src[last_pixel_in_line]; } /* compute and store results */ __syncthreads(); /* this pixel is not part of the iamge and doesn't need to be convolved */ if (base_y >= image_h) { return; } #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { if (image_w - base_x > i * ROWS_BLOCKDIM_X) { float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += S[KERNEL_RADIUS - j] * s_data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_dst[i * ROWS_BLOCKDIM_X] = sum; } } } void sobfu::device::convolution_rows(float4* d_dst, float4* d_src, int image_w, int image_h, int image_d) { int blocks_x = image_w / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) + min(1, image_w % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X)); int blocks_y = image_h / ROWS_BLOCKDIM_Y + min(1, image_h % ROWS_BLOCKDIM_Y); int blocks_z = image_d; dim3 blocks(blocks_x, blocks_y, blocks_z); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y, 1); convolution_rows_kernel<<<blocks, threads>>>(d_dst, d_src, image_w, image_h, image_d); cudaSafeCall(cudaGetLastError()); } /*** COLUMMN CONVOLUTION ***/ __global__ void sobfu::device::convolution_columns_kernel(float4* d_dst, float4* d_src, int image_w, int image_h, int image_d) { __shared__ float4 s_data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; /* offset to the upper halo edge */ const int base_x = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int base_y = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const int base_z = blockIdx.z; const int first_pixel_in_line = (COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS - threadIdx.y) * image_w; const int last_pixel_in_line = (image_h - base_y - 1) * image_w; d_dst += base_z * image_h * image_w + base_y * image_w + base_x; d_src += base_z * image_h * image_w + base_y * image_w + base_x; /* main data */ #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (image_h - base_y > i * COLUMNS_BLOCKDIM_Y) ? d_src[i * COLUMNS_BLOCKDIM_Y * image_w] : d_src[last_pixel_in_line]; } /* upper halo */ #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (base_y >= -i * COLUMNS_BLOCKDIM_Y) ? d_src[i * COLUMNS_BLOCKDIM_Y * image_w] : d_src[first_pixel_in_line]; } /* lower halo */ #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (image_h - base_y > i * COLUMNS_BLOCKDIM_Y) ? 
d_src[i * COLUMNS_BLOCKDIM_Y * image_w] : d_src[last_pixel_in_line]; } /* compute and store results */ __syncthreads(); /* this pixel isn't part of hte image and doesn't need to be convolved */ if (base_x >= image_w) { return; } #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { if (image_h - base_y > i * COLUMNS_BLOCKDIM_Y) { float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += S[KERNEL_RADIUS - j] * s_data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_dst[i * COLUMNS_BLOCKDIM_Y * image_w] += sum; } } } void sobfu::device::convolution_columns(float4* d_dst, float4* d_src, int image_w, int image_h, int image_d) { int blocks_x = image_w / COLUMNS_BLOCKDIM_X + min(1, image_w % COLUMNS_BLOCKDIM_X); int blocks_y = image_h / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) + min(1, image_h % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); int blocks_z = image_d; dim3 blocks(blocks_x, blocks_y, blocks_z); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y, 1); convolution_columns_kernel<<<blocks, threads>>>(d_dst, d_src, image_w, image_h, image_d); cudaSafeCall(cudaGetLastError()); } /*** DEPTH CONVOLUTION ***/ __global__ void sobfu::device::convolution_depth_kernel(float4* d_dst, float4* d_src, int image_w, int image_h, int image_d) { /* here it is [x][z] as we leave out y bc it has a size of 1 */ __shared__ float4 s_data[DEPTH_BLOCKDIM_X][(DEPTH_RESULT_STEPS + 2 * DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z + 1]; /* offset to the upper halo edge */ const int base_x = blockIdx.x * DEPTH_BLOCKDIM_X + threadIdx.x; const int base_y = blockIdx.y; const int base_z = (blockIdx.z * DEPTH_RESULT_STEPS - DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z + threadIdx.z; const int first_pixel_in_line = (DEPTH_BLOCKDIM_Z * DEPTH_HALO_STEPS - threadIdx.z) * image_w * image_h; const int last_pixel_in_line = (image_d - base_z - 1) * image_w * image_h; d_dst += base_z * image_h * image_w + base_y * image_w + base_x; d_src += base_z * image_h * image_w + base_y * image_w + base_x; /* main data */ #pragma unroll for (int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++) { s_data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (image_d - base_z > i * DEPTH_BLOCKDIM_Z) ? d_src[i * DEPTH_BLOCKDIM_Z * image_w * image_h] : d_src[last_pixel_in_line]; } /* upper halo */ #pragma unroll for (int i = 0; i < DEPTH_HALO_STEPS; i++) { s_data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (base_z >= -i * DEPTH_BLOCKDIM_Z) ? d_src[i * DEPTH_BLOCKDIM_Z * image_w * image_h] : d_src[first_pixel_in_line]; } /* lower halo */ #pragma unroll for (int i = DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS + DEPTH_HALO_STEPS; i++) { s_data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z] = (image_d - base_z > i * DEPTH_BLOCKDIM_Z) ? 
d_src[i * DEPTH_BLOCKDIM_Z * image_w * image_h] : d_src[last_pixel_in_line]; } /* compute and store results */ __syncthreads(); /* this pixel is not part of the image and doesn't need to be convolved */ if (base_x >= image_w) { return; } #pragma unroll for (int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++) { if (image_d - base_z > i * DEPTH_BLOCKDIM_Z) { float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += S[KERNEL_RADIUS - j] * s_data[threadIdx.x][threadIdx.z + i * DEPTH_BLOCKDIM_Z + j]; } d_dst[i * DEPTH_BLOCKDIM_Z * image_w * image_h] += sum; } } } void sobfu::device::convolution_depth(float4* d_dst, float4* d_src, int image_w, int image_h, int image_d) { int blocks_x = image_w / DEPTH_BLOCKDIM_X + min(1, image_w % DEPTH_BLOCKDIM_X); int blocks_y = image_h; int blocks_z = image_d / (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z) + min(1, image_d % (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z)); dim3 blocks(blocks_x, blocks_y, blocks_z); dim3 threads(DEPTH_BLOCKDIM_X, 1, DEPTH_BLOCKDIM_Z); convolution_depth_kernel<<<blocks, threads>>>(d_dst, d_src, image_w, image_h, image_d); cudaSafeCall(cudaGetLastError()); }
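/*
 * Hedged sketch (not part of SobolevFusion): the three kernels above each apply
 * the 1-D Sobolev filter S of length KERNEL_LENGTH = 2 * KERNEL_RADIUS + 1
 * along a single axis, substituting an in-range pixel of the same line for
 * out-of-range taps. As called from estimate_psi(), the row pass overwrites
 * nabla_U_S while the column and depth passes accumulate into it, so the result
 * is the sum of the three axis-wise filterings of nabla_U. The host reference
 * below reproduces one such axis pass for a single float channel (instead of
 * float4) with plain clamp-to-edge borders; all names are illustrative.
 */
#include <algorithm>
#include <vector>

static void convolve_rows_reference(std::vector<float>& dst,
                                    const std::vector<float>& src,
                                    const float* kernel, int radius,
                                    int w, int h, int d) {
    for (int z = 0; z < d; ++z)
        for (int y = 0; y < h; ++y)
            for (int x = 0; x < w; ++x) {
                float sum = 0.f;
                for (int j = -radius; j <= radius; ++j) {
                    /* clamp the tap to the line, mirroring the halo fallbacks above */
                    int xs = std::min(std::max(x + j, 0), w - 1);
                    sum += kernel[radius - j] * src[(z * h + y) * w + xs];
                }
                /* the row pass writes; the column/depth passes would use += */
                dst[(z * h + y) * w + x] = sum;
            }
}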
#include "../kernels/transformerKernels.h" #include "../kernels/embKernels_int8.h" #include "../kernels/transformerKernels_int8.h" #include "cublas_helper.h" /** @file QuantTransformer encoder, composed by gemm lib and custom cuda kernel function */ namespace lightseq { namespace cuda { template <OperationType OpType_> QuantEncoder<OpType_>::QuantEncoder(int max_batch_size, int *p_d_token_id, int *p_d_padding_mask, _DataType *p_d_output, const QuantTransformerWeight<OpType_> &tw, cudaStream_t stream, cublasHandle_t hd, const int *p_d_lang_id) : _max_batch_size(max_batch_size), _p_d_token_id(p_d_token_id), _p_d_padding_mask(p_d_padding_mask), _p_d_output(p_d_output), _p_d_lang_id(p_d_lang_id), _tw(tw), _stream(stream), _hd(hd), _p_d_src_emb_wei(tw.get_src_emb_wei()), _p_d_enc_wei(tw.get_enc_wei()), _fone((_DataType)1.f), _fzero((_DataType)0.f), _src_emb_clip_max(tw.get_src_emb_clip_max()), _enc_clip_max(tw.get_enc_clip_max()), _ione((int32_t)1), _izero((int32_t)0), _atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)), _max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size), _max_thread_per_block(1024) { CHECK_GPU_ERROR(cublasLtCreate(&_cublas_lt_handle)); } /** Init the GPU memory pointer which point to the memory buffer needed by encoder. These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void QuantEncoder<OpType_>::init_buffer() { std::cout << "encoder buffer init start" << std::endl; _DataType *qkv_buf; CHECK_GPU_ERROR(cudaMalloc(&qkv_buf, 3 * _max_batch_dim * sizeof(_DataType))); _p_d_q = qkv_buf; _p_d_k = qkv_buf + _max_batch_dim; _p_d_v = qkv_buf + 2 * _max_batch_dim; CHECK_GPU_ERROR(cudaMalloc(&_p_d_c, _max_batch_size * _tw._head_num * _tw._max_step * _tw._max_step * sizeof(_DataType))); int max_batch_dim = _max_batch_size * _tw._max_step * std::max(_tw._inner_size, _tw._hidden_size * 3); CHECK_GPU_ERROR(cudaMalloc(&_int8_ffn_in_buf, max_batch_dim)); CHECK_GPU_ERROR( cudaMalloc(&_int32_ffn_out_buf, max_batch_dim * sizeof(int32_t))); CHECK_GPU_ERROR( cudaMalloc(&_int8_ffn_out_buf, max_batch_dim * sizeof(int8_t))); CHECK_GPU_ERROR( cudaMalloc(&_int8_p_d_src_emb_wei, _tw._src_vocab_size * _tw._hidden_size * sizeof(int8_t))); quantize_weight(_p_d_src_emb_wei[0], _int8_p_d_src_emb_wei, _tw._src_vocab_size, _tw._hidden_size, _quant_range / _src_emb_clip_max, _stream, _cublas_lt_handle, kRowMajor); _p_device_emb.push_back(nullptr); _p_device_emb.push_back( to_gpu(_p_d_src_emb_wei[1], _tw._max_step * _tw._hidden_size, _stream)); _p_device_emb.push_back( to_gpu(_p_d_src_emb_wei[2], _tw._hidden_size, _stream)); _p_device_emb.push_back( to_gpu(_p_d_src_emb_wei[3], _tw._hidden_size, _stream)); if (_tw._multilg_type != 0) { _p_device_emb.push_back( to_gpu(_p_d_src_emb_wei[4], _tw._hidden_size, _stream)); } else { _p_device_emb.push_back(nullptr); } // prepare gpu memory for weight _int8_p_d_enc_wei = std::vector<int8_t *>(_tw._n_enc_layer * 4); _scaled_ffn2_colsum = std::vector<_DataType *>(_tw._n_enc_layer); for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; CHECK_GPU_ERROR(cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4], _tw._hidden_size * 3 * _tw._hidden_size)); CHECK_GPU_ERROR(cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4 + 1], _tw._hidden_size * _tw._hidden_size)); CHECK_GPU_ERROR(cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4 + 2], _tw._hidden_size * _tw._inner_size)); 
CHECK_GPU_ERROR(cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4 + 3], _tw._inner_size * _tw._hidden_size)); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset], _tw._hidden_size, _stream)); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 1], _tw._hidden_size, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back(to_gpu(_p_d_enc_wei[_weight_offset + 3], _tw._hidden_size * 3, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 5], _tw._hidden_size, _stream)); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 6], _tw._hidden_size, _stream)); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 7], _tw._hidden_size, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 9], _tw._inner_size, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 11], _tw._hidden_size, _stream)); quantize_weight(_p_d_enc_wei[_weight_offset + 2], _int8_p_d_enc_wei[_layer_id * 4], _tw._hidden_size, _tw._hidden_size * 3, _quant_range / _enc_clip_max[_layer_id * 12], _stream, _cublas_lt_handle); quantize_weight(_p_d_enc_wei[_weight_offset + 4], _int8_p_d_enc_wei[_layer_id * 4 + 1], _tw._hidden_size, _tw._hidden_size, _quant_range / _enc_clip_max[_layer_id * 12 + 1], _stream, _cublas_lt_handle); quantize_weight(_p_d_enc_wei[_weight_offset + 8], _int8_p_d_enc_wei[_layer_id * 4 + 2], _tw._hidden_size, _tw._inner_size, _quant_range / _enc_clip_max[_layer_id * 12 + 2], _stream, _cublas_lt_handle); quantize_weight(_p_d_enc_wei[_weight_offset + 10], _int8_p_d_enc_wei[_layer_id * 4 + 3], _tw._inner_size, _tw._hidden_size, _quant_range / _enc_clip_max[_layer_id * 12 + 3], _stream, _cublas_lt_handle); if (_tw._use_gelu) { _scaled_ffn2_colsum[_layer_id] = nullptr; } else { CHECK_GPU_ERROR(cudaMalloc(&_scaled_ffn2_colsum[_layer_id], _tw._hidden_size * sizeof(_DataType))); float relu_scale = _enc_clip_max[_layer_id * 12 + 7] / 2; _DataType *temp; int weight_size = _tw._inner_size * _tw._hidden_size; CHECK_GPU_ERROR(cudaMalloc(&temp, weight_size * sizeof(_DataType))); CHECK_GPU_ERROR(cudaMemcpyAsync(temp, _p_d_enc_wei[_weight_offset + 10], weight_size * sizeof(_DataType), cudaMemcpyHostToDevice, _stream)); launch_scaled_colsum(temp, _scaled_ffn2_colsum[_layer_id], _tw._inner_size, _tw._hidden_size, relu_scale, _stream); CHECK_GPU_ERROR(cudaGetLastError()); CHECK_GPU_ERROR(cudaFree(temp)); } } std::cout << "encoder buffer init succeed" << std::endl; return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string QuantEncoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { return "violate dim_per_head % 2 = 0"; } if (_tw._multilg_type == 0 && _p_d_src_emb_wei.size() != 4) { return "violate p_d_src_emb_wei.size() = 4"; } if (_tw._multilg_type != 0 && _p_d_src_emb_wei.size() != 5) { return "violate p_d_src_emb_wei.size() = 5"; } if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) { return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer"; } if (_tw._multilg_type != 0 && _p_d_lang_id == nullptr) { return "lang id should not be null when multilg"; } return ""; } /** QuantEncoder inference */ template <OperationType OpType_> void 
QuantEncoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { if (batch_size > _max_batch_size) { throw std::runtime_error("batch size of input greater than max_batch_size"); } if (batch_seq_len > _tw._max_step) { throw std::runtime_error("seq len of input greater than max_step"); } /* ---step1. init--- */ _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len); #endif /* ---step2. encoder feedforward--- */ launch_enc_emb_i8I<_DataType>( _int8_p_d_src_emb_wei, _p_device_emb[1], _p_d_token_id, _p_d_output, _p_d_padding_mask, _tw._padding_id, batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_device_emb[4], _p_d_lang_id, _tw._multilg_type, _src_emb_clip_max / _quant_range, true); #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "emb out: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "emb out", 10); } } // not normal print_vec(_int8_p_d_src_emb_wei, "token embedding weight", 10); print_vec(_p_device_emb[1], "position embedding weight", 10); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "encoder output: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "encoder_output", _tw._dim_per_head); } } // not normal #endif return; } /** QuantEncoder self attention */ template <OperationType OpType_> void QuantEncoder<OpType_>::self_attention() { if (_layer_id == 0) { ker_norm_layer_resual_i8O_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _int8_ffn_in_buf, _p_device_wei[_weight_offset], _p_device_wei[_weight_offset + 1], _p_device_wei[_weight_offset + 5], _max_thread_per_block, _quant_range / _enc_clip_max[_layer_id * 12 + 4], _tw._is_post_ln, true); } CHECK_GPU_ERROR(cudaGetLastError()); cublasLtMM_withAlgo_i8IO( _int8_ffn_out_buf, 1, _batch_token_num, _tw._hidden_size * 3, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12] * _enc_clip_max[_layer_id * 12 + 4] / (_enc_clip_max[_layer_id * 12 + 8] * _quant_range), _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4], _cublas_lt_handle, _stream, false); // get q, k, v by split and reshape qkv ker_arrange_encself_qkv_i8I_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _int8_ffn_out_buf, _p_device_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block, _enc_clip_max[_layer_id * 12 + 8] / _quant_range, true); /* ---step 2. 
correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _batch_seq_len, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len * _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_correlation_softmax_encself_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_padding_mask); /* ---step 3. new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _batch_seq_len, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_i8O_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_q, _int8_ffn_in_buf, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block, _quant_range / _enc_clip_max[_layer_id * 12 + 5], true); /* ---step 4. new_q = ori_q + new_q * output_wei--- */ cublasLtMM_withAlgo_i8IO( _int8_ffn_out_buf, 1, _batch_token_num, _tw._hidden_size, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12 + 1] * _enc_clip_max[_layer_id * 12 + 5] / (_enc_clip_max[_layer_id * 12 + 9] * _quant_range), _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4 + 1], _cublas_lt_handle, _stream, false); ker_residual_bias_ln_i8I_i8O_launcher<_DataType>( _int8_ffn_out_buf, _p_device_wei[_weight_offset + 6], _p_device_wei[_weight_offset + 7], _p_device_wei[_weight_offset + 11], _int8_ffn_in_buf, _p_d_output, _batch_token_num, _tw._hidden_size, _enc_clip_max[_layer_id * 12 + 9] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 6], _max_thread_per_block, _stream, _tw._is_post_ln, true, true); return; } template <OperationType OpType_> void QuantEncoder<OpType_>::ffn_add_norm() { cublasLtMM_withAlgo_i8IO( _int8_ffn_out_buf, 1, _batch_token_num, _tw._inner_size, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12 + 2] * _enc_clip_max[_layer_id * 12 + 6] / (_enc_clip_max[_layer_id * 12 + 10] * _quant_range), _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4 + 2], _cublas_lt_handle, _stream, false); if (_tw._use_gelu) { ker_bias_gelu_i8I_i8O_launcher<_DataType>( _batch_token_num, _stream, _int8_ffn_out_buf, _int8_ffn_in_buf, _p_device_wei[_weight_offset + 9], _tw._inner_size, _enc_clip_max[_layer_id * 12 + 10] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 7], true, true); } else { ker_bias_relu_i8I_i8O_launcher<_DataType>( _batch_token_num, _stream, _int8_ffn_out_buf, _int8_ffn_in_buf, _p_device_wei[_weight_offset + 9], _tw._inner_size, _enc_clip_max[_layer_id * 12 + 10] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 7], _enc_clip_max[_layer_id * 12 + 7], true, true, true); } /* ---step 2. 
second ffn layer--- */ cublasLtMM_withAlgo(_int32_ffn_out_buf, 1, _batch_token_num, _tw._hidden_size, _tw._inner_size, 0, 0, 0, _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4 + 3], _cublas_lt_handle, _stream, false); const _DataType *scale_ptr, *bias_ptr, *res_bias_ptr; float clip_max, dequant_scale; if (_tw._use_gelu) { dequant_scale = _enc_clip_max[_layer_id * 12 + 3] * _enc_clip_max[_layer_id * 12 + 7] / (_quant_range * _quant_range); } else { dequant_scale = _enc_clip_max[_layer_id * 12 + 3] * _enc_clip_max[_layer_id * 12 + 7] / (2 * _quant_range * _quant_range); } if (_layer_id == _tw._n_enc_layer - 1) { scale_ptr = _p_device_emb[2]; bias_ptr = _p_device_emb[3]; ker_residual_bias_ln_i32I_launcher<_DataType>( _int32_ffn_out_buf, scale_ptr, bias_ptr, _p_d_output, _p_d_output, _batch_token_num, _tw._hidden_size, dequant_scale, _max_thread_per_block, _stream, true, _scaled_ffn2_colsum[_layer_id]); } else { scale_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer]; bias_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer + 1]; res_bias_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer + 5]; clip_max = _enc_clip_max[(_layer_id + 1) * 12 + 4]; ker_residual_bias_ln_i32I_i8O_launcher<_DataType>( _int32_ffn_out_buf, scale_ptr, bias_ptr, res_bias_ptr, _int8_ffn_in_buf, _p_d_output, _batch_token_num, _tw._hidden_size, dequant_scale, _quant_range / clip_max, _max_thread_per_block, _stream, _tw._is_post_ln, true, true, _scaled_ffn2_colsum[_layer_id]); } return; } template class QuantEncoder<OperationType::FP16>; template class QuantEncoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
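/*
 * Hedged sketch (not LightSeq API): the encoder above uses symmetric int8
 * quantization throughout -- tensors are scaled by _quant_range / clip_max
 * before the int8 GEMMs and rescaled afterwards, and a GEMM over two int8
 * operands is dequantized with clip_max_a * clip_max_b / (clip_max_out *
 * _quant_range), as in the cublasLtMM_withAlgo_i8IO calls. The helpers below
 * only illustrate that convention; the names and the assumed quant_range of
 * 127 are not taken from the LightSeq headers.
 */
#include <algorithm>
#include <cmath>
#include <cstdint>

static inline int8_t quantize_sym(float x, float clip_max, float quant_range = 127.f) {
  // map [-clip_max, clip_max] onto [-quant_range, quant_range], then round and saturate
  float scaled = x * quant_range / clip_max;
  scaled = std::max(-quant_range, std::min(quant_range, scaled));
  return static_cast<int8_t>(std::nearbyint(scaled));
}

static inline float dequantize_sym(int8_t q, float clip_max, float quant_range = 127.f) {
  return static_cast<float>(q) * clip_max / quant_range; // inverse scaling
}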
// System includes #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // CUDA runtime #include <cuda_runtime.h> //CUFFT Header file #include <cufftXt.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> // Complex data type typedef float2 Complex; // Data configuration const int GPU_COUNT = 2; const int BSZ_Y = 4; const int BSZ_X = 4; // Forward Declaration void solvePoissonEquation(cudaLibXtDesc *, cudaLibXtDesc *, float **, int, int); __global__ void solvePoisson(cufftComplex *, cufftComplex *, float *, int, int, int n_gpu); /////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf( "\nPoisson equation using CUFFT library on Multiple GPUs is " "starting...\n\n"); int GPU_N; checkCudaErrors(cudaGetDeviceCount(&GPU_N)); if (GPU_N < GPU_COUNT) { printf("No. of GPU on node %d\n", GPU_N); printf("Two GPUs are required to run simpleCUFFT_2d_MGPU sample code\n"); exit(EXIT_WAIVED); } int *major_minor = (int *)malloc(sizeof(int) * GPU_N * 2); int found2IdenticalGPUs = 0; int nGPUs = 2; int *whichGPUs; whichGPUs = (int *)malloc(sizeof(int) * nGPUs); for (int i = 0; i < GPU_N; i++) { cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, i)); major_minor[i * 2] = deviceProp.major; major_minor[i * 2 + 1] = deviceProp.minor; printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", i, deviceProp.name, deviceProp.major, deviceProp.minor); } for (int i = 0; i < GPU_N; i++) { for (int j = i + 1; j < GPU_N; j++) { if ((major_minor[i * 2] == major_minor[j * 2]) && (major_minor[i * 2 + 1] == major_minor[j * 2 + 1])) { whichGPUs[0] = i; whichGPUs[1] = j; found2IdenticalGPUs = 1; break; } } if (found2IdenticalGPUs) { break; } } free(major_minor); if (!found2IdenticalGPUs) { printf( "No Two GPUs with same architecture found\nWaiving simpleCUFFT_2d_MGPU " "sample\n"); exit(EXIT_WAIVED); } int N = 64; float xMAX = 1.0f, xMIN = 0.0f, yMIN = 0.0f, h = (xMAX - xMIN) / ((float)N), s = 0.1f, s2 = s * s; float *x, *y, *f, *u_a, r2; x = (float *)malloc(sizeof(float) * N * N); y = (float *)malloc(sizeof(float) * N * N); f = (float *)malloc(sizeof(float) * N * N); u_a = (float *)malloc(sizeof(float) * N * N); for (int j = 0; j < N; j++) for (int i = 0; i < N; i++) { x[N * j + i] = xMIN + i * h; y[N * j + i] = yMIN + j * h; r2 = (x[N * j + i] - 0.5f) * (x[N * j + i] - 0.5f) + (y[N * j + i] - 0.5f) * (y[N * j + i] - 0.5f); f[N * j + i] = (r2 - 2 * s2) / (s2 * s2) * exp(-r2 / (2 * s2)); u_a[N * j + i] = exp(-r2 / (2 * s2)); // analytical solution } float *k, *d_k[GPU_COUNT]; k = (float *)malloc(sizeof(float) * N); for (int i = 0; i <= N / 2; i++) { k[i] = i * 2 * (float)M_PI; } for (int i = N / 2 + 1; i < N; i++) { k[i] = (i - N) * 2 * (float)M_PI; } // Create a complex variable on host Complex *h_f = (Complex *)malloc(sizeof(Complex) * N * N); // Initialize the memory for the signal for (int i = 0; i < (N * N); i++) { h_f[i].x = f[i]; h_f[i].y = 0.0f; } // cufftCreate() - Create an empty plan cufftResult result; cufftHandle planComplex; result = cufftCreate(&planComplex); if (result != CUFFT_SUCCESS) { printf("cufftCreate failed\n"); exit(EXIT_FAILURE); } // cufftXtSetGPUs() - Define which GPUs to use result = cufftXtSetGPUs(planComplex, nGPUs, whichGPUs); if (result == CUFFT_INVALID_DEVICE) { printf("This sample requires two GPUs on the same 
board.\n"); printf("No such board was found. Waiving sample.\n"); exit(EXIT_WAIVED); } else if (result != CUFFT_SUCCESS) { printf("cufftXtSetGPUs failed\n"); exit(EXIT_FAILURE); } // Print the device information to run the code printf("\nRunning on GPUs\n"); for (int i = 0; i < 2; i++) { cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, whichGPUs[i])); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", whichGPUs[i], deviceProp.name, deviceProp.major, deviceProp.minor); } size_t *worksize; worksize = (size_t *)malloc(sizeof(size_t) * nGPUs); // cufftMakePlan2d() - Create the plan result = cufftMakePlan2d(planComplex, N, N, CUFFT_C2C, worksize); if (result != CUFFT_SUCCESS) { printf("*MakePlan* failed\n"); exit(EXIT_FAILURE); } for (int i = 0; i < nGPUs; i++) { cudaSetDevice(whichGPUs[i]); cudaMalloc((void **)&d_k[i], sizeof(float) * N); cudaMemcpy(d_k[i], k, sizeof(float) * N, cudaMemcpyHostToDevice); } // Create a variable on device // d_f - variable on device to store the input data // d_d_f - variable that store the natural order of d_f data // d_out - device output cudaLibXtDesc *d_f, *d_d_f, *d_out; // cufftXtMalloc() - Malloc data on multiple GPUs result = cufftXtMalloc(planComplex, (cudaLibXtDesc **)&d_f, CUFFT_XT_FORMAT_INPLACE); if (result != CUFFT_SUCCESS) { printf("*XtMalloc failed\n"); exit(EXIT_FAILURE); } result = cufftXtMalloc(planComplex, (cudaLibXtDesc **)&d_d_f, CUFFT_XT_FORMAT_INPLACE); if (result != CUFFT_SUCCESS) { printf("*XtMalloc failed\n"); exit(EXIT_FAILURE); } result = cufftXtMalloc(planComplex, (cudaLibXtDesc **)&d_out, CUFFT_XT_FORMAT_INPLACE); if (result != CUFFT_SUCCESS) { printf("*XtMalloc failed\n"); exit(EXIT_FAILURE); } // cufftXtMemcpy() - Copy the data from host to device result = cufftXtMemcpy(planComplex, d_f, h_f, CUFFT_COPY_HOST_TO_DEVICE); if (result != CUFFT_SUCCESS) { printf("*XtMemcpy failed\n"); exit(EXIT_FAILURE); } // cufftXtExecDescriptorC2C() - Execute FFT on data on multiple GPUs printf("Forward 2d FFT on multiple GPUs\n"); result = cufftXtExecDescriptorC2C(planComplex, d_f, d_f, CUFFT_FORWARD); if (result != CUFFT_SUCCESS) { printf("*XtExecC2C failed\n"); exit(EXIT_FAILURE); } // cufftXtMemcpy() - Copy the data to natural order on GPUs result = cufftXtMemcpy(planComplex, d_d_f, d_f, CUFFT_COPY_DEVICE_TO_DEVICE); if (result != CUFFT_SUCCESS) { printf("*XtMemcpy failed\n"); exit(EXIT_FAILURE); } printf("Solve Poisson Equation\n"); solvePoissonEquation(d_d_f, d_out, d_k, N, nGPUs); printf("Inverse 2d FFT on multiple GPUs\n"); // cufftXtExecDescriptorC2C() - Execute inverse FFT on data on multiple GPUs result = cufftXtExecDescriptorC2C(planComplex, d_out, d_out, CUFFT_INVERSE); if (result != CUFFT_SUCCESS) { printf("*XtExecC2C failed\n"); exit(EXIT_FAILURE); } // Create a variable on host to copy the data from device // h_d_out - variable store the output of device Complex *h_d_out = (Complex *)malloc(sizeof(Complex) * N * N); // cufftXtMemcpy() - Copy data from multiple GPUs to host result = cufftXtMemcpy(planComplex, h_d_out, d_out, CUFFT_COPY_DEVICE_TO_HOST); if (result != CUFFT_SUCCESS) { printf("*XtMemcpy failed\n"); exit(EXIT_FAILURE); } float *out = (float *)malloc(sizeof(float) * N * N); float constant = h_d_out[0].x / N * N; for (int i = 0; i < N * N; i++) { // subtract u[0] to force the arbitrary constant to be 0 out[i] = (h_d_out[i].x / (N * N)) - constant; } // cleanup memory free(h_f); free(k); free(out); free(h_d_out); free(x); free(whichGPUs); free(y); free(f); free(u_a); 
free(worksize); // cudaXtFree() - Free GPU memory for (int i = 0; i < GPU_COUNT; i++) { cudaFree(d_k[i]); } result = cufftXtFree(d_out); if (result != CUFFT_SUCCESS) { printf("*XtFree failed\n"); exit(EXIT_FAILURE); } result = cufftXtFree(d_f); if (result != CUFFT_SUCCESS) { printf("*XtFree failed\n"); exit(EXIT_FAILURE); } result = cufftXtFree(d_d_f); if (result != CUFFT_SUCCESS) { printf("*XtFree failed\n"); exit(EXIT_FAILURE); } // cufftDestroy() - Destroy FFT plan result = cufftDestroy(planComplex); if (result != CUFFT_SUCCESS) { printf("cufftDestroy failed: code %d\n", (int)result); exit(EXIT_FAILURE); } exit(EXIT_SUCCESS); } //////////////////////////////////////////////////////////////////////////////////// // Launch kernel on multiple GPU /////////////////////////////////////////////////////////////////////////////////// void solvePoissonEquation(cudaLibXtDesc *d_ft, cudaLibXtDesc *d_ft_k, float **k, int N, int nGPUs) { int device; dim3 dimGrid(int(N / BSZ_X), int((N / 2) / BSZ_Y)); dim3 dimBlock(BSZ_X, BSZ_Y); for (int i = 0; i < nGPUs; i++) { device = d_ft_k->descriptor->GPUs[i]; cudaSetDevice(device); solvePoisson<<<dimGrid, dimBlock>>>( (cufftComplex *)d_ft->descriptor->data[i], (cufftComplex *)d_ft_k->descriptor->data[i], k[i], N, i, nGPUs); } // Wait for device to finish all operation for (int i = 0; i < nGPUs; i++) { device = d_ft_k->descriptor->GPUs[i]; cudaSetDevice(device); cudaDeviceSynchronize(); // Check if kernel execution generated and error getLastCudaError("Kernel execution failed [ solvePoisson ]"); } } //////////////////////////////////////////////////////////////////////////////// // Kernel for Solving Poisson equation on GPU //////////////////////////////////////////////////////////////////////////////// __global__ void solvePoisson(cufftComplex *ft, cufftComplex *ft_k, float *k, int N, int gpu_id, int n_gpu) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int index = j * N + i; if (i < N && j < N / n_gpu) { float k2 = k[i] * k[i] + k[j + gpu_id * N / n_gpu] * k[j + gpu_id * N / n_gpu]; if (i == 0 && j == 0 && gpu_id == 0) { k2 = 1.0f; } ft_k[index].x = -ft[index].x * 1 / k2; ft_k[index].y = -ft[index].y * 1 / k2; } }
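/*
 * Hedged sketch (not part of the CUDA sample): in Fourier space the Poisson
 * equation  laplacian(u) = f  becomes  -(kx^2 + ky^2) * u_hat = f_hat, so
 * solvePoisson() above sets u_hat = -f_hat / (kx^2 + ky^2) and pins the k = 0
 * mode (the solution is only defined up to a constant). Because cuFFT
 * transforms are unnormalized, the sample divides the inverse transform by
 * N * N and subtracts a constant afterwards; note that its `constant`
 * expression `h_d_out[0].x / N * N` parses as (h_d_out[0].x / N) * N and
 * appears to intend h_d_out[0].x / (N * N). The host reference below mirrors
 * the frequency-domain update; the names are illustrative and it assumes the
 * same wavenumber layout k[i] = 2*pi*i, wrapped to negative frequencies for
 * i > N/2.
 */
#include <vector>

struct Cpx { float x, y; };

static void solve_poisson_reference(std::vector<Cpx>& u_hat,
                                    const std::vector<Cpx>& f_hat,
                                    const std::vector<float>& k, int N) {
  for (int j = 0; j < N; ++j)
    for (int i = 0; i < N; ++i) {
      float k2 = k[i] * k[i] + k[j] * k[j];
      if (i == 0 && j == 0) k2 = 1.0f; // avoid 0/0; the zero mode is fixed by the later constant shift
      u_hat[j * N + i].x = -f_hat[j * N + i].x / k2;
      u_hat[j * N + i].y = -f_hat[j * N + i].y / k2;
    }
}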
* \file * cub::PersistentBlockScan implements an abstraction of CUDA thread blocks for * participating in device-wide prefix scan. */ #pragma once #include <iterator> #include "../grid/grid_mapping.cuh" #include "../grid/grid_even_share.cuh" #include "../grid/grid_queue.cuh" #include "../../block/block_load.cuh" #include "../../block/block_reduce.cuh" #include "../../util_vector.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { enum { BLOCK_SCAN_OOB, BLOCK_SCAN_INVALID, BLOCK_SCAN_PARTIAL, BLOCK_SCAN_PREFIX, }; /** * Tuning policy for PersistentBlockScan */ template < int _BLOCK_THREADS, int _ITEMS_PER_THREAD, BlockLoadPolicy _LOAD_POLICY, BlockStorePolicy _STORE_POLICY, BlockScanAlgorithm _SCAN_ALGORITHM> struct PersistentBlockScanPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ITEMS_PER_THREAD = _ITEMS_PER_THREAD, }; static const BlockLoadPolicy LOAD_POLICY = _LOAD_POLICY; static const BlockStorePolicy STORE_POLICY = _STORE_POLICY; static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; // Data type of block-signaling flag typedef int BlockFlag; }; /** * \brief PersistentBlockScan implements an abstraction of CUDA thread blocks for * participating in device-wide reduction. */ template < typename PersistentBlockScanPolicy, typename InputIteratorRA, typename OutputIteratorRA, typename SizeT> class PersistentBlockScan { public: //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Data type of input iterator typedef typename std::iterator_traits<InputIteratorRA>::value_type T; // Data type of block-signaling flag typedef typename PersistentBlockScanPolicy::BlockFlag BlockFlag; // Constants enum { TILE_ITEMS = PersistentBlockScanPolicy::BLOCK_THREADS * PersistentBlockScanPolicy::ITEMS_PER_THREAD, }; struct Signal { BlockFlag flag; T value; }; template <typename ScanOp> struct SignalReduceOp { ScanOp scan_op; // Constructor __device__ __forceinline__ SignalReduceOp(ScanOp scan_op) : scan_op(scan_op) {} __device__ __forceinline__ Signal operator()(const Signal& first, const Signal& second) { if ((first.flag == BLOCK_SCAN_OOB) || (second.flag == BLOCK_SCAN_PREFIX)) return second; Signal retval; retval.flag = first.flag; retval.value = scan_op(first.value, second.value); return retval; } }; // Parameterized block load typedef BlockLoad< InputIteratorRA, PersistentBlockScanPolicy::BLOCK_THREADS, PersistentBlockScanPolicy::ITEMS_PER_THREAD, PersistentBlockScanPolicy::LOAD_POLICY> BlockLoadT; // Parameterized block store typedef BlockStore< OutputIteratorRA, PersistentBlockScanPolicy::BLOCK_THREADS, PersistentBlockScanPolicy::ITEMS_PER_THREAD, PersistentBlockScanPolicy::STORE_POLICY> BlockStoreT; // Parameterized block scan typedef BlockScan< T, PersistentBlockScanPolicy::BLOCK_THREADS, PersistentBlockScanPolicy::SCAN_ALGORITHM> BlockScanT; // Parameterized warp reduce typedef WarpReduce<Signal> WarpReduceT; // Shared memory type for this threadblock struct SmemStorage { union { typename BlockLoadT::SmemStorage load; // Smem needed for tile loading typename BlockStoreT::SmemStorage store; // Smem needed for tile storing typename BlockScanT::SmemStorage scan; // Smem needed for tile scanning }; typename WarpReduceT::SmemStorage warp_reduce; // Smem needed for warp reduction }; // Stateful prefix functor template <typename ScanOp> struct BlockPrefixOp { T* d_partials; T* d_prefixes; BlockFlag* d_flags; 
ScanOp scan_op; SmemStorage& smem_storage; // Constructor __device__ __forceinline__ BlockPrefixOp( T* d_partials, T* d_prefixes, BlockFlag* d_flags, ScanOp scan_op, SmemStorage& smem_storage) : d_partials(d_partials), d_prefixes(d_prefixes), d_flags(d_flags), scan_op(scan_op), smem_storage(smem_storage) {} // Prefix functor (called by the first warp) __device__ __forceinline__ T operator()(T block_aggregate) { // Update our partial if (threadIdx.x == 0) { d_partials[PtxArchProps::WARP_THREADS + blockIdx.x] = block_aggregate; __threadfence(); d_flags[PtxArchProps::WARP_THREADS + blockIdx.x] = BLOCK_SCAN_PARTIAL; } // Wait for predecessor blocks to become valid and at least one prefix to show up. Signal predecessor_signal; unsigned int predecessor_idx = PtxArchProps::WARP_THREADS + blockIdx.x - threadIdx.x; do { predecessor_signal.flag = ThreadLoad<PTX_LOAD_CG>(d_flags + predecessor_idx); } while (__any(predecessor_signal.flag == BLOCK_SCAN_INVALID) || __all(predecessor_signal.flag != BLOCK_SCAN_PREFIX)); // Grab predecessor block's corresponding partial/prefix predecessor_signal.value = (predecessor_signal.flag == BLOCK_SCAN_PREFIX) ? d_prefixes[predecessor_idx] : d_partials[predecessor_idx]; // Reduce predecessor partials/prefixes to get our block-wide exclusive prefix Signal prefix_signal = WarpReduceT::Reduce( smem_storage.warp_reduce, predecessor_signal, SignalReduceOp(scan_op)); // Update the signals with our inclusive prefix if (threadIdx.x == 0) { T inclusive_prefix = scan_op(prefix_signal.value, block_aggregate); d_prefixes[PtxArchProps::WARP_THREADS + blockIdx.x] = inclusive_prefix; __threadfence(); d_flags[PtxArchProps::WARP_THREADS + blockIdx.x] = BLOCK_SCAN_PREFIX; } // Return block-wide exclusive prefix return prefix_signal.value; } }; //--------------------------------------------------------------------- // Utility operations //--------------------------------------------------------------------- /** * Process a single, full tile. Specialized for d_in is an iterator (not a native pointer) * * Each thread reduces only the values it loads. If \p FIRST_TILE, * this partial reduction is stored into \p thread_aggregate. Otherwise * it is accumulated into \p thread_aggregate. */ template < typename SizeT, typename ScanOp> static __device__ __forceinline__ void ConsumeFullTile( SmemStorage &smem_storage, InputIteratorRA d_in, OutputIteratorRA d_out, SizeT block_offset, ScanOp &scan_op, T &thread_aggregate) { T items[PersistentBlockScanPolicy::ITEMS_PER_THREAD]; BlockLoadT::Load(smem_storage.load, d_in + block_offset, items); __syncthreads(); BlockScanT::Load(smem_storage.load, d_in + block_offset, items); __syncthreads(); BlockStoreT::Store(smem_storage.load, d_in + block_offset, items); } /** * Process a single, full tile. Specialized for native pointers * * Each thread reduces only the values it loads. If \p FIRST_TILE, * this partial reduction is stored into \p thread_aggregate. Otherwise * it is accumulated into \p thread_aggregate. 
* * Performs a block-wide barrier synchronization */ template < bool FIRST_TILE, typename SizeT, typename ScanOp> static __device__ __forceinline__ void ConsumeFullTile( SmemStorage &smem_storage, T *d_in, SizeT block_offset, ScanOp &scan_op, T &thread_aggregate) { if ((size_t(d_in) & (VECTOR_LOAD_LENGTH - 1)) == 0) { T items[ITEMS_PER_THREAD]; typedef typename VectorHelper<T, VECTOR_LOAD_LENGTH>::Type VectorT; // Alias items as an array of VectorT and load it in striped fashion BlockLoadDirectStriped( reinterpret_cast<VectorT*>(d_in + block_offset), reinterpret_cast<VectorT (&)[ITEMS_PER_THREAD / VECTOR_LOAD_LENGTH]>(items)); // Prevent hoisting __syncthreads(); T partial = ThreadReduce(items, scan_op); thread_aggregate = (FIRST_TILE) ? partial : scan_op(thread_aggregate, partial); } else { T items[ITEMS_PER_THREAD]; BlockLoadDirectStriped( d_in + block_offset, items); // Prevent hoisting __syncthreads(); T partial = ThreadReduce(items, scan_op); thread_aggregate = (FIRST_TILE) ? partial : scan_op(thread_aggregate, partial); } } /** * Process a single, partial tile. * * Each thread reduces only the values it loads. If \p FIRST_TILE, * this partial reduction is stored into \p thread_aggregate. Otherwise * it is accumulated into \p thread_aggregate. */ template < bool FIRST_TILE, typename SizeT, typename ScanOp> static __device__ __forceinline__ void ConsumePartialTile( SmemStorage &smem_storage, InputIteratorRA d_in, SizeT block_offset, const SizeT &block_oob, ScanOp &scan_op, T &thread_aggregate) { SizeT thread_offset = block_offset + threadIdx.x; if ((FIRST_TILE) && (thread_offset < block_oob)) { thread_aggregate = ThreadLoad<LOAD_MODIFIER>(d_in + thread_offset); thread_offset += BLOCK_THREADS; } while (thread_offset < block_oob) { T item = ThreadLoad<LOAD_MODIFIER>(d_in + thread_offset); thread_aggregate = scan_op(thread_aggregate, item); thread_offset += BLOCK_THREADS; } } public: //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /** * \brief Consumes input tiles using an even-share policy, computing a threadblock-wide reduction for thread<sub>0</sub> using the specified binary reduction functor. * * The return value is undefined in threads other than thread<sub>0</sub>. 
*/ template <typename SizeT, typename ScanOp> static __device__ __forceinline__ T ProcessPersistentBlockEvenShare( SmemStorage &smem_storage, InputIteratorRA d_in, SizeT block_offset, const SizeT &block_oob, ScanOp &scan_op) { if (block_offset + TILE_ITEMS <= block_oob) { // We have at least one full tile to consume T thread_aggregate; ConsumeFullTile<true>(smem_storage, d_in, block_offset, scan_op, thread_aggregate); block_offset += TILE_ITEMS; // Consume any other full tiles while (block_offset + TILE_ITEMS <= block_oob) { ConsumeFullTile<false>(smem_storage, d_in, block_offset, scan_op, thread_aggregate); block_offset += TILE_ITEMS; } // Consume any remaining input ConsumePartialTile<false>(smem_storage, d_in, block_offset, block_oob, scan_op, thread_aggregate); // Compute the block-wide reduction (every thread has a valid input) return BlockReduceT::Reduce(smem_storage.reduce, thread_aggregate, scan_op); } else { // We have less than a full tile to consume T thread_aggregate; ConsumePartialTile<true>(smem_storage, d_in, block_offset, block_oob, scan_op, thread_aggregate); // Compute the block-wide reduction (up to block_items threads have valid inputs) SizeT block_items = block_oob - block_offset; return BlockReduceT::Reduce(smem_storage.reduce, thread_aggregate, scan_op, block_items); } } /** * \brief Consumes input tiles using a dynamic queue policy, computing a threadblock-wide reduction for thread<sub>0</sub> using the specified binary reduction functor. * * The return value is undefined in threads other than thread<sub>0</sub>. */ template <typename SizeT, typename ScanOp> static __device__ __forceinline__ T ProcessPersistentBlockDynamic( SmemStorage &smem_storage, InputIteratorRA d_in, SizeT num_items, GridQueue<SizeT> &queue, ScanOp &scan_op) { // Each thread block is statically assigned at some input, otherwise its // block_aggregate will be undefined. SizeT block_offset = blockIdx.x * TILE_ITEMS; if (block_offset + TILE_ITEMS <= num_items) { // We have a full tile to consume T thread_aggregate; ConsumeFullTile<true>(smem_storage, d_in, block_offset, scan_op, thread_aggregate); // Dynamically consume other tiles SizeT even_share_base = gridDim.x * TILE_ITEMS; if (even_share_base < num_items) { // There are tiles left to consume while (true) { // Dequeue up to TILE_ITEMS if (threadIdx.x == 0) { smem_storage.block_offset = queue.Drain(TILE_ITEMS) + even_share_base; } __syncthreads(); block_offset = smem_storage.block_offset; if (block_offset + TILE_ITEMS > num_items) { if (block_offset < num_items) { // We have less than a full tile to consume ConsumePartialTile<false>(smem_storage, d_in, block_offset, num_items, scan_op, thread_aggregate); } // No more work to do break; } // We have a full tile to consume (which performs a barrier to protect smem_storage.block_offset WARs) ConsumeFullTile<false>(smem_storage, d_in, block_offset, scan_op, thread_aggregate); } } // Compute the block-wide reduction (every thread has a valid input) return BlockReduceT::Reduce(smem_storage.reduce, thread_aggregate, scan_op); } else { // We have less than a full tile to consume T thread_aggregate; SizeT block_items = num_items - block_offset; ConsumePartialTile<true>(smem_storage, d_in, block_offset, num_items, scan_op, thread_aggregate); // Compute the block-wide reduction (up to block_items threads have valid inputs) return BlockReduceT::Reduce(smem_storage.reduce, thread_aggregate, scan_op, block_items); } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
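/*
 * Hedged sketch (not part of CUB): BlockPrefixOp above publishes each block's
 * aggregate under a PARTIAL flag, then the first warp polls the signals of the
 * preceding blocks and reduces them with SignalReduceOp, which discards
 * everything to the left of a complete PREFIX and skips OOB padding while
 * accumulating PARTIALs. The host code below replays that combine rule
 * sequentially for scan_op = +, purely to show the flag semantics; the real
 * reduction runs as a warp-wide tree, and the names here are made up.
 */
#include <cstddef>
#include <vector>

enum HostFlag { HOST_OOB, HOST_INVALID, HOST_PARTIAL, HOST_PREFIX };
struct HostSignal { HostFlag flag; int value; };

static HostSignal reduce_signals(const std::vector<HostSignal>& window) {
  if (window.empty()) return HostSignal{HOST_OOB, 0};
  HostSignal acc = window.front();
  for (std::size_t i = 1; i < window.size(); ++i) {
    const HostSignal& s = window[i];
    if (acc.flag == HOST_OOB || s.flag == HOST_PREFIX) {
      acc = s;              // restart from a known inclusive prefix / drop OOB padding
    } else {
      acc.value += s.value; // accumulate partial aggregates; the flag is kept from acc
    }
  }
  return acc;
}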
#pragma once // Julien Demouth's implementation of the Volkov strategy showed it is // better to fully unroll by hand and have as much stuff as possible // compile to immediate instructions. // 2008. Volkov and Kazian, Fitting FFT onto the G80 Architecture // // Write our own using a mix of Volkov for vertical FFTs and // [1412.7580] Fast Convolutional Nets With fbfft // for horizontal FFTs. // This trades off shared memory usage for shuffle instructions in the // horizontal step. ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// #include "cuda/Complex.cuh" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/fbfft/FBFFTCommon.cuh" #define ENABLE_CUDA_DEBUG #include "cuda/CudaDebugUtils.cuh" #include <cuda_runtime.h> #include <cassert> namespace facebook { namespace cuda { namespace fbfft { namespace detail { template< int > __device__ inline int bitReversal(int); template<> __device__ inline int bitReversal<4>(int i) { switch (i) { case 0: return 0; case 1: return 2; case 2: return 1; case 3: return 3; default: return 0; } } template<> __device__ inline int bitReversal<8>(int i) { switch (i) { case 0: return 0; case 1: return 4; case 2: return 2; case 3: return 6; case 4: return 1; case 5: return 5; case 6: return 3; case 7: return 7; default: return 0; } } template<> __device__ inline int bitReversal<16>(int i) { switch (i) { case 0: return 0; case 1: return 8; case 2: return 4; case 3: return 12; case 4: return 2; case 5: return 10; case 6: return 6; case 7: return 14; case 8: return 1; case 9: return 9; case 10: return 5; case 11: return 13; case 12: return 3; case 13: return 11; case 14: return 7; case 15: return 15; default: return 0; } } template<> __device__ inline int bitReversal<32>(int i) { switch (i) { case 0: return 0; case 1: return 16; case 2: return 8; case 3: return 24; case 4: return 4; case 5: return 20; case 6: return 12; case 7: return 28; case 8: return 2; case 9: return 18; case 10: return 10; case 11: return 26; case 12: return 6; case 13: return 22; case 14: return 14; case 15: return 30; case 16: return 1; case 17: return 17; case 18: return 9; case 19: return 25; case 20: return 5; case 21: return 21; case 22: return 13; case 23: return 29; case 24: return 3; case 25: return 19; case 26: return 11; case 27: return 27; case 28: return 7; case 29: return 23; case 30: return 15; case 31: return 31; default: return 0; } } template<typename T> __device__ inline void swap(T& a, T& b) { T t = a; a = b; b = t; } __device__ inline void FFT2(Complex& a, Complex& b) { float t; t = a.re(); a.re() += b.re(); b.re() = t - b.re(); t = a.im(); a.im() += b.im(); b.im() = t - b.im(); } template<int FFTSize> __device__ __forceinline__ void swapHorizontal(Complex& a) { int LogFFTSize = cuda::getMSB(FFTSize); a = shfl(a, reverse(threadIdx.x, LogFFTSize), FFTSize); } template<bool forward> __device__ inline void FFT4( Complex& a0, Complex& a1, Complex& a2, Complex& a3) { FFT2(a0, a2); FFT2(a1, a3); a3 = a3 * ((forward) ? cexp<4>(2).conjugate() : cexp<4>(2)); FFT2(a0, a1); FFT2(a2, a3); } template<bool forward> __device__ inline void FFT8(Complex* a) { #pragma unroll for (int i = 0; i < 4; ++i) { FFT2(a[i], a[4 + i]); } a[5] *= ((forward) ? cexp<8>(2).conjugate() : cexp<8>(2)); a[6] *= ((forward) ? cexp<8>(4).conjugate() : cexp<8>(4)); a[7] *= ((forward) ? 
cexp<8>(6).conjugate() : cexp<8>(6)); FFT4<forward>(a[ 0], a[ 1], a[ 2], a[ 3]); FFT4<forward>(a[ 4], a[ 5], a[ 6], a[ 7]); } template<bool forward> __device__ inline void FFT16(Complex* a) { #pragma unroll for (int i = 0; i < 8; ++i) { FFT2(a[i], a[8 + i]); } a[9] *= ((forward) ? cexp<16>(2).conjugate() : cexp<16>(2)); a[10] *= ((forward) ? cexp<16>(4).conjugate() : cexp<16>(4)); a[11] *= ((forward) ? cexp<16>(6).conjugate() : cexp<16>(6)); a[12] *= ((forward) ? cexp<16>(8).conjugate() : cexp<16>(8)); a[13] *= ((forward) ? cexp<16>(10).conjugate() : cexp<16>(10)); a[14] *= ((forward) ? cexp<16>(12).conjugate() : cexp<16>(12)); a[15] *= ((forward) ? cexp<16>(14).conjugate() : cexp<16>(14)); FFT8<forward>(a); FFT8<forward>(a + 8); } template<bool forward> __device__ inline void FFT32(Complex* a) { #pragma unroll for (int i = 0; i < 16; ++i) { FFT2(a[i], a[16 + i]); } a[17] *= ((forward) ? cexp<32>(2).conjugate() : cexp<32>(2)); a[18] *= ((forward) ? cexp<32>(4).conjugate() : cexp<32>(4)); a[19] *= ((forward) ? cexp<32>(6).conjugate() : cexp<32>(6)); a[20] *= ((forward) ? cexp<32>(8).conjugate() : cexp<32>(8)); a[21] *= ((forward) ? cexp<32>(10).conjugate() : cexp<32>(10)); a[22] *= ((forward) ? cexp<32>(12).conjugate() : cexp<32>(12)); a[23] *= ((forward) ? cexp<32>(14).conjugate() : cexp<32>(14)); a[24] *= ((forward) ? cexp<32>(16).conjugate() : cexp<32>(16)); a[25] *= ((forward) ? cexp<32>(18).conjugate() : cexp<32>(18)); a[26] *= ((forward) ? cexp<32>(20).conjugate() : cexp<32>(20)); a[27] *= ((forward) ? cexp<32>(22).conjugate() : cexp<32>(22)); a[28] *= ((forward) ? cexp<32>(24).conjugate() : cexp<32>(24)); a[29] *= ((forward) ? cexp<32>(26).conjugate() : cexp<32>(26)); a[30] *= ((forward) ? cexp<32>(28).conjugate() : cexp<32>(28)); a[31] *= ((forward) ? 
cexp<32>(30).conjugate() : cexp<32>(30)); FFT16<forward>(a); FFT16<forward>(a + 16); } template<int N, bool forward = true> __device__ inline void fft2dVertical(Complex *a); template<> void fft2dVertical<4, true>(Complex* a) { FFT4<true>(a[0], a[1], a[2], a[3]); } template<> void fft2dVertical<4, false>(Complex* a) { FFT4<false>(a[0], a[1], a[2], a[3]); } template<> void fft2dVertical<8, true>(Complex* a) { FFT8<true>(a); } template<> void fft2dVertical<8, false>(Complex* a) { FFT8<false>(a); } template<> void fft2dVertical<16, true>(Complex* a) { FFT16<true>(a); } template<> void fft2dVertical<16, false>(Complex* a) { FFT16<false>(a); } template<> void fft2dVertical<32, true>(Complex* a) { FFT32<true>(a); } template<> void fft2dVertical<32, false>(Complex* a) { FFT32<false>(a); } //////////////////////////// FBFFT Generic //////////////////////////////// template <int FFTSize> __device__ inline void fbfft2DVerticalCoreForward(Complex* a) { constexpr int HalfFFTSize = FFTSize / 2; // Vertical FFT: real FFTs as complex // Vertical FFT: bit reversal // Let the compiler unroll and optimize fft2dVertical<HalfFFTSize, true>(a); #pragma unroll for (int i = 0; i < HalfFFTSize; ++i) { if (i < detail::bitReversal<HalfFFTSize>(i)) { // Avoid double swap swap(a[i], a[detail::bitReversal<HalfFFTSize>(i)]); } } a[0] = a[0].conjugate() + a[0].transpose(); #pragma unroll for (int i = 1; i < HalfFFTSize / 2; ++i) { Complex Si = Complex(0.5f) * (a[i] + a[HalfFFTSize - i].conjugate()); Complex Di = Complex(0.5f) * (a[i] - a[HalfFFTSize - i].conjugate()); a[i] = Si - Di * cexp<HalfFFTSize>(i).transpose(); a[HalfFFTSize - i] = Si.conjugate() - (-Di.conjugate()) * cexp<HalfFFTSize>(HalfFFTSize - i).transpose(); } a[HalfFFTSize / 2] = a[HalfFFTSize / 2].conjugate(); FFT1DRoots<FFTSize> roots; roots.template twiddles<true>(); // Horizontal FFT: complex FFTs as complex #pragma unroll for (int i = 0; i < HalfFFTSize; ++i) { decimateInFrequency1DWarp<FFTSize>(a[i], roots[0]); } // With a proper DIT / DIF this could disappear and save us some shuffles // Horizontal FFT: bit reversal across threads #pragma unroll for (int i = 0; i < HalfFFTSize; ++i) { swapHorizontal<FFTSize>(a[i]); } } // Only unpack IFFT supported atm template <int FFTSize, bool PackedInput = false> __device__ inline void fbfft2DVerticalCoreInverse(Complex* a) { // Prepare horizontal FFT // Twiddles is the same as for 1D but fully data parallel across threadIdx.y FFT1DRegisterTwiddles<FFTSize> roots(false); // If the input does not come in packed format, invert the unpacking done by // the forward pass. 
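// Added note (editorial, not in the original source): the forward kernel's Unpack branch
// splits the packed horizontal FFT value a[0] into output rows 0 and FFTSize / 2. The branch
// just below is the inverse of that split: for lanes > 0 it folds row FFTSize / 2 back into
// a[0], and on lane 0 it recombines the two purely real DC terms into the re / im parts of a
// single packed complex value, which is what the packed vertical IFFT below expects.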
if (!PackedInput) { if (threadIdx.x > 0) { a[0] = a[0] - a[FFTSize / 2].transpose().conjugate(); } else { a[0] = Complex(a[0].re(), a[FFTSize / 2].re()); } } constexpr int HalfFFTSize = FFTSize / 2; // Horizontal FFT: complex FFTs as complex #pragma unroll for (int i = 0; i < HalfFFTSize; ++i) { decimateInFrequency1DWarp<FFTSize>(a[i], roots); } // Horizontal FFT: bit reversal across threads #pragma unroll for (int i = 0; i < HalfFFTSize; ++i) { swapHorizontal<FFTSize>(a[i]); } a[0] = a[0].conjugate() + a[0].transpose(); #pragma unroll for (int i = 1; i < HalfFFTSize / 2; ++i) { Complex Si = a[i] + a[HalfFFTSize - i].conjugate(); Complex Di = a[i] - a[HalfFFTSize - i].conjugate(); a[i] = Si - Di * cexp<HalfFFTSize>(i).transpose().conjugate(); a[HalfFFTSize - i] = Si.conjugate() - (-Di.conjugate() * cexp<HalfFFTSize>(HalfFFTSize - i).transpose().conjugate()); } Complex Si = a[HalfFFTSize / 2] + a[HalfFFTSize - HalfFFTSize / 2].conjugate(); Complex Di = a[HalfFFTSize / 2] - a[HalfFFTSize - HalfFFTSize / 2].conjugate(); a[HalfFFTSize / 2] = Si - (-Di.conjugate() * cexp<HalfFFTSize>(HalfFFTSize / 2).transpose().conjugate()); // Vertical FFT: real FFTs as complex fft2dVertical<FFTSize / 2, false>(a); // Vertical FFT: bit reversal // Let the compiler unroll and optimize #pragma unroll for (int i = 0; i < FFTSize / 2; ++i) { if (i < detail::bitReversal<FFTSize / 2>(i)) { // Avoid double swap swap(a[i], a[detail::bitReversal<FFTSize / 2>(i)]); } } } template <int BatchDims, int FFTSize, int BatchesPerBlock, bool Unpack = true> __device__ __forceinline__ void fbfft2DVertical( DeviceTensor<float, BatchDims + 2> real, DeviceTensor<float, BatchDims + 3> complexAsFloat, const int padL, const int padU) { static_assert(FFTSize == 8 || FFTSize == 16 || FFTSize == 32, "FBFFT supported only for sizes 8, 16, 32 atm"); static_assert(BatchesPerBlock >= 1, "BatchesPerBlock should be >= 1"); assert(gridDim.z == 1); assert(blockDim.z == 1); const int batch = BatchesPerBlock * (blockIdx.x * gridDim.y + blockIdx.y ) + threadIdx.y; // Early exit if we would overflow if (batch >= real.getSize(0)) { return; } // Hermitian packed symmetry: Perform 2 FFTs in place Complex a[FFTSize / 2]; // A. read data in // TODO: read as float2 // TODO: f16 implementation #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { float f1 = inBounds(i, threadIdx.x, padU, padL, real) ? real[batch][i - padU][threadIdx.x - padL].ldg() : 0.0f; float f2 = inBounds(i + 1, threadIdx.x, padU, padL, real) ? real[batch][i + 1 - padU][threadIdx.x - padL].ldg() : 0.0f; a[i / 2] = Complex(f1, f2); } // Vertical FFT first within a thread then horizontal across threads // Hermitian packed symmetry is used to first compute FFTSize real as // FTSize / 2 complex // Hermitian packed symmetry is further used to pack 2 real FFTs // (a[0] and a[FFTSize / 2 - 1]) into a single complex FFT. fbfft2DVerticalCoreForward<FFTSize>(a); // This latter symmetry needs unpacking to use with gemm routines. 
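// Added note (editorial): the unpacking below uses the standard two-for-one real FFT identity.
// If z[n] = x1[n] + i*x2[n] with x1, x2 real and Z = FFT(z), then
//   X1[k] = (Z[k] + conj(Z[N - k])) / 2   and   X2[k] = (Z[k] - conj(Z[N - k])) / (2i).
// Here Z is the length-FFTSize horizontal FFT held across lanes (k = threadIdx.x), X1 becomes
// output row 0 and X2 becomes output row FFTSize / 2; lane 0 handles k = 0, where both spectra
// are purely real.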
if (Unpack) { #pragma unroll for (int i = 1 ; i < FFTSize / 2; ++i) { complexAsFloat[batch][i][threadIdx.x].template as<Complex>() = a[i]; } if (threadIdx.x > 0) { Complex other = shfl(a[0], FFTSize - threadIdx.x, FFTSize); complexAsFloat[batch][0][threadIdx.x].template as<Complex>() = Complex(0.5f) * (other.conjugate() + a[0]); complexAsFloat[batch][FFTSize / 2][threadIdx.x].template as<Complex>() = Complex(0.5f) * (other.conjugate() - a[0]).conjugate().transpose(); } else { complexAsFloat[batch][0][threadIdx.x].template as<Complex>() = Complex(a[0].re()); complexAsFloat[batch][FFTSize / 2][threadIdx.x].template as<Complex>() = Complex(a[0].im()); } } else { // If a specialized gemm kernel is available, no need to unpack #pragma unroll for (int i = 0 ; i < FFTSize / 2; ++i) { complexAsFloat[batch][i][threadIdx.x].template as<Complex>() = a[i]; } } } template <int BatchDims, int FFTSize, int BatchesPerBlock, bool PackedInput = false> __device__ __forceinline__ void fbifft2DVertical( DeviceTensor<Complex, BatchDims + 2> complexSrc, DeviceTensor<float, BatchDims + 2> realDst, const int padL, const int padU) { const int batch = BatchesPerBlock * (blockIdx.x * gridDim.y + blockIdx.y ) + threadIdx.y; // Early exit if we would overflow if (batch >= realDst.getSize(0)) { return; } // If input is not packed, read FFTSize / 2 + 1 entries and repack them later. constexpr int UB = (PackedInput) ? FFTSize / 2 : FFTSize / 2 + 1; // Perform 2 FFTs in place Complex a[UB]; // Read data in // TODO: f16 implementation #pragma unroll for (int i = 0 ; i < UB; ++i) { a[i] = ldg(complexSrc[batch][i][threadIdx.x].data()); } // Work it fbfft2DVerticalCoreInverse<FFTSize, PackedInput>(a); // Write the results back to memory. // No need for conjugation as we know we have real results. 
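// Added note (editorial): mirroring the forward read loop's Complex(f1, f2) packing, each
// thread's a[i / 2] carries two consecutive output rows: its real part is written to row i and
// its imaginary part to row i + 1.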
for (int i = 0 ; i < FFTSize; i += 2) { if (inBounds(i, threadIdx.x, padU, padL, realDst)) { realDst[batch][i - padU][threadIdx.x - padL] = a[i / 2].re(); } if (inBounds(i + 1, threadIdx.x, padU, padL, realDst)) { realDst[batch][i + 1 - padU][threadIdx.x - padL] = a[i / 2].im(); } } } template <int BatchDims, int BatchesPerBlock> __global__ void fbfft2DVertical_8( DeviceTensor<float, BatchDims + 2> real, DeviceTensor<float, BatchDims + 3> complexAsFloat, const int padL, const int padU) { fbfft2DVertical<BatchDims, 8, BatchesPerBlock>( real, complexAsFloat, padL, padU); } template <int BatchDims, int BatchesPerBlock> __global__ void fbfft2DVertical_16( DeviceTensor<float, BatchDims + 2> real, DeviceTensor<float, BatchDims + 3> complexAsFloat, const int padL, const int padU) { fbfft2DVertical<BatchDims, 16, BatchesPerBlock>( real, complexAsFloat, padL, padU); } template <int BatchDims, int BatchesPerBlock> __launch_bounds__(1024, 1) // 64 registers is best __global__ void fbfft2DVertical_32( DeviceTensor<float, BatchDims + 2> real, DeviceTensor<float, BatchDims + 3> complexAsFloat, const int padL, const int padU) { fbfft2DVertical<BatchDims, 32, BatchesPerBlock>( real, complexAsFloat, padL, padU); } template <int BatchDims, int BatchesPerBlock> __global__ void fbifft2DVertical_8( DeviceTensor<Complex, BatchDims + 2> complexSrc, DeviceTensor<float, BatchDims + 2> realDst, const int padL, const int padU) { fbifft2DVertical<BatchDims, 8, BatchesPerBlock>( complexSrc, realDst, padL, padU); } template <int BatchDims, int BatchesPerBlock> __global__ void fbifft2DVertical_16( DeviceTensor<Complex, BatchDims + 2> complexSrc, DeviceTensor<float, BatchDims + 2> realDst, const int padL, const int padU) { fbifft2DVertical<BatchDims, 16, BatchesPerBlock>( complexSrc, realDst, padL, padU); } template <int BatchDims, int BatchesPerBlock> __launch_bounds__(1024, 1) // 64 registers is best __global__ void fbifft2DVertical_32( DeviceTensor<Complex, BatchDims + 2> complexSrc, DeviceTensor<float, BatchDims + 2> realDst, const int padL, const int padU) { fbifft2DVertical<BatchDims, 32, BatchesPerBlock>( complexSrc, realDst, padL, padU); } }}}}
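// ---------------------------------------------------------------------------
// Added illustration (editorial, not part of fbfft): a minimal, self-contained
// host-side sketch of the "two real FFTs for the price of one complex FFT"
// identity that the forward/inverse kernels above rely on. The naive O(N^2)
// DFT helper and every name below are hypothetical and exist only for this
// example; build it as a standalone .cpp to check the unpack formulas.
// ---------------------------------------------------------------------------
#include <cmath>
#include <complex>
#include <cstdio>
#include <vector>

static std::vector<std::complex<double>> naiveDFT(
    const std::vector<std::complex<double>>& z) {
  const int N = static_cast<int>(z.size());
  const double PI = std::acos(-1.0);
  std::vector<std::complex<double>> Z(N);
  for (int k = 0; k < N; ++k) {
    for (int n = 0; n < N; ++n) {
      const double ang = -2.0 * PI * k * n / N;
      Z[k] += z[n] * std::complex<double>(std::cos(ang), std::sin(ang));
    }
  }
  return Z;
}

int main() {
  const int N = 8;
  std::vector<double> x1(N), x2(N);
  for (int n = 0; n < N; ++n) {
    x1[n] = 0.5 * n;           // first real signal
    x2[n] = std::cos(1.0 * n); // second real signal
  }
  // Pack both real signals into a single complex signal z = x1 + i * x2,
  // exactly like the kernels pack two real rows into one Complex register.
  std::vector<std::complex<double>> z(N);
  for (int n = 0; n < N; ++n) {
    z[n] = std::complex<double>(x1[n], x2[n]);
  }
  const auto Z = naiveDFT(z);
  // Unpack: X1[k] = (Z[k] + conj(Z[N-k])) / 2, X2[k] = (Z[k] - conj(Z[N-k])) / (2i).
  for (int k = 0; k < N; ++k) {
    const auto Zm = std::conj(Z[(N - k) % N]);
    const auto X1 = 0.5 * (Z[k] + Zm);
    const auto X2 = std::complex<double>(0.0, -0.5) * (Z[k] - Zm);
    std::printf("k=%d  X1=(% .4f, % .4f)  X2=(% .4f, % .4f)\n",
                k, X1.real(), X1.imag(), X2.real(), X2.imag());
  }
  return 0;
}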
the_stack
#ifndef CUDA_KAT_ON_DEVICE_BUILTINS_DETAIL_CUH_ #define CUDA_KAT_ON_DEVICE_BUILTINS_DETAIL_CUH_ #include <kat/on_device/ptx.cuh> #include <kat/common.hpp> #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 300) #error "This code can only target devices of compute capability 3.0 or higher." #endif ///@cond #include <kat/detail/execution_space_specifiers.hpp> ///@endcond namespace kat { // TODO: Consider using "sized types" (from cstdint) more. It makes more sense when hardware is involved, // although - CUDA-provided functions don't really do that and just make implicit assumptions about // the sizes of int, unsigned, long etc. namespace builtins { template <> KAT_FD int multiplication_high_bits<int >( int x, int y) { return __mulhi(x, y); } template <> KAT_FD unsigned multiplication_high_bits<unsigned >( unsigned x, unsigned y) { return __umulhi(x, y); } template <> KAT_FD long long multiplication_high_bits<long long >( long long x, long long y) { return __mul64hi(x, y); } template <> KAT_FD unsigned long long multiplication_high_bits<unsigned long long>( unsigned long long x, unsigned long long y) { return __umul64hi(x, y); } template <> KAT_FD float divide<float >(float dividend, float divisor) { return fdividef(dividend, divisor); } template <> KAT_FD double divide<double>(double dividend, double divisor) { return fdividef(dividend, divisor); } // TODO: Does this really work only for single-precision floats? template <> KAT_FD float clamp_to_unit_segment<float>(float x) { return __saturatef(x); } // TODO: Does this really translate into a single instruction? I'm worried the casting might incur more than a single one for types of smaller sizes. template <typename I> KAT_FD int population_count(I x) { static_assert(std::is_integral<I>::value, "Only integral types are supported"); static_assert(sizeof(I) <= sizeof(unsigned long long), "Unexpectedly large type"); using native_popc_type = typename std::conditional< sizeof(I) <= sizeof(unsigned), unsigned, unsigned long long >::type; return population_count<native_popc_type>(static_cast<native_popc_type>(x)); } //template <> KAT_FD int population_count<unsigned char>(unsigned char x) { return __popc(x); } //template <> KAT_FD int population_count<unsigned short>(unsigned short x) { return __popc(x); } template <> KAT_FD int population_count<unsigned>(unsigned x) { return __popc(x); } template <> KAT_FD int population_count<unsigned long long>(unsigned long long x) { return __popcll(x); } template <typename I> KAT_FD typename std::make_unsigned<I>::type sum_with_absolute_difference(I x, I y, typename std::make_unsigned<I>::type addend) { static_assert( std::is_same<I, uint16_t>::value or std::is_same<I, int16_t>::value or std::is_same<I, uint32_t>::value or std::is_same<I, int32_t>::value or std::is_same<I, uint64_t>::value or std::is_same<I, int64_t>::value , "No sad instruction for requested parameter type"); return ptx::sad(x, y, addend); // Note: We're not using __sad() nor __usad(), since those (should be) equivalent to our own wrappers // for the cases of I = int32_t and I = uint32_t, and it's simple and clearer to maintain uniformity } template <> KAT_FD int absolute_value<int >(int x) { return abs(x); } template <> KAT_FD long absolute_value<long >(long x) { return labs(x); } template <> KAT_FD long long absolute_value<long long >(long long x) { return llabs(x); } template <> KAT_FD float absolute_value<float >(float x) { return fabsf(x); } template <> KAT_FD double absolute_value<double >(double x) { return fabs(x); } template <> KAT_FD unsigned bit_reverse<unsigned >(unsigned x) { return __brev(x); } template <> KAT_FD unsigned long long
bit_reverse<unsigned long long>(unsigned long long x) { return __brevll(x); } template <> KAT_FD unsigned long bit_reverse<unsigned long >(unsigned long x) { return (sizeof(unsigned long) == sizeof(int)) ? bit_reverse<unsigned>(x) : bit_reverse<unsigned long long>(x); } namespace special_registers { // TODO: Should we really specify the types here, or just DRY and forward the registers' types using auto? KAT_FD unsigned lane_index() { return ptx::special_registers::laneid(); } KAT_FD unsigned symmetric_multiprocessor_index() { return ptx::special_registers::smid(); } KAT_FD unsigned long long grid_index() { return ptx::special_registers::gridid(); } KAT_FD unsigned int dynamic_shared_memory_size() { return ptx::special_registers::dynamic_smem_size(); } KAT_FD unsigned int total_shared_memory_size() { return ptx::special_registers::total_smem_size(); } } // namespace special_registers namespace bit_field { template <> KAT_FD uint32_t extract_bits<uint32_t>(uint32_t bit_field, unsigned start_pos, unsigned num_bits) { return ptx::bfe(bit_field, start_pos, num_bits); } template <> KAT_FD uint64_t extract_bits<uint64_t>(uint64_t bit_field, unsigned start_pos, unsigned num_bits) { return ptx::bfe(bit_field, start_pos, num_bits); } template <> KAT_FD int32_t extract_bits<int32_t>(int32_t bit_field, unsigned start_pos, unsigned num_bits) { return ptx::bfe(bit_field, start_pos, num_bits); } template <> KAT_FD int64_t extract_bits<int64_t>(int64_t bit_field, unsigned start_pos, unsigned num_bits) { return ptx::bfe(bit_field, start_pos, num_bits); } template <> KAT_FD uint32_t replace_bits(uint32_t bits_to_insert, uint32_t existing_bit_field, uint32_t start_pos, uint32_t num_bits) { return ptx::bfi(bits_to_insert, existing_bit_field, start_pos, num_bits); } template <> KAT_FD uint64_t replace_bits(uint64_t bits_to_insert, uint64_t existing_bit_field, uint32_t start_pos, uint32_t num_bits) { return ptx::bfi(bits_to_insert, existing_bit_field, start_pos, num_bits); } } // namespace bit_field KAT_FD unsigned permute_bytes(unsigned first, unsigned second, unsigned byte_selectors) { return ptx::prmt(first, second, byte_selectors); } #if ! defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 320 template < funnel_shift_amount_resolution_mode_t AmountResolutionMode > KAT_FD uint32_t funnel_shift_right( uint32_t low_word, uint32_t high_word, uint32_t shift_amount) { return (AmountResolutionMode == funnel_shift_amount_resolution_mode_t::take_lower_bits_of_amount) ? __funnelshift_r(low_word, high_word, shift_amount) : __funnelshift_rc(low_word, high_word, shift_amount); } template < funnel_shift_amount_resolution_mode_t AmountResolutionMode > KAT_FD uint32_t funnel_shift_left( uint32_t low_word, uint32_t high_word, uint32_t shift_amount) { return (AmountResolutionMode == funnel_shift_amount_resolution_mode_t::take_lower_bits_of_amount) ? 
__funnelshift_l(low_word, high_word, shift_amount) : __funnelshift_lc(low_word, high_word, shift_amount); } #endif template<> KAT_FD int average<int >(int x, int y) { return __hadd(x,y); } template<> KAT_FD unsigned average<unsigned>(unsigned x, unsigned y) { return __uhadd(x,y); } template<> KAT_FD int average_rounded_up<int >(int x, int y) { return __rhadd(x,y); } template<> KAT_FD unsigned average_rounded_up<unsigned>(unsigned x, unsigned y) { return __urhadd(x,y); } namespace warp { #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD lane_mask_t ballot (int condition) { return __ballot(condition); } KAT_FD int all_lanes_satisfy(int condition) { return __all(condition); } KAT_FD int any_lanes_satisfy(int condition) { return __any(condition); } #else KAT_FD lane_mask_t ballot (int condition, lane_mask_t lane_mask) { return __ballot_sync(lane_mask, condition); } KAT_FD int all_lanes_satisfy(int condition, lane_mask_t lane_mask) { return __all_sync(lane_mask, condition); } KAT_FD int any_lanes_satisfy(int condition, lane_mask_t lane_mask) { return __any_sync(lane_mask, condition); } KAT_FD int all_lanes_agree (int condition, lane_mask_t lane_mask) { return __uni_sync(lane_mask, condition); } #endif #if (__CUDACC_VER_MAJOR__ >= 9) #if ! defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700 template <typename T> KAT_FD lane_mask_t propagate_mask_if_lanes_agree(T value, lane_mask_t lane_mask) { // __match_all_sync has a weirdly redundant signature and semantics! // Consult the CUDA Programming guide v9 or later for more details int dummy; return __match_all_sync(lane_mask, value, &dummy); } template <typename T> KAT_FD lane_mask_t propagate_mask_if_warp_agrees(T value) { return propagate_mask_if_lanes_agree<T>(value, full_warp_mask); } template <typename T> KAT_FD lane_mask_t get_matching_lanes(T value, lane_mask_t lanes) { return __match_any_sync(lanes, value); } #endif #endif namespace mask_of_lanes { // Note: These builtins are actually not fast to use. 
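// Added note (editorial): these wrappers expose the PTX lanemask special registers. Each
// returns a 32-bit mask with one bit per lane of the warp, set when that lane's index is
// respectively <, <=, ==, >= or > the calling lane's index; e.g. lane 3 sees 0b0111 from
// preceding() and 0b1000 from self().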
KAT_FD lane_mask_t preceding() { return ptx::special_registers::lanemask_lt(); } KAT_FD lane_mask_t preceding_and_self() { return ptx::special_registers::lanemask_le(); } KAT_FD lane_mask_t self() { return ptx::special_registers::lanemask_eq(); } KAT_FD lane_mask_t succeeding_and_self() { return ptx::special_registers::lanemask_ge(); } KAT_FD lane_mask_t succeeding() { return ptx::special_registers::lanemask_gt(); } } // namespace mask_of_lanes namespace shuffle { #if (__CUDACC_VER_MAJOR__ < 9) template <typename T> KAT_FD T arbitrary(T x, int source_lane, int width) { return __shfl(x, source_lane, width); } template <typename T> KAT_FD T down(T x, unsigned delta, int width) { return __shfl_down(x, delta, width); } template <typename T> KAT_FD T up(T x, unsigned delta, int width) { return __shfl_up(x, delta, width); } template <typename T> KAT_FD T xor_(T x, int xoring_mask_for_lane_id, int width) { return __shfl_xor(x, xoring_mask_for_lane_id, width); } // we have to use xor_ here since xor is a reserved word #else template <typename T> KAT_FD T arbitrary( T x, int source_lane, int width, lane_mask_t participating_lanes) { return __shfl_sync(participating_lanes, x, source_lane, width); } template <typename T> KAT_FD T down(T x, unsigned delta, int width, lane_mask_t participating_lanes) { return __shfl_down_sync(participating_lanes, x, delta, width); } template <typename T> KAT_FD T up(T x, unsigned delta, int width, lane_mask_t participating_lanes) { return __shfl_up_sync(participating_lanes, x, delta, width); } template <typename T> KAT_FD T xor_(T x, int lane_id_xoring_mask, int width, lane_mask_t participating_lanes) { return __shfl_xor_sync(participating_lanes, x, lane_id_xoring_mask, width); } #endif } // namespace shuffle } // namespace warp template <> KAT_FD uint32_t find_leading_non_sign_bit<int >(int x) { return ptx::bfind(x); } template <> KAT_FD uint32_t find_leading_non_sign_bit<unsigned >(unsigned x) { return ptx::bfind(x); } template <> KAT_FD uint32_t find_leading_non_sign_bit<long >(long x) { return ptx::bfind((int64_t) x); } template <> KAT_FD uint32_t find_leading_non_sign_bit<unsigned long >(unsigned long x) { return ptx::bfind((uint64_t) x); } template <> KAT_FD uint32_t find_leading_non_sign_bit<long long >(long long x) { return ptx::bfind((int64_t) x); } template <> KAT_FD uint32_t find_leading_non_sign_bit<unsigned long long>(unsigned long long x) { return ptx::bfind((uint64_t) x); } #if __CUDA_ARCH__ >= 320 /** * @brief Load data through the read-only data cache * * @note See the <a href="http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#ldg-function">relevant section</a> * of the CUDA C Programming guide for details on using this instruction for loading. * * @param ptr The global memory location from which to load * @return the value at location @p ptr, loaded through the read-only data cache rather than * through the usual (read-write) caches */ template <typename T> KAT_FD T load_global_with_non_coherent_cache(const T* ptr) { static_assert(is_any_of<T, long, unsigned char, signed short, int long, char2, char4, short2, short4, int2, int4, longlong2, float, double, float2, float4, double2>::value, "type not directly supported for non-coherent loading (ldg)"); return __ldg(ptr); } template <> KAT_FD int load_global_with_non_coherent_cache<int>(const int* ptr); #endif // Note: We can't generalize clz to an arbitrary type, without subtracting the size difference from the result of the builtin clz instruction.
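// Added sketch (editorial, hypothetical): a generic wrapper would have to compensate for the
// widening, e.g. for an unsigned I with sizeof(I) <= sizeof(unsigned):
//   template <typename I> KAT_FD int count_leading_zeros_generic(I x)
//   { return __clz(static_cast<unsigned>(x)) - (32 - 8 * static_cast<int>(sizeof(I))); }
// The explicit specializations below avoid that correction by matching operand widths.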
template <> KAT_FD int count_leading_zeros<int >(int x) { return __clz(x); } template <> KAT_FD int count_leading_zeros<unsigned >(unsigned x) { return __clz(x); } template <> KAT_FD int count_leading_zeros<long >(long x) { return __clzll(x); } template <> KAT_FD int count_leading_zeros<unsigned long >(unsigned long x) { return __clzll(x); } template <> KAT_FD int count_leading_zeros<long long >(long long x) { return __clzll(x); } template <> KAT_FD int count_leading_zeros<unsigned long long>(unsigned long long x) { return __clzll(x); } template <> KAT_FD int minimum<int >(int x, int y) { return min(x,y); } template <> KAT_FD unsigned int minimum<unsigned >(unsigned int x, unsigned int y) { return umin(x,y); } template <> KAT_FD long minimum<long >(long x, long y) { return llmin(x,y); } template <> KAT_FD unsigned long minimum<unsigned long >(unsigned long x, unsigned long y) { return ullmin(x,y); } template <> KAT_FD long long minimum<long long >(long long x, long long y) { return llmin(x,y); } template <> KAT_FD unsigned long long minimum<unsigned long long>(unsigned long long x, unsigned long long y) { return ullmin(x,y); } template <> KAT_FD float minimum<float >(float x, float y) { return fminf(x,y); } template <> KAT_FD double minimum<double >(double x, double y) { return fmin(x,y); } template <> KAT_FD int maximum<int >(int x, int y) { return max(x,y); } template <> KAT_FD unsigned int maximum<unsigned >(unsigned int x, unsigned int y) { return umax(x,y); } template <> KAT_FD long maximum<long >(long x, long y) { return llmax(x,y); } template <> KAT_FD unsigned long maximum<unsigned long >(unsigned long x, unsigned long y) { return ullmax(x,y); } template <> KAT_FD long long maximum<long long >(long long x, long long y) { return llmax(x,y); } template <> KAT_FD unsigned long long maximum<unsigned long long>(unsigned long long x, unsigned long long y) { return ullmax(x,y); } template <> KAT_FD float maximum<float >(float x, float y) { return fmaxf(x,y); } template <> KAT_FD double maximum<double >(double x, double y) { return fmax(x,y); } } // namespace builtins } // namespace kat #endif // CUDA_KAT_ON_DEVICE_BUILTINS_DETAIL_CUH_
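// ---------------------------------------------------------------------------
// Added usage sketch (editorial, not part of the library): a toy kernel that
// combines two of the wrappers defined above - population_count() and
// warp::shuffle::xor_() - to count set bits per thread and butterfly-reduce
// the counts within each warp. The kernel name, the 0xFFFFFFFF
// participating-lanes mask and the CUDA 9+ shuffle signature are assumptions
// made for this illustration only; it presumes the header above is in scope.
// ---------------------------------------------------------------------------
__global__ void warp_popcount_sum_sketch(const unsigned* in, int* out, int n)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Per-thread bit count; out-of-range threads contribute zero.
    int bits = (tid < n) ? kat::builtins::population_count(in[tid]) : 0;

    // Butterfly (xor) reduction across the 32 lanes of the warp.
    #pragma unroll
    for (int m = 16; m > 0; m >>= 1) {
        bits += kat::builtins::warp::shuffle::xor_(bits, m, 32, 0xFFFFFFFFu);
    }

    // Every lane now holds the warp-wide sum; lane 0 publishes one value per warp.
    if ((threadIdx.x % 32) == 0 && tid < n) {
        out[tid / 32] = bits;
    }
}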
the_stack